Dataset columns (name, type, value summary):
  code_text: string, lengths 604 to 999k
  repo_name: string, lengths 4 to 100
  file_path: string, lengths 4 to 873
  language: string, 23 distinct values
  license: string, 15 distinct values
  size: int32, range 1.02k to 999k
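The rows below follow this six-column schema: one source file per row, with its full text in code_text and repository metadata alongside. As a minimal sketch of how such a dump might be inspected, assuming the rows are exported to a Parquet file (the file name code_rows.parquet and the checks shown are illustrative only, not part of the dataset itself):

import pandas as pd

# Hypothetical path; point this at wherever the rows are actually stored.
df = pd.read_parquet("code_rows.parquet")

# Sanity-check the frame against the column summary above.
print(df.columns.tolist())        # code_text, repo_name, file_path, language, license, size
print(df["language"].nunique())   # expected: 23 distinct languages
print(df["license"].nunique())    # expected: 15 distinct licenses
print(df["size"].min(), df["size"].max())

# Example filter: GPL-2.0 PHP files, like several of the rows shown below.
php_gpl = df[(df["language"] == "PHP") & (df["license"] == "gpl-2.0")]
for _, row in php_gpl.head(3).iterrows():
    print(row["repo_name"], row["file_path"], row["size"])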
<?php /** * @file * Default theme implementation to display the basic html structure of a single * Drupal page. * * Variables: * - $css: An array of CSS files for the current page. * - $language: (object) The language the site is being displayed in. * $language->language contains its textual representation. * $language->dir contains the language direction. It will either be 'ltr' or 'rtl'. * - $rdf_namespaces: All the RDF namespace prefixes used in the HTML document. * - $grddl_profile: A GRDDL profile allowing agents to extract the RDF data. * - $head_title: A modified version of the page title, for use in the TITLE * tag. * - $head_title_array: (array) An associative array containing the string parts * that were used to generate the $head_title variable, already prepared to be * output as TITLE tag. The key/value pairs may contain one or more of the * following, depending on conditions: * - title: The title of the current page, if any. * - name: The name of the site. * - slogan: The slogan of the site, if any, and if there is no title. * - $head: Markup for the HEAD section (including meta tags, keyword tags, and * so on). * - $styles: Style tags necessary to import all CSS files for the page. * - $scripts: Script tags necessary to load the JavaScript files and settings * for the page. * - $page_top: Initial markup from any modules that have altered the * page. This variable should always be output first, before all other dynamic * content. * - $page: The rendered page content. * - $page_bottom: Final closing markup from any modules that have altered the * page. This variable should always be output last, after all other dynamic * content. * - $classes String of classes that can be used to style contextually through * CSS. * * @see template_preprocess() * @see template_preprocess_html() * @see template_process() * * @ingroup themeable */ ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="<?php print $language->language; ?>" version="XHTML+RDFa 1.0" dir="<?php print $language->dir; ?>"<?php print $rdf_namespaces; ?>> <head profile="<?php print $grddl_profile; ?>"> <?php print $head; ?> <title><?php print $head_title; ?></title> <?php print $styles; ?> <?php print $scripts; ?> </head> <body class="<?php print $classes; ?>" <?php print $attributes;?>> <div id="skip-link"> <a href="#main-content" class="element-invisible element-focusable"><?php print t('Skip to main content'); ?></a> </div> <?php print $page_top; ?> <?php print $page; ?> <?php print $page_bottom; ?> </body> </html>
repo_name: Vaibhav14890/dsource
file_path: modules/system/html.tpl.php
language: PHP
license: gpl-2.0
size: 2,733
/* * Locations of devices in the Calliope ASIC. * * Copyright (C) 2005-2009 Scientific-Atlanta, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Author: Ken Eppinett * David Schleef <ds@schleef.org> * * Description: Defines the platform resources for the SA settop. */ #include <linux/init.h> #include <asm/mach-powertv/asic.h> #define CALLIOPE_ADDR(x) (CALLIOPE_IO_BASE + (x)) const struct register_map calliope_register_map __initdata = { .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)}, .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)}, .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)}, .chipver3 = {.phys = CALLIOPE_ADDR(0xA00800)}, .chipver2 = {.phys = CALLIOPE_ADDR(0xA00804)}, .chipver1 = {.phys = CALLIOPE_ADDR(0xA00808)}, .chipver0 = {.phys = CALLIOPE_ADDR(0xA0080c)}, /* The registers of IRBlaster */ .uart1_intstat = {.phys = CALLIOPE_ADDR(0xA01800)}, .uart1_inten = {.phys = CALLIOPE_ADDR(0xA01804)}, .uart1_config1 = {.phys = CALLIOPE_ADDR(0xA01808)}, .uart1_config2 = {.phys = CALLIOPE_ADDR(0xA0180C)}, .uart1_divisorhi = {.phys = CALLIOPE_ADDR(0xA01810)}, .uart1_divisorlo = {.phys = CALLIOPE_ADDR(0xA01814)}, .uart1_data = {.phys = CALLIOPE_ADDR(0xA01818)}, .uart1_status = {.phys = CALLIOPE_ADDR(0xA0181C)}, .int_stat_3 = {.phys = CALLIOPE_ADDR(0xA02800)}, .int_stat_2 = {.phys = CALLIOPE_ADDR(0xA02804)}, .int_stat_1 = {.phys = CALLIOPE_ADDR(0xA02808)}, .int_stat_0 = {.phys = CALLIOPE_ADDR(0xA0280c)}, .int_config = {.phys = CALLIOPE_ADDR(0xA02810)}, .int_int_scan = {.phys = CALLIOPE_ADDR(0xA02818)}, .ien_int_3 = {.phys = CALLIOPE_ADDR(0xA02830)}, .ien_int_2 = {.phys = CALLIOPE_ADDR(0xA02834)}, .ien_int_1 = {.phys = CALLIOPE_ADDR(0xA02838)}, .ien_int_0 = {.phys = CALLIOPE_ADDR(0xA0283c)}, .int_level_3_3 = {.phys = CALLIOPE_ADDR(0xA02880)}, .int_level_3_2 = {.phys = CALLIOPE_ADDR(0xA02884)}, .int_level_3_1 = {.phys = CALLIOPE_ADDR(0xA02888)}, .int_level_3_0 = {.phys = CALLIOPE_ADDR(0xA0288c)}, .int_level_2_3 = {.phys = CALLIOPE_ADDR(0xA02890)}, .int_level_2_2 = {.phys = CALLIOPE_ADDR(0xA02894)}, .int_level_2_1 = {.phys = CALLIOPE_ADDR(0xA02898)}, .int_level_2_0 = {.phys = CALLIOPE_ADDR(0xA0289c)}, .int_level_1_3 = {.phys = CALLIOPE_ADDR(0xA028a0)}, .int_level_1_2 = {.phys = CALLIOPE_ADDR(0xA028a4)}, .int_level_1_1 = {.phys = CALLIOPE_ADDR(0xA028a8)}, .int_level_1_0 = {.phys = CALLIOPE_ADDR(0xA028ac)}, .int_level_0_3 = {.phys = CALLIOPE_ADDR(0xA028b0)}, .int_level_0_2 = {.phys = CALLIOPE_ADDR(0xA028b4)}, .int_level_0_1 = {.phys = CALLIOPE_ADDR(0xA028b8)}, .int_level_0_0 = {.phys = CALLIOPE_ADDR(0xA028bc)}, .int_docsis_en = {.phys = CALLIOPE_ADDR(0xA028F4)}, .mips_pll_setup = {.phys = CALLIOPE_ADDR(0x980000)}, .fs432x4b4_usb_ctl = {.phys = CALLIOPE_ADDR(0x980030)}, .test_bus = {.phys = CALLIOPE_ADDR(0x9800CC)}, .crt_spare = {.phys = CALLIOPE_ADDR(0x9800d4)}, .usb2_ohci_int_mask = {.phys = CALLIOPE_ADDR(0x9A000c)}, .usb2_strap = {.phys = 
CALLIOPE_ADDR(0x9A0014)}, .ehci_hcapbase = {.phys = CALLIOPE_ADDR(0x9BFE00)}, .ohci_hc_revision = {.phys = CALLIOPE_ADDR(0x9BFC00)}, .bcm1_bs_lmi_steer = {.phys = CALLIOPE_ADDR(0x9E0004)}, .usb2_control = {.phys = CALLIOPE_ADDR(0x9E0054)}, .usb2_stbus_obc = {.phys = CALLIOPE_ADDR(0x9BFF00)}, .usb2_stbus_mess_size = {.phys = CALLIOPE_ADDR(0x9BFF04)}, .usb2_stbus_chunk_size = {.phys = CALLIOPE_ADDR(0x9BFF08)}, .pcie_regs = {.phys = 0x000000}, /* -doesn't exist- */ .tim_ch = {.phys = CALLIOPE_ADDR(0xA02C10)}, .tim_cl = {.phys = CALLIOPE_ADDR(0xA02C14)}, .gpio_dout = {.phys = CALLIOPE_ADDR(0xA02c20)}, .gpio_din = {.phys = CALLIOPE_ADDR(0xA02c24)}, .gpio_dir = {.phys = CALLIOPE_ADDR(0xA02c2C)}, .watchdog = {.phys = CALLIOPE_ADDR(0xA02c30)}, .front_panel = {.phys = 0x000000}, /* -not used- */ };
repo_name: phalf/android_kernel_samsung_mint-vlx-all
file_path: arch/mips/powertv/asic/asic-calliope.c
language: C
license: gpl-2.0
size: 4,482
/* * Backlight Driver for HP Jornada 680 * * Copyright (c) 2005 Andriy Skulysh * * Based on Sharp's Corgi Backlight Driver * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/fb.h> #include <linux/backlight.h> #include <cpu/dac.h> #include <mach/hp6xx.h> #include <asm/hd64461.h> #define HP680_MAX_INTENSITY 255 #define HP680_DEFAULT_INTENSITY 10 static int hp680bl_suspended; static int current_intensity = 0; static DEFINE_SPINLOCK(bl_lock); static void hp680bl_send_intensity(struct backlight_device *bd) { unsigned long flags; u16 v; int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (hp680bl_suspended) intensity = 0; spin_lock_irqsave(&bl_lock, flags); if (intensity && current_intensity == 0) { sh_dac_enable(DAC_LCD_BRIGHTNESS); v = inw(HD64461_GPBDR); v &= ~HD64461_GPBDR_LCDOFF; outw(v, HD64461_GPBDR); sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS); } else if (intensity == 0 && current_intensity != 0) { sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS); sh_dac_disable(DAC_LCD_BRIGHTNESS); v = inw(HD64461_GPBDR); v |= HD64461_GPBDR_LCDOFF; outw(v, HD64461_GPBDR); } else if (intensity) { sh_dac_output(255-(u8)intensity, DAC_LCD_BRIGHTNESS); } spin_unlock_irqrestore(&bl_lock, flags); current_intensity = intensity; } #ifdef CONFIG_PM static int hp680bl_suspend(struct platform_device *pdev, pm_message_t state) { struct backlight_device *bd = platform_get_drvdata(pdev); hp680bl_suspended = 1; hp680bl_send_intensity(bd); return 0; } static int hp680bl_resume(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); hp680bl_suspended = 0; hp680bl_send_intensity(bd); return 0; } #else #define hp680bl_suspend NULL #define hp680bl_resume NULL #endif static int hp680bl_set_intensity(struct backlight_device *bd) { hp680bl_send_intensity(bd); return 0; } static int hp680bl_get_intensity(struct backlight_device *bd) { return current_intensity; } static const struct backlight_ops hp680bl_ops = { .get_brightness = hp680bl_get_intensity, .update_status = hp680bl_set_intensity, }; static int __devinit hp680bl_probe(struct platform_device *pdev) { struct backlight_properties props; struct backlight_device *bd; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = HP680_MAX_INTENSITY; bd = backlight_device_register("hp680-bl", &pdev->dev, NULL, &hp680bl_ops, &props); if (IS_ERR(bd)) return PTR_ERR(bd); platform_set_drvdata(pdev, bd); bd->props.brightness = HP680_DEFAULT_INTENSITY; hp680bl_send_intensity(bd); return 0; } static int hp680bl_remove(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); bd->props.brightness = 0; bd->props.power = 0; hp680bl_send_intensity(bd); backlight_device_unregister(bd); return 0; } static struct platform_driver hp680bl_driver = { .probe = hp680bl_probe, .remove = hp680bl_remove, .suspend = hp680bl_suspend, .resume = hp680bl_resume, .driver = { .name = "hp680-bl", }, }; static struct platform_device *hp680bl_device; static int __init hp680bl_init(void) { int ret; ret = platform_driver_register(&hp680bl_driver); if (ret) return ret; hp680bl_device = 
platform_device_register_simple("hp680-bl", -1, NULL, 0); if (IS_ERR(hp680bl_device)) { platform_driver_unregister(&hp680bl_driver); return PTR_ERR(hp680bl_device); } return 0; } static void __exit hp680bl_exit(void) { platform_device_unregister(hp680bl_device); platform_driver_unregister(&hp680bl_driver); } module_init(hp680bl_init); module_exit(hp680bl_exit); MODULE_AUTHOR("Andriy Skulysh <askulysh@gmail.com>"); MODULE_DESCRIPTION("HP Jornada 680 Backlight Driver"); MODULE_LICENSE("GPL");
repo_name: zarboz/s2w-VilleZ
file_path: drivers/video/backlight/hp680_bl.c
language: C
license: gpl-2.0
size: 4,190
<?php /** * SimplePie * * A PHP-Based RSS and Atom Feed Framework. * Takes the hard work out of managing a complete RSS/Atom solution. * * Copyright (c) 2004-2012, Ryan Parman, Geoffrey Sneddon, Ryan McCue, and contributors * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * * Neither the name of the SimplePie Team nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS * AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @package SimplePie * @version 1.3.1 * @copyright 2004-2012 Ryan Parman, Geoffrey Sneddon, Ryan McCue * @author Ryan Parman * @author Geoffrey Sneddon * @author Ryan McCue * @link http://simplepie.org/ SimplePie * @license http://www.opensource.org/licenses/bsd-license.php BSD License */ /** * Handles everything related to enclosures (including Media RSS and iTunes RSS) * * Used by {@see SimplePie_Item::get_enclosure()} and {@see SimplePie_Item::get_enclosures()} * * This class can be overloaded with {@see SimplePie::set_enclosure_class()} * * @package SimplePie * @subpackage API */ class SimplePie_Enclosure { /** * @var string * @see get_bitrate() */ var $bitrate; /** * @var array * @see get_captions() */ var $captions; /** * @var array * @see get_categories() */ var $categories; /** * @var int * @see get_channels() */ var $channels; /** * @var SimplePie_Copyright * @see get_copyright() */ var $copyright; /** * @var array * @see get_credits() */ var $credits; /** * @var string * @see get_description() */ var $description; /** * @var int * @see get_duration() */ var $duration; /** * @var string * @see get_expression() */ var $expression; /** * @var string * @see get_framerate() */ var $framerate; /** * @var string * @see get_handler() */ var $handler; /** * @var array * @see get_hashes() */ var $hashes; /** * @var string * @see get_height() */ var $height; /** * @deprecated * @var null */ var $javascript; /** * @var array * @see get_keywords() */ var $keywords; /** * @var string * @see get_language() */ var $lang; /** * @var string * @see get_length() */ var $length; /** * @var string * @see get_link() */ var $link; /** * @var string * @see get_medium() */ var $medium; /** * @var string * @see get_player() */ var $player; /** * @var array * @see get_ratings() */ var $ratings; /** * @var array * @see 
get_restrictions() */ var $restrictions; /** * @var string * @see get_sampling_rate() */ var $samplingrate; /** * @var array * @see get_thumbnails() */ var $thumbnails; /** * @var string * @see get_title() */ var $title; /** * @var string * @see get_type() */ var $type; /** * @var string * @see get_width() */ var $width; /** * Constructor, used to input the data * * For documentation on all the parameters, see the corresponding * properties and their accessors * * @uses idna_convert If available, this will convert an IDN */ public function __construct($link = null, $type = null, $length = null, $javascript = null, $bitrate = null, $captions = null, $categories = null, $channels = null, $copyright = null, $credits = null, $description = null, $duration = null, $expression = null, $framerate = null, $hashes = null, $height = null, $keywords = null, $lang = null, $medium = null, $player = null, $ratings = null, $restrictions = null, $samplingrate = null, $thumbnails = null, $title = null, $width = null) { $this->bitrate = $bitrate; $this->captions = $captions; $this->categories = $categories; $this->channels = $channels; $this->copyright = $copyright; $this->credits = $credits; $this->description = $description; $this->duration = $duration; $this->expression = $expression; $this->framerate = $framerate; $this->hashes = $hashes; $this->height = $height; $this->keywords = $keywords; $this->lang = $lang; $this->length = $length; $this->link = $link; $this->medium = $medium; $this->player = $player; $this->ratings = $ratings; $this->restrictions = $restrictions; $this->samplingrate = $samplingrate; $this->thumbnails = $thumbnails; $this->title = $title; $this->type = $type; $this->width = $width; if (class_exists('idna_convert')) { $idn = new idna_convert(); $parsed = SimplePie_Misc::parse_url($link); $this->link = SimplePie_Misc::compress_parse_url($parsed['scheme'], $idn->encode($parsed['authority']), $parsed['path'], $parsed['query'], $parsed['fragment']); } $this->handler = $this->get_handler(); // Needs to load last } /** * String-ified version * * @return string */ public function __toString() { // There is no $this->data here return md5(serialize($this)); } /** * Get the bitrate * * @return string|null */ public function get_bitrate() { if ($this->bitrate !== null) { return $this->bitrate; } else { return null; } } /** * Get a single caption * * @param int $key * @return SimplePie_Caption|null */ public function get_caption($key = 0) { $captions = $this->get_captions(); if (isset($captions[$key])) { return $captions[$key]; } else { return null; } } /** * Get all captions * * @return array|null Array of {@see SimplePie_Caption} objects */ public function get_captions() { if ($this->captions !== null) { return $this->captions; } else { return null; } } /** * Get a single category * * @param int $key * @return SimplePie_Category|null */ public function get_category($key = 0) { $categories = $this->get_categories(); if (isset($categories[$key])) { return $categories[$key]; } else { return null; } } /** * Get all categories * * @return array|null Array of {@see SimplePie_Category} objects */ public function get_categories() { if ($this->categories !== null) { return $this->categories; } else { return null; } } /** * Get the number of audio channels * * @return int|null */ public function get_channels() { if ($this->channels !== null) { return $this->channels; } else { return null; } } /** * Get the copyright information * * @return SimplePie_Copyright|null */ public function get_copyright() { 
if ($this->copyright !== null) { return $this->copyright; } else { return null; } } /** * Get a single credit * * @param int $key * @return SimplePie_Credit|null */ public function get_credit($key = 0) { $credits = $this->get_credits(); if (isset($credits[$key])) { return $credits[$key]; } else { return null; } } /** * Get all credits * * @return array|null Array of {@see SimplePie_Credit} objects */ public function get_credits() { if ($this->credits !== null) { return $this->credits; } else { return null; } } /** * Get the description of the enclosure * * @return string|null */ public function get_description() { if ($this->description !== null) { return $this->description; } else { return null; } } /** * Get the duration of the enclosure * * @param string $convert Convert seconds into hh:mm:ss * @return string|int|null 'hh:mm:ss' string if `$convert` was specified, otherwise integer (or null if none found) */ public function get_duration($convert = false) { if ($this->duration !== null) { if ($convert) { $time = SimplePie_Misc::time_hms($this->duration); return $time; } else { return $this->duration; } } else { return null; } } /** * Get the expression * * @return string Probably one of 'sample', 'full', 'nonstop', 'clip'. Defaults to 'full' */ public function get_expression() { if ($this->expression !== null) { return $this->expression; } else { return 'full'; } } /** * Get the file extension * * @return string|null */ public function get_extension() { if ($this->link !== null) { $url = SimplePie_Misc::parse_url($this->link); if ($url['path'] !== '') { return pathinfo($url['path'], PATHINFO_EXTENSION); } } return null; } /** * Get the framerate (in frames-per-second) * * @return string|null */ public function get_framerate() { if ($this->framerate !== null) { return $this->framerate; } else { return null; } } /** * Get the preferred handler * * @return string|null One of 'flash', 'fmedia', 'quicktime', 'wmedia', 'mp3' */ public function get_handler() { return $this->get_real_type(true); } /** * Get a single hash * * @link http://www.rssboard.org/media-rss#media-hash * @param int $key * @return string|null Hash as per `media:hash`, prefixed with "$algo:" */ public function get_hash($key = 0) { $hashes = $this->get_hashes(); if (isset($hashes[$key])) { return $hashes[$key]; } else { return null; } } /** * Get all credits * * @return array|null Array of strings, see {@see get_hash()} */ public function get_hashes() { if ($this->hashes !== null) { return $this->hashes; } else { return null; } } /** * Get the height * * @return string|null */ public function get_height() { if ($this->height !== null) { return $this->height; } else { return null; } } /** * Get the language * * @link http://tools.ietf.org/html/rfc3066 * @return string|null Language code as per RFC 3066 */ public function get_language() { if ($this->lang !== null) { return $this->lang; } else { return null; } } /** * Get a single keyword * * @param int $key * @return string|null */ public function get_keyword($key = 0) { $keywords = $this->get_keywords(); if (isset($keywords[$key])) { return $keywords[$key]; } else { return null; } } /** * Get all keywords * * @return array|null Array of strings */ public function get_keywords() { if ($this->keywords !== null) { return $this->keywords; } else { return null; } } /** * Get length * * @return float Length in bytes */ public function get_length() { if ($this->length !== null) { return $this->length; } else { return null; } } /** * Get the URL * * @return string|null */ public 
function get_link() { if ($this->link !== null) { return urldecode($this->link); } else { return null; } } /** * Get the medium * * @link http://www.rssboard.org/media-rss#media-content * @return string|null Should be one of 'image', 'audio', 'video', 'document', 'executable' */ public function get_medium() { if ($this->medium !== null) { return $this->medium; } else { return null; } } /** * Get the player URL * * Typically the same as {@see get_permalink()} * @return string|null Player URL */ public function get_player() { if ($this->player !== null) { return $this->player; } else { return null; } } /** * Get a single rating * * @param int $key * @return SimplePie_Rating|null */ public function get_rating($key = 0) { $ratings = $this->get_ratings(); if (isset($ratings[$key])) { return $ratings[$key]; } else { return null; } } /** * Get all ratings * * @return array|null Array of {@see SimplePie_Rating} objects */ public function get_ratings() { if ($this->ratings !== null) { return $this->ratings; } else { return null; } } /** * Get a single restriction * * @param int $key * @return SimplePie_Restriction|null */ public function get_restriction($key = 0) { $restrictions = $this->get_restrictions(); if (isset($restrictions[$key])) { return $restrictions[$key]; } else { return null; } } /** * Get all restrictions * * @return array|null Array of {@see SimplePie_Restriction} objects */ public function get_restrictions() { if ($this->restrictions !== null) { return $this->restrictions; } else { return null; } } /** * Get the sampling rate (in kHz) * * @return string|null */ public function get_sampling_rate() { if ($this->samplingrate !== null) { return $this->samplingrate; } else { return null; } } /** * Get the file size (in MiB) * * @return float|null File size in mebibytes (1048 bytes) */ public function get_size() { $length = $this->get_length(); if ($length !== null) { return round($length/1048576, 2); } else { return null; } } /** * Get a single thumbnail * * @param int $key * @return string|null Thumbnail URL */ public function get_thumbnail($key = 0) { $thumbnails = $this->get_thumbnails(); if (isset($thumbnails[$key])) { return $thumbnails[$key]; } else { return null; } } /** * Get all thumbnails * * @return array|null Array of thumbnail URLs */ public function get_thumbnails() { if ($this->thumbnails !== null) { return $this->thumbnails; } else { return null; } } /** * Get the title * * @return string|null */ public function get_title() { if ($this->title !== null) { return $this->title; } else { return null; } } /** * Get mimetype of the enclosure * * @see get_real_type() * @return string|null MIME type */ public function get_type() { if ($this->type !== null) { return $this->type; } else { return null; } } /** * Get the width * * @return string|null */ public function get_width() { if ($this->width !== null) { return $this->width; } else { return null; } } /** * Embed the enclosure using `<embed>` * * @deprecated Use the second parameter to {@see embed} instead * * @param array|string $options See first paramter to {@see embed} * @return string HTML string to output */ public function native_embed($options='') { return $this->embed($options, true); } /** * Embed the enclosure using Javascript * * `$options` is an array or comma-separated key:value string, with the * following properties: * * - `alt` (string): Alternate content for when an end-user does not have * the appropriate handler installed or when a file type is * unsupported. Can be any text or HTML. Defaults to blank. 
* - `altclass` (string): If a file type is unsupported, the end-user will * see the alt text (above) linked directly to the content. That link * will have this value as its class name. Defaults to blank. * - `audio` (string): This is an image that should be used as a * placeholder for audio files before they're loaded (QuickTime-only). * Can be any relative or absolute URL. Defaults to blank. * - `bgcolor` (string): The background color for the media, if not * already transparent. Defaults to `#ffffff`. * - `height` (integer): The height of the embedded media. Accepts any * numeric pixel value (such as `360`) or `auto`. Defaults to `auto`, * and it is recommended that you use this default. * - `loop` (boolean): Do you want the media to loop when its done? * Defaults to `false`. * - `mediaplayer` (string): The location of the included * `mediaplayer.swf` file. This allows for the playback of Flash Video * (`.flv`) files, and is the default handler for non-Odeo MP3's. * Defaults to blank. * - `video` (string): This is an image that should be used as a * placeholder for video files before they're loaded (QuickTime-only). * Can be any relative or absolute URL. Defaults to blank. * - `width` (integer): The width of the embedded media. Accepts any * numeric pixel value (such as `480`) or `auto`. Defaults to `auto`, * and it is recommended that you use this default. * - `widescreen` (boolean): Is the enclosure widescreen or standard? * This applies only to video enclosures, and will automatically resize * the content appropriately. Defaults to `false`, implying 4:3 mode. * * Note: Non-widescreen (4:3) mode with `width` and `height` set to `auto` * will default to 480x360 video resolution. Widescreen (16:9) mode with * `width` and `height` set to `auto` will default to 480x270 video resolution. * * @todo If the dimensions for media:content are defined, use them when width/height are set to 'auto'. 
* @param array|string $options Comma-separated key:value list, or array * @param bool $native Use `<embed>` * @return string HTML string to output */ public function embed($options = '', $native = false) { // Set up defaults $audio = ''; $video = ''; $alt = ''; $altclass = ''; $loop = 'false'; $width = 'auto'; $height = 'auto'; $bgcolor = '#ffffff'; $mediaplayer = ''; $widescreen = false; $handler = $this->get_handler(); $type = $this->get_real_type(); // Process options and reassign values as necessary if (is_array($options)) { extract($options); } else { $options = explode(',', $options); foreach($options as $option) { $opt = explode(':', $option, 2); if (isset($opt[0], $opt[1])) { $opt[0] = trim($opt[0]); $opt[1] = trim($opt[1]); switch ($opt[0]) { case 'audio': $audio = $opt[1]; break; case 'video': $video = $opt[1]; break; case 'alt': $alt = $opt[1]; break; case 'altclass': $altclass = $opt[1]; break; case 'loop': $loop = $opt[1]; break; case 'width': $width = $opt[1]; break; case 'height': $height = $opt[1]; break; case 'bgcolor': $bgcolor = $opt[1]; break; case 'mediaplayer': $mediaplayer = $opt[1]; break; case 'widescreen': $widescreen = $opt[1]; break; } } } } $mime = explode('/', $type, 2); $mime = $mime[0]; // Process values for 'auto' if ($width === 'auto') { if ($mime === 'video') { if ($height === 'auto') { $width = 480; } elseif ($widescreen) { $width = round((intval($height)/9)*16); } else { $width = round((intval($height)/3)*4); } } else { $width = '100%'; } } if ($height === 'auto') { if ($mime === 'audio') { $height = 0; } elseif ($mime === 'video') { if ($width === 'auto') { if ($widescreen) { $height = 270; } else { $height = 360; } } elseif ($widescreen) { $height = round((intval($width)/16)*9); } else { $height = round((intval($width)/4)*3); } } else { $height = 376; } } elseif ($mime === 'audio') { $height = 0; } // Set proper placeholder value if ($mime === 'audio') { $placeholder = $audio; } elseif ($mime === 'video') { $placeholder = $video; } $embed = ''; // Flash if ($handler === 'flash') { if ($native) { $embed .= "<embed src=\"" . $this->get_link() . "\" pluginspage=\"http://adobe.com/go/getflashplayer\" type=\"$type\" quality=\"high\" width=\"$width\" height=\"$height\" bgcolor=\"$bgcolor\" loop=\"$loop\"></embed>"; } else { $embed .= "<script type='text/javascript'>embed_flash('$bgcolor', '$width', '$height', '" . $this->get_link() . "', '$loop', '$type');</script>"; } } // Flash Media Player file types. // Preferred handler for MP3 file types. elseif ($handler === 'fmedia' || ($handler === 'mp3' && $mediaplayer !== '')) { $height += 20; if ($native) { $embed .= "<embed src=\"$mediaplayer\" pluginspage=\"http://adobe.com/go/getflashplayer\" type=\"application/x-shockwave-flash\" quality=\"high\" width=\"$width\" height=\"$height\" wmode=\"transparent\" flashvars=\"file=" . rawurlencode($this->get_link().'?file_extension=.'.$this->get_extension()) . "&autostart=false&repeat=$loop&showdigits=true&showfsbutton=false\"></embed>"; } else { $embed .= "<script type='text/javascript'>embed_flv('$width', '$height', '" . rawurlencode($this->get_link().'?file_extension=.'.$this->get_extension()) . "', '$placeholder', '$loop', '$mediaplayer');</script>"; } } // QuickTime 7 file types. Need to test with QuickTime 6. // Only handle MP3's if the Flash Media Player is not present. 
elseif ($handler === 'quicktime' || ($handler === 'mp3' && $mediaplayer === '')) { $height += 16; if ($native) { if ($placeholder !== '') { $embed .= "<embed type=\"$type\" style=\"cursor:hand; cursor:pointer;\" href=\"" . $this->get_link() . "\" src=\"$placeholder\" width=\"$width\" height=\"$height\" autoplay=\"false\" target=\"myself\" controller=\"false\" loop=\"$loop\" scale=\"aspect\" bgcolor=\"$bgcolor\" pluginspage=\"http://apple.com/quicktime/download/\"></embed>"; } else { $embed .= "<embed type=\"$type\" style=\"cursor:hand; cursor:pointer;\" src=\"" . $this->get_link() . "\" width=\"$width\" height=\"$height\" autoplay=\"false\" target=\"myself\" controller=\"true\" loop=\"$loop\" scale=\"aspect\" bgcolor=\"$bgcolor\" pluginspage=\"http://apple.com/quicktime/download/\"></embed>"; } } else { $embed .= "<script type='text/javascript'>embed_quicktime('$type', '$bgcolor', '$width', '$height', '" . $this->get_link() . "', '$placeholder', '$loop');</script>"; } } // Windows Media elseif ($handler === 'wmedia') { $height += 45; if ($native) { $embed .= "<embed type=\"application/x-mplayer2\" src=\"" . $this->get_link() . "\" autosize=\"1\" width=\"$width\" height=\"$height\" showcontrols=\"1\" showstatusbar=\"0\" showdisplay=\"0\" autostart=\"0\"></embed>"; } else { $embed .= "<script type='text/javascript'>embed_wmedia('$width', '$height', '" . $this->get_link() . "');</script>"; } } // Everything else else $embed .= '<a href="' . $this->get_link() . '" class="' . $altclass . '">' . $alt . '</a>'; return $embed; } /** * Get the real media type * * Often, feeds lie to us, necessitating a bit of deeper inspection. This * converts types to their canonical representations based on the file * extension * * @see get_type() * @param bool $find_handler Internal use only, use {@see get_handler()} instead * @return string MIME type */ public function get_real_type($find_handler = false) { // Mime-types by handler. $types_flash = array('application/x-shockwave-flash', 'application/futuresplash'); // Flash $types_fmedia = array('video/flv', 'video/x-flv','flv-application/octet-stream'); // Flash Media Player $types_quicktime = array('audio/3gpp', 'audio/3gpp2', 'audio/aac', 'audio/x-aac', 'audio/aiff', 'audio/x-aiff', 'audio/mid', 'audio/midi', 'audio/x-midi', 'audio/mp4', 'audio/m4a', 'audio/x-m4a', 'audio/wav', 'audio/x-wav', 'video/3gpp', 'video/3gpp2', 'video/m4v', 'video/x-m4v', 'video/mp4', 'video/mpeg', 'video/x-mpeg', 'video/quicktime', 'video/sd-video'); // QuickTime $types_wmedia = array('application/asx', 'application/x-mplayer2', 'audio/x-ms-wma', 'audio/x-ms-wax', 'video/x-ms-asf-plugin', 'video/x-ms-asf', 'video/x-ms-wm', 'video/x-ms-wmv', 'video/x-ms-wvx'); // Windows Media $types_mp3 = array('audio/mp3', 'audio/x-mp3', 'audio/mpeg', 'audio/x-mpeg'); // MP3 if ($this->get_type() !== null) { $type = strtolower($this->type); } else { $type = null; } // If we encounter an unsupported mime-type, check the file extension and guess intelligently. 
if (!in_array($type, array_merge($types_flash, $types_fmedia, $types_quicktime, $types_wmedia, $types_mp3))) { switch (strtolower($this->get_extension())) { // Audio mime-types case 'aac': case 'adts': $type = 'audio/acc'; break; case 'aif': case 'aifc': case 'aiff': case 'cdda': $type = 'audio/aiff'; break; case 'bwf': $type = 'audio/wav'; break; case 'kar': case 'mid': case 'midi': case 'smf': $type = 'audio/midi'; break; case 'm4a': $type = 'audio/x-m4a'; break; case 'mp3': case 'swa': $type = 'audio/mp3'; break; case 'wav': $type = 'audio/wav'; break; case 'wax': $type = 'audio/x-ms-wax'; break; case 'wma': $type = 'audio/x-ms-wma'; break; // Video mime-types case '3gp': case '3gpp': $type = 'video/3gpp'; break; case '3g2': case '3gp2': $type = 'video/3gpp2'; break; case 'asf': $type = 'video/x-ms-asf'; break; case 'flv': $type = 'video/x-flv'; break; case 'm1a': case 'm1s': case 'm1v': case 'm15': case 'm75': case 'mp2': case 'mpa': case 'mpeg': case 'mpg': case 'mpm': case 'mpv': $type = 'video/mpeg'; break; case 'm4v': $type = 'video/x-m4v'; break; case 'mov': case 'qt': $type = 'video/quicktime'; break; case 'mp4': case 'mpg4': $type = 'video/mp4'; break; case 'sdv': $type = 'video/sd-video'; break; case 'wm': $type = 'video/x-ms-wm'; break; case 'wmv': $type = 'video/x-ms-wmv'; break; case 'wvx': $type = 'video/x-ms-wvx'; break; // Flash mime-types case 'spl': $type = 'application/futuresplash'; break; case 'swf': $type = 'application/x-shockwave-flash'; break; } } if ($find_handler) { if (in_array($type, $types_flash)) { return 'flash'; } elseif (in_array($type, $types_fmedia)) { return 'fmedia'; } elseif (in_array($type, $types_quicktime)) { return 'quicktime'; } elseif (in_array($type, $types_wmedia)) { return 'wmedia'; } elseif (in_array($type, $types_mp3)) { return 'mp3'; } else { return null; } } else { return $type; } } }
repo_name: sofiacamming/Wordpress
file_path: wp-includes/SimplePie/Enclosure.php
language: PHP
license: gpl-2.0
size: 27,487
<?php /** * SimplePie * * A PHP-Based RSS and Atom Feed Framework. * Takes the hard work out of managing a complete RSS/Atom solution. * * Copyright (c) 2004-2012, Ryan Parman, Geoffrey Sneddon, Ryan McCue, and contributors * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * * Neither the name of the SimplePie Team nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS * AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @package SimplePie * @version 1.3.1 * @copyright 2004-2012 Ryan Parman, Geoffrey Sneddon, Ryan McCue * @author Ryan Parman * @author Geoffrey Sneddon * @author Ryan McCue * @link http://simplepie.org/ SimplePie * @license http://www.opensource.org/licenses/bsd-license.php BSD License */ /** * HTTP Response Parser * * @package SimplePie * @subpackage HTTP */ class SimplePie_HTTP_Parser { /** * HTTP Version * * @var float */ public $http_version = 0.0; /** * Status code * * @var int */ public $status_code = 0; /** * Reason phrase * * @var string */ public $reason = ''; /** * Key/value pairs of the headers * * @var array */ public $headers = array(); /** * Body of the response * * @var string */ public $body = ''; /** * Current state of the state machine * * @var string */ protected $state = 'http_version'; /** * Input data * * @var string */ protected $data = ''; /** * Input data length (to avoid calling strlen() everytime this is needed) * * @var int */ protected $data_length = 0; /** * Current position of the pointer * * @var int */ protected $position = 0; /** * Name of the hedaer currently being parsed * * @var string */ protected $name = ''; /** * Value of the hedaer currently being parsed * * @var string */ protected $value = ''; /** * Create an instance of the class with the input data * * @param string $data Input data */ public function __construct($data) { $this->data = $data; $this->data_length = strlen($this->data); } /** * Parse the input data * * @return bool true on success, false on failure */ public function parse() { while ($this->state && $this->state !== 'emit' && $this->has_data()) { $state = $this->state; $this->$state(); } $this->data = ''; if ($this->state === 'emit' || $this->state === 'body') { return true; } else { $this->http_version = ''; $this->status_code = ''; $this->reason 
= ''; $this->headers = array(); $this->body = ''; return false; } } /** * Check whether there is data beyond the pointer * * @return bool true if there is further data, false if not */ protected function has_data() { return (bool) ($this->position < $this->data_length); } /** * See if the next character is LWS * * @return bool true if the next character is LWS, false if not */ protected function is_linear_whitespace() { return (bool) ($this->data[$this->position] === "\x09" || $this->data[$this->position] === "\x20" || ($this->data[$this->position] === "\x0A" && isset($this->data[$this->position + 1]) && ($this->data[$this->position + 1] === "\x09" || $this->data[$this->position + 1] === "\x20"))); } /** * Parse the HTTP version */ protected function http_version() { if (strpos($this->data, "\x0A") !== false && strtoupper(substr($this->data, 0, 5)) === 'HTTP/') { $len = strspn($this->data, '0123456789.', 5); $this->http_version = substr($this->data, 5, $len); $this->position += 5 + $len; if (substr_count($this->http_version, '.') <= 1) { $this->http_version = (float) $this->http_version; $this->position += strspn($this->data, "\x09\x20", $this->position); $this->state = 'status'; } else { $this->state = false; } } else { $this->state = false; } } /** * Parse the status code */ protected function status() { if ($len = strspn($this->data, '0123456789', $this->position)) { $this->status_code = (int) substr($this->data, $this->position, $len); $this->position += $len; $this->state = 'reason'; } else { $this->state = false; } } /** * Parse the reason phrase */ protected function reason() { $len = strcspn($this->data, "\x0A", $this->position); $this->reason = trim(substr($this->data, $this->position, $len), "\x09\x0D\x20"); $this->position += $len + 1; $this->state = 'new_line'; } /** * Deal with a new line, shifting data around as needed */ protected function new_line() { $this->value = trim($this->value, "\x0D\x20"); if ($this->name !== '' && $this->value !== '') { $this->name = strtolower($this->name); // We should only use the last Content-Type header. c.f. issue #1 if (isset($this->headers[$this->name]) && $this->name !== 'content-type') { $this->headers[$this->name] .= ', ' . 
$this->value; } else { $this->headers[$this->name] = $this->value; } } $this->name = ''; $this->value = ''; if (substr($this->data[$this->position], 0, 2) === "\x0D\x0A") { $this->position += 2; $this->state = 'body'; } elseif ($this->data[$this->position] === "\x0A") { $this->position++; $this->state = 'body'; } else { $this->state = 'name'; } } /** * Parse a header name */ protected function name() { $len = strcspn($this->data, "\x0A:", $this->position); if (isset($this->data[$this->position + $len])) { if ($this->data[$this->position + $len] === "\x0A") { $this->position += $len; $this->state = 'new_line'; } else { $this->name = substr($this->data, $this->position, $len); $this->position += $len + 1; $this->state = 'value'; } } else { $this->state = false; } } /** * Parse LWS, replacing consecutive LWS characters with a single space */ protected function linear_whitespace() { do { if (substr($this->data, $this->position, 2) === "\x0D\x0A") { $this->position += 2; } elseif ($this->data[$this->position] === "\x0A") { $this->position++; } $this->position += strspn($this->data, "\x09\x20", $this->position); } while ($this->has_data() && $this->is_linear_whitespace()); $this->value .= "\x20"; } /** * See what state to move to while within non-quoted header values */ protected function value() { if ($this->is_linear_whitespace()) { $this->linear_whitespace(); } else { switch ($this->data[$this->position]) { case '"': // Workaround for ETags: we have to include the quotes as // part of the tag. if (strtolower($this->name) === 'etag') { $this->value .= '"'; $this->position++; $this->state = 'value_char'; break; } $this->position++; $this->state = 'quote'; break; case "\x0A": $this->position++; $this->state = 'new_line'; break; default: $this->state = 'value_char'; break; } } } /** * Parse a header value while outside quotes */ protected function value_char() { $len = strcspn($this->data, "\x09\x20\x0A\"", $this->position); $this->value .= substr($this->data, $this->position, $len); $this->position += $len; $this->state = 'value'; } /** * See what state to move to while within quoted header values */ protected function quote() { if ($this->is_linear_whitespace()) { $this->linear_whitespace(); } else { switch ($this->data[$this->position]) { case '"': $this->position++; $this->state = 'value'; break; case "\x0A": $this->position++; $this->state = 'new_line'; break; case '\\': $this->position++; $this->state = 'quote_escaped'; break; default: $this->state = 'quote_char'; break; } } } /** * Parse a header value while within quotes */ protected function quote_char() { $len = strcspn($this->data, "\x09\x20\x0A\"\\", $this->position); $this->value .= substr($this->data, $this->position, $len); $this->position += $len; $this->state = 'value'; } /** * Parse an escaped character within quotes */ protected function quote_escaped() { $this->value .= $this->data[$this->position]; $this->position++; $this->state = 'quote'; } /** * Parse the body */ protected function body() { $this->body = substr($this->data, $this->position); if (!empty($this->headers['transfer-encoding'])) { unset($this->headers['transfer-encoding']); $this->state = 'chunked'; } else { $this->state = 'emit'; } } /** * Parsed a "Transfer-Encoding: chunked" body */ protected function chunked() { if (!preg_match('/^([0-9a-f]+)[^\r\n]*\r\n/i', trim($this->body))) { $this->state = 'emit'; return; } $decoded = ''; $encoded = $this->body; while (true) { $is_chunked = (bool) preg_match( '/^([0-9a-f]+)[^\r\n]*\r\n/i', $encoded, $matches ); if 
(!$is_chunked) { // Looks like it's not chunked after all $this->state = 'emit'; return; } $length = hexdec(trim($matches[1])); if ($length === 0) { // Ignore trailer headers $this->state = 'emit'; $this->body = $decoded; return; } $chunk_length = strlen($matches[0]); $decoded .= $part = substr($encoded, $chunk_length, $length); $encoded = substr($encoded, $chunk_length + $length + 2); if (trim($encoded) === '0' || empty($encoded)) { $this->state = 'emit'; $this->body = $decoded; return; } } } }
repo_name: thienit/vgp
file_path: wp-includes/SimplePie/HTTP/Parser.php
language: PHP
license: gpl-2.0
size: 10,882
<?php /** * DigestAuthenticateTest file * * PHP 5 * * CakePHP(tm) : Rapid Development Framework (http://cakephp.org) * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org) * * Licensed under The MIT License * For full copyright and license information, please see the LICENSE.txt * Redistributions of files must retain the above copyright notice. * * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org) * @link http://cakephp.org CakePHP(tm) Project * @package Cake.Test.Case.Controller.Component.Auth * @since CakePHP(tm) v 2.0 * @license MIT License (http://www.opensource.org/licenses/mit-license.php) */ App::uses('DigestAuthenticate', 'Controller/Component/Auth'); App::uses('AppModel', 'Model'); App::uses('CakeRequest', 'Network'); App::uses('CakeResponse', 'Network'); require_once CAKE . 'Test' . DS . 'Case' . DS . 'Model' . DS . 'models.php'; /** * Test case for DigestAuthentication * * @package Cake.Test.Case.Controller.Component.Auth */ class DigestAuthenticateTest extends CakeTestCase { public $fixtures = array('core.user', 'core.auth_user'); /** * setup * * @return void */ public function setUp() { parent::setUp(); $this->Collection = $this->getMock('ComponentCollection'); $this->server = $_SERVER; $this->auth = new DigestAuthenticate($this->Collection, array( 'fields' => array('username' => 'user', 'password' => 'password'), 'userModel' => 'User', 'realm' => 'localhost', 'nonce' => 123, 'opaque' => '123abc' )); $password = DigestAuthenticate::password('mariano', 'cake', 'localhost'); $User = ClassRegistry::init('User'); $User->updateAll(array('password' => $User->getDataSource()->value($password))); $_SERVER['REQUEST_METHOD'] = 'GET'; $this->response = $this->getMock('CakeResponse'); } /** * tearDown * * @return void */ public function tearDown() { parent::tearDown(); $_SERVER = $this->server; } /** * test applying settings in the constructor * * @return void */ public function testConstructor() { $object = new DigestAuthenticate($this->Collection, array( 'userModel' => 'AuthUser', 'fields' => array('username' => 'user', 'password' => 'password'), 'nonce' => 123456 )); $this->assertEquals('AuthUser', $object->settings['userModel']); $this->assertEquals(array('username' => 'user', 'password' => 'password'), $object->settings['fields']); $this->assertEquals(123456, $object->settings['nonce']); $this->assertEquals(env('SERVER_NAME'), $object->settings['realm']); } /** * test the authenticate method * * @return void */ public function testAuthenticateNoData() { $request = new CakeRequest('posts/index', false); $this->response->expects($this->once()) ->method('header') ->with('WWW-Authenticate: Digest realm="localhost",qop="auth",nonce="123",opaque="123abc"'); $this->assertFalse($this->auth->authenticate($request, $this->response)); } /** * test the authenticate method * * @return void */ public function testAuthenticateWrongUsername() { $request = new CakeRequest('posts/index', false); $request->addParams(array('pass' => array(), 'named' => array())); $_SERVER['PHP_AUTH_DIGEST'] = <<<DIGEST Digest username="incorrect_user", realm="localhost", nonce="123456", uri="/dir/index.html", qop=auth, nc=00000001, cnonce="0a4f113b", response="6629fae49393a05397450978507c4ef1", opaque="123abc" DIGEST; $this->response->expects($this->at(0)) ->method('header') ->with('WWW-Authenticate: Digest realm="localhost",qop="auth",nonce="123",opaque="123abc"'); $this->response->expects($this->at(1)) ->method('statusCode') ->with(401); 
$this->response->expects($this->at(2)) ->method('send'); $this->assertFalse($this->auth->authenticate($request, $this->response)); } /** * test that challenge headers are sent when no credentials are found. * * @return void */ public function testAuthenticateChallenge() { $request = new CakeRequest('posts/index', false); $request->addParams(array('pass' => array(), 'named' => array())); $this->response->expects($this->at(0)) ->method('header') ->with('WWW-Authenticate: Digest realm="localhost",qop="auth",nonce="123",opaque="123abc"'); $this->response->expects($this->at(1)) ->method('statusCode') ->with(401); $this->response->expects($this->at(2)) ->method('send'); $result = $this->auth->authenticate($request, $this->response); $this->assertFalse($result); } /** * test authenticate success * * @return void */ public function testAuthenticateSuccess() { $request = new CakeRequest('posts/index', false); $request->addParams(array('pass' => array(), 'named' => array())); $_SERVER['PHP_AUTH_DIGEST'] = <<<DIGEST Digest username="mariano", realm="localhost", nonce="123", uri="/dir/index.html", qop=auth, nc=1, cnonce="123", response="06b257a54befa2ddfb9bfa134224aa29", opaque="123abc" DIGEST; $result = $this->auth->authenticate($request, $this->response); $expected = array( 'id' => 1, 'user' => 'mariano', 'created' => '2007-03-17 01:16:23', 'updated' => '2007-03-17 01:18:31' ); $this->assertEquals($expected, $result); } /** * test scope failure. * * @return void */ public function testAuthenticateFailReChallenge() { $this->auth->settings['scope'] = array('user' => 'nate'); $request = new CakeRequest('posts/index', false); $request->addParams(array('pass' => array(), 'named' => array())); $_SERVER['PHP_AUTH_DIGEST'] = <<<DIGEST Digest username="mariano", realm="localhost", nonce="123", uri="/dir/index.html", qop=auth, nc=1, cnonce="123", response="6629fae49393a05397450978507c4ef1", opaque="123abc" DIGEST; $this->response->expects($this->at(0)) ->method('header') ->with('WWW-Authenticate: Digest realm="localhost",qop="auth",nonce="123",opaque="123abc"'); $this->response->expects($this->at(1)) ->method('statusCode') ->with(401); $this->response->expects($this->at(2)) ->method('send'); $this->assertFalse($this->auth->authenticate($request, $this->response)); } /** * testParseDigestAuthData method * * @return void */ public function testParseAuthData() { $digest = <<<DIGEST Digest username="Mufasa", realm="testrealm@host.com", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", uri="/dir/index.html", qop=auth, nc=00000001, cnonce="0a4f113b", response="6629fae49393a05397450978507c4ef1", opaque="5ccc069c403ebaf9f0171e9517f40e41" DIGEST; $expected = array( 'username' => 'Mufasa', 'realm' => 'testrealm@host.com', 'nonce' => 'dcd98b7102dd2f0e8b11d0f600bfb0c093', 'uri' => '/dir/index.html', 'qop' => 'auth', 'nc' => '00000001', 'cnonce' => '0a4f113b', 'response' => '6629fae49393a05397450978507c4ef1', 'opaque' => '5ccc069c403ebaf9f0171e9517f40e41' ); $result = $this->auth->parseAuthData($digest); $this->assertSame($expected, $result); $result = $this->auth->parseAuthData(''); $this->assertNull($result); } /** * test parsing digest information with email addresses * * @return void */ public function testParseAuthEmailAddress() { $digest = <<<DIGEST Digest username="mark@example.com", realm="testrealm@host.com", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", uri="/dir/index.html", qop=auth, nc=00000001, cnonce="0a4f113b", response="6629fae49393a05397450978507c4ef1", opaque="5ccc069c403ebaf9f0171e9517f40e41" DIGEST; $expected 
= array( 'username' => 'mark@example.com', 'realm' => 'testrealm@host.com', 'nonce' => 'dcd98b7102dd2f0e8b11d0f600bfb0c093', 'uri' => '/dir/index.html', 'qop' => 'auth', 'nc' => '00000001', 'cnonce' => '0a4f113b', 'response' => '6629fae49393a05397450978507c4ef1', 'opaque' => '5ccc069c403ebaf9f0171e9517f40e41' ); $result = $this->auth->parseAuthData($digest); $this->assertSame($expected, $result); } /** * test password hashing * * @return void */ public function testPassword() { $result = DigestAuthenticate::password('mark', 'password', 'localhost'); $expected = md5('mark:localhost:password'); $this->assertEquals($expected, $result); } }
repo_name: govbr/suindara.govbr
file_path: Fontes/lib/Cake/Test/Case/Controller/Component/Auth/DigestAuthenticateTest.php
language: PHP
license: gpl-2.0
size: 8,215
/* libxmms-flac - XMMS FLAC input plugin * Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009 Daisuke Shimamura * * Based on mpg123 plugin * and prefs.c - 2000/05/06 * EasyTAG - Tag editor for MP3 and OGG files * Copyright (C) 2000-2002 Jerome Couderc <j.couderc@ifrance.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "plugin.h" #include <stdlib.h> #include <string.h> #include <glib.h> #include <gtk/gtk.h> #include <pthread.h> #include <math.h> #include <xmms/configfile.h> #include <xmms/dirbrowser.h> #include <xmms/titlestring.h> #include <xmms/util.h> #include <xmms/plugin.h> #include "share/replaygain_synthesis.h" /* for NOISE_SHAPING_LOW */ #include "charset.h" #include "configure.h" #include "locale_hack.h" /* * Initialize Global Valueable */ flac_config_t flac_cfg = { /* title */ { FALSE, /* tag_override */ NULL, /* tag_format */ FALSE, /* convert_char_set */ NULL /* user_char_set */ }, /* stream */ { 100 /* KB */, /* http_buffer_size */ 50, /* http_prebuffer */ FALSE, /* use_proxy */ NULL, /* proxy_host */ 0, /* proxy_port */ FALSE, /* proxy_use_auth */ NULL, /* proxy_user */ NULL, /* proxy_pass */ FALSE, /* save_http_stream */ NULL, /* save_http_path */ FALSE, /* cast_title_streaming */ FALSE /* use_udp_channel */ }, /* output */ { /* replaygain */ { FALSE, /* enable */ TRUE, /* album_mode */ 0, /* preamp */ FALSE /* hard_limit */ }, /* resolution */ { /* normal */ { TRUE /* dither_24_to_16 */ }, /* replaygain */ { TRUE, /* dither */ NOISE_SHAPING_LOW, /* noise_shaping */ 16 /* bps_out */ } } } }; static GtkWidget *flac_configurewin = NULL; static GtkWidget *vbox, *notebook; static GtkWidget *title_tag_override, *title_tag_box, *title_tag_entry, *title_desc; static GtkWidget *convert_char_set, *fileCharacterSetEntry, *userCharacterSetEntry; static GtkWidget *replaygain_enable, *replaygain_album_mode; static GtkWidget *replaygain_preamp_hscale, *replaygain_preamp_label, *replaygain_hard_limit; static GtkObject *replaygain_preamp; static GtkWidget *resolution_normal_dither_24_to_16; static GtkWidget *resolution_replaygain_dither; static GtkWidget *resolution_replaygain_noise_shaping_frame; static GtkWidget *resolution_replaygain_noise_shaping_radio_none; static GtkWidget *resolution_replaygain_noise_shaping_radio_low; static GtkWidget *resolution_replaygain_noise_shaping_radio_medium; static GtkWidget *resolution_replaygain_noise_shaping_radio_high; static GtkWidget *resolution_replaygain_bps_out_frame; static GtkWidget *resolution_replaygain_bps_out_radio_16bps; static GtkWidget *resolution_replaygain_bps_out_radio_24bps; static GtkObject *streaming_size_adj, *streaming_pre_adj; static GtkWidget *streaming_proxy_use, *streaming_proxy_host_entry; static GtkWidget *streaming_proxy_port_entry, *streaming_save_use, *streaming_save_entry; static GtkWidget *streaming_proxy_auth_use; static GtkWidget 
*streaming_proxy_auth_pass_entry, *streaming_proxy_auth_user_entry; static GtkWidget *streaming_proxy_auth_user_label, *streaming_proxy_auth_pass_label; #ifdef FLAC_ICECAST static GtkWidget *streaming_cast_title, *streaming_udp_title; #endif static GtkWidget *streaming_proxy_hbox, *streaming_proxy_auth_hbox, *streaming_save_dirbrowser; static GtkWidget *streaming_save_hbox; static gchar *gtk_entry_get_text_1 (GtkWidget *widget); static void flac_configurewin_ok(GtkWidget * widget, gpointer data); static void configure_destroy(GtkWidget * w, gpointer data); static void flac_configurewin_ok(GtkWidget * widget, gpointer data) { ConfigFile *cfg; gchar *filename; (void)widget, (void)data; /* unused arguments */ g_free(flac_cfg.title.tag_format); flac_cfg.title.tag_format = g_strdup(gtk_entry_get_text(GTK_ENTRY(title_tag_entry))); flac_cfg.title.user_char_set = Charset_Get_Name_From_Title(gtk_entry_get_text_1(userCharacterSetEntry)); filename = g_strconcat(g_get_home_dir(), "/.xmms/config", NULL); cfg = xmms_cfg_open_file(filename); if (!cfg) cfg = xmms_cfg_new(); /* title */ xmms_cfg_write_boolean(cfg, "flac", "title.tag_override", flac_cfg.title.tag_override); xmms_cfg_write_string(cfg, "flac", "title.tag_format", flac_cfg.title.tag_format); xmms_cfg_write_boolean(cfg, "flac", "title.convert_char_set", flac_cfg.title.convert_char_set); xmms_cfg_write_string(cfg, "flac", "title.user_char_set", flac_cfg.title.user_char_set); /* output */ xmms_cfg_write_boolean(cfg, "flac", "output.replaygain.enable", flac_cfg.output.replaygain.enable); xmms_cfg_write_boolean(cfg, "flac", "output.replaygain.album_mode", flac_cfg.output.replaygain.album_mode); xmms_cfg_write_int(cfg, "flac", "output.replaygain.preamp", flac_cfg.output.replaygain.preamp); xmms_cfg_write_boolean(cfg, "flac", "output.replaygain.hard_limit", flac_cfg.output.replaygain.hard_limit); xmms_cfg_write_boolean(cfg, "flac", "output.resolution.normal.dither_24_to_16", flac_cfg.output.resolution.normal.dither_24_to_16); xmms_cfg_write_boolean(cfg, "flac", "output.resolution.replaygain.dither", flac_cfg.output.resolution.replaygain.dither); xmms_cfg_write_int(cfg, "flac", "output.resolution.replaygain.noise_shaping", flac_cfg.output.resolution.replaygain.noise_shaping); xmms_cfg_write_int(cfg, "flac", "output.resolution.replaygain.bps_out", flac_cfg.output.resolution.replaygain.bps_out); /* streaming */ flac_cfg.stream.http_buffer_size = (gint) GTK_ADJUSTMENT(streaming_size_adj)->value; flac_cfg.stream.http_prebuffer = (gint) GTK_ADJUSTMENT(streaming_pre_adj)->value; flac_cfg.stream.use_proxy = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_proxy_use)); if(flac_cfg.stream.proxy_host) g_free(flac_cfg.stream.proxy_host); flac_cfg.stream.proxy_host = g_strdup(gtk_entry_get_text(GTK_ENTRY(streaming_proxy_host_entry))); flac_cfg.stream.proxy_port = atoi(gtk_entry_get_text(GTK_ENTRY(streaming_proxy_port_entry))); flac_cfg.stream.proxy_use_auth = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_proxy_auth_use)); if(flac_cfg.stream.proxy_user) g_free(flac_cfg.stream.proxy_user); flac_cfg.stream.proxy_user = NULL; if(strlen(gtk_entry_get_text(GTK_ENTRY(streaming_proxy_auth_user_entry))) > 0) flac_cfg.stream.proxy_user = g_strdup(gtk_entry_get_text(GTK_ENTRY(streaming_proxy_auth_user_entry))); if(flac_cfg.stream.proxy_pass) g_free(flac_cfg.stream.proxy_pass); flac_cfg.stream.proxy_pass = NULL; if(strlen(gtk_entry_get_text(GTK_ENTRY(streaming_proxy_auth_pass_entry))) > 0) flac_cfg.stream.proxy_pass = 
g_strdup(gtk_entry_get_text(GTK_ENTRY(streaming_proxy_auth_pass_entry))); flac_cfg.stream.save_http_stream = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_save_use)); if (flac_cfg.stream.save_http_path) g_free(flac_cfg.stream.save_http_path); flac_cfg.stream.save_http_path = g_strdup(gtk_entry_get_text(GTK_ENTRY(streaming_save_entry))); #ifdef FLAC_ICECAST flac_cfg.stream.cast_title_streaming = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_cast_title)); flac_cfg.stream.use_udp_channel = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_udp_title)); #endif xmms_cfg_write_int(cfg, "flac", "stream.http_buffer_size", flac_cfg.stream.http_buffer_size); xmms_cfg_write_int(cfg, "flac", "stream.http_prebuffer", flac_cfg.stream.http_prebuffer); xmms_cfg_write_boolean(cfg, "flac", "stream.use_proxy", flac_cfg.stream.use_proxy); xmms_cfg_write_string(cfg, "flac", "stream.proxy_host", flac_cfg.stream.proxy_host); xmms_cfg_write_int(cfg, "flac", "stream.proxy_port", flac_cfg.stream.proxy_port); xmms_cfg_write_boolean(cfg, "flac", "stream.proxy_use_auth", flac_cfg.stream.proxy_use_auth); if(flac_cfg.stream.proxy_user) xmms_cfg_write_string(cfg, "flac", "stream.proxy_user", flac_cfg.stream.proxy_user); else xmms_cfg_remove_key(cfg, "flac", "stream.proxy_user"); if(flac_cfg.stream.proxy_pass) xmms_cfg_write_string(cfg, "flac", "stream.proxy_pass", flac_cfg.stream.proxy_pass); else xmms_cfg_remove_key(cfg, "flac", "stream.proxy_pass"); xmms_cfg_write_boolean(cfg, "flac", "stream.save_http_stream", flac_cfg.stream.save_http_stream); xmms_cfg_write_string(cfg, "flac", "stream.save_http_path", flac_cfg.stream.save_http_path); #ifdef FLAC_ICECAST xmms_cfg_write_boolean(cfg, "flac", "stream.cast_title_streaming", flac_cfg.stream.cast_title_streaming); xmms_cfg_write_boolean(cfg, "flac", "stream.use_udp_channel", flac_cfg.stream.use_udp_channel); #endif xmms_cfg_write_file(cfg, filename); xmms_cfg_free(cfg); g_free(filename); gtk_widget_destroy(flac_configurewin); } static void configure_destroy(GtkWidget *widget, gpointer data) { (void)widget, (void)data; /* unused arguments */ } static void title_tag_override_cb(GtkWidget *widget, gpointer data) { (void)widget, (void)data; /* unused arguments */ flac_cfg.title.tag_override = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(title_tag_override)); gtk_widget_set_sensitive(title_tag_box, flac_cfg.title.tag_override); gtk_widget_set_sensitive(title_desc, flac_cfg.title.tag_override); } static void convert_char_set_cb(GtkWidget *widget, gpointer data) { (void)widget, (void)data; /* unused arguments */ flac_cfg.title.convert_char_set = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(convert_char_set)); gtk_widget_set_sensitive(fileCharacterSetEntry, FALSE); gtk_widget_set_sensitive(userCharacterSetEntry, flac_cfg.title.convert_char_set); } static void replaygain_enable_cb(GtkWidget *widget, gpointer data) { (void)widget, (void)data; /* unused arguments */ flac_cfg.output.replaygain.enable = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(replaygain_enable)); gtk_widget_set_sensitive(replaygain_album_mode, flac_cfg.output.replaygain.enable); gtk_widget_set_sensitive(replaygain_preamp_hscale, flac_cfg.output.replaygain.enable); gtk_widget_set_sensitive(replaygain_hard_limit, flac_cfg.output.replaygain.enable); } static void replaygain_album_mode_cb(GtkWidget *widget, gpointer data) { (void)widget, (void)data; /* unused arguments */ flac_cfg.output.replaygain.album_mode = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(replaygain_album_mode)); } 
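/*
 * Editor's illustrative sketch -- not part of the original plugin source.  Every
 * "clicked" callback in this file follows the same three-step pattern: read the
 * toggle state, cache it in the global flac_cfg structure, then grey out the
 * widgets that depend on it.  The names below (example_enable_cb,
 * example_dependent) are hypothetical and exist only to show that pattern in
 * isolation.
 */
static GtkWidget *example_dependent; /* assumed to be created elsewhere */

static void example_enable_cb(GtkWidget *widget, gpointer data)
{
    gboolean enabled;
    (void)data; /* unused argument, as in the real callbacks */

    /* 1. read the current state of the toggle button that emitted "clicked" */
    enabled = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(widget));
    /* 2. a real callback would store 'enabled' in a flac_cfg field here */
    /* 3. widgets that only make sense while enabled are made (in)sensitive */
    gtk_widget_set_sensitive(example_dependent, enabled);
}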
static void replaygain_hard_limit_cb(GtkWidget *widget, gpointer data)
{
    (void)widget, (void)data; /* unused arguments */
    flac_cfg.output.replaygain.hard_limit = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(replaygain_hard_limit));
}

static void replaygain_preamp_cb(GtkWidget *widget, gpointer data)
{
    GString *gstring = g_string_new("");
    (void)widget, (void)data; /* unused arguments */

    flac_cfg.output.replaygain.preamp = (int) floor(GTK_ADJUSTMENT(replaygain_preamp)->value + 0.5);
    g_string_sprintf(gstring, "%i dB", flac_cfg.output.replaygain.preamp);
    gtk_label_set_text(GTK_LABEL(replaygain_preamp_label), _(gstring->str));
}

static void resolution_normal_dither_24_to_16_cb(GtkWidget *widget, gpointer data)
{
    (void)widget, (void)data; /* unused arguments */
    flac_cfg.output.resolution.normal.dither_24_to_16 = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(resolution_normal_dither_24_to_16));
}

static void resolution_replaygain_dither_cb(GtkWidget *widget, gpointer data)
{
    (void)widget, (void)data; /* unused arguments */
    flac_cfg.output.resolution.replaygain.dither = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(resolution_replaygain_dither));
    gtk_widget_set_sensitive(resolution_replaygain_noise_shaping_frame, flac_cfg.output.resolution.replaygain.dither);
}

static void resolution_replaygain_noise_shaping_cb(GtkWidget *widget, gpointer data)
{
    (void)widget, (void)data; /* unused arguments */
    flac_cfg.output.resolution.replaygain.noise_shaping =
        gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(resolution_replaygain_noise_shaping_radio_none))? 0 :
        gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(resolution_replaygain_noise_shaping_radio_low))? 1 :
        gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(resolution_replaygain_noise_shaping_radio_medium))? 2 :
        gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(resolution_replaygain_noise_shaping_radio_high))? 3 :
        0
    ;
}

static void resolution_replaygain_bps_out_cb(GtkWidget *widget, gpointer data)
{
    (void)widget, (void)data; /* unused arguments */
    flac_cfg.output.resolution.replaygain.bps_out =
        gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(resolution_replaygain_bps_out_radio_16bps))? 16 :
        gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(resolution_replaygain_bps_out_radio_24bps))?
            24 :
        16
    ;
}

static void proxy_use_cb(GtkWidget * w, gpointer data)
{
    gboolean use_proxy, use_proxy_auth;

    (void) w;
    (void) data;

    use_proxy = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_proxy_use));
    use_proxy_auth = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_proxy_auth_use));

    gtk_widget_set_sensitive(streaming_proxy_hbox, use_proxy);
    gtk_widget_set_sensitive(streaming_proxy_auth_use, use_proxy);
    gtk_widget_set_sensitive(streaming_proxy_auth_hbox, use_proxy && use_proxy_auth);
}

static void proxy_auth_use_cb(GtkWidget *w, gpointer data)
{
    gboolean use_proxy, use_proxy_auth;

    (void) w;
    (void) data;

    use_proxy = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_proxy_use));
    use_proxy_auth = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_proxy_auth_use));

    gtk_widget_set_sensitive(streaming_proxy_auth_hbox, use_proxy && use_proxy_auth);
}

static void streaming_save_dirbrowser_cb(gchar * dir)
{
    gtk_entry_set_text(GTK_ENTRY(streaming_save_entry), dir);
}

static void streaming_save_browse_cb(GtkWidget * w, gpointer data)
{
    (void) w;
    (void) data;
    if (!streaming_save_dirbrowser) {
        streaming_save_dirbrowser = xmms_create_dir_browser(_("Select the directory where you want to store the MPEG streams:"),
                                                            flac_cfg.stream.save_http_path,
                                                            GTK_SELECTION_SINGLE,
                                                            streaming_save_dirbrowser_cb);
        gtk_signal_connect(GTK_OBJECT(streaming_save_dirbrowser), "destroy",
                           GTK_SIGNAL_FUNC(gtk_widget_destroyed),
                           &streaming_save_dirbrowser);
        gtk_window_set_transient_for(GTK_WINDOW(streaming_save_dirbrowser), GTK_WINDOW(flac_configurewin));
        gtk_widget_show(streaming_save_dirbrowser);
    }
}

static void streaming_save_use_cb(GtkWidget * w, gpointer data)
{
    gboolean save_stream;

    (void) w;
    (void) data;

    save_stream = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(streaming_save_use));
    gtk_widget_set_sensitive(streaming_save_hbox, save_stream);
}

void FLAC_XMMS__configure(void)
{
    GtkWidget *title_frame, *title_tag_vbox, *title_tag_label;
    GtkWidget *replaygain_frame, *resolution_frame, *output_vbox, *resolution_normal_frame, *resolution_replaygain_frame;
    GtkWidget *replaygain_vbox, *resolution_hbox, *resolution_normal_vbox, *resolution_replaygain_vbox;
    GtkWidget *resolution_replaygain_noise_shaping_vbox;
    GtkWidget *resolution_replaygain_bps_out_vbox;
    GtkWidget *label, *hbox;
    GtkWidget *bbox, *ok, *cancel;
    GList *list;
    GtkWidget *streaming_vbox;
    GtkWidget *streaming_buf_frame, *streaming_buf_hbox;
    GtkWidget *streaming_size_box, *streaming_size_label, *streaming_size_spin;
    GtkWidget *streaming_pre_box, *streaming_pre_label, *streaming_pre_spin;
    GtkWidget *streaming_proxy_frame, *streaming_proxy_vbox;
    GtkWidget *streaming_proxy_port_label, *streaming_proxy_host_label;
    GtkWidget *streaming_save_frame, *streaming_save_vbox;
    GtkWidget *streaming_save_label, *streaming_save_browse;
#ifdef FLAC_ICECAST
    GtkWidget *streaming_cast_frame, *streaming_cast_vbox;
#endif
    char *temp;

    if (flac_configurewin != NULL) {
        gdk_window_raise(flac_configurewin->window);
        return;
    }

    flac_configurewin = gtk_window_new(GTK_WINDOW_DIALOG);
    gtk_signal_connect(GTK_OBJECT(flac_configurewin), "destroy", GTK_SIGNAL_FUNC(gtk_widget_destroyed), &flac_configurewin);
    gtk_signal_connect(GTK_OBJECT(flac_configurewin), "destroy", GTK_SIGNAL_FUNC(configure_destroy), &flac_configurewin);
    gtk_window_set_title(GTK_WINDOW(flac_configurewin), _("Flac Configuration"));
    gtk_window_set_policy(GTK_WINDOW(flac_configurewin), FALSE, FALSE, FALSE);
    gtk_container_border_width(GTK_CONTAINER(flac_configurewin), 10);

    vbox = gtk_vbox_new(FALSE, 10);
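    /*
     * Descriptive note added by the editor (not in the original source): the
     * configuration dialog built below is this vbox holding a GtkNotebook with
     * three pages -- "Title" (tag override and character-set conversion),
     * "Output" (ReplayGain and dither/resolution options) and "Streaming"
     * (buffering, proxy, save-to-disk and, when FLAC_ICECAST is defined,
     * SHOUT/Icecast options) -- followed by an hbutton box holding the Ok and
     * Cancel buttons.
     */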
gtk_container_add(GTK_CONTAINER(flac_configurewin), vbox); notebook = gtk_notebook_new(); gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); /* Title config.. */ title_frame = gtk_frame_new(_("Tag Handling")); gtk_container_border_width(GTK_CONTAINER(title_frame), 5); title_tag_vbox = gtk_vbox_new(FALSE, 10); gtk_container_border_width(GTK_CONTAINER(title_tag_vbox), 5); gtk_container_add(GTK_CONTAINER(title_frame), title_tag_vbox); /* Convert Char Set */ convert_char_set = gtk_check_button_new_with_label(_("Convert Character Set")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(convert_char_set), flac_cfg.title.convert_char_set); gtk_signal_connect(GTK_OBJECT(convert_char_set), "clicked", convert_char_set_cb, NULL); gtk_box_pack_start(GTK_BOX(title_tag_vbox), convert_char_set, FALSE, FALSE, 0); /* Combo boxes... */ hbox = gtk_hbox_new(FALSE,4); gtk_container_add(GTK_CONTAINER(title_tag_vbox),hbox); label = gtk_label_new(_("Convert character set from :")); gtk_box_pack_start(GTK_BOX(hbox),label,FALSE,FALSE,0); fileCharacterSetEntry = gtk_combo_new(); gtk_box_pack_start(GTK_BOX(hbox),fileCharacterSetEntry,TRUE,TRUE,0); label = gtk_label_new (_("to :")); gtk_box_pack_start(GTK_BOX(hbox),label,FALSE,FALSE,0); userCharacterSetEntry = gtk_combo_new(); gtk_box_pack_start(GTK_BOX(hbox),userCharacterSetEntry,TRUE,TRUE,0); gtk_entry_set_editable(GTK_ENTRY(GTK_COMBO(fileCharacterSetEntry)->entry),FALSE); gtk_entry_set_editable(GTK_ENTRY(GTK_COMBO(userCharacterSetEntry)->entry),FALSE); gtk_combo_set_value_in_list(GTK_COMBO(fileCharacterSetEntry),TRUE,FALSE); gtk_combo_set_value_in_list(GTK_COMBO(userCharacterSetEntry),TRUE,FALSE); list = Charset_Create_List(); gtk_combo_set_popdown_strings(GTK_COMBO(fileCharacterSetEntry),Charset_Create_List_UTF8_Only()); gtk_combo_set_popdown_strings(GTK_COMBO(userCharacterSetEntry),list); gtk_entry_set_text(GTK_ENTRY(GTK_COMBO(userCharacterSetEntry)->entry),Charset_Get_Title_From_Name(flac_cfg.title.user_char_set)); gtk_widget_set_sensitive(fileCharacterSetEntry, FALSE); gtk_widget_set_sensitive(userCharacterSetEntry, flac_cfg.title.convert_char_set); /* Override Tagging Format */ title_tag_override = gtk_check_button_new_with_label(_("Override generic titles")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(title_tag_override), flac_cfg.title.tag_override); gtk_signal_connect(GTK_OBJECT(title_tag_override), "clicked", title_tag_override_cb, NULL); gtk_box_pack_start(GTK_BOX(title_tag_vbox), title_tag_override, FALSE, FALSE, 0); title_tag_box = gtk_hbox_new(FALSE, 5); gtk_widget_set_sensitive(title_tag_box, flac_cfg.title.tag_override); gtk_box_pack_start(GTK_BOX(title_tag_vbox), title_tag_box, FALSE, FALSE, 0); title_tag_label = gtk_label_new(_("Title format:")); gtk_box_pack_start(GTK_BOX(title_tag_box), title_tag_label, FALSE, FALSE, 0); title_tag_entry = gtk_entry_new(); gtk_entry_set_text(GTK_ENTRY(title_tag_entry), flac_cfg.title.tag_format); gtk_box_pack_start(GTK_BOX(title_tag_box), title_tag_entry, TRUE, TRUE, 0); title_desc = xmms_titlestring_descriptions("pafFetnygc", 2); gtk_widget_set_sensitive(title_desc, flac_cfg.title.tag_override); gtk_box_pack_start(GTK_BOX(title_tag_vbox), title_desc, FALSE, FALSE, 0); gtk_notebook_append_page(GTK_NOTEBOOK(notebook), title_frame, gtk_label_new(_("Title"))); /* Output config.. 
*/ output_vbox = gtk_vbox_new(FALSE, 10); gtk_container_border_width(GTK_CONTAINER(output_vbox), 5); /* replaygain */ replaygain_frame = gtk_frame_new(_("ReplayGain")); gtk_container_border_width(GTK_CONTAINER(replaygain_frame), 5); gtk_box_pack_start(GTK_BOX(output_vbox), replaygain_frame, TRUE, TRUE, 0); replaygain_vbox = gtk_vbox_new(FALSE, 10); gtk_container_border_width(GTK_CONTAINER(replaygain_vbox), 5); gtk_container_add(GTK_CONTAINER(replaygain_frame), replaygain_vbox); replaygain_enable = gtk_check_button_new_with_label(_("Enable ReplayGain processing")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(replaygain_enable), flac_cfg.output.replaygain.enable); gtk_signal_connect(GTK_OBJECT(replaygain_enable), "clicked", replaygain_enable_cb, NULL); gtk_box_pack_start(GTK_BOX(replaygain_vbox), replaygain_enable, FALSE, FALSE, 0); replaygain_album_mode = gtk_check_button_new_with_label(_("Album mode")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(replaygain_album_mode), flac_cfg.output.replaygain.album_mode); gtk_signal_connect(GTK_OBJECT(replaygain_album_mode), "clicked", replaygain_album_mode_cb, NULL); gtk_box_pack_start(GTK_BOX(replaygain_vbox), replaygain_album_mode, FALSE, FALSE, 0); hbox = gtk_hbox_new(FALSE,3); gtk_container_add(GTK_CONTAINER(replaygain_vbox),hbox); label = gtk_label_new(_("Preamp:")); gtk_box_pack_start(GTK_BOX(hbox),label,FALSE,FALSE,0); replaygain_preamp = gtk_adjustment_new(flac_cfg.output.replaygain.preamp, -24.0, +24.0, 1.0, 6.0, 0.0); gtk_signal_connect(GTK_OBJECT(replaygain_preamp), "value-changed", replaygain_preamp_cb, NULL); replaygain_preamp_hscale = gtk_hscale_new(GTK_ADJUSTMENT(replaygain_preamp)); gtk_scale_set_draw_value(GTK_SCALE(replaygain_preamp_hscale), FALSE); gtk_box_pack_start(GTK_BOX(hbox),replaygain_preamp_hscale,TRUE,TRUE,0); replaygain_preamp_label = gtk_label_new(_("0 dB")); gtk_box_pack_start(GTK_BOX(hbox),replaygain_preamp_label,FALSE,FALSE,0); gtk_adjustment_value_changed(GTK_ADJUSTMENT(replaygain_preamp)); replaygain_hard_limit = gtk_check_button_new_with_label(_("6dB hard limiting")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(replaygain_hard_limit), flac_cfg.output.replaygain.hard_limit); gtk_signal_connect(GTK_OBJECT(replaygain_hard_limit), "clicked", replaygain_hard_limit_cb, NULL); gtk_box_pack_start(GTK_BOX(replaygain_vbox), replaygain_hard_limit, FALSE, FALSE, 0); replaygain_enable_cb(replaygain_enable, NULL); /* resolution */ resolution_frame = gtk_frame_new(_("Resolution")); gtk_container_border_width(GTK_CONTAINER(resolution_frame), 5); gtk_box_pack_start(GTK_BOX(output_vbox), resolution_frame, TRUE, TRUE, 0); resolution_hbox = gtk_hbox_new(FALSE, 10); gtk_container_border_width(GTK_CONTAINER(resolution_hbox), 5); gtk_container_add(GTK_CONTAINER(resolution_frame), resolution_hbox); resolution_normal_frame = gtk_frame_new(_("Without ReplayGain")); gtk_container_border_width(GTK_CONTAINER(resolution_normal_frame), 5); gtk_box_pack_start(GTK_BOX(resolution_hbox), resolution_normal_frame, TRUE, TRUE, 0); resolution_normal_vbox = gtk_vbox_new(FALSE, 10); gtk_container_border_width(GTK_CONTAINER(resolution_normal_vbox), 5); gtk_container_add(GTK_CONTAINER(resolution_normal_frame), resolution_normal_vbox); resolution_normal_dither_24_to_16 = gtk_check_button_new_with_label(_("Dither 24bps to 16bps")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(resolution_normal_dither_24_to_16), flac_cfg.output.resolution.normal.dither_24_to_16); gtk_signal_connect(GTK_OBJECT(resolution_normal_dither_24_to_16), "clicked", 
resolution_normal_dither_24_to_16_cb, NULL); gtk_box_pack_start(GTK_BOX(resolution_normal_vbox), resolution_normal_dither_24_to_16, FALSE, FALSE, 0); resolution_replaygain_frame = gtk_frame_new(_("With ReplayGain")); gtk_container_border_width(GTK_CONTAINER(resolution_replaygain_frame), 5); gtk_box_pack_start(GTK_BOX(resolution_hbox), resolution_replaygain_frame, TRUE, TRUE, 0); resolution_replaygain_vbox = gtk_vbox_new(FALSE, 10); gtk_container_border_width(GTK_CONTAINER(resolution_replaygain_vbox), 5); gtk_container_add(GTK_CONTAINER(resolution_replaygain_frame), resolution_replaygain_vbox); resolution_replaygain_dither = gtk_check_button_new_with_label(_("Enable dithering")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(resolution_replaygain_dither), flac_cfg.output.resolution.replaygain.dither); gtk_signal_connect(GTK_OBJECT(resolution_replaygain_dither), "clicked", resolution_replaygain_dither_cb, NULL); gtk_box_pack_start(GTK_BOX(resolution_replaygain_vbox), resolution_replaygain_dither, FALSE, FALSE, 0); hbox = gtk_hbox_new(FALSE, 10); gtk_container_border_width(GTK_CONTAINER(hbox), 5); gtk_box_pack_start(GTK_BOX(resolution_replaygain_vbox), hbox, TRUE, TRUE, 0); resolution_replaygain_noise_shaping_frame = gtk_frame_new(_("Noise shaping")); gtk_container_border_width(GTK_CONTAINER(resolution_replaygain_noise_shaping_frame), 5); gtk_box_pack_start(GTK_BOX(hbox), resolution_replaygain_noise_shaping_frame, TRUE, TRUE, 0); resolution_replaygain_noise_shaping_vbox = gtk_vbutton_box_new(); gtk_container_border_width(GTK_CONTAINER(resolution_replaygain_noise_shaping_vbox), 5); gtk_container_add(GTK_CONTAINER(resolution_replaygain_noise_shaping_frame), resolution_replaygain_noise_shaping_vbox); resolution_replaygain_noise_shaping_radio_none = gtk_radio_button_new_with_label(NULL, _("none")); if(flac_cfg.output.resolution.replaygain.noise_shaping == 0) gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(resolution_replaygain_noise_shaping_radio_none), TRUE); gtk_signal_connect(GTK_OBJECT(resolution_replaygain_noise_shaping_radio_none), "clicked", resolution_replaygain_noise_shaping_cb, NULL); gtk_container_add(GTK_CONTAINER(resolution_replaygain_noise_shaping_vbox), resolution_replaygain_noise_shaping_radio_none); resolution_replaygain_noise_shaping_radio_low = gtk_radio_button_new_with_label_from_widget(GTK_RADIO_BUTTON(resolution_replaygain_noise_shaping_radio_none), _("low")); if(flac_cfg.output.resolution.replaygain.noise_shaping == 1) gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(resolution_replaygain_noise_shaping_radio_low), TRUE); gtk_signal_connect(GTK_OBJECT(resolution_replaygain_noise_shaping_radio_low), "clicked", resolution_replaygain_noise_shaping_cb, NULL); gtk_container_add(GTK_CONTAINER(resolution_replaygain_noise_shaping_vbox), resolution_replaygain_noise_shaping_radio_low); resolution_replaygain_noise_shaping_radio_medium = gtk_radio_button_new_with_label_from_widget(GTK_RADIO_BUTTON(resolution_replaygain_noise_shaping_radio_none), _("medium")); if(flac_cfg.output.resolution.replaygain.noise_shaping == 2) gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(resolution_replaygain_noise_shaping_radio_medium), TRUE); gtk_signal_connect(GTK_OBJECT(resolution_replaygain_noise_shaping_radio_medium), "clicked", resolution_replaygain_noise_shaping_cb, NULL); gtk_container_add(GTK_CONTAINER(resolution_replaygain_noise_shaping_vbox), resolution_replaygain_noise_shaping_radio_medium); resolution_replaygain_noise_shaping_radio_high = 
gtk_radio_button_new_with_label_from_widget(GTK_RADIO_BUTTON(resolution_replaygain_noise_shaping_radio_none), _("high")); if(flac_cfg.output.resolution.replaygain.noise_shaping == 3) gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(resolution_replaygain_noise_shaping_radio_high), TRUE); gtk_signal_connect(GTK_OBJECT(resolution_replaygain_noise_shaping_radio_high), "clicked", resolution_replaygain_noise_shaping_cb, NULL); gtk_container_add(GTK_CONTAINER(resolution_replaygain_noise_shaping_vbox), resolution_replaygain_noise_shaping_radio_high); resolution_replaygain_bps_out_frame = gtk_frame_new(_("Dither to")); gtk_container_border_width(GTK_CONTAINER(resolution_replaygain_bps_out_frame), 5); gtk_box_pack_start(GTK_BOX(hbox), resolution_replaygain_bps_out_frame, FALSE, FALSE, 0); resolution_replaygain_bps_out_vbox = gtk_vbutton_box_new(); gtk_container_border_width(GTK_CONTAINER(resolution_replaygain_bps_out_vbox), 0); gtk_container_add(GTK_CONTAINER(resolution_replaygain_bps_out_frame), resolution_replaygain_bps_out_vbox); resolution_replaygain_bps_out_radio_16bps = gtk_radio_button_new_with_label(NULL, _("16 bps")); if(flac_cfg.output.resolution.replaygain.bps_out == 16) gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(resolution_replaygain_bps_out_radio_16bps), TRUE); gtk_signal_connect(GTK_OBJECT(resolution_replaygain_bps_out_radio_16bps), "clicked", resolution_replaygain_bps_out_cb, NULL); gtk_container_add(GTK_CONTAINER(resolution_replaygain_bps_out_vbox), resolution_replaygain_bps_out_radio_16bps); resolution_replaygain_bps_out_radio_24bps = gtk_radio_button_new_with_label_from_widget(GTK_RADIO_BUTTON(resolution_replaygain_bps_out_radio_16bps), _("24 bps")); if(flac_cfg.output.resolution.replaygain.bps_out == 24) gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(resolution_replaygain_bps_out_radio_24bps), TRUE); gtk_signal_connect(GTK_OBJECT(resolution_replaygain_bps_out_radio_24bps), "clicked", resolution_replaygain_bps_out_cb, NULL); gtk_container_add(GTK_CONTAINER(resolution_replaygain_bps_out_vbox), resolution_replaygain_bps_out_radio_24bps); resolution_replaygain_dither_cb(resolution_replaygain_dither, NULL); gtk_notebook_append_page(GTK_NOTEBOOK(notebook), output_vbox, gtk_label_new(_("Output"))); /* Streaming */ streaming_vbox = gtk_vbox_new(FALSE, 0); streaming_buf_frame = gtk_frame_new(_("Buffering:")); gtk_container_set_border_width(GTK_CONTAINER(streaming_buf_frame), 5); gtk_box_pack_start(GTK_BOX(streaming_vbox), streaming_buf_frame, FALSE, FALSE, 0); streaming_buf_hbox = gtk_hbox_new(TRUE, 5); gtk_container_set_border_width(GTK_CONTAINER(streaming_buf_hbox), 5); gtk_container_add(GTK_CONTAINER(streaming_buf_frame), streaming_buf_hbox); streaming_size_box = gtk_hbox_new(FALSE, 5); /*gtk_table_attach_defaults(GTK_TABLE(streaming_buf_table),streaming_size_box,0,1,0,1); */ gtk_box_pack_start(GTK_BOX(streaming_buf_hbox), streaming_size_box, TRUE, TRUE, 0); streaming_size_label = gtk_label_new(_("Buffer size (kb):")); gtk_box_pack_start(GTK_BOX(streaming_size_box), streaming_size_label, FALSE, FALSE, 0); streaming_size_adj = gtk_adjustment_new(flac_cfg.stream.http_buffer_size, 4, 4096, 4, 4, 4); streaming_size_spin = gtk_spin_button_new(GTK_ADJUSTMENT(streaming_size_adj), 8, 0); gtk_widget_set_usize(streaming_size_spin, 60, -1); gtk_box_pack_start(GTK_BOX(streaming_size_box), streaming_size_spin, FALSE, FALSE, 0); streaming_pre_box = gtk_hbox_new(FALSE, 5); /*gtk_table_attach_defaults(GTK_TABLE(streaming_buf_table),streaming_pre_box,1,2,0,1); */ 
gtk_box_pack_start(GTK_BOX(streaming_buf_hbox), streaming_pre_box, TRUE, TRUE, 0); streaming_pre_label = gtk_label_new(_("Pre-buffer (percent):")); gtk_box_pack_start(GTK_BOX(streaming_pre_box), streaming_pre_label, FALSE, FALSE, 0); streaming_pre_adj = gtk_adjustment_new(flac_cfg.stream.http_prebuffer, 0, 90, 1, 1, 1); streaming_pre_spin = gtk_spin_button_new(GTK_ADJUSTMENT(streaming_pre_adj), 1, 0); gtk_widget_set_usize(streaming_pre_spin, 60, -1); gtk_box_pack_start(GTK_BOX(streaming_pre_box), streaming_pre_spin, FALSE, FALSE, 0); /* * Proxy config. */ streaming_proxy_frame = gtk_frame_new(_("Proxy:")); gtk_container_set_border_width(GTK_CONTAINER(streaming_proxy_frame), 5); gtk_box_pack_start(GTK_BOX(streaming_vbox), streaming_proxy_frame, FALSE, FALSE, 0); streaming_proxy_vbox = gtk_vbox_new(FALSE, 5); gtk_container_set_border_width(GTK_CONTAINER(streaming_proxy_vbox), 5); gtk_container_add(GTK_CONTAINER(streaming_proxy_frame), streaming_proxy_vbox); streaming_proxy_use = gtk_check_button_new_with_label(_("Use proxy")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(streaming_proxy_use), flac_cfg.stream.use_proxy); gtk_signal_connect(GTK_OBJECT(streaming_proxy_use), "clicked", GTK_SIGNAL_FUNC(proxy_use_cb), NULL); gtk_box_pack_start(GTK_BOX(streaming_proxy_vbox), streaming_proxy_use, FALSE, FALSE, 0); streaming_proxy_hbox = gtk_hbox_new(FALSE, 5); gtk_widget_set_sensitive(streaming_proxy_hbox, flac_cfg.stream.use_proxy); gtk_box_pack_start(GTK_BOX(streaming_proxy_vbox), streaming_proxy_hbox, FALSE, FALSE, 0); streaming_proxy_host_label = gtk_label_new(_("Host:")); gtk_box_pack_start(GTK_BOX(streaming_proxy_hbox), streaming_proxy_host_label, FALSE, FALSE, 0); streaming_proxy_host_entry = gtk_entry_new(); gtk_entry_set_text(GTK_ENTRY(streaming_proxy_host_entry), flac_cfg.stream.proxy_host? 
flac_cfg.stream.proxy_host : ""); gtk_box_pack_start(GTK_BOX(streaming_proxy_hbox), streaming_proxy_host_entry, TRUE, TRUE, 0); streaming_proxy_port_label = gtk_label_new(_("Port:")); gtk_box_pack_start(GTK_BOX(streaming_proxy_hbox), streaming_proxy_port_label, FALSE, FALSE, 0); streaming_proxy_port_entry = gtk_entry_new(); gtk_widget_set_usize(streaming_proxy_port_entry, 50, -1); temp = g_strdup_printf("%d", flac_cfg.stream.proxy_port); gtk_entry_set_text(GTK_ENTRY(streaming_proxy_port_entry), temp); g_free(temp); gtk_box_pack_start(GTK_BOX(streaming_proxy_hbox), streaming_proxy_port_entry, FALSE, FALSE, 0); streaming_proxy_auth_use = gtk_check_button_new_with_label(_("Use authentication")); gtk_widget_set_sensitive(streaming_proxy_auth_use, flac_cfg.stream.use_proxy); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(streaming_proxy_auth_use), flac_cfg.stream.proxy_use_auth); gtk_signal_connect(GTK_OBJECT(streaming_proxy_auth_use), "clicked", GTK_SIGNAL_FUNC(proxy_auth_use_cb), NULL); gtk_box_pack_start(GTK_BOX(streaming_proxy_vbox), streaming_proxy_auth_use, FALSE, FALSE, 0); streaming_proxy_auth_hbox = gtk_hbox_new(FALSE, 5); gtk_widget_set_sensitive(streaming_proxy_auth_hbox, flac_cfg.stream.use_proxy && flac_cfg.stream.proxy_use_auth); gtk_box_pack_start(GTK_BOX(streaming_proxy_vbox), streaming_proxy_auth_hbox, FALSE, FALSE, 0); streaming_proxy_auth_user_label = gtk_label_new(_("Username:")); gtk_box_pack_start(GTK_BOX(streaming_proxy_auth_hbox), streaming_proxy_auth_user_label, FALSE, FALSE, 0); streaming_proxy_auth_user_entry = gtk_entry_new(); if(flac_cfg.stream.proxy_user) gtk_entry_set_text(GTK_ENTRY(streaming_proxy_auth_user_entry), flac_cfg.stream.proxy_user); gtk_box_pack_start(GTK_BOX(streaming_proxy_auth_hbox), streaming_proxy_auth_user_entry, TRUE, TRUE, 0); streaming_proxy_auth_pass_label = gtk_label_new(_("Password:")); gtk_box_pack_start(GTK_BOX(streaming_proxy_auth_hbox), streaming_proxy_auth_pass_label, FALSE, FALSE, 0); streaming_proxy_auth_pass_entry = gtk_entry_new(); if(flac_cfg.stream.proxy_pass) gtk_entry_set_text(GTK_ENTRY(streaming_proxy_auth_pass_entry), flac_cfg.stream.proxy_pass); gtk_entry_set_visibility(GTK_ENTRY(streaming_proxy_auth_pass_entry), FALSE); gtk_box_pack_start(GTK_BOX(streaming_proxy_auth_hbox), streaming_proxy_auth_pass_entry, TRUE, TRUE, 0); /* * Save to disk config. 
*/ streaming_save_frame = gtk_frame_new(_("Save stream to disk:")); gtk_container_set_border_width(GTK_CONTAINER(streaming_save_frame), 5); gtk_box_pack_start(GTK_BOX(streaming_vbox), streaming_save_frame, FALSE, FALSE, 0); streaming_save_vbox = gtk_vbox_new(FALSE, 5); gtk_container_set_border_width(GTK_CONTAINER(streaming_save_vbox), 5); gtk_container_add(GTK_CONTAINER(streaming_save_frame), streaming_save_vbox); streaming_save_use = gtk_check_button_new_with_label(_("Save stream to disk")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(streaming_save_use), flac_cfg.stream.save_http_stream); gtk_signal_connect(GTK_OBJECT(streaming_save_use), "clicked", GTK_SIGNAL_FUNC(streaming_save_use_cb), NULL); gtk_box_pack_start(GTK_BOX(streaming_save_vbox), streaming_save_use, FALSE, FALSE, 0); streaming_save_hbox = gtk_hbox_new(FALSE, 5); gtk_widget_set_sensitive(streaming_save_hbox, flac_cfg.stream.save_http_stream); gtk_box_pack_start(GTK_BOX(streaming_save_vbox), streaming_save_hbox, FALSE, FALSE, 0); streaming_save_label = gtk_label_new(_("Path:")); gtk_box_pack_start(GTK_BOX(streaming_save_hbox), streaming_save_label, FALSE, FALSE, 0); streaming_save_entry = gtk_entry_new(); gtk_entry_set_text(GTK_ENTRY(streaming_save_entry), flac_cfg.stream.save_http_path? flac_cfg.stream.save_http_path : ""); gtk_box_pack_start(GTK_BOX(streaming_save_hbox), streaming_save_entry, TRUE, TRUE, 0); streaming_save_browse = gtk_button_new_with_label(_("Browse")); gtk_signal_connect(GTK_OBJECT(streaming_save_browse), "clicked", GTK_SIGNAL_FUNC(streaming_save_browse_cb), NULL); gtk_box_pack_start(GTK_BOX(streaming_save_hbox), streaming_save_browse, FALSE, FALSE, 0); #ifdef FLAC_ICECAST streaming_cast_frame = gtk_frame_new(_("SHOUT/Icecast:")); gtk_container_set_border_width(GTK_CONTAINER(streaming_cast_frame), 5); gtk_box_pack_start(GTK_BOX(streaming_vbox), streaming_cast_frame, FALSE, FALSE, 0); streaming_cast_vbox = gtk_vbox_new(5, FALSE); gtk_container_add(GTK_CONTAINER(streaming_cast_frame), streaming_cast_vbox); streaming_cast_title = gtk_check_button_new_with_label(_("Enable SHOUT/Icecast title streaming")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(streaming_cast_title), flac_cfg.stream.cast_title_streaming); gtk_box_pack_start(GTK_BOX(streaming_cast_vbox), streaming_cast_title, FALSE, FALSE, 0); streaming_udp_title = gtk_check_button_new_with_label(_("Enable Icecast Metadata UDP Channel")); gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(streaming_udp_title), flac_cfg.stream.use_udp_channel); gtk_box_pack_start(GTK_BOX(streaming_cast_vbox), streaming_udp_title, FALSE, FALSE, 0); #endif gtk_notebook_append_page(GTK_NOTEBOOK(notebook), streaming_vbox, gtk_label_new(_("Streaming"))); /* Buttons */ bbox = gtk_hbutton_box_new(); gtk_button_box_set_layout(GTK_BUTTON_BOX(bbox), GTK_BUTTONBOX_END); gtk_button_box_set_spacing(GTK_BUTTON_BOX(bbox), 5); gtk_box_pack_start(GTK_BOX(vbox), bbox, FALSE, FALSE, 0); ok = gtk_button_new_with_label(_("Ok")); gtk_signal_connect(GTK_OBJECT(ok), "clicked", GTK_SIGNAL_FUNC(flac_configurewin_ok), NULL); GTK_WIDGET_SET_FLAGS(ok, GTK_CAN_DEFAULT); gtk_box_pack_start(GTK_BOX(bbox), ok, TRUE, TRUE, 0); gtk_widget_grab_default(ok); cancel = gtk_button_new_with_label(_("Cancel")); gtk_signal_connect_object(GTK_OBJECT(cancel), "clicked", GTK_SIGNAL_FUNC(gtk_widget_destroy), GTK_OBJECT(flac_configurewin)); GTK_WIDGET_SET_FLAGS(cancel, GTK_CAN_DEFAULT); gtk_box_pack_start(GTK_BOX(bbox), cancel, TRUE, TRUE, 0); gtk_widget_show_all(flac_configurewin); } void FLAC_XMMS__aboutbox(void) { 
    static GtkWidget *about_window;

    if (about_window)
        gdk_window_raise(about_window->window);

    about_window = xmms_show_message(
        _("About Flac Plugin"),
        _("Flac Plugin by Josh Coalson\n"
          "contributions by\n"
          "......\n"
          "......\n"
          "and\n"
          "Daisuke Shimamura\n"
          "Visit http://flac.sourceforge.net/"),
        _("Ok"), FALSE, NULL, NULL);
    gtk_signal_connect(GTK_OBJECT(about_window), "destroy",
                       GTK_SIGNAL_FUNC(gtk_widget_destroyed), &about_window);
}

/*
 * Get text of an Entry or a ComboBox
 */
static gchar *gtk_entry_get_text_1 (GtkWidget *widget)
{
    if (GTK_IS_COMBO(widget)) {
        return gtk_entry_get_text(GTK_ENTRY(GTK_COMBO(widget)->entry));
    }else if (GTK_IS_ENTRY(widget)) {
        return gtk_entry_get_text(GTK_ENTRY(widget));
    }else {
        return NULL;
    }
}
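/*
 * Editor's usage sketch -- not part of the original file.  gtk_entry_get_text_1()
 * above lets the preferences code read text from a plain GtkEntry and from a
 * GtkCombo's embedded entry through one helper.  The function below is
 * hypothetical and only demonstrates that both widget kinds can be passed to it.
 */
static void example_dump_widget_text(void)
{
    GtkWidget *entry = gtk_entry_new();
    GtkWidget *combo = gtk_combo_new();

    gtk_entry_set_text(GTK_ENTRY(entry), "plain entry");
    gtk_entry_set_text(GTK_ENTRY(GTK_COMBO(combo)->entry), "combo entry");

    /* both calls go through the same helper regardless of widget type */
    g_message("%s / %s", gtk_entry_get_text_1(entry), gtk_entry_get_text_1(combo));
}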
svn2github/audacity
lib-src/libflac/src/plugin_xmms/configure.c
C
gpl-2.0
39,590
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_trans.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_alloc.h" #include "xfs_error.h" #include "xfs_rw.h" #include "xfs_iomap.h" #include "xfs_vnodeops.h" #include "xfs_trace.h" #include "xfs_bmap.h" #include <linux/gfp.h> #include <linux/mpage.h> #include <linux/pagevec.h> #include <linux/writeback.h> void xfs_count_page_state( struct page *page, int *delalloc, int *unwritten) { struct buffer_head *bh, *head; *delalloc = *unwritten = 0; bh = head = page_buffers(page); do { if (buffer_unwritten(bh)) (*unwritten) = 1; else if (buffer_delay(bh)) (*delalloc) = 1; } while ((bh = bh->b_this_page) != head); } STATIC struct block_device * xfs_find_bdev_for_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; if (XFS_IS_REALTIME_INODE(ip)) return mp->m_rtdev_targp->bt_bdev; else return mp->m_ddev_targp->bt_bdev; } /* * We're now finished for good with this ioend structure. * Update the page state via the associated buffer_heads, * release holds on the inode and bio, and finally free * up memory. Do not use the ioend after this. */ STATIC void xfs_destroy_ioend( xfs_ioend_t *ioend) { struct buffer_head *bh, *next; for (bh = ioend->io_buffer_head; bh; bh = next) { next = bh->b_private; bh->b_end_io(bh, !ioend->io_error); } if (ioend->io_iocb) { if (ioend->io_isasync) { aio_complete(ioend->io_iocb, ioend->io_error ? ioend->io_error : ioend->io_result, 0); } inode_dio_done(ioend->io_inode); } mempool_free(ioend, xfs_ioend_pool); } /* * If the end of the current ioend is beyond the current EOF, * return the new EOF value, otherwise zero. */ STATIC xfs_fsize_t xfs_ioend_new_eof( xfs_ioend_t *ioend) { xfs_inode_t *ip = XFS_I(ioend->io_inode); xfs_fsize_t isize; xfs_fsize_t bsize; bsize = ioend->io_offset + ioend->io_size; isize = MAX(ip->i_size, ip->i_new_size); isize = MIN(isize, bsize); return isize > ip->i_d.di_size ? isize : 0; } /* * Fast and loose check if this write could update the on-disk inode size. */ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend) { return ioend->io_offset + ioend->io_size > XFS_I(ioend->io_inode)->i_d.di_size; } /* * Update on-disk file size now that data has been written to disk. The * current in-memory file size is i_size. If a write is beyond eof i_new_size * will be the intended file size until i_size is updated. If this write does * not extend all the way to the valid file size then restrict this update to * the end of the write. * * This function does not block as blocking on the inode lock in IO completion * can lead to IO completion order dependency deadlocks.. 
If it can't get the * inode ilock it will return EAGAIN. Callers must handle this. */ STATIC int xfs_setfilesize( xfs_ioend_t *ioend) { xfs_inode_t *ip = XFS_I(ioend->io_inode); xfs_fsize_t isize; if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) return EAGAIN; isize = xfs_ioend_new_eof(ioend); if (isize) { trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); ip->i_d.di_size = isize; xfs_mark_inode_dirty(ip); } xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } /* * Schedule IO completion handling on the final put of an ioend. * * If there is no work to do we might as well call it a day and free the * ioend right now. */ STATIC void xfs_finish_ioend( struct xfs_ioend *ioend) { if (atomic_dec_and_test(&ioend->io_remaining)) { if (ioend->io_type == IO_UNWRITTEN) queue_work(xfsconvertd_workqueue, &ioend->io_work); else if (xfs_ioend_is_append(ioend)) queue_work(xfsdatad_workqueue, &ioend->io_work); else xfs_destroy_ioend(ioend); } } /* * IO write completion. */ STATIC void xfs_end_io( struct work_struct *work) { xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work); struct xfs_inode *ip = XFS_I(ioend->io_inode); int error = 0; if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { error = -EIO; goto done; } if (ioend->io_error) goto done; /* * For unwritten extents we need to issue transactions to convert a * range to normal written extens after the data I/O has finished. */ if (ioend->io_type == IO_UNWRITTEN) { error = xfs_iomap_write_unwritten(ip, ioend->io_offset, ioend->io_size); if (error) { ioend->io_error = -error; goto done; } } /* * We might have to update the on-disk file size after extending * writes. */ error = xfs_setfilesize(ioend); ASSERT(!error || error == EAGAIN); done: /* * If we didn't complete processing of the ioend, requeue it to the * tail of the workqueue for another attempt later. Otherwise destroy * it. */ if (error == EAGAIN) { atomic_inc(&ioend->io_remaining); xfs_finish_ioend(ioend); /* ensure we don't spin on blocked ioends */ delay(1); } else { xfs_destroy_ioend(ioend); } } /* * Call IO completion handling in caller context on the final put of an ioend. */ STATIC void xfs_finish_ioend_sync( struct xfs_ioend *ioend) { if (atomic_dec_and_test(&ioend->io_remaining)) xfs_end_io(&ioend->io_work); } /* * Allocate and initialise an IO completion structure. * We need to track unwritten extent write completion here initially. * We'll need to extend this for updating the ondisk inode size later * (vs. incore size). */ STATIC xfs_ioend_t * xfs_alloc_ioend( struct inode *inode, unsigned int type) { xfs_ioend_t *ioend; ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS); /* * Set the count to 1 initially, which will prevent an I/O * completion callback from happening before we have started * all the I/O from calling the completion routine too early. 
*/ atomic_set(&ioend->io_remaining, 1); ioend->io_isasync = 0; ioend->io_error = 0; ioend->io_list = NULL; ioend->io_type = type; ioend->io_inode = inode; ioend->io_buffer_head = NULL; ioend->io_buffer_tail = NULL; ioend->io_offset = 0; ioend->io_size = 0; ioend->io_iocb = NULL; ioend->io_result = 0; INIT_WORK(&ioend->io_work, xfs_end_io); return ioend; } STATIC int xfs_map_blocks( struct inode *inode, loff_t offset, struct xfs_bmbt_irec *imap, int type, int nonblocking) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; ssize_t count = 1 << inode->i_blkbits; xfs_fileoff_t offset_fsb, end_fsb; int error = 0; int bmapi_flags = XFS_BMAPI_ENTIRE; int nimaps = 1; if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); if (type == IO_UNWRITTEN) bmapi_flags |= XFS_BMAPI_IGSTATE; if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { if (nonblocking) return -XFS_ERROR(EAGAIN); xfs_ilock(ip, XFS_ILOCK_SHARED); } ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || (ip->i_df.if_flags & XFS_IFEXTENTS)); ASSERT(offset <= mp->m_maxioffset); if (offset + count > mp->m_maxioffset) count = mp->m_maxioffset - offset; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); offset_fsb = XFS_B_TO_FSBT(mp, offset); error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, imap, &nimaps, bmapi_flags); xfs_iunlock(ip, XFS_ILOCK_SHARED); if (error) return -XFS_ERROR(error); if (type == IO_DELALLOC && (!nimaps || isnullstartblock(imap->br_startblock))) { error = xfs_iomap_write_allocate(ip, offset, count, imap); if (!error) trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); return -XFS_ERROR(error); } #ifdef DEBUG if (type == IO_UNWRITTEN) { ASSERT(nimaps); ASSERT(imap->br_startblock != HOLESTARTBLOCK); ASSERT(imap->br_startblock != DELAYSTARTBLOCK); } #endif if (nimaps) trace_xfs_map_blocks_found(ip, offset, count, type, imap); return 0; } STATIC int xfs_imap_valid( struct inode *inode, struct xfs_bmbt_irec *imap, xfs_off_t offset) { offset >>= inode->i_blkbits; return offset >= imap->br_startoff && offset < imap->br_startoff + imap->br_blockcount; } /* * BIO completion handler for buffered IO. */ STATIC void xfs_end_bio( struct bio *bio, int error) { xfs_ioend_t *ioend = bio->bi_private; ASSERT(atomic_read(&bio->bi_cnt) >= 1); ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; /* Toss bio and pass work off to an xfsdatad thread */ bio->bi_private = NULL; bio->bi_end_io = NULL; bio_put(bio); xfs_finish_ioend(ioend); } STATIC void xfs_submit_ioend_bio( struct writeback_control *wbc, xfs_ioend_t *ioend, struct bio *bio) { atomic_inc(&ioend->io_remaining); bio->bi_private = ioend; bio->bi_end_io = xfs_end_bio; /* * If the I/O is beyond EOF we mark the inode dirty immediately * but don't update the inode size until I/O completion. */ if (xfs_ioend_new_eof(ioend)) xfs_mark_inode_dirty(XFS_I(ioend->io_inode)); submit_bio(wbc->sync_mode == WB_SYNC_ALL ? 
WRITE_SYNC : WRITE, bio); } STATIC struct bio * xfs_alloc_ioend_bio( struct buffer_head *bh) { int nvecs = bio_get_nr_vecs(bh->b_bdev); struct bio *bio = bio_alloc(GFP_NOIO, nvecs); ASSERT(bio->bi_private == NULL); bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; return bio; } STATIC void xfs_start_buffer_writeback( struct buffer_head *bh) { ASSERT(buffer_mapped(bh)); ASSERT(buffer_locked(bh)); ASSERT(!buffer_delay(bh)); ASSERT(!buffer_unwritten(bh)); mark_buffer_async_write(bh); set_buffer_uptodate(bh); clear_buffer_dirty(bh); } STATIC void xfs_start_page_writeback( struct page *page, int clear_dirty, int buffers) { ASSERT(PageLocked(page)); ASSERT(!PageWriteback(page)); if (clear_dirty) clear_page_dirty_for_io(page); set_page_writeback(page); unlock_page(page); /* If no buffers on the page are to be written, finish it here */ if (!buffers) end_page_writeback(page); } static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) { return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); } /* * Submit all of the bios for all of the ioends we have saved up, covering the * initial writepage page and also any probed pages. * * Because we may have multiple ioends spanning a page, we need to start * writeback on all the buffers before we submit them for I/O. If we mark the * buffers as we got, then we can end up with a page that only has buffers * marked async write and I/O complete on can occur before we mark the other * buffers async write. * * The end result of this is that we trip a bug in end_page_writeback() because * we call it twice for the one page as the code in end_buffer_async_write() * assumes that all buffers on the page are started at the same time. * * The fix is two passes across the ioend list - one to start writeback on the * buffer_heads, and then submit them for I/O on the second pass. */ STATIC void xfs_submit_ioend( struct writeback_control *wbc, xfs_ioend_t *ioend) { xfs_ioend_t *head = ioend; xfs_ioend_t *next; struct buffer_head *bh; struct bio *bio; sector_t lastblock = 0; /* Pass 1 - start writeback */ do { next = ioend->io_list; for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) xfs_start_buffer_writeback(bh); } while ((ioend = next) != NULL); /* Pass 2 - submit I/O */ ioend = head; do { next = ioend->io_list; bio = NULL; for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { if (!bio) { retry: bio = xfs_alloc_ioend_bio(bh); } else if (bh->b_blocknr != lastblock + 1) { xfs_submit_ioend_bio(wbc, ioend, bio); goto retry; } if (bio_add_buffer(bio, bh) != bh->b_size) { xfs_submit_ioend_bio(wbc, ioend, bio); goto retry; } lastblock = bh->b_blocknr; } if (bio) xfs_submit_ioend_bio(wbc, ioend, bio); xfs_finish_ioend(ioend); } while ((ioend = next) != NULL); } /* * Cancel submission of all buffer_heads so far in this endio. * Toss the endio too. Only ever called for the initial page * in a writepage request, so only ever one page. */ STATIC void xfs_cancel_ioend( xfs_ioend_t *ioend) { xfs_ioend_t *next; struct buffer_head *bh, *next_bh; do { next = ioend->io_list; bh = ioend->io_buffer_head; do { next_bh = bh->b_private; clear_buffer_async_write(bh); unlock_buffer(bh); } while ((bh = next_bh) != NULL); mempool_free(ioend, xfs_ioend_pool); } while ((ioend = next) != NULL); } /* * Test to see if we've been building up a completion structure for * earlier buffers -- if so, we try to append to this ioend if we * can, otherwise we finish off any current ioend and start another. 
* Return true if we've finished the given ioend. */ STATIC void xfs_add_to_ioend( struct inode *inode, struct buffer_head *bh, xfs_off_t offset, unsigned int type, xfs_ioend_t **result, int need_ioend) { xfs_ioend_t *ioend = *result; if (!ioend || need_ioend || type != ioend->io_type) { xfs_ioend_t *previous = *result; ioend = xfs_alloc_ioend(inode, type); ioend->io_offset = offset; ioend->io_buffer_head = bh; ioend->io_buffer_tail = bh; if (previous) previous->io_list = ioend; *result = ioend; } else { ioend->io_buffer_tail->b_private = bh; ioend->io_buffer_tail = bh; } bh->b_private = NULL; ioend->io_size += bh->b_size; } STATIC void xfs_map_buffer( struct inode *inode, struct buffer_head *bh, struct xfs_bmbt_irec *imap, xfs_off_t offset) { sector_t bn; struct xfs_mount *m = XFS_I(inode)->i_mount; xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff); xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock); ASSERT(imap->br_startblock != HOLESTARTBLOCK); ASSERT(imap->br_startblock != DELAYSTARTBLOCK); bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) + ((offset - iomap_offset) >> inode->i_blkbits); ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode))); bh->b_blocknr = bn; set_buffer_mapped(bh); } STATIC void xfs_map_at_offset( struct inode *inode, struct buffer_head *bh, struct xfs_bmbt_irec *imap, xfs_off_t offset) { ASSERT(imap->br_startblock != HOLESTARTBLOCK); ASSERT(imap->br_startblock != DELAYSTARTBLOCK); xfs_map_buffer(inode, bh, imap, offset); set_buffer_mapped(bh); clear_buffer_delay(bh); clear_buffer_unwritten(bh); } /* * Test if a given page is suitable for writing as part of an unwritten * or delayed allocate extent. */ STATIC int xfs_is_delayed_page( struct page *page, unsigned int type) { if (PageWriteback(page)) return 0; if (page->mapping && page_has_buffers(page)) { struct buffer_head *bh, *head; int acceptable = 0; bh = head = page_buffers(page); do { if (buffer_unwritten(bh)) acceptable = (type == IO_UNWRITTEN); else if (buffer_delay(bh)) acceptable = (type == IO_DELALLOC); else if (buffer_dirty(bh) && buffer_mapped(bh)) acceptable = (type == IO_OVERWRITE); else break; } while ((bh = bh->b_this_page) != head); if (acceptable) return 1; } return 0; } /* * Allocate & map buffers for page given the extent map. Write it out. * except for the original page of a writepage, this is called on * delalloc/unwritten pages only, for the original page it is possible * that the page has no mapping at all. */ STATIC int xfs_convert_page( struct inode *inode, struct page *page, loff_t tindex, struct xfs_bmbt_irec *imap, xfs_ioend_t **ioendp, struct writeback_control *wbc) { struct buffer_head *bh, *head; xfs_off_t end_offset; unsigned long p_offset; unsigned int type; int len, page_dirty; int count = 0, done = 0, uptodate = 1; xfs_off_t offset = page_offset(page); if (page->index != tindex) goto fail; if (!trylock_page(page)) goto fail; if (PageWriteback(page)) goto fail_unlock_page; if (page->mapping != inode->i_mapping) goto fail_unlock_page; if (!xfs_is_delayed_page(page, (*ioendp)->io_type)) goto fail_unlock_page; /* * page_dirty is initially a count of buffers on the page before * EOF and is decremented as we move each into a cleanable state. * * Derivation: * * End offset is the highest offset that this page should represent. * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1)) * will evaluate non-zero and be less than PAGE_CACHE_SIZE and * hence give us the correct page_dirty count. 
On any other page, * it will be zero and in that case we need page_dirty to be the * count of buffers on the page. */ end_offset = min_t(unsigned long long, (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, i_size_read(inode)); len = 1 << inode->i_blkbits; p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), PAGE_CACHE_SIZE); p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; page_dirty = p_offset / len; bh = head = page_buffers(page); do { if (offset >= end_offset) break; if (!buffer_uptodate(bh)) uptodate = 0; if (!(PageUptodate(page) || buffer_uptodate(bh))) { done = 1; continue; } if (buffer_unwritten(bh) || buffer_delay(bh) || buffer_mapped(bh)) { if (buffer_unwritten(bh)) type = IO_UNWRITTEN; else if (buffer_delay(bh)) type = IO_DELALLOC; else type = IO_OVERWRITE; if (!xfs_imap_valid(inode, imap, offset)) { done = 1; continue; } lock_buffer(bh); if (type != IO_OVERWRITE) xfs_map_at_offset(inode, bh, imap, offset); xfs_add_to_ioend(inode, bh, offset, type, ioendp, done); page_dirty--; count++; } else { done = 1; } } while (offset += len, (bh = bh->b_this_page) != head); if (uptodate && bh == head) SetPageUptodate(page); if (count) { if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) done = 1; } xfs_start_page_writeback(page, !page_dirty, count); return done; fail_unlock_page: unlock_page(page); fail: return 1; } /* * Convert & write out a cluster of pages in the same extent as defined * by mp and following the start page. */ STATIC void xfs_cluster_write( struct inode *inode, pgoff_t tindex, struct xfs_bmbt_irec *imap, xfs_ioend_t **ioendp, struct writeback_control *wbc, pgoff_t tlast) { struct pagevec pvec; int done = 0, i; pagevec_init(&pvec, 0); while (!done && tindex <= tlast) { unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) break; for (i = 0; i < pagevec_count(&pvec); i++) { done = xfs_convert_page(inode, pvec.pages[i], tindex++, imap, ioendp, wbc); if (done) break; } pagevec_release(&pvec); cond_resched(); } } STATIC void xfs_vm_invalidatepage( struct page *page, unsigned long offset) { trace_xfs_invalidatepage(page->mapping->host, page, offset); block_invalidatepage(page, offset); } /* * If the page has delalloc buffers on it, we need to punch them out before we * invalidate the page. If we don't, we leave a stale delalloc mapping on the * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read * is done on that same region - the delalloc extent is returned when none is * supposed to be there. * * We prevent this by truncating away the delalloc regions on the page before * invalidating it. Because they are delalloc, we can do this without needing a * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this * truncation without a transaction as there is no space left for block * reservation (typically why we see a ENOSPC in writeback). * * This is not a performance critical path, so for now just do the punching a * buffer head at a time. 
*/ STATIC void xfs_aops_discard_page( struct page *page) { struct inode *inode = page->mapping->host; struct xfs_inode *ip = XFS_I(inode); struct buffer_head *bh, *head; loff_t offset = page_offset(page); if (!xfs_is_delayed_page(page, IO_DELALLOC)) goto out_invalidate; if (XFS_FORCED_SHUTDOWN(ip->i_mount)) goto out_invalidate; xfs_alert(ip->i_mount, "page discard on page %p, inode 0x%llx, offset %llu.", page, ip->i_ino, offset); xfs_ilock(ip, XFS_ILOCK_EXCL); bh = head = page_buffers(page); do { int error; xfs_fileoff_t start_fsb; if (!buffer_delay(bh)) goto next_buffer; start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1); if (error) { /* something screwed, just bail */ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_alert(ip->i_mount, "page discard unable to remove delalloc mapping."); } break; } next_buffer: offset += 1 << inode->i_blkbits; } while ((bh = bh->b_this_page) != head); xfs_iunlock(ip, XFS_ILOCK_EXCL); out_invalidate: xfs_vm_invalidatepage(page, 0); return; } /* * Write out a dirty page. * * For delalloc space on the page we need to allocate space and flush it. * For unwritten space on the page we need to start the conversion to * regular allocated space. * For any other dirty buffer heads on the page we should flush them. */ STATIC int xfs_vm_writepage( struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct buffer_head *bh, *head; struct xfs_bmbt_irec imap; xfs_ioend_t *ioend = NULL, *iohead = NULL; loff_t offset; unsigned int type; __uint64_t end_offset; pgoff_t end_index, last_index; ssize_t len; int err, imap_valid = 0, uptodate = 1; int count = 0; int nonblocking = 0; trace_xfs_writepage(inode, page, 0); ASSERT(page_has_buffers(page)); /* * Refuse to write the page out if we are called from reclaim context. * * This avoids stack overflows when called from deeply used stacks in * random callers for direct reclaim or memcg reclaim. We explicitly * allow reclaim from kswapd as the stack usage there is relatively low. * * This should never happen except in the case of a VM regression so * warn about it. */ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)) goto redirty; /* * Given that we do not allow direct reclaim to call us, we should * never be called while in a filesystem transaction. */ if (WARN_ON(current->flags & PF_FSTRANS)) goto redirty; /* Is this page beyond the end of the file? */ offset = i_size_read(inode); end_index = offset >> PAGE_CACHE_SHIFT; last_index = (offset - 1) >> PAGE_CACHE_SHIFT; if (page->index >= end_index) { if ((page->index >= end_index + 1) || !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { unlock_page(page); return 0; } } end_offset = min_t(unsigned long long, (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset); len = 1 << inode->i_blkbits; bh = head = page_buffers(page); offset = page_offset(page); type = IO_OVERWRITE; if (wbc->sync_mode == WB_SYNC_NONE) nonblocking = 1; do { int new_ioend = 0; if (offset >= end_offset) break; if (!buffer_uptodate(bh)) uptodate = 0; /* * set_page_dirty dirties all buffers in a page, independent * of their state. The dirty state however is entirely * meaningless for holes (!mapped && uptodate), so skip * buffers covering holes here. 
*/ if (!buffer_mapped(bh) && buffer_uptodate(bh)) { imap_valid = 0; continue; } if (buffer_unwritten(bh)) { if (type != IO_UNWRITTEN) { type = IO_UNWRITTEN; imap_valid = 0; } } else if (buffer_delay(bh)) { if (type != IO_DELALLOC) { type = IO_DELALLOC; imap_valid = 0; } } else if (buffer_uptodate(bh)) { if (type != IO_OVERWRITE) { type = IO_OVERWRITE; imap_valid = 0; } } else { if (PageUptodate(page)) { ASSERT(buffer_mapped(bh)); imap_valid = 0; } continue; } if (imap_valid) imap_valid = xfs_imap_valid(inode, &imap, offset); if (!imap_valid) { /* * If we didn't have a valid mapping then we need to * put the new mapping into a separate ioend structure. * This ensures non-contiguous extents always have * separate ioends, which is particularly important * for unwritten extent conversion at I/O completion * time. */ new_ioend = 1; err = xfs_map_blocks(inode, offset, &imap, type, nonblocking); if (err) goto error; imap_valid = xfs_imap_valid(inode, &imap, offset); } if (imap_valid) { lock_buffer(bh); if (type != IO_OVERWRITE) xfs_map_at_offset(inode, bh, &imap, offset); xfs_add_to_ioend(inode, bh, offset, type, &ioend, new_ioend); count++; } if (!iohead) iohead = ioend; } while (offset += len, ((bh = bh->b_this_page) != head)); if (uptodate && bh == head) SetPageUptodate(page); xfs_start_page_writeback(page, 1, count); if (ioend && imap_valid) { xfs_off_t end_index; end_index = imap.br_startoff + imap.br_blockcount; /* to bytes */ end_index <<= inode->i_blkbits; /* to pages */ end_index = (end_index - 1) >> PAGE_CACHE_SHIFT; /* check against file size */ if (end_index > last_index) end_index = last_index; xfs_cluster_write(inode, page->index + 1, &imap, &ioend, wbc, end_index); } if (iohead) xfs_submit_ioend(wbc, iohead); return 0; error: if (iohead) xfs_cancel_ioend(iohead); if (err == -EAGAIN) goto redirty; xfs_aops_discard_page(page); ClearPageUptodate(page); unlock_page(page); return err; redirty: redirty_page_for_writepage(wbc, page); unlock_page(page); return 0; } STATIC int xfs_vm_writepages( struct address_space *mapping, struct writeback_control *wbc) { xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); return generic_writepages(mapping, wbc); } /* * Called to move a page into cleanable state - and from there * to be released. The page should already be clean. We always * have buffer heads in this call. * * Returns 1 if the page is ok to release, 0 otherwise. 
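 *
 * Writeback should already have converted any delalloc or unwritten
 * buffers on the page, which is why finding either state below is
 * treated as a warning condition and the release is refused.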
*/ STATIC int xfs_vm_releasepage( struct page *page, gfp_t gfp_mask) { int delalloc, unwritten; trace_xfs_releasepage(page->mapping->host, page, 0); xfs_count_page_state(page, &delalloc, &unwritten); if (WARN_ON(delalloc)) return 0; if (WARN_ON(unwritten)) return 0; return try_to_free_buffers(page); } STATIC int __xfs_get_blocks( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create, int direct) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb, end_fsb; int error = 0; int lockmode = 0; struct xfs_bmbt_irec imap; int nimaps = 1; xfs_off_t offset; ssize_t size; int new = 0; if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); offset = (xfs_off_t)iblock << inode->i_blkbits; ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); size = bh_result->b_size; if (!create && direct && offset >= i_size_read(inode)) return 0; if (create) { lockmode = XFS_ILOCK_EXCL; xfs_ilock(ip, lockmode); } else { lockmode = xfs_ilock_map_shared(ip); } ASSERT(offset <= mp->m_maxioffset); if (offset + size > mp->m_maxioffset) size = mp->m_maxioffset - offset; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); offset_fsb = XFS_B_TO_FSBT(mp, offset); error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, &nimaps, XFS_BMAPI_ENTIRE); if (error) goto out_unlock; if (create && (!nimaps || (imap.br_startblock == HOLESTARTBLOCK || imap.br_startblock == DELAYSTARTBLOCK))) { if (direct) { error = xfs_iomap_write_direct(ip, offset, size, &imap, nimaps); } else { error = xfs_iomap_write_delay(ip, offset, size, &imap); } if (error) goto out_unlock; trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap); } else if (nimaps) { trace_xfs_get_blocks_found(ip, offset, size, 0, &imap); } else { trace_xfs_get_blocks_notfound(ip, offset, size); goto out_unlock; } xfs_iunlock(ip, lockmode); if (imap.br_startblock != HOLESTARTBLOCK && imap.br_startblock != DELAYSTARTBLOCK) { /* * For unwritten extents do not report a disk address on * the read case (treat as if we're reading into a hole). */ if (create || !ISUNWRITTEN(&imap)) xfs_map_buffer(inode, bh_result, &imap, offset); if (create && ISUNWRITTEN(&imap)) { if (direct) bh_result->b_private = inode; set_buffer_unwritten(bh_result); } } /* * If this is a realtime file, data may be on a different device. * to that pointed to from the buffer_head b_bdev currently. */ bh_result->b_bdev = xfs_find_bdev_for_inode(inode); /* * If we previously allocated a block out beyond eof and we are now * coming back to use it then we will need to flag it as new even if it * has a disk address. * * With sub-block writes into unwritten extents we also need to mark * the buffer as new so that the unwritten parts of the buffer gets * correctly zeroed. */ if (create && ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || (offset >= i_size_read(inode)) || (new || ISUNWRITTEN(&imap)))) set_buffer_new(bh_result); if (imap.br_startblock == DELAYSTARTBLOCK) { BUG_ON(direct); if (create) { set_buffer_uptodate(bh_result); set_buffer_mapped(bh_result); set_buffer_delay(bh_result); } } /* * If this is O_DIRECT or the mpage code calling tell them how large * the mapping is, so that we can avoid repeated get_blocks calls. 
*/ if (direct || size > (1 << inode->i_blkbits)) { xfs_off_t mapping_size; mapping_size = imap.br_startoff + imap.br_blockcount - iblock; mapping_size <<= inode->i_blkbits; ASSERT(mapping_size > 0); if (mapping_size > size) mapping_size = size; if (mapping_size > LONG_MAX) mapping_size = LONG_MAX; bh_result->b_size = mapping_size; } return 0; out_unlock: xfs_iunlock(ip, lockmode); return -error; } int xfs_get_blocks( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { return __xfs_get_blocks(inode, iblock, bh_result, create, 0); } STATIC int xfs_get_blocks_direct( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { return __xfs_get_blocks(inode, iblock, bh_result, create, 1); } /* * Complete a direct I/O write request. * * If the private argument is non-NULL __xfs_get_blocks signals us that we * need to issue a transaction to convert the range from unwritten to written * extents. In case this is regular synchronous I/O we just call xfs_end_io * to do this and we are done. But in case this was a successful AIO * request this handler is called from interrupt context, from which we * can't start transactions. In that case offload the I/O completion to * the workqueues we also use for buffered I/O completion. */ STATIC void xfs_end_io_direct_write( struct kiocb *iocb, loff_t offset, ssize_t size, void *private, int ret, bool is_async) { struct xfs_ioend *ioend = iocb->private; /* * blockdev_direct_IO can return an error even after the I/O * completion handler was called. Thus we need to protect * against double-freeing. */ iocb->private = NULL; ioend->io_offset = offset; ioend->io_size = size; ioend->io_iocb = iocb; ioend->io_result = ret; if (private && size > 0) ioend->io_type = IO_UNWRITTEN; if (is_async) { ioend->io_isasync = 1; xfs_finish_ioend(ioend); } else { xfs_finish_ioend_sync(ioend); } } STATIC ssize_t xfs_vm_direct_IO( int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct inode *inode = iocb->ki_filp->f_mapping->host; struct block_device *bdev = xfs_find_bdev_for_inode(inode); ssize_t ret; if (rw & WRITE) { iocb->private = xfs_alloc_ioend(inode, IO_DIRECT); ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, xfs_get_blocks_direct, xfs_end_io_direct_write, NULL, 0); if (ret != -EIOCBQUEUED && iocb->private) xfs_destroy_ioend(iocb->private); } else { ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, xfs_get_blocks_direct, NULL, NULL, 0); } return ret; } STATIC void xfs_vm_write_failed( struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { /* * punch out the delalloc blocks we have already allocated. We * don't call xfs_setattr() to do this as we may be in the * middle of a multi-iovec write and so the vfs inode->i_size * will not match the xfs ip->i_size and so it will zero too * much. Hence we jus truncate the page cache to zero what is * necessary and punch the delalloc blocks directly. */ struct xfs_inode *ip = XFS_I(inode); xfs_fileoff_t start_fsb; xfs_fileoff_t end_fsb; int error; truncate_pagecache(inode, to, inode->i_size); /* * Check if there are any blocks that are outside of i_size * that need to be trimmed back. 
*/ start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1; end_fsb = XFS_B_TO_FSB(ip->i_mount, to); if (end_fsb <= start_fsb) return; xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_bmap_punch_delalloc_range(ip, start_fsb, end_fsb - start_fsb); if (error) { /* something screwed, just bail */ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_alert(ip->i_mount, "xfs_vm_write_failed: unable to clean up ino %lld", ip->i_ino); } } xfs_iunlock(ip, XFS_ILOCK_EXCL); } } STATIC int xfs_vm_write_begin( struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS, pagep, xfs_get_blocks); if (unlikely(ret)) xfs_vm_write_failed(mapping, pos + len); return ret; } STATIC int xfs_vm_write_end( struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int ret; ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); if (unlikely(ret < len)) xfs_vm_write_failed(mapping, pos + len); return ret; } STATIC sector_t xfs_vm_bmap( struct address_space *mapping, sector_t block) { struct inode *inode = (struct inode *)mapping->host; struct xfs_inode *ip = XFS_I(inode); trace_xfs_vm_bmap(XFS_I(inode)); xfs_ilock(ip, XFS_IOLOCK_SHARED); xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); xfs_iunlock(ip, XFS_IOLOCK_SHARED); return generic_block_bmap(mapping, block, xfs_get_blocks); } STATIC int xfs_vm_readpage( struct file *unused, struct page *page) { return mpage_readpage(page, xfs_get_blocks); } STATIC int xfs_vm_readpages( struct file *unused, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); } const struct address_space_operations xfs_address_space_operations = { .readpage = xfs_vm_readpage, .readpages = xfs_vm_readpages, .writepage = xfs_vm_writepage, .writepages = xfs_vm_writepages, .releasepage = xfs_vm_releasepage, .invalidatepage = xfs_vm_invalidatepage, .write_begin = xfs_vm_write_begin, .write_end = xfs_vm_write_end, .bmap = xfs_vm_bmap, .direct_IO = xfs_vm_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, };
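A minimal userspace sketch (an editor's illustration, not part of the kernel file above) of the end-of-file buffer accounting done in xfs_convert_page(): it computes how many block-sized buffers of a page lie inside EOF. The 4096-byte page, 1024-byte block and helper names are assumptions made purely for the example.

#include <stdio.h>

#define PAGE_SZ	4096ULL
#define BLK_SZ	1024ULL

/* round x up to the next multiple of y */
static unsigned long long roundup_ull(unsigned long long x, unsigned long long y)
{
	return ((x + y - 1) / y) * y;
}

/* how many block-sized buffers of page 'page_index' lie inside EOF */
static unsigned int buffers_inside_eof(unsigned long long page_index,
				       unsigned long long i_size)
{
	unsigned long long end_offset, p_offset;

	/* byte offset just past this page, clamped to the file size */
	end_offset = (page_index + 1) * PAGE_SZ;
	if (end_offset > i_size)
		end_offset = i_size;

	/* EOF offset within the page, rounded up to a block boundary;
	 * a fully covered page counts all of its buffers */
	p_offset = end_offset % PAGE_SZ;
	p_offset = p_offset ? roundup_ull(p_offset, BLK_SZ) : PAGE_SZ;

	return (unsigned int)(p_offset / BLK_SZ);
}

int main(void)
{
	/* 10000-byte file: page 2 covers bytes 8192..9999 -> 2 buffers */
	printf("%u\n", buffers_inside_eof(2, 10000));
	/* page 0 is fully inside the file -> all 4 buffers */
	printf("%u\n", buffers_inside_eof(0, 10000));
	return 0;
}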
omasab/linux
fs/xfs/xfs_aops.c
C
gpl-2.0
36,167
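A small userspace sketch (an editor's illustration, not part of the kernel sources) of how tcp_select_initial_window() in the net/ipv4/tcp_output.c file below picks a window scale: the offered buffer is repeatedly halved until it fits the 16-bit window field. The 6 MB receive buffer used here is an assumed example value, not a kernel default.

#include <stdio.h>

int main(void)
{
	unsigned int space = 6 * 1024 * 1024;	/* assumed tcp_rmem[2] */
	unsigned int wscale = 0;

	/* Halve the offered space until it fits in the 16-bit window field,
	 * bumping the advertised shift count each time (capped at 14). */
	while (space > 65535 && wscale < 14) {
		space >>= 1;
		wscale++;
	}

	printf("rcv_wscale=%u  scaled window=%u  real window=%u\n",
	       wscale, space, space << wscale);
	return 0;
}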
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Corey Minyard <wf-rch!minyard@relay.EU.net> * Florian La Roche, <flla@stud.uni-sb.de> * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> * Linus Torvalds, <torvalds@cs.helsinki.fi> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Matthew Dillon, <dillon@apollo.west.oic.com> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Jorge Cwik, <jorge@laser.satlink.net> */ /* * Changes: Pedro Roque : Retransmit queue handled by TCP. * : Fragmentation on mtu decrease * : Segment collapse on retransmit * : AF independence * * Linus Torvalds : send_delayed_ack * David S. Miller : Charge memory using the right skb * during syn/ack processing. * David S. Miller : Output engine completely rewritten. * Andrea Arcangeli: SYNACK carry ts_recent in tsecr. * Cacophonix Gaul : draft-minshall-nagle-01 * J Hadi Salim : ECN support * */ #define pr_fmt(fmt) "TCP: " fmt #include <net/tcp.h> #include <linux/compiler.h> #include <linux/gfp.h> #include <linux/module.h> /* People can turn this off for buggy TCP's found in printers etc. */ int sysctl_tcp_retrans_collapse __read_mostly = 1; /* People can turn this on to work with those rare, broken TCPs that * interpret the window field as a signed quantity. */ int sysctl_tcp_workaround_signed_windows __read_mostly = 0; /* Default TSQ limit of two TSO segments */ int sysctl_tcp_limit_output_bytes __read_mostly = 131072; /* This limits the percentage of the congestion window which we * will allow a single TSO frame to consume. Building TSO frames * which are too large can cause TCP streams to be bursty. */ int sysctl_tcp_tso_win_divisor __read_mostly = 3; int sysctl_tcp_mtu_probing __read_mostly = 0; int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS; /* By default, RFC2861 behavior. */ int sysctl_tcp_slow_start_after_idle __read_mostly = 1; int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */ EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size); static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, int push_one, gfp_t gfp); /* Account for new data that has been sent to the network. */ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); unsigned int prior_packets = tp->packets_out; tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; /* Don't override Nagle indefinitely with F-RTO */ if (tp->frto_counter == 2) tp->frto_counter = 3; tp->packets_out += tcp_skb_pcount(skb); if (!prior_packets || tp->early_retrans_delayed) tcp_rearm_rto(sk); } /* SND.NXT, if window was not shrunk. * If window has been shrunk, what should we make? It is not clear at all. * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( * Anything in between SND.UNA...SND.UNA+SND.WND also can be already * invalid. OK, let's make this for now: */ static inline __u32 tcp_acceptable_seq(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); if (!before(tcp_wnd_end(tp), tp->snd_nxt)) return tp->snd_nxt; else return tcp_wnd_end(tp); } /* Calculate mss to advertise in SYN segment. * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that: * * 1. It is independent of path mtu. * 2. 
Ideally, it is maximal possible segment size i.e. 65535-40. * 3. For IPv4 it is reasonable to calculate it from maximal MTU of * attached devices, because some buggy hosts are confused by * large MSS. * 4. We do not make 3, we advertise MSS, calculated from first * hop device mtu, but allow to raise it to ip_rt_min_advmss. * This may be overridden via information stored in routing table. * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible, * probably even Jumbo". */ static __u16 tcp_advertise_mss(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); const struct dst_entry *dst = __sk_dst_get(sk); int mss = tp->advmss; if (dst) { unsigned int metric = dst_metric_advmss(dst); if (metric < mss) { mss = metric; tp->advmss = mss; } } return (__u16)mss; } /* RFC2861. Reset CWND after idle period longer RTO to "restart window". * This is the first part of cwnd validation mechanism. */ static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst) { struct tcp_sock *tp = tcp_sk(sk); s32 delta = tcp_time_stamp - tp->lsndtime; u32 restart_cwnd = tcp_init_cwnd(tp, dst); u32 cwnd = tp->snd_cwnd; tcp_ca_event(sk, CA_EVENT_CWND_RESTART); tp->snd_ssthresh = tcp_current_ssthresh(sk); restart_cwnd = min(restart_cwnd, cwnd); while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) cwnd >>= 1; tp->snd_cwnd = max(cwnd, restart_cwnd); tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_cwnd_used = 0; } /* Congestion state accounting after a packet has been sent. */ static void tcp_event_data_sent(struct tcp_sock *tp, struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); const u32 now = tcp_time_stamp; if (sysctl_tcp_slow_start_after_idle && (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) tcp_cwnd_restart(sk, __sk_dst_get(sk)); tp->lsndtime = now; /* If it is a reply for ato after last received * packet, enter pingpong mode. */ if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) icsk->icsk_ack.pingpong = 1; } /* Account for an ACK we sent. */ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) { tcp_dec_quickack_mode(sk, pkts); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); } /* Determine a window scaling and initial window to offer. * Based on the assumption that the given amount of space * will be offered. Store the results in the tp structure. * NOTE: for smooth operation initial space offering should * be a multiple of mss if possible. We assume here that mss >= 1. * This MUST be enforced by all callers. */ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale, __u32 init_rcv_wnd) { unsigned int space = (__space < 0 ? 0 : __space); /* If no clamp set the clamp to the max possible scaled window */ if (*window_clamp == 0) (*window_clamp) = (65535 << 14); space = min(*window_clamp, space); /* Quantize space offering to a multiple of mss if possible. */ if (space > mss) space = (space / mss) * mss; /* NOTE: offering an initial window larger than 32767 * will break some buggy TCP stacks. If the admin tells us * it is likely we could be speaking with such a buggy stack * we will truncate our initial window offering to 32K-1 * unless the remote has sent us a window scaling option, * which we interpret as a sign the remote TCP is not * misinterpreting the window field as a signed quantity. 
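 *
 * (MAX_TCP_WINDOW is 32767, i.e. the largest window value that cannot be
 * misread as a negative 16-bit quantity.)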
*/ if (sysctl_tcp_workaround_signed_windows) (*rcv_wnd) = min(space, MAX_TCP_WINDOW); else (*rcv_wnd) = space; (*rcv_wscale) = 0; if (wscale_ok) { /* Set window scaling on max possible window * See RFC1323 for an explanation of the limit to 14 */ space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); space = min_t(u32, space, *window_clamp); while (space > 65535 && (*rcv_wscale) < 14) { space >>= 1; (*rcv_wscale)++; } } /* Set initial window to a value enough for senders starting with * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place * a limit on the initial window when mss is larger than 1460. */ if (mss > (1 << *rcv_wscale)) { int init_cwnd = TCP_DEFAULT_INIT_RCVWND; if (mss > 1460) init_cwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2); /* when initializing use the value from init_rcv_wnd * rather than the default from above */ if (init_rcv_wnd) *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); else *rcv_wnd = min(*rcv_wnd, init_cwnd * mss); } /* Set the clamp no higher than max representable value */ (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); } EXPORT_SYMBOL(tcp_select_initial_window); /* Chose a new window to advertise, update state in tcp_sock for the * socket, and return result with RFC1323 scaling applied. The return * value can be stuffed directly into th->window for an outgoing * frame. */ static u16 tcp_select_window(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); u32 cur_win = tcp_receive_window(tp); u32 new_win = __tcp_select_window(sk); /* Never shrink the offered window */ if (new_win < cur_win) { /* Danger Will Robinson! * Don't update rcv_wup/rcv_wnd here or else * we will not be able to advertise a zero * window in time. --DaveM * * Relax Will Robinson. */ new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); } tp->rcv_wnd = new_win; tp->rcv_wup = tp->rcv_nxt; /* Make sure we do not exceed the maximum possible * scaled window. */ if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows) new_win = min(new_win, MAX_TCP_WINDOW); else new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); /* RFC1323 scaling applied */ new_win >>= tp->rx_opt.rcv_wscale; /* If we advertise zero window, disable fast path. */ if (new_win == 0) tp->pred_flags = 0; return new_win; } /* Packet ECN state for a SYN-ACK */ static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb) { TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; if (!(tp->ecn_flags & TCP_ECN_OK)) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; } /* Packet ECN state for a SYN. */ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); tp->ecn_flags = 0; if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; tp->ecn_flags = TCP_ECN_OK; } } static __inline__ void TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th) { if (inet_rsk(req)->ecn_ok) th->ece = 1; } /* Set up ECN state for a packet on a ESTABLISHED socket that is about to * be sent. */ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, int tcp_header_len) { struct tcp_sock *tp = tcp_sk(sk); if (tp->ecn_flags & TCP_ECN_OK) { /* Not-retransmitted data segment: set ECT and inject CWR. 
*/ if (skb->len != tcp_header_len && !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { INET_ECN_xmit(sk); if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; tcp_hdr(skb)->cwr = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; } } else { /* ACK or retransmitted segment: clear ECT|CE */ INET_ECN_dontxmit(sk); } if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) tcp_hdr(skb)->ece = 1; } } /* Constructs common control bits of non-data skb. If SYN/FIN is present, * auto increment end seqno. */ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; TCP_SKB_CB(skb)->tcp_flags = flags; TCP_SKB_CB(skb)->sacked = 0; skb_shinfo(skb)->gso_segs = 1; skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_type = 0; TCP_SKB_CB(skb)->seq = seq; if (flags & (TCPHDR_SYN | TCPHDR_FIN)) seq++; TCP_SKB_CB(skb)->end_seq = seq; } static inline bool tcp_urg_mode(const struct tcp_sock *tp) { return tp->snd_una != tp->snd_up; } #define OPTION_SACK_ADVERTISE (1 << 0) #define OPTION_TS (1 << 1) #define OPTION_MD5 (1 << 2) #define OPTION_WSCALE (1 << 3) #define OPTION_COOKIE_EXTENSION (1 << 4) #define OPTION_FAST_OPEN_COOKIE (1 << 8) struct tcp_out_options { u16 options; /* bit field of OPTION_* */ u16 mss; /* 0 to disable */ u8 ws; /* window scale, 0 to disable */ u8 num_sack_blocks; /* number of SACK blocks to include */ u8 hash_size; /* bytes in hash_location */ __u8 *hash_location; /* temporary pointer, overloaded */ __u32 tsval, tsecr; /* need to include OPTION_TS */ struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ }; /* The sysctl int routines are generic, so check consistency here. */ static u8 tcp_cookie_size_check(u8 desired) { int cookie_size; if (desired > 0) /* previously specified */ return desired; cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size); if (cookie_size <= 0) /* no default specified */ return 0; if (cookie_size <= TCP_COOKIE_MIN) /* value too small, specify minimum */ return TCP_COOKIE_MIN; if (cookie_size >= TCP_COOKIE_MAX) /* value too large, specify maximum */ return TCP_COOKIE_MAX; if (cookie_size & 1) /* 8-bit multiple, illegal, fix it */ cookie_size++; return (u8)cookie_size; } /* Write previously computed TCP options to the packet. * * Beware: Something in the Internet is very sensitive to the ordering of * TCP options, we learned this through the hard way, so be careful here. * Luckily we can at least blame others for their non-compliance but from * inter-operatibility perspective it seems that we're somewhat stuck with * the ordering which we have been using if we want to keep working with * those broken things (not that it currently hurts anybody as there isn't * particular reason why the ordering would need to be changed). * * At least SACK_PERM as the first option is known to lead to a disaster * (but it may well be that other scenarios fail similarly). */ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, struct tcp_out_options *opts) { u16 options = opts->options; /* mungable copy */ /* Having both authentication and cookies for security is redundant, * and there's certainly not enough room. Instead, the cookie-less * extension variant is proposed. * * Consider the pessimal case with authentication. 
The options * could look like: * COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40 */ if (unlikely(OPTION_MD5 & options)) { if (unlikely(OPTION_COOKIE_EXTENSION & options)) { *ptr++ = htonl((TCPOPT_COOKIE << 24) | (TCPOLEN_COOKIE_BASE << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); } else { *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); } options &= ~OPTION_COOKIE_EXTENSION; /* overload cookie hash location */ opts->hash_location = (__u8 *)ptr; ptr += 4; } if (unlikely(opts->mss)) { *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | opts->mss); } if (likely(OPTION_TS & options)) { if (unlikely(OPTION_SACK_ADVERTISE & options)) { *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); options &= ~OPTION_SACK_ADVERTISE; } else { *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); } *ptr++ = htonl(opts->tsval); *ptr++ = htonl(opts->tsecr); } /* Specification requires after timestamp, so do it now. * * Consider the pessimal case without authentication. The options * could look like: * MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40 */ if (unlikely(OPTION_COOKIE_EXTENSION & options)) { __u8 *cookie_copy = opts->hash_location; u8 cookie_size = opts->hash_size; /* 8-bit multiple handled in tcp_cookie_size_check() above, * and elsewhere. */ if (0x2 & cookie_size) { __u8 *p = (__u8 *)ptr; /* 16-bit multiple */ *p++ = TCPOPT_COOKIE; *p++ = TCPOLEN_COOKIE_BASE + cookie_size; *p++ = *cookie_copy++; *p++ = *cookie_copy++; ptr++; cookie_size -= 2; } else { /* 32-bit multiple */ *ptr++ = htonl(((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_COOKIE << 8) | TCPOLEN_COOKIE_BASE) + cookie_size); } if (cookie_size > 0) { memcpy(ptr, cookie_copy, cookie_size); ptr += (cookie_size / 4); } } if (unlikely(OPTION_SACK_ADVERTISE & options)) { *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM); } if (unlikely(OPTION_WSCALE & options)) { *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | opts->ws); } if (unlikely(opts->num_sack_blocks)) { struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks; int this_sack; *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_SACK << 8) | (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK))); for (this_sack = 0; this_sack < opts->num_sack_blocks; ++this_sack) { *ptr++ = htonl(sp[this_sack].start_seq); *ptr++ = htonl(sp[this_sack].end_seq); } tp->rx_opt.dsack = 0; } if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) { struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; *ptr++ = htonl((TCPOPT_EXP << 24) | ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) | TCPOPT_FASTOPEN_MAGIC); memcpy(ptr, foc->val, foc->len); if ((foc->len & 3) == 2) { u8 *align = ((u8 *)ptr) + foc->len; align[0] = align[1] = TCPOPT_NOP; } ptr += (foc->len + 3) >> 2; } } /* Compute TCP options for SYN packets. This is not the final * network wire format yet. */ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_cookie_values *cvp = tp->cookie_values; unsigned int remaining = MAX_TCP_OPTION_SPACE; u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 
tcp_cookie_size_check(cvp->cookie_desired) : 0; struct tcp_fastopen_request *fastopen = tp->fastopen_req; #ifdef CONFIG_TCP_MD5SIG *md5 = tp->af_specific->md5_lookup(sk, sk); if (*md5) { opts->options |= OPTION_MD5; remaining -= TCPOLEN_MD5SIG_ALIGNED; } #else *md5 = NULL; #endif /* We always get an MSS option. The option bytes which will be seen in * normal data packets should timestamps be used, must be in the MSS * advertised. But we subtract them from tp->mss_cache so that * calculations in tcp_sendmsg are simpler etc. So account for this * fact here if necessary. If we don't do this correctly, as a * receiver we won't recognize data packets as being full sized when we * should, and thus we won't abide by the delayed ACK rules correctly. * SACKs don't matter, we never delay an ACK when we have any of those * going out. */ opts->mss = tcp_advertise_mss(sk); remaining -= TCPOLEN_MSS_ALIGNED; if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { opts->options |= OPTION_TS; opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset; opts->tsecr = tp->rx_opt.ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } if (likely(sysctl_tcp_window_scaling)) { opts->ws = tp->rx_opt.rcv_wscale; opts->options |= OPTION_WSCALE; remaining -= TCPOLEN_WSCALE_ALIGNED; } if (likely(sysctl_tcp_sack)) { opts->options |= OPTION_SACK_ADVERTISE; if (unlikely(!(OPTION_TS & opts->options))) remaining -= TCPOLEN_SACKPERM_ALIGNED; } if (fastopen && fastopen->cookie.len >= 0) { u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len; need = (need + 3) & ~3U; /* Align to 32 bits */ if (remaining >= need) { opts->options |= OPTION_FAST_OPEN_COOKIE; opts->fastopen_cookie = &fastopen->cookie; remaining -= need; tp->syn_fastopen = 1; } } /* Note that timestamps are required by the specification. * * Odd numbers of bytes are prohibited by the specification, ensuring * that the cookie is 16-bit aligned, and the resulting cookie pair is * 32-bit aligned. */ if (*md5 == NULL && (OPTION_TS & opts->options) && cookie_size > 0) { int need = TCPOLEN_COOKIE_BASE + cookie_size; if (0x2 & need) { /* 32-bit multiple */ need += 2; /* NOPs */ if (need > remaining) { /* try shrinking cookie to fit */ cookie_size -= 2; need -= 4; } } while (need > remaining && TCP_COOKIE_MIN <= cookie_size) { cookie_size -= 4; need -= 4; } if (TCP_COOKIE_MIN <= cookie_size) { opts->options |= OPTION_COOKIE_EXTENSION; opts->hash_location = (__u8 *)&cvp->cookie_pair[0]; opts->hash_size = cookie_size; /* Remember for future incarnations. */ cvp->cookie_desired = cookie_size; if (cvp->cookie_desired != cvp->cookie_pair_size) { /* Currently use random bytes as a nonce, * assuming these are completely unpredictable * by hostile users of the same system. */ get_random_bytes(&cvp->cookie_pair[0], cookie_size); cvp->cookie_pair_size = cookie_size; } remaining -= need; } } return MAX_TCP_OPTION_SPACE - remaining; } /* Set up TCP options for SYN-ACKs. */ static unsigned int tcp_synack_options(struct sock *sk, struct request_sock *req, unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5, struct tcp_extend_values *xvp, struct tcp_fastopen_cookie *foc) { struct inet_request_sock *ireq = inet_rsk(req); unsigned int remaining = MAX_TCP_OPTION_SPACE; u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? 
xvp->cookie_plus : 0; #ifdef CONFIG_TCP_MD5SIG *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); if (*md5) { opts->options |= OPTION_MD5; remaining -= TCPOLEN_MD5SIG_ALIGNED; /* We can't fit any SACK blocks in a packet with MD5 + TS * options. There was discussion about disabling SACK * rather than TS in order to fit in better with old, * buggy kernels, but that was deemed to be unnecessary. */ ireq->tstamp_ok &= !ireq->sack_ok; } #else *md5 = NULL; #endif /* We always send an MSS option. */ opts->mss = mss; remaining -= TCPOLEN_MSS_ALIGNED; if (likely(ireq->wscale_ok)) { opts->ws = ireq->rcv_wscale; opts->options |= OPTION_WSCALE; remaining -= TCPOLEN_WSCALE_ALIGNED; } if (likely(ireq->tstamp_ok)) { opts->options |= OPTION_TS; opts->tsval = TCP_SKB_CB(skb)->when; opts->tsecr = req->ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } if (likely(ireq->sack_ok)) { opts->options |= OPTION_SACK_ADVERTISE; if (unlikely(!ireq->tstamp_ok)) remaining -= TCPOLEN_SACKPERM_ALIGNED; } if (foc != NULL) { u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; need = (need + 3) & ~3U; /* Align to 32 bits */ if (remaining >= need) { opts->options |= OPTION_FAST_OPEN_COOKIE; opts->fastopen_cookie = foc; remaining -= need; } } /* Similar rationale to tcp_syn_options() applies here, too. * If the <SYN> options fit, the same options should fit now! */ if (*md5 == NULL && ireq->tstamp_ok && cookie_plus > TCPOLEN_COOKIE_BASE) { int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */ if (0x2 & need) { /* 32-bit multiple */ need += 2; /* NOPs */ } if (need <= remaining) { opts->options |= OPTION_COOKIE_EXTENSION; opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE; remaining -= need; } else { /* There's no error return, so flag it. */ xvp->cookie_out_never = 1; /* true */ opts->hash_size = 0; } } return MAX_TCP_OPTION_SPACE - remaining; } /* Compute TCP options for ESTABLISHED sockets. This is not the * final wire format yet. */ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; struct tcp_sock *tp = tcp_sk(sk); unsigned int size = 0; unsigned int eff_sacks; #ifdef CONFIG_TCP_MD5SIG *md5 = tp->af_specific->md5_lookup(sk, sk); if (unlikely(*md5)) { opts->options |= OPTION_MD5; size += TCPOLEN_MD5SIG_ALIGNED; } #else *md5 = NULL; #endif if (likely(tp->rx_opt.tstamp_ok)) { opts->options |= OPTION_TS; opts->tsval = tcb ? tcb->when + tp->tsoffset : 0; opts->tsecr = tp->rx_opt.ts_recent; size += TCPOLEN_TSTAMP_ALIGNED; } eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; if (unlikely(eff_sacks)) { const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; opts->num_sack_blocks = min_t(unsigned int, eff_sacks, (remaining - TCPOLEN_SACK_BASE_ALIGNED) / TCPOLEN_SACK_PERBLOCK); size += TCPOLEN_SACK_BASE_ALIGNED + opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; } return size; } /* TCP SMALL QUEUES (TSQ) * * TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev) * to reduce RTT and bufferbloat. * We do this using a special skb destructor (tcp_wfree). * * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb * needs to be reallocated in a driver. * The invariant being skb->truesize substracted from sk->sk_wmem_alloc * * Since transmit from skb destructor is forbidden, we use a tasklet * to process all sockets that eventually need to send more skbs. * We use one tasklet per cpu, with its own queue of sockets. 
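 *
 * tcp_wfree() below only queues a socket here once: the transmit path sets
 * TSQ_THROTTLED when it backs off, and TSQ_QUEUED guards against adding the
 * same socket twice.  The tasklet then either transmits directly or, if the
 * socket is locked by user context, defers the work to tcp_release_cb().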
*/ struct tsq_tasklet { struct tasklet_struct tasklet; struct list_head head; /* queue of tcp sockets */ }; static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet); static void tcp_tsq_handler(struct sock *sk) { if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC); } /* * One tasklest per cpu tries to send more skbs. * We run in tasklet context but need to disable irqs when * transfering tsq->head because tcp_wfree() might * interrupt us (non NAPI drivers) */ static void tcp_tasklet_func(unsigned long data) { struct tsq_tasklet *tsq = (struct tsq_tasklet *)data; LIST_HEAD(list); unsigned long flags; struct list_head *q, *n; struct tcp_sock *tp; struct sock *sk; local_irq_save(flags); list_splice_init(&tsq->head, &list); local_irq_restore(flags); list_for_each_safe(q, n, &list) { tp = list_entry(q, struct tcp_sock, tsq_node); list_del(&tp->tsq_node); sk = (struct sock *)tp; bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { tcp_tsq_handler(sk); } else { /* defer the work to tcp_release_cb() */ set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); } bh_unlock_sock(sk); clear_bit(TSQ_QUEUED, &tp->tsq_flags); sk_free(sk); } } #define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \ (1UL << TCP_WRITE_TIMER_DEFERRED) | \ (1UL << TCP_DELACK_TIMER_DEFERRED) | \ (1UL << TCP_MTU_REDUCED_DEFERRED)) /** * tcp_release_cb - tcp release_sock() callback * @sk: socket * * called from release_sock() to perform protocol dependent * actions before socket release. */ void tcp_release_cb(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); unsigned long flags, nflags; /* perform an atomic operation only if at least one flag is set */ do { flags = tp->tsq_flags; if (!(flags & TCP_DEFERRED_ALL)) return; nflags = flags & ~TCP_DEFERRED_ALL; } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags); if (flags & (1UL << TCP_TSQ_DEFERRED)) tcp_tsq_handler(sk); if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { tcp_write_timer_handler(sk); __sock_put(sk); } if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) { tcp_delack_timer_handler(sk); __sock_put(sk); } if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { sk->sk_prot->mtu_reduced(sk); __sock_put(sk); } } EXPORT_SYMBOL(tcp_release_cb); void __init tcp_tasklet_init(void) { int i; for_each_possible_cpu(i) { struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i); INIT_LIST_HEAD(&tsq->head); tasklet_init(&tsq->tasklet, tcp_tasklet_func, (unsigned long)tsq); } } /* * Write buffer destructor automatically called from kfree_skb. * We cant xmit new skbs from this context, as we might already * hold qdisc lock. */ static void tcp_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct tcp_sock *tp = tcp_sk(sk); if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) && !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) { unsigned long flags; struct tsq_tasklet *tsq; /* Keep a ref on socket. * This last ref will be released in tcp_tasklet_func() */ atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc); /* queue this socket to tasklet queue */ local_irq_save(flags); tsq = &__get_cpu_var(tsq_tasklet); list_add(&tp->tsq_node, &tsq->head); tasklet_schedule(&tsq->tasklet); local_irq_restore(flags); } else { sock_wfree(skb); } } /* This routine actually transmits TCP packets queued in by * tcp_do_sendmsg(). This is used by both the initial * transmission and possible later retransmissions. * All SKB's seen here are completely headerless. 
It is our * job to build the TCP header, and pass the packet down to * IP so it can do the same plus pass the packet off to the * device. * * We are working here with either a clone of the original * SKB, or a fresh unique copy made by the retransmit engine. */ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask) { const struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet; struct tcp_sock *tp; struct tcp_skb_cb *tcb; struct tcp_out_options opts; unsigned int tcp_options_size, tcp_header_size; struct tcp_md5sig_key *md5; struct tcphdr *th; int err; BUG_ON(!skb || !tcp_skb_pcount(skb)); /* If congestion control is doing timestamping, we must * take such a timestamp before we potentially clone/copy. */ if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) __net_timestamp(skb); if (likely(clone_it)) { if (unlikely(skb_cloned(skb))) skb = pskb_copy(skb, gfp_mask); else skb = skb_clone(skb, gfp_mask); if (unlikely(!skb)) return -ENOBUFS; } inet = inet_sk(sk); tp = tcp_sk(sk); tcb = TCP_SKB_CB(skb); memset(&opts, 0, sizeof(opts)); if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); else tcp_options_size = tcp_established_options(sk, skb, &opts, &md5); tcp_header_size = tcp_options_size + sizeof(struct tcphdr); if (tcp_packets_in_flight(tp) == 0) { tcp_ca_event(sk, CA_EVENT_TX_START); skb->ooo_okay = 1; } else skb->ooo_okay = 0; skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); skb_orphan(skb); skb->sk = sk; skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ? tcp_wfree : sock_wfree; atomic_add(skb->truesize, &sk->sk_wmem_alloc); /* Build TCP header and checksum it. */ th = tcp_hdr(skb); th->source = inet->inet_sport; th->dest = inet->inet_dport; th->seq = htonl(tcb->seq); th->ack_seq = htonl(tp->rcv_nxt); *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->tcp_flags); if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { /* RFC1323: The window in SYN & SYN/ACK segments * is never scaled. */ th->window = htons(min(tp->rcv_wnd, 65535U)); } else { th->window = htons(tcp_select_window(sk)); } th->check = 0; th->urg_ptr = 0; /* The urg_mode check is necessary during a below snd_una win probe */ if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { if (before(tp->snd_up, tcb->seq + 0x10000)) { th->urg_ptr = htons(tp->snd_up - tcb->seq); th->urg = 1; } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { th->urg_ptr = htons(0xFFFF); th->urg = 1; } } tcp_options_write((__be32 *)(th + 1), tp, &opts); if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0)) TCP_ECN_send(sk, skb, tcp_header_size); #ifdef CONFIG_TCP_MD5SIG /* Calculate the MD5 hash, as we have all we need now */ if (md5) { sk_nocaps_add(sk, NETIF_F_GSO_MASK); tp->af_specific->calc_md5_hash(opts.hash_location, md5, sk, NULL, skb); } #endif icsk->icsk_af_ops->send_check(sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); if (skb->len != tcp_header_size) tcp_event_data_sent(tp, sk); if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl); if (likely(err <= 0)) return err; tcp_enter_cwr(sk, 1); return net_xmit_eval(err); } /* This routine just queues the buffer for sending. * * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, * otherwise socket can stall. 
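 *
 * (All this does is advance write_seq, put the skb on the write queue and
 * charge its truesize to the socket; actually transmitting it is left to
 * the caller.)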
*/ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Advance write_seq and place onto the write_queue. */ tp->write_seq = TCP_SKB_CB(skb)->end_seq; skb_header_release(skb); tcp_add_write_queue_tail(sk, skb); sk->sk_wmem_queued += skb->truesize; sk_mem_charge(sk, skb->truesize); } /* Initialize TSO segments for a packet. */ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, unsigned int mss_now) { if (skb->len <= mss_now || !sk_can_gso(sk) || skb->ip_summed == CHECKSUM_NONE) { /* Avoid the costly divide in the normal * non-TSO case. */ skb_shinfo(skb)->gso_segs = 1; skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_type = 0; } else { skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now); skb_shinfo(skb)->gso_size = mss_now; skb_shinfo(skb)->gso_type = sk->sk_gso_type; } } /* When a modification to fackets out becomes necessary, we need to check * skb is counted to fackets_out or not. */ static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, int decr) { struct tcp_sock *tp = tcp_sk(sk); if (!tp->sacked_out || tcp_is_reno(tp)) return; if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) tp->fackets_out -= decr; } /* Pcount in the middle of the write queue got changed, we need to do various * tweaks to fix counters */ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) { struct tcp_sock *tp = tcp_sk(sk); tp->packets_out -= decr; if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= decr; if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= decr; if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) tp->lost_out -= decr; /* Reno case is special. Sigh... */ if (tcp_is_reno(tp) && decr > 0) tp->sacked_out -= min_t(u32, tp->sacked_out, decr); tcp_adjust_fackets_out(sk, skb, decr); if (tp->lost_skb_hint && before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) tp->lost_cnt_hint -= decr; tcp_verify_left_out(tp); } /* Function to create two new TCP segments. Shrinks the given segment * to the specified size and appends a new segment with the rest of the * packet to the list. This won't be called frequently, I hope. * Remember, these are still headerless SKBs at this point. */ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; int nsize, old_factor; int nlen; u8 flags; if (WARN_ON(len > skb->len)) return -EINVAL; nsize = skb_headlen(skb) - len; if (nsize < 0) nsize = 0; if (skb_cloned(skb) && skb_is_nonlinear(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) return -ENOMEM; /* Get a new skb... force flag on. */ buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); if (buff == NULL) return -ENOMEM; /* We'll just try again later. */ sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); nlen = skb->len - len - nsize; buff->truesize += nlen; skb->truesize -= nlen; /* Correct the sequence numbers. */ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; /* PSH and FIN should only be set in the second packet. 
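 *
 * (The original skb keeps the remaining flags; the new buff also inherits
 * the SACKED state bits just below.)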
*/ flags = TCP_SKB_CB(skb)->tcp_flags; TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); TCP_SKB_CB(buff)->tcp_flags = flags; TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { /* Copy and checksum data tail into the new buffer. */ buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize), nsize, 0); skb_trim(skb, len); skb->csum = csum_block_sub(skb->csum, buff->csum, len); } else { skb->ip_summed = CHECKSUM_PARTIAL; skb_split(skb, buff, len); } buff->ip_summed = skb->ip_summed; /* Looks stupid, but our code really uses when of * skbs, which it never sent before. --ANK */ TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; buff->tstamp = skb->tstamp; old_factor = tcp_skb_pcount(skb); /* Fix up tso_factor for both original and new SKB. */ tcp_set_skb_tso_segs(sk, skb, mss_now); tcp_set_skb_tso_segs(sk, buff, mss_now); /* If this packet has been sent out already, we must * adjust the various packet counters. */ if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { int diff = old_factor - tcp_skb_pcount(skb) - tcp_skb_pcount(buff); if (diff) tcp_adjust_pcount(sk, skb, diff); } /* Link BUFF into the send queue. */ skb_header_release(buff); tcp_insert_write_queue_after(skb, buff, sk); return 0; } /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c * eventually). The difference is that pulled data not copied, but * immediately discarded. */ static void __pskb_trim_head(struct sk_buff *skb, int len) { int i, k, eat; eat = min_t(int, len, skb_headlen(skb)); if (eat) { __skb_pull(skb, eat); skb->avail_size -= eat; len -= eat; if (!len) return; } eat = len; k = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); if (size <= eat) { skb_frag_unref(skb, i); eat -= size; } else { skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; if (eat) { skb_shinfo(skb)->frags[k].page_offset += eat; skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); eat = 0; } k++; } } skb_shinfo(skb)->nr_frags = k; skb_reset_tail_pointer(skb); skb->data_len -= len; skb->len = skb->data_len; } /* Remove acked data from a packet in the transmit queue. */ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) { if (skb_unclone(skb, GFP_ATOMIC)) return -ENOMEM; __pskb_trim_head(skb, len); TCP_SKB_CB(skb)->seq += len; skb->ip_summed = CHECKSUM_PARTIAL; skb->truesize -= len; sk->sk_wmem_queued -= len; sk_mem_uncharge(sk, len); sock_set_flag(sk, SOCK_QUEUE_SHRUNK); /* Any change of skb->len requires recalculation of tso factor. */ if (tcp_skb_pcount(skb) > 1) tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); return 0; } /* Calculate MSS not accounting any TCP options. 
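 *
 * e.g. an IPv4 path MTU of 1500 yields 1500 - 20 (IP) - 20 (TCP) = 1460
 * here, before icsk_ext_hdr_len and the clamps below are applied.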
*/ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); int mss_now; /* Calculate base mss without TCP options: It is MMS_S - sizeof(tcphdr) of rfc1122 */ mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ if (icsk->icsk_af_ops->net_frag_header_len) { const struct dst_entry *dst = __sk_dst_get(sk); if (dst && dst_allfrag(dst)) mss_now -= icsk->icsk_af_ops->net_frag_header_len; } /* Clamp it (mss_clamp does not include tcp options) */ if (mss_now > tp->rx_opt.mss_clamp) mss_now = tp->rx_opt.mss_clamp; /* Now subtract optional transport overhead */ mss_now -= icsk->icsk_ext_hdr_len; /* Then reserve room for full set of TCP options and 8 bytes of data */ if (mss_now < 48) mss_now = 48; return mss_now; } /* Calculate MSS. Not accounting for SACKs here. */ int tcp_mtu_to_mss(struct sock *sk, int pmtu) { /* Subtract TCP options size, not including SACKs */ return __tcp_mtu_to_mss(sk, pmtu) - (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); } /* Inverse of above */ int tcp_mss_to_mtu(struct sock *sk, int mss) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); int mtu; mtu = mss + tp->tcp_header_len + icsk->icsk_ext_hdr_len + icsk->icsk_af_ops->net_header_len; /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ if (icsk->icsk_af_ops->net_frag_header_len) { const struct dst_entry *dst = __sk_dst_get(sk); if (dst && dst_allfrag(dst)) mtu += icsk->icsk_af_ops->net_frag_header_len; } return mtu; } /* MTU probing init per socket */ void tcp_mtup_init(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + icsk->icsk_af_ops->net_header_len; icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); icsk->icsk_mtup.probe_size = 0; } EXPORT_SYMBOL(tcp_mtup_init); /* This function synchronize snd mss to current pmtu/exthdr set. tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts for TCP options, but includes only bare TCP header. tp->rx_opt.mss_clamp is mss negotiated at connection setup. It is minimum of user_mss and mss received with SYN. It also does not include TCP options. inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. tp->mss_cache is current effective sending mss, including all tcp options except for SACKs. It is evaluated, taking into account current pmtu, but never exceeds tp->rx_opt.mss_clamp. NOTE1. rfc1122 clearly states that advertised MSS DOES NOT include either tcp or ip options. NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache are READ ONLY outside this function. 
--ANK (980731) */ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); int mss_now; if (icsk->icsk_mtup.search_high > pmtu) icsk->icsk_mtup.search_high = pmtu; mss_now = tcp_mtu_to_mss(sk, pmtu); mss_now = tcp_bound_to_half_wnd(tp, mss_now); /* And store cached results */ icsk->icsk_pmtu_cookie = pmtu; if (icsk->icsk_mtup.enabled) mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); tp->mss_cache = mss_now; return mss_now; } EXPORT_SYMBOL(tcp_sync_mss); /* Compute the current effective MSS, taking SACKs and IP options, * and even PMTU discovery events into account. */ unsigned int tcp_current_mss(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct dst_entry *dst = __sk_dst_get(sk); u32 mss_now; unsigned int header_len; struct tcp_out_options opts; struct tcp_md5sig_key *md5; mss_now = tp->mss_cache; if (dst) { u32 mtu = dst_mtu(dst); if (mtu != inet_csk(sk)->icsk_pmtu_cookie) mss_now = tcp_sync_mss(sk, mtu); } header_len = tcp_established_options(sk, NULL, &opts, &md5) + sizeof(struct tcphdr); /* The mss_cache is sized based on tp->tcp_header_len, which assumes * some common options. If this is an odd packet (because we have SACK * blocks etc) then our calculated header_len will be different, and * we have to adjust mss_now correspondingly */ if (header_len != tp->tcp_header_len) { int delta = (int) header_len - tp->tcp_header_len; mss_now -= delta; } return mss_now; } /* Congestion window validation. (RFC2861) */ static void tcp_cwnd_validate(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tp->packets_out >= tp->snd_cwnd) { /* Network is feed fully. */ tp->snd_cwnd_used = 0; tp->snd_cwnd_stamp = tcp_time_stamp; } else { /* Network starves. */ if (tp->packets_out > tp->snd_cwnd_used) tp->snd_cwnd_used = tp->packets_out; if (sysctl_tcp_slow_start_after_idle && (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) tcp_cwnd_application_limited(sk); } } /* Returns the portion of skb which can be sent right away without * introducing MSS oddities to segment boundaries. In rare cases where * mss_now != mss_cache, we will request caller to create a small skb * per input skb which could be mostly avoided here (if desired). * * We explicitly want to create a request for splitting write queue tail * to a small skb for Nagle purposes while avoiding unnecessary modulos, * thus all the complexity (cwnd_len is always MSS multiple which we * return whenever allowed by the other factors). Basically we need the * modulo only when the receiver window alone is the limiting factor or * when we would be allowed to send the split-due-to-Nagle skb fully. */ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, unsigned int mss_now, unsigned int max_segs) { const struct tcp_sock *tp = tcp_sk(sk); u32 needed, window, max_len; window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; max_len = mss_now * max_segs; if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) return max_len; needed = min(skb->len, window); if (max_len <= needed) return max_len; return needed - needed % mss_now; } /* Can at least one segment of SKB be sent right now, according to the * congestion window rules? If so, return how many segments are allowed. */ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, const struct sk_buff *skb) { u32 in_flight, cwnd; /* Don't be strict about the congestion window for the final FIN. 
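 *
 * (A segment carrying only the FIN has no payload, so letting it out even
 * with a full congestion window cannot create a burst.)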
*/ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1) return 1; in_flight = tcp_packets_in_flight(tp); cwnd = tp->snd_cwnd; if (in_flight < cwnd) return (cwnd - in_flight); return 0; } /* Initialize TSO state of a skb. * This must be invoked the first time we consider transmitting * SKB onto the wire. */ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, unsigned int mss_now) { int tso_segs = tcp_skb_pcount(skb); if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { tcp_set_skb_tso_segs(sk, skb, mss_now); tso_segs = tcp_skb_pcount(skb); } return tso_segs; } /* Minshall's variant of the Nagle send check. */ static inline bool tcp_minshall_check(const struct tcp_sock *tp) { return after(tp->snd_sml, tp->snd_una) && !after(tp->snd_sml, tp->snd_nxt); } /* Return false, if packet can be sent now without violation Nagle's rules: * 1. It is full sized. * 2. Or it contains FIN. (already checked by caller) * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. * 4. Or TCP_CORK is not set, and all sent packets are ACKed. * With Minshall's modification: all sent small packets are ACKed. */ static inline bool tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb, unsigned int mss_now, int nonagle) { return skb->len < mss_now && ((nonagle & TCP_NAGLE_CORK) || (!nonagle && tp->packets_out && tcp_minshall_check(tp))); } /* Return true if the Nagle test allows this packet to be * sent now. */ static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, unsigned int cur_mss, int nonagle) { /* Nagle rule does not apply to frames, which sit in the middle of the * write_queue (they have no chances to get new data). * * This is implemented in the callers, where they modify the 'nonagle' * argument based upon the location of SKB in the send queue. */ if (nonagle & TCP_NAGLE_PUSH) return true; /* Don't use the nagle rule for urgent data (or for the final FIN). * Nagle can be ignored during F-RTO too (see RFC4138). */ if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) return true; if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) return true; return false; } /* Does at least the first segment of SKB fit into the send window? */ static bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, unsigned int cur_mss) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (skb->len > cur_mss) end_seq = TCP_SKB_CB(skb)->seq + cur_mss; return !after(end_seq, tcp_wnd_end(tp)); } /* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) * should be put on the wire right now. If so, it returns the number of * packets allowed by the congestion window. */ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, unsigned int cur_mss, int nonagle) { const struct tcp_sock *tp = tcp_sk(sk); unsigned int cwnd_quota; tcp_init_tso_segs(sk, skb, cur_mss); if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) return 0; cwnd_quota = tcp_cwnd_test(tp, skb); if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) cwnd_quota = 0; return cwnd_quota; } /* Test if sending is allowed right now. */ bool tcp_may_send_now(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = tcp_send_head(sk); return skb && tcp_snd_test(sk, skb, tcp_current_mss(sk), (tcp_skb_is_last(sk, skb) ? tp->nonagle : TCP_NAGLE_PUSH)); } /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet * which is put after SKB on the list. 
It is very much like * tcp_fragment() except that it may make several kinds of assumptions * in order to speed up the splitting operation. In particular, we * know that all the data is in scatter-gather pages, and that the * packet has never been sent out before (and thus is not cloned). */ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now, gfp_t gfp) { struct sk_buff *buff; int nlen = skb->len - len; u8 flags; /* All of a TSO frame must be composed of paged data. */ if (skb->len != skb->data_len) return tcp_fragment(sk, skb, len, mss_now); buff = sk_stream_alloc_skb(sk, 0, gfp); if (unlikely(buff == NULL)) return -ENOMEM; sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); buff->truesize += nlen; skb->truesize -= nlen; /* Correct the sequence numbers. */ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; /* PSH and FIN should only be set in the second packet. */ flags = TCP_SKB_CB(skb)->tcp_flags; TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); TCP_SKB_CB(buff)->tcp_flags = flags; /* This packet was never sent out yet, so no SACK bits. */ TCP_SKB_CB(buff)->sacked = 0; buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; skb_split(skb, buff, len); /* Fix up tso_factor for both original and new SKB. */ tcp_set_skb_tso_segs(sk, skb, mss_now); tcp_set_skb_tso_segs(sk, buff, mss_now); /* Link BUFF into the send queue. */ skb_header_release(buff); tcp_insert_write_queue_after(skb, buff, sk); return 0; } /* Try to defer sending, if possible, in order to minimize the amount * of TSO splitting we do. View it as a kind of TSO Nagle test. * * This algorithm is from John Heffner. */ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); u32 send_win, cong_win, limit, in_flight; int win_divisor; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto send_now; if (icsk->icsk_ca_state != TCP_CA_Open) goto send_now; /* Defer for less than two clock ticks. */ if (tp->tso_deferred && (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1) goto send_now; in_flight = tcp_packets_in_flight(tp); BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; /* From in_flight test above, we know that cwnd > in_flight. */ cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; limit = min(send_win, cong_win); /* If a full-sized TSO skb can be sent, do it. */ if (limit >= min_t(unsigned int, sk->sk_gso_max_size, sk->sk_gso_max_segs * tp->mss_cache)) goto send_now; /* Middle in queue won't get any more data, full sendable already? */ if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) goto send_now; win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); if (win_divisor) { u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); /* If at least some fraction of a window is available, * just use it. */ chunk /= win_divisor; if (limit >= chunk) goto send_now; } else { /* Different approach, try not to defer past a single * ACK. Receiver should ACK every other full sized * frame, so if we have space for more than 3 frames * then send now. */ if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) goto send_now; } /* Ok, it looks like it is advisable to defer. 
*/ tp->tso_deferred = 1 | (jiffies << 1); return true; send_now: tp->tso_deferred = 0; return false; } /* Create a new MTU probe if we are ready. * MTU probe is regularly attempting to increase the path MTU by * deliberately sending larger packets. This discovers routing * changes resulting in larger path MTUs. * * Returns 0 if we should wait to probe (no cwnd available), * 1 if a probe was sent, * -1 otherwise */ static int tcp_mtu_probe(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb, *nskb, *next; int len; int probe_size; int size_needed; int copy; int mss_now; /* Not currently probing/verifying, * not in recovery, * have enough cwnd, and * not SACKing (the variable headers throw things off) */ if (!icsk->icsk_mtup.enabled || icsk->icsk_mtup.probe_size || inet_csk(sk)->icsk_ca_state != TCP_CA_Open || tp->snd_cwnd < 11 || tp->rx_opt.num_sacks || tp->rx_opt.dsack) return -1; /* Very simple search strategy: just double the MSS. */ mss_now = tcp_current_mss(sk); probe_size = 2 * tp->mss_cache; size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { /* TODO: set timer for probe_converge_event */ return -1; } /* Have enough data in the send queue to probe? */ if (tp->write_seq - tp->snd_nxt < size_needed) return -1; if (tp->snd_wnd < size_needed) return -1; if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) return 0; /* Do we need to wait to drain cwnd? With none in flight, don't stall */ if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { if (!tcp_packets_in_flight(tp)) return -1; else return 0; } /* We're allowed to probe. Build it now. */ if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) return -1; sk->sk_wmem_queued += nskb->truesize; sk_mem_charge(sk, nskb->truesize); skb = tcp_send_head(sk); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; TCP_SKB_CB(nskb)->sacked = 0; nskb->csum = 0; nskb->ip_summed = skb->ip_summed; tcp_insert_write_queue_before(nskb, skb, sk); len = 0; tcp_for_write_queue_from_safe(skb, next, sk) { copy = min_t(int, skb->len, probe_size - len); if (nskb->ip_summed) skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); else nskb->csum = skb_copy_and_csum_bits(skb, 0, skb_put(nskb, copy), copy, nskb->csum); if (skb->len <= copy) { /* We've eaten all the data from this skb. * Throw it away. */ TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); } else { TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & ~(TCPHDR_FIN|TCPHDR_PSH); if (!skb_shinfo(skb)->nr_frags) { skb_pull(skb, copy); if (skb->ip_summed != CHECKSUM_PARTIAL) skb->csum = csum_partial(skb->data, skb->len, 0); } else { __pskb_trim_head(skb, copy); tcp_set_skb_tso_segs(sk, skb, mss_now); } TCP_SKB_CB(skb)->seq += copy; } len += copy; if (len >= probe_size) break; } tcp_init_tso_segs(sk, nskb, nskb->len); /* We're ready to send. If this fails, the probe will * be resegmented into mss-sized pieces by tcp_write_xmit(). */ TCP_SKB_CB(nskb)->when = tcp_time_stamp; if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { /* Decrement cwnd here because we are sending * effectively two packets. 
*/ tp->snd_cwnd--; tcp_event_new_data_sent(sk, nskb); icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; return 1; } return -1; } /* This routine writes packets to the network. It advances the * send_head. This happens as incoming acks open up the remote * window for us. * * LARGESEND note: !tcp_urg_mode is overkill, only frames between * snd_up-64k-mss .. snd_up cannot be large. However, taking into * account rare use of URG, this is not a big flaw. * * Returns true, if no segments are in flight and we have queued segments, * but cannot send anything now because of SWS or another problem. */ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, int push_one, gfp_t gfp) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; unsigned int tso_segs, sent_pkts; int cwnd_quota; int result; sent_pkts = 0; if (!push_one) { /* Do MTU probing. */ result = tcp_mtu_probe(sk); if (!result) { return false; } else if (result > 0) { sent_pkts = 1; } } while ((skb = tcp_send_head(sk))) { unsigned int limit; tso_segs = tcp_init_tso_segs(sk, skb, mss_now); BUG_ON(!tso_segs); if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) goto repair; /* Skip network transmission */ cwnd_quota = tcp_cwnd_test(tp, skb); if (!cwnd_quota) break; if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) break; if (tso_segs == 1) { if (unlikely(!tcp_nagle_test(tp, skb, mss_now, (tcp_skb_is_last(sk, skb) ? nonagle : TCP_NAGLE_PUSH)))) break; } else { if (!push_one && tcp_tso_should_defer(sk, skb)) break; } /* TSQ : sk_wmem_alloc accounts skb truesize, * including skb overhead. But thats OK. */ if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) { set_bit(TSQ_THROTTLED, &tp->tsq_flags); break; } limit = mss_now; if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, min_t(unsigned int, cwnd_quota, sk->sk_gso_max_segs)); if (skb->len > limit && unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) break; TCP_SKB_CB(skb)->when = tcp_time_stamp; if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) break; repair: /* Advance the send_head. This one is sent out. * This call will increment packets_out. */ tcp_event_new_data_sent(sk, skb); tcp_minshall_update(tp, mss_now, skb); sent_pkts += tcp_skb_pcount(skb); if (push_one) break; } if (likely(sent_pkts)) { if (tcp_in_cwnd_reduction(sk)) tp->prr_out += sent_pkts; tcp_cwnd_validate(sk); return false; } return !tp->packets_out && tcp_send_head(sk); } /* Push out any pending frames which were held back due to * TCP_CORK or attempt at coalescing tiny packets. * The socket must be locked by the caller. */ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, int nonagle) { /* If we are closed, the bytes will have to remain here. * In time closedown will finish, we empty the write queue and * all will be happy. */ if (unlikely(sk->sk_state == TCP_CLOSE)) return; if (tcp_write_xmit(sk, cur_mss, nonagle, 0, sk_gfp_atomic(sk, GFP_ATOMIC))) tcp_check_probe_timer(sk); } /* Send _single_ skb sitting at the send head. This function requires * true push pending frames to setup probe timer etc. 
*/ void tcp_push_one(struct sock *sk, unsigned int mss_now) { struct sk_buff *skb = tcp_send_head(sk); BUG_ON(!skb || skb->len < mss_now); tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); } /* This function returns the amount that we can raise the * usable window based on the following constraints * * 1. The window can never be shrunk once it is offered (RFC 793) * 2. We limit memory per socket * * RFC 1122: * "the suggested [SWS] avoidance algorithm for the receiver is to keep * RECV.NEXT + RCV.WIN fixed until: * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" * * i.e. don't raise the right edge of the window until you can raise * it at least MSS bytes. * * Unfortunately, the recommended algorithm breaks header prediction, * since header prediction assumes th->window stays fixed. * * Strictly speaking, keeping th->window fixed violates the receiver * side SWS prevention criteria. The problem is that under this rule * a stream of single byte packets will cause the right side of the * window to always advance by a single byte. * * Of course, if the sender implements sender side SWS prevention * then this will not be a problem. * * BSD seems to make the following compromise: * * If the free space is less than the 1/4 of the maximum * space available and the free space is less than 1/2 mss, * then set the window to 0. * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] * Otherwise, just prevent the window from shrinking * and from being larger than the largest representable value. * * This prevents incremental opening of the window in the regime * where TCP is limited by the speed of the reader side taking * data out of the TCP receive queue. It does nothing about * those cases where the window is constrained on the sender side * because the pipeline is full. * * BSD also seems to "accidentally" limit itself to windows that are a * multiple of MSS, at least until the free space gets quite small. * This would appear to be a side effect of the mbuf implementation. * Combining these two algorithms results in the observed behavior * of having a fixed window size at almost all times. * * Below we obtain similar behavior by forcing the offered window to * a multiple of the mss when it is feasible to do so. * * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. * Regular options like TIMESTAMP are taken into account. */ u32 __tcp_select_window(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); /* MSS for the peer's data. Previous versions used mss_clamp * here. I don't know if the value based on our guesses * of peer's MSS is better for the performance. It's more correct * but may be worse for the performance because of rcv_mss * fluctuations. --SAW 1998/11/1 */ int mss = icsk->icsk_ack.rcv_mss; int free_space = tcp_space(sk); int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); int window; if (mss > full_space) mss = full_space; if (free_space < (full_space >> 1)) { icsk->icsk_ack.quick = 0; if (sk_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); if (free_space < mss) return 0; } if (free_space > tp->rcv_ssthresh) free_space = tp->rcv_ssthresh; /* Don't do rounding if we are using window scaling, since the * scaled window will not line up with the MSS boundary anyway. */ window = tp->rcv_wnd; if (tp->rx_opt.rcv_wscale) { window = free_space; /* Advertise enough space so that it won't get scaled away. 
* Import case: prevent zero window announcement if * 1<<rcv_wscale > mss. */ if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) window = (((window >> tp->rx_opt.rcv_wscale) + 1) << tp->rx_opt.rcv_wscale); } else { /* Get the largest window that is a nice multiple of mss. * Window clamp already applied above. * If our current window offering is within 1 mss of the * free space we just keep it. This prevents the divide * and multiply from happening most of the time. * We also don't do any window rounding when the free space * is too small. */ if (window <= free_space - mss || window > free_space) window = (free_space / mss) * mss; else if (mss == full_space && free_space > window + (full_space >> 1)) window = free_space; } return window; } /* Collapses two adjacent SKB's during retransmission. */ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); int skb_size, next_skb_size; skb_size = skb->len; next_skb_size = next_skb->len; BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); tcp_highest_sack_combine(sk, next_skb, skb); tcp_unlink_write_queue(next_skb, sk); skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), next_skb_size); if (next_skb->ip_summed == CHECKSUM_PARTIAL) skb->ip_summed = CHECKSUM_PARTIAL; if (skb->ip_summed != CHECKSUM_PARTIAL) skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); /* Update sequence range on original skb. */ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; /* Merge over control information. This moves PSH/FIN etc. over */ TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; /* All done, get rid of second SKB and account for it so * packet counting does not break. */ TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; /* changed transmit queue under us so clear hints */ tcp_clear_retrans_hints_partial(tp); if (next_skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = skb; tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); sk_wmem_free_skb(sk, next_skb); } /* Check if coalescing SKBs is legal. */ static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) { if (tcp_skb_pcount(skb) > 1) return false; /* TODO: SACK collapsing could be used to remove this condition */ if (skb_shinfo(skb)->nr_frags != 0) return false; if (skb_cloned(skb)) return false; if (skb == tcp_send_head(sk)) return false; /* Some heurestics for collapsing over SACK'd could be invented */ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) return false; return true; } /* Collapse packets in the retransmit queue to make to create * less packets on the wire. This is only done on retransmission. */ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, int space) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = to, *tmp; bool first = true; if (!sysctl_tcp_retrans_collapse) return; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) return; tcp_for_write_queue_from_safe(skb, tmp, sk) { if (!tcp_can_collapse(sk, skb)) break; space -= skb->len; if (first) { first = false; continue; } if (space < 0) break; /* Punt if not enough space exists in the first SKB for * the data in the second */ if (skb->len > skb_availroom(to)) break; if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) break; tcp_collapse_retrans(sk, to); } } /* This retransmits one SKB. Policy decisions and retransmit queue * state updates are done by the caller. 
Returns non-zero if an * error occurred which prevented the send. */ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); unsigned int cur_mss; /* Inconslusive MTU probe */ if (icsk->icsk_mtup.probe_size) { icsk->icsk_mtup.probe_size = 0; } /* Do not sent more than we queued. 1/4 is reserved for possible * copying overhead: fragmentation, tunneling, mangling etc. */ if (atomic_read(&sk->sk_wmem_alloc) > min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) return -EAGAIN; if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) BUG(); if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) return -ENOMEM; } if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) return -EHOSTUNREACH; /* Routing failure or similar. */ cur_mss = tcp_current_mss(sk); /* If receiver has shrunk his window, and skb is out of * new window, do not retransmit it. The exception is the * case, when window is shrunk to zero. In this case * our retransmit serves as a zero window probe. */ if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && TCP_SKB_CB(skb)->seq != tp->snd_una) return -EAGAIN; if (skb->len > cur_mss) { if (tcp_fragment(sk, skb, cur_mss, cur_mss)) return -ENOMEM; /* We'll try again later. */ } else { int oldpcount = tcp_skb_pcount(skb); if (unlikely(oldpcount > 1)) { tcp_init_tso_segs(sk, skb, cur_mss); tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); } } tcp_retrans_try_collapse(sk, skb, cur_mss); /* Some Solaris stacks overoptimize and ignore the FIN on a * retransmit when old data is attached. So strip it off * since it is cheap to do so and saves bytes on the network. */ if (skb->len > 0 && (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { if (!pskb_trim(skb, 0)) { /* Reuse, even though it does some unnecessary work */ tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, TCP_SKB_CB(skb)->tcp_flags); skb->ip_summed = CHECKSUM_NONE; } } /* Make a copy, if the first transmission SKB clone we made * is still in somebody's hands, else make a clone. */ TCP_SKB_CB(skb)->when = tcp_time_stamp; /* make sure skb->data is aligned on arches that require it */ if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : -ENOBUFS; } else { return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); } } int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); int err = __tcp_retransmit_skb(sk, skb); if (err == 0) { /* Update global TCP statistics. */ TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); tp->total_retrans++; #if FASTRETRANS_DEBUG > 0 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { net_dbg_ratelimited("retrans_out leaked\n"); } #endif if (!tp->retrans_out) tp->lost_retrans_low = tp->snd_nxt; TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; tp->retrans_out += tcp_skb_pcount(skb); /* Save stamp of the first retransmit. */ if (!tp->retrans_stamp) tp->retrans_stamp = TCP_SKB_CB(skb)->when; tp->undo_retrans += tcp_skb_pcount(skb); /* snd_nxt is stored to detect loss of retransmitted segment, * see tcp_input.c tcp_sacktag_write_queue(). */ TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; } return err; } /* Check if we forward retransmits are possible in the current * window/congestion state. 
*/ static bool tcp_can_forward_retransmit(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); /* Forward retransmissions are possible only during Recovery. */ if (icsk->icsk_ca_state != TCP_CA_Recovery) return false; /* No forward retransmissions in Reno are possible. */ if (tcp_is_reno(tp)) return false; /* Yeah, we have to make difficult choice between forward transmission * and retransmission... Both ways have their merits... * * For now we do not retransmit anything, while we have some new * segments to send. In the other cases, follow rule 3 for * NextSeg() specified in RFC3517. */ if (tcp_may_send_now(sk)) return false; return true; } /* This gets called after a retransmit timeout, and the initially * retransmitted data is acknowledged. It tries to continue * resending the rest of the retransmit queue, until either * we've sent it all or the congestion window limit is reached. * If doing SACK, the first ACK which comes back for a timeout * based retransmit packet might feed us FACK information again. * If so, we use it to avoid unnecessarily retransmissions. */ void tcp_xmit_retransmit_queue(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; struct sk_buff *hole = NULL; u32 last_lost; int mib_idx; int fwd_rexmitting = 0; if (!tp->packets_out) return; if (!tp->lost_out) tp->retransmit_high = tp->snd_una; if (tp->retransmit_skb_hint) { skb = tp->retransmit_skb_hint; last_lost = TCP_SKB_CB(skb)->end_seq; if (after(last_lost, tp->retransmit_high)) last_lost = tp->retransmit_high; } else { skb = tcp_write_queue_head(sk); last_lost = tp->snd_una; } tcp_for_write_queue_from(skb, sk) { __u8 sacked = TCP_SKB_CB(skb)->sacked; if (skb == tcp_send_head(sk)) break; /* we could do better than to assign each time */ if (hole == NULL) tp->retransmit_skb_hint = skb; /* Assume this retransmit will generate * only one packet for congestion window * calculation purposes. This works because * tcp_retransmit_skb() will chop up the * packet to be MSS sized and all the * packet counting works out. */ if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) return; if (fwd_rexmitting) { begin_fwd: if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) break; mib_idx = LINUX_MIB_TCPFORWARDRETRANS; } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { tp->retransmit_high = last_lost; if (!tcp_can_forward_retransmit(sk)) break; /* Backtrack if necessary to non-L'ed skb */ if (hole != NULL) { skb = hole; hole = NULL; } fwd_rexmitting = 1; goto begin_fwd; } else if (!(sacked & TCPCB_LOST)) { if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) hole = skb; continue; } else { last_lost = TCP_SKB_CB(skb)->end_seq; if (icsk->icsk_ca_state != TCP_CA_Loss) mib_idx = LINUX_MIB_TCPFASTRETRANS; else mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; } if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) continue; if (tcp_retransmit_skb(sk, skb)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); return; } NET_INC_STATS_BH(sock_net(sk), mib_idx); if (tcp_in_cwnd_reduction(sk)) tp->prr_out += tcp_skb_pcount(skb); if (skb == tcp_write_queue_head(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); } } /* Send a fin. The caller locks the socket for us. This cannot be * allowed to fail queueing a FIN frame under any circumstances. 
*/ void tcp_send_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = tcp_write_queue_tail(sk); int mss_now; /* Optimization, tack on the FIN if we have a queue of * unsent frames. But be careful about outgoing SACKS * and IP options. */ mss_now = tcp_current_mss(sk); if (tcp_send_head(sk) != NULL) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; TCP_SKB_CB(skb)->end_seq++; tp->write_seq++; } else { /* Socket is locked, keep trying until memory is available. */ for (;;) { skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); if (skb) break; yield(); } /* Reserve space for headers and prepare control bits. */ skb_reserve(skb, MAX_TCP_HEADER); /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ tcp_init_nondata_skb(skb, tp->write_seq, TCPHDR_ACK | TCPHDR_FIN); tcp_queue_skb(sk, skb); } __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); } /* We get here when a process closes a file descriptor (either due to * an explicit close() or as a byproduct of exit()'ing) and there * was unread data in the receive queue. This behavior is recommended * by RFC 2525, section 2.17. -DaveM */ void tcp_send_active_reset(struct sock *sk, gfp_t priority) { struct sk_buff *skb; /* NOTE: No TCP options attached and we never retransmit this. */ skb = alloc_skb(MAX_TCP_HEADER, priority); if (!skb) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); return; } /* Reserve space for headers and prepare control bits. */ skb_reserve(skb, MAX_TCP_HEADER); tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), TCPHDR_ACK | TCPHDR_RST); /* Send it off. */ TCP_SKB_CB(skb)->when = tcp_time_stamp; if (tcp_transmit_skb(sk, skb, 0, priority)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); } /* Send a crossed SYN-ACK during socket establishment. * WARNING: This routine must only be called when we have already sent * a SYN packet that crossed the incoming SYN that caused this routine * to get called. If this assumption fails then the initial rcv_wnd * and rcv_wscale values will not be correct. */ int tcp_send_synack(struct sock *sk) { struct sk_buff *skb; skb = tcp_write_queue_head(sk); if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { pr_debug("%s: wrong queue state\n", __func__); return -EFAULT; } if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { if (skb_cloned(skb)) { struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); if (nskb == NULL) return -ENOMEM; tcp_unlink_write_queue(skb, sk); skb_header_release(nskb); __tcp_add_write_queue_head(sk, nskb); sk_wmem_free_skb(sk, skb); sk->sk_wmem_queued += nskb->truesize; sk_mem_charge(sk, nskb->truesize); skb = nskb; } TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; TCP_ECN_send_synack(tcp_sk(sk), skb); } TCP_SKB_CB(skb)->when = tcp_time_stamp; return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); } /** * tcp_make_synack - Prepare a SYN-ACK. * sk: listener socket * dst: dst entry attached to the SYNACK * req: request_sock pointer * rvp: request_values pointer * * Allocate one skb and build a SYNACK packet. * @dst is consumed : Caller should not use it again. 
*/ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, struct request_values *rvp, struct tcp_fastopen_cookie *foc) { struct tcp_out_options opts; struct tcp_extend_values *xvp = tcp_xv(rvp); struct inet_request_sock *ireq = inet_rsk(req); struct tcp_sock *tp = tcp_sk(sk); const struct tcp_cookie_values *cvp = tp->cookie_values; struct tcphdr *th; struct sk_buff *skb; struct tcp_md5sig_key *md5; int tcp_header_size; int mss; int s_data_desired = 0; if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) s_data_desired = cvp->s_data_desired; skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired, sk_gfp_atomic(sk, GFP_ATOMIC)); if (unlikely(!skb)) { dst_release(dst); return NULL; } /* Reserve space for headers. */ skb_reserve(skb, MAX_TCP_HEADER); skb_dst_set(skb, dst); mss = dst_metric_advmss(dst); if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) mss = tp->rx_opt.user_mss; if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ __u8 rcv_wscale; /* Set this up on the first call only */ req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); /* limit the window selection if the user enforce a smaller rx buffer */ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) req->window_clamp = tcp_full_space(sk); /* tcp_full_space because it is guaranteed to be the first packet */ tcp_select_initial_window(tcp_full_space(sk), mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), &req->rcv_wnd, &req->window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(dst, RTAX_INITRWND)); ireq->rcv_wscale = rcv_wscale; } memset(&opts, 0, sizeof(opts)); #ifdef CONFIG_SYN_COOKIES if (unlikely(req->cookie_ts)) TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); else #endif TCP_SKB_CB(skb)->when = tcp_time_stamp; tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, xvp, foc) + sizeof(*th); skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); th = tcp_hdr(skb); memset(th, 0, sizeof(struct tcphdr)); th->syn = 1; th->ack = 1; TCP_ECN_make_synack(req, th); th->source = ireq->loc_port; th->dest = ireq->rmt_port; /* Setting of flags are superfluous here for callers (and ECE is * not even correctly set) */ tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, TCPHDR_SYN | TCPHDR_ACK); if (OPTION_COOKIE_EXTENSION & opts.options) { if (s_data_desired) { u8 *buf = skb_put(skb, s_data_desired); /* copy data directly from the listening socket. */ memcpy(buf, cvp->s_data_payload, s_data_desired); TCP_SKB_CB(skb)->end_seq += s_data_desired; } if (opts.hash_size > 0) { __u32 workspace[SHA_WORKSPACE_WORDS]; u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS]; u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1]; /* Secret recipe depends on the Timestamp, (future) * Sequence and Acknowledgment Numbers, Initiator * Cookie, and others handled by IP variant caller. */ *tail-- ^= opts.tsval; *tail-- ^= tcp_rsk(req)->rcv_isn + 1; *tail-- ^= TCP_SKB_CB(skb)->seq + 1; /* recommended */ *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source); *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ sha_transform((__u32 *)&xvp->cookie_bakery[0], (char *)mess, &workspace[0]); opts.hash_location = (__u8 *)&xvp->cookie_bakery[0]; } } th->seq = htonl(TCP_SKB_CB(skb)->seq); /* XXX data is queued and acked as is. No buffer/window check */ th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. 
*/ th->window = htons(min(req->rcv_wnd, 65535U)); tcp_options_write((__be32 *)(th + 1), tp, &opts); th->doff = (tcp_header_size >> 2); TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); #ifdef CONFIG_TCP_MD5SIG /* Okay, we have all we need - do the md5 hash if needed */ if (md5) { tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, md5, NULL, req, skb); } #endif return skb; } EXPORT_SYMBOL(tcp_make_synack); /* Do all connect socket setups that can be done AF independent. */ void tcp_connect_init(struct sock *sk) { const struct dst_entry *dst = __sk_dst_get(sk); struct tcp_sock *tp = tcp_sk(sk); __u8 rcv_wscale; /* We'll fix this up when we get a response from the other end. * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. */ tp->tcp_header_len = sizeof(struct tcphdr) + (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); #ifdef CONFIG_TCP_MD5SIG if (tp->af_specific->md5_lookup(sk, sk) != NULL) tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; #endif /* If user gave his TCP_MAXSEG, record it to clamp */ if (tp->rx_opt.user_mss) tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; tp->max_window = 0; tcp_mtup_init(sk); tcp_sync_mss(sk, dst_mtu(dst)); if (!tp->window_clamp) tp->window_clamp = dst_metric(dst, RTAX_WINDOW); tp->advmss = dst_metric_advmss(dst); if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) tp->advmss = tp->rx_opt.user_mss; tcp_initialize_rcv_mss(sk); /* limit the window selection if the user enforce a smaller rx buffer */ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) tp->window_clamp = tcp_full_space(sk); tcp_select_initial_window(tcp_full_space(sk), tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), &tp->rcv_wnd, &tp->window_clamp, sysctl_tcp_window_scaling, &rcv_wscale, dst_metric(dst, RTAX_INITRWND)); tp->rx_opt.rcv_wscale = rcv_wscale; tp->rcv_ssthresh = tp->rcv_wnd; sk->sk_err = 0; sock_reset_flag(sk, SOCK_DONE); tp->snd_wnd = 0; tcp_init_wl(tp, 0); tp->snd_una = tp->write_seq; tp->snd_sml = tp->write_seq; tp->snd_up = tp->write_seq; tp->snd_nxt = tp->write_seq; if (likely(!tp->repair)) tp->rcv_nxt = 0; tp->rcv_wup = tp->rcv_nxt; tp->copied_seq = tp->rcv_nxt; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; inet_csk(sk)->icsk_retransmits = 0; tcp_clear_retrans(tp); } static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); tcb->end_seq += skb->len; skb_header_release(skb); __tcp_add_write_queue_tail(sk, skb); sk->sk_wmem_queued += skb->truesize; sk_mem_charge(sk, skb->truesize); tp->write_seq = tcb->end_seq; tp->packets_out += tcp_skb_pcount(skb); } /* Build and send a SYN with data and (cached) Fast Open cookie. However, * queue a data-only packet after the regular SYN, such that regular SYNs * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges * only the SYN sequence, the data are retransmitted in the first ACK. * If cookie is not cached or other error occurs, falls back to send a * regular SYN with Fast Open cookie request option. 
*/ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_request *fo = tp->fastopen_req; int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen; struct sk_buff *syn_data = NULL, *data; unsigned long last_syn_loss = 0; tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, &syn_loss, &last_syn_loss); /* Recurring FO SYN losses: revert to regular handshake temporarily */ if (syn_loss > 1 && time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) { fo->cookie.len = -1; goto fallback; } if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) fo->cookie.len = -1; else if (fo->cookie.len <= 0) goto fallback; /* MSS for SYN-data is based on cached MSS and bounded by PMTU and * user-MSS. Reserve maximum option space for middleboxes that add * private TCP options. The cost is reduced data space in SYN :( */ if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - MAX_TCP_OPTION_SPACE; syn_data = skb_copy_expand(syn, skb_headroom(syn), space, sk->sk_allocation); if (syn_data == NULL) goto fallback; for (i = 0; i < iovlen && syn_data->len < space; ++i) { struct iovec *iov = &fo->data->msg_iov[i]; unsigned char __user *from = iov->iov_base; int len = iov->iov_len; if (syn_data->len + len > space) len = space - syn_data->len; else if (i + 1 == iovlen) /* No more data pending in inet_wait_for_connect() */ fo->data = NULL; if (skb_add_data(syn_data, from, len)) goto fallback; } /* Queue a data-only packet after the regular SYN for retransmission */ data = pskb_copy(syn_data, sk->sk_allocation); if (data == NULL) goto fallback; TCP_SKB_CB(data)->seq++; TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN; TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH); tcp_connect_queue_skb(sk, data); fo->copied = data->len; if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) { tp->syn_data = (fo->copied > 0); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); goto done; } syn_data = NULL; fallback: /* Send a regular SYN with Fast Open cookie request option */ if (fo->cookie.len > 0) fo->cookie.len = 0; err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); if (err) tp->syn_fastopen = 0; kfree_skb(syn_data); done: fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ return err; } /* Build a SYN and send it off. */ int tcp_connect(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; int err; tcp_connect_init(sk); if (unlikely(tp->repair)) { tcp_finish_connect(sk, NULL); return 0; } buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); if (unlikely(buff == NULL)) return -ENOBUFS; /* Reserve space for headers. */ skb_reserve(buff, MAX_TCP_HEADER); tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp; tcp_connect_queue_skb(sk, buff); TCP_ECN_send_syn(sk, buff); /* Send off SYN; include data in Fast Open. */ err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); if (err == -ECONNREFUSED) return err; /* We change tp->snd_nxt after the tcp_transmit_skb() call * in order to make this packet get counted in tcpOutSegs. */ tp->snd_nxt = tp->write_seq; tp->pushed_seq = tp->write_seq; TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); /* Timer for repeating the SYN until an answer. 
*/ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); return 0; } EXPORT_SYMBOL(tcp_connect); /* Send out a delayed ack, the caller does the policy checking * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() * for details. */ void tcp_send_delayed_ack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); int ato = icsk->icsk_ack.ato; unsigned long timeout; if (ato > TCP_DELACK_MIN) { const struct tcp_sock *tp = tcp_sk(sk); int max_ato = HZ / 2; if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) max_ato = TCP_DELACK_MAX; /* Slow path, intersegment interval is "high". */ /* If some rtt estimate is known, use it to bound delayed ack. * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements * directly. */ if (tp->srtt) { int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN); if (rtt < max_ato) max_ato = rtt; } ato = min(ato, max_ato); } /* Stay within the limit we were given */ timeout = jiffies + ato; /* Use new timeout only if there wasn't a older one earlier. */ if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { /* If delack timer was blocked or is about to expire, * send ACK now. */ if (icsk->icsk_ack.blocked || time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { tcp_send_ack(sk); return; } if (!time_before(timeout, icsk->icsk_ack.timeout)) timeout = icsk->icsk_ack.timeout; } icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; icsk->icsk_ack.timeout = timeout; sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); } /* This routine sends an ack and also updates the window. */ void tcp_send_ack(struct sock *sk) { struct sk_buff *buff; /* If we have been reset, we may not send again. */ if (sk->sk_state == TCP_CLOSE) return; /* We are not putting this on the write queue, so * tcp_transmit_skb() will set the ownership to this * sock. */ buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); if (buff == NULL) { inet_csk_schedule_ack(sk); inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); return; } /* Reserve space for headers and prepare control bits. */ skb_reserve(buff, MAX_TCP_HEADER); tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); /* Send it off, this clears delayed acks for us. */ TCP_SKB_CB(buff)->when = tcp_time_stamp; tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); } /* This routine sends a packet with an out of date sequence * number. It assumes the other end will try to ack it. * * Question: what should we make while urgent mode? * 4.4BSD forces sending single byte of data. We cannot send * out of window data, because we have SND.NXT==SND.MAX... * * Current solution: to send TWO zero-length segments in urgent mode: * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is * out-of-date with SND.UNA-1 to probe window. */ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; /* We don't queue it, tcp_transmit_skb() sets ownership. */ skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); if (skb == NULL) return -1; /* Reserve space for headers and set control bits. */ skb_reserve(skb, MAX_TCP_HEADER); /* Use a previous sequence. This should cause the other * end to send an ack. Don't queue or clone SKB, just * send it. 
*/ tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); TCP_SKB_CB(skb)->when = tcp_time_stamp; return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); } void tcp_send_window_probe(struct sock *sk) { if (sk->sk_state == TCP_ESTABLISHED) { tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq; tcp_xmit_probe_skb(sk, 0); } } /* Initiate keepalive or window probe from timer. */ int tcp_write_wakeup(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if (sk->sk_state == TCP_CLOSE) return -1; if ((skb = tcp_send_head(sk)) != NULL && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { int err; unsigned int mss = tcp_current_mss(sk); unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; /* We are probing the opening of a window * but the window size is != 0 * must have been a result SWS avoidance ( sender ) */ if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || skb->len > mss) { seg_size = min(seg_size, mss); TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; if (tcp_fragment(sk, skb, seg_size, mss)) return -1; } else if (!tcp_skb_pcount(skb)) tcp_set_skb_tso_segs(sk, skb, mss); TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; TCP_SKB_CB(skb)->when = tcp_time_stamp; err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); if (!err) tcp_event_new_data_sent(sk, skb); return err; } else { if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) tcp_xmit_probe_skb(sk, 1); return tcp_xmit_probe_skb(sk, 0); } } /* A window probe timeout has occurred. If window is not closed send * a partial packet else a zero probe. */ void tcp_send_probe0(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int err; err = tcp_write_wakeup(sk); if (tp->packets_out || !tcp_send_head(sk)) { /* Cancel probe timer, if it is not required. */ icsk->icsk_probes_out = 0; icsk->icsk_backoff = 0; return; } if (err <= 0) { if (icsk->icsk_backoff < sysctl_tcp_retries2) icsk->icsk_backoff++; icsk->icsk_probes_out++; inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), TCP_RTO_MAX); } else { /* If packet was not sent due to local congestion, * do not backoff and do not remember icsk_probes_out. * Let local senders to fight for local resources. * * Use accumulated backoff yet. */ if (!icsk->icsk_probes_out) icsk->icsk_probes_out = 1; inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RESOURCE_PROBE_INTERVAL), TCP_RTO_MAX); } }
kozmikkick/kozmikkernel3.8
net/ipv4/tcp_output.c
C
gpl-2.0
95,695
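The tcp_mss_split_point() logic in the file above reduces to simple arithmetic: send as many full MSS-sized segments as the congestion window and segment cap allow, and only round down to a whole multiple of the MSS when the receive window is the limiting factor, so no odd-sized tail segment is created. Below is a minimal user-space sketch of just that rounding step — illustrative only, not kernel code; the function name, the omitted tail-of-queue special case, and the hard-coded numbers are invented for the example.

```c
#include <stdio.h>

/* Simplified restatement of the rounding in tcp_mss_split_point():
 * if the receive window (rather than the segment cap) limits how much
 * we may send, trim the chunk to a whole number of MSS-sized segments. */
static unsigned int split_point(unsigned int skb_len,
                                unsigned int window,
                                unsigned int mss,
                                unsigned int max_segs)
{
    unsigned int max_len = mss * max_segs;
    unsigned int needed  = skb_len < window ? skb_len : window;

    if (max_len <= needed)
        return max_len;            /* the segment cap is the limit */
    return needed - needed % mss;  /* the window is the limit: MSS-align */
}

int main(void)
{
    /* 1460-byte MSS, 10000-byte skb, 6000-byte window, 8 segments allowed */
    printf("%u\n", split_point(10000, 6000, 1460, 8)); /* prints 5840 */
    return 0;
}
```

With these numbers the cap would allow 11680 bytes, but the 6000-byte window governs, so the sketch returns 5840 (four full 1460-byte segments) rather than 6000, mirroring why the kernel avoids creating a short, Nagle-unfriendly trailing segment.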
/* hzip: file compression for sorted dictionaries with optional encryption, * algorithm: prefix-suffix encoding and 16-bit Huffman encoding */ #include <stdio.h> #include <stdlib.h> #include <string.h> #define CODELEN 65536 #define BUFSIZE 65536 #define EXTENSION ".hz" #define ESCAPE 31 #define MAGIC "hz0" #define MAGIC_ENCRYPTED "hz1" #define DESC "hzip - dictionary compression utility\n" \ "Usage: hzip [-h | -P password ] [file1 file2 ..]\n" \ " -P password encrypted compression\n" \ " -h display this help and exit\n" enum { code_LEAF, code_TERM, code_NODE}; struct item { unsigned short word; int count; char type; struct item * left; struct item * right; }; int fail(const char * err, const char * par) { fprintf(stderr, err, par); return 1; } void code2table(struct item * tree, char **table, char * code, int deep) { int first = 0; if (!code) { first = 1; code = malloc(CODELEN); } code[deep] = '1'; if (tree->left) code2table(tree->left, table, code, deep + 1); if (tree->type != code_NODE) { int i = tree->word; code[deep] = '\0'; if (tree->type == code_TERM) i = CODELEN; /* terminal code */ table[i] = malloc(deep + 1); strcpy(table[i], code); } code[deep] = '0'; if (tree->right) code2table(tree->right, table, code, deep + 1); if (first) free(code); } struct item * newitem(int c, struct item * l, struct item * r, int t) { struct item * ni = (struct item *) malloc(sizeof(struct item)); ni->type = t; ni->word = 0; ni->count = c; ni->left = l; ni->right = r; return ni; } /* return length of the freq array */ int get_freqdata(struct item *** dest, FILE * f, unsigned short * termword) { int freq[CODELEN]; int i, j, k, n; union { char c[2]; unsigned short word; } u; for (i = 0; i < CODELEN; i++) freq[i] = 0; while((j = getc(f)) != -1 && (k = getc(f)) != -1) { u.c[0] = j; u.c[1] = k; freq[u.word]++; } if (j != -1) { u.c[0] = 1; u.c[1] = j; } else { u.c[0] = 0; u.c[1] = 0; } *dest = (struct item **) malloc((CODELEN + 1) * sizeof(struct item *)); if (!*dest) return -1; for (i = 0, n = 0; i < CODELEN; i++) if (freq[i]) { (*dest)[n] = newitem(freq[i], NULL, NULL, code_LEAF); (*dest)[n]->word = i; n++; } /* terminal sequence (also contains the last odd byte of the file) */ (*dest)[n] = newitem(1, NULL, NULL, code_TERM); *termword = u.word; return n + 1; } void get_codetable(struct item **l, int n, char ** table) { int i; while (n > 1) { int min = 0; int mi2 = 1; for (i = 1; i < n; i++) { if (l[i]->count < l[min]->count) { mi2 = min; min = i; } else if (l[i]->count < l[mi2]->count) mi2 = i; } l[min] = newitem(l[min]->count + l[mi2]->count, l[min], l[mi2], code_NODE); for (i = mi2 + 1; i < n; i++) l[i - 1] = l[i]; n--; } code2table(l[0], table, NULL, 0); } int write_bits(FILE *f, char * bitbuf, int *bits, char * code) { while (*code) { int b = (*bits) % 8; if (!b) bitbuf[(*bits) / 8] = ((*code) - '0') << 7; else bitbuf[(*bits) / 8] |= (((*code) - '0') << (7 - b)); (*bits)++; code++; if (*bits == BUFSIZE * 8) { if (BUFSIZE != fwrite(bitbuf, 1, BUFSIZE, f)) return 1; *bits = 0; } } return 0; } int encode_file(char ** table, int n, FILE *f, FILE *f2, unsigned short tw, char * key) { char bitbuf[BUFSIZE]; int i, bits = 0; unsigned char cl, ch; int cx[2]; union { char c[2]; unsigned short word; } u; char * enc = key; /* header and codes */ fprintf(f2, "%s", (key ? 
MAGIC_ENCRYPTED : MAGIC)); /* 3-byte HEADER */ cl = (unsigned char) (n & 0x00ff); ch = (unsigned char) (n >> 8); if (key) { unsigned char cs; for (cs = 0; *enc; enc++) cs ^= *enc; fprintf(f2, "%c", cs); /* 1-byte check sum */ enc = key; ch ^= *enc; if ((*(++enc)) == '\0') enc = key; cl ^= *enc; } fprintf(f2, "%c%c", ch, cl); /* upper and lower byte of record count */ for (i = 0; i < BUFSIZE; i++) bitbuf[i] = '\0'; for (i = 0; i < CODELEN + 1; i++) if (table[i]) { size_t nmemb; u.word = (unsigned short) i; if (i == CODELEN) u.word = tw; if (key) { if (*(++enc) == '\0') enc = key; u.c[0] ^= *enc; if (*(++enc) == '\0') enc = key; u.c[1] ^= *enc; } fprintf(f2, "%c%c", u.c[0], u.c[1]); /* 2-character code id */ bits = 0; if (write_bits(f2, bitbuf, &bits, table[i]) != 0) return 1; if (key) { if (*(++enc) == '\0') enc = key; fprintf(f2, "%c", ((unsigned char) bits) ^ *enc); for (cl = 0; cl <= bits/8; cl++) { if (*(++enc) == '\0') enc = key; bitbuf[cl] ^= *enc; } } else fprintf(f2, "%c", (unsigned char) bits); /* 1-byte code length */ nmemb = bits/8 + 1; if (fwrite(bitbuf, 1, bits/8 + 1, f2) != nmemb) /* x-byte code */ return 1; } /* file encoding */ bits = 0; while((cx[0] = getc(f)) != -1 && (cx[1] = getc(f)) != -1) { u.c[0] = cx[0]; u.c[1] = cx[1]; if (write_bits(f2, bitbuf, &bits, table[u.word]) != 0) return 1; } /* terminal suffixes */ if (write_bits(f2, bitbuf, &bits, table[CODELEN]) != 0) return 1; if (bits > 0) { size_t nmemb = bits/8 + 1; if (fwrite(bitbuf, 1, nmemb, f2) != nmemb) return 1; } return 0; } int prefixcompress(FILE *f, FILE *tempfile) { char buf[BUFSIZE]; char buf2[BUFSIZE * 2]; char prev[BUFSIZE]; int prevlen = 0; while(fgets(buf,BUFSIZE,f)) { int i, j, k, m, c=0; int pfx = prevlen; char * p = buf2; m = j = 0; for (i = 0; buf[i]; i++) { if ((pfx > 0) && (buf[i] == prev[i])) { j++; } else pfx = 0; } if (i > 0 && buf[i - 1] == '\n') { if (j == i) j--; /* line duplicate */ if (j > 29) j = 29; c = j; if (c == '\t') c = 30; /* common suffix */ for (; buf[i - m - 2] == prev[prevlen - m - 2] && m < i - j - 1 && m < 15; m++); if (m == 1) m = 0; } else { j = 0; m = -1; } for (k = j; k < i - m - 1; k++, p++) { if (((unsigned char) buf[k]) < 47 && buf[k] != '\t' && buf[k] != ' ') { *p = ESCAPE; p++; } *p = buf[k]; } if (m > 0) { *p = m + 31; /* 33-46 */ p++; } if (i > 0 && buf[i - 1] == '\n') { size_t nmemb = p - buf2 + 1; *p = c; if (fwrite(buf2, 1, nmemb, tempfile) != nmemb) return 1; } else { size_t nmemb = p - buf2; if (fwrite(buf2, 1, nmemb, tempfile) != nmemb) return 1; } memcpy(prev, buf, i); prevlen = i; } return 0; } int hzip(const char * filename, char * key) { struct item ** list; char * table[CODELEN + 1]; int n; char out[BUFSIZE]; FILE *f, *f2, *tempfile; unsigned short termword; strcpy(out, filename); strcat(out, EXTENSION); f = fopen(filename, "r"); if (!f) return fail("hzip: %s: Permission denied\n", filename); tempfile = tmpfile(); if (!tempfile) { fclose(f); return fail("hzip: cannot create temporary file\n", NULL); } f2 = fopen(out, "wb"); if (!f2) { fclose(tempfile); fclose(f); return fail("hzip: %s: Permission denied\n", out); } for (n = 0; n < CODELEN; n++) table[n] = NULL; if (prefixcompress(f, tempfile) != 0) { fclose(f2); fclose(tempfile); fclose(f); return fail("hzip: cannot write file\n", NULL); } rewind(tempfile); n = get_freqdata(&list, tempfile, &termword); get_codetable(list, n, table); rewind(tempfile); n = encode_file(table, n, tempfile, f2, termword, key); fclose(f2); fclose(tempfile); fclose(f); if (n != 0) return fail("hzip: cannot write file\n", 
NULL); return n; } int main(int argc, char** argv) { int i, j = 0; char * key = NULL; for (i = 1; i < argc; i++) { if (*(argv[i]) == '-') { if (*(argv[i] + 1) == 'h') return fail(DESC, NULL); if (*(argv[i] + 1) == 'P') { if (i + 1 == argc) return fail("hzip: missing password\n", NULL); key = argv[i + 1]; i++; continue; } return fail("hzip: no such option: %s\n", argv[i]); } else if (hzip(argv[i], key) != 0) return 1; else j = 1; } if (j == 0) return fail("hzip: need a filename parameter\n", NULL); return 0; }
arnotixe/qu_EC_spell
chimborazo/hunspell/hunspell-1.3.3/src/tools/hzip.c
C
gpl-2.0
9,409
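The write_bits() helper in hzip.c above packs each Huffman code, supplied as a string of '0'/'1' characters, into a byte buffer most-significant-bit first. Here is a standalone sketch of that bit-packing idea — the buffer handling is simplified and the names are invented for the example; the real function additionally flushes the buffer to the output file when it fills.

```c
#include <stdio.h>

/* Append a '0'/'1' code string to buf, MSB-first, starting at bit
 * offset *bits. Mirrors the packing done by write_bits() in hzip.c,
 * minus the file flushing. Caller must size buf appropriately. */
static void pack_bits(unsigned char *buf, int *bits, const char *code)
{
    for (; *code; code++, (*bits)++) {
        int byte  = *bits / 8;
        int shift = 7 - (*bits % 8);      /* high bit of the byte first */
        if (shift == 7)
            buf[byte] = 0;                /* starting a fresh byte */
        buf[byte] |= (unsigned char)((*code - '0') << shift);
    }
}

int main(void)
{
    unsigned char buf[4];
    int bits = 0;

    pack_bits(buf, &bits, "101");   /* a three-bit code  */
    pack_bits(buf, &bits, "11001"); /* a five-bit code   */
    printf("%d bits -> 0x%02x\n", bits, buf[0]); /* 8 bits -> 0xb9 */
    return 0;
}
```

Concatenating the two codes gives the bit pattern 10111001, i.e. 0xB9, which is exactly what lands in the first buffer byte — the same ordering the hzip decoder expects when it walks the Huffman tree bit by bit.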
/* * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ import java.awt.AlphaComposite; import java.awt.Color; import java.awt.Graphics2D; import java.awt.GraphicsConfiguration; import java.awt.GraphicsEnvironment; import java.awt.Image; import java.awt.image.BufferedImage; import java.awt.image.VolatileImage; import java.io.File; import java.io.IOException; import javax.imageio.ImageIO; /** * @test * @key headful * @bug 8041129 * @summary Tests asymmetric source offsets. * @author Sergey Bylokhov */ public final class IncorrectSourceOffset { public static void main(final String[] args) throws IOException { GraphicsEnvironment ge = GraphicsEnvironment .getLocalGraphicsEnvironment(); GraphicsConfiguration gc = ge.getDefaultScreenDevice() .getDefaultConfiguration(); VolatileImage vi = gc.createCompatibleVolatileImage(511, 255); BufferedImage bi = new BufferedImage(511, 255, BufferedImage.TYPE_INT_ARGB); BufferedImage gold = new BufferedImage(511, 255, BufferedImage.TYPE_INT_ARGB); fill(gold); while (true) { vi.validate(gc); fill(vi); if (vi.validate(gc) != VolatileImage.IMAGE_OK) { try { Thread.sleep(100); } catch (final InterruptedException ignored) { } continue; } Graphics2D big = bi.createGraphics(); big.drawImage(vi, 7, 11, 127, 111, 7, 11, 127, 111, null); big.dispose(); if (vi.contentsLost()) { try { Thread.sleep(100); } catch (final InterruptedException ignored) { } continue; } break; } for (int x = 7; x < 127; ++x) { for (int y = 11; y < 111; ++y) { if (gold.getRGB(x, y) != bi.getRGB(x, y)) { ImageIO.write(gold, "png", new File("gold.png")); ImageIO.write(bi, "png", new File("bi.png")); throw new RuntimeException("Test failed."); } } } } private static void fill(Image image) { Graphics2D graphics = (Graphics2D) image.getGraphics(); graphics.setComposite(AlphaComposite.Src); for (int i = 0; i < image.getHeight(null); ++i) { graphics.setColor(new Color(i, 0, 0)); graphics.fillRect(0, i, image.getWidth(null), 1); } graphics.dispose(); } }
dmlloyd/openjdk-modules
jdk/test/java/awt/image/DrawImage/IncorrectSourceOffset.java
Java
gpl-2.0
3,699
//===-- FindBugs.cpp - Run Many Different Optimizations -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines an interface that allows bugpoint to choose different // combinations of optimizations to run on the selected input. Bugpoint will // run these optimizations and record the success/failure of each. This way // we can hopefully spot bugs in the optimizations. // //===----------------------------------------------------------------------===// #include "BugDriver.h" #include "ToolRunner.h" #include "llvm/Pass.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <ctime> using namespace llvm; /// runManyPasses - Take the specified pass list and create different /// combinations of passes to compile the program with. Compile the program with /// each set and mark test to see if it compiled correctly. If the passes /// compiled correctly output nothing and rearrange the passes into a new order. /// If the passes did not compile correctly, output the command required to /// recreate the failure. This returns true if a compiler error is found. /// bool BugDriver::runManyPasses(const std::vector<std::string> &AllPasses, std::string &ErrMsg) { setPassesToRun(AllPasses); outs() << "Starting bug finding procedure...\n\n"; // Creating a reference output if necessary if (initializeExecutionEnvironment()) return false; outs() << "\n"; if (ReferenceOutputFile.empty()) { outs() << "Generating reference output from raw program: \n"; if (!createReferenceFile(Program)) return false; } srand(time(NULL)); unsigned num = 1; while(1) { // // Step 1: Randomize the order of the optimizer passes. // std::random_shuffle(PassesToRun.begin(), PassesToRun.end()); // // Step 2: Run optimizer passes on the program and check for success. // outs() << "Running selected passes on program to test for crash: "; for(int i = 0, e = PassesToRun.size(); i != e; i++) { outs() << "-" << PassesToRun[i] << " "; } std::string Filename; if(runPasses(Program, PassesToRun, Filename, false)) { outs() << "\n"; outs() << "Optimizer passes caused failure!\n\n"; debugOptimizerCrash(); return true; } else { outs() << "Combination " << num << " optimized successfully!\n"; } // // Step 3: Compile the optimized code. // outs() << "Running the code generator to test for a crash: "; std::string Error; compileProgram(Program, &Error); if (!Error.empty()) { outs() << "\n*** compileProgram threw an exception: "; outs() << Error; return debugCodeGeneratorCrash(ErrMsg); } outs() << '\n'; // // Step 4: Run the program and compare its output to the reference // output (created above). // outs() << "*** Checking if passes caused miscompliation:\n"; bool Diff = diffProgram(Program, Filename, "", false, &Error); if (Error.empty() && Diff) { outs() << "\n*** diffProgram returned true!\n"; debugMiscompilation(&Error); if (Error.empty()) return true; } if (!Error.empty()) { errs() << Error; debugCodeGeneratorCrash(ErrMsg); return true; } outs() << "\n*** diff'd output matches!\n"; sys::Path(Filename).eraseFromDisk(); outs() << "\n\n"; num++; } //end while // Unreachable. }
rex-xxx/mt6572_x201
external/llvm/tools/bugpoint/FindBugs.cpp
C++
gpl-2.0
3,707
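The loop in BugDriver::runManyPasses above is, at its core, a randomized search over pass orderings. The following LLVM-free sketch keeps only that search skeleton; runCombination and the pass names are placeholders standing in for running the optimizer, the code generator, and the output diff, not real bugpoint APIs.

// Minimal sketch of the randomized pass-ordering search performed by
// bugpoint's runManyPasses: shuffle the pass list, "run" the combination,
// and report the first ordering that fails.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <random>
#include <string>
#include <vector>

static bool runCombination(const std::vector<std::string> &passes) {
    // Placeholder failure condition: pretend the bug only reproduces when
    // "licm" is scheduled immediately before "gvn".
    for (std::size_t i = 0; i + 1 < passes.size(); ++i)
        if (passes[i] == "licm" && passes[i + 1] == "gvn")
            return false;
    return true;
}

int main() {
    std::vector<std::string> passes = {"licm", "gvn", "instcombine", "sroa"};
    std::mt19937 rng(std::random_device{}());

    for (unsigned attempt = 1;; ++attempt) {
        std::shuffle(passes.begin(), passes.end(), rng);
        if (!runCombination(passes)) {
            std::cout << "Failing combination #" << attempt << ":";
            for (const auto &p : passes)
                std::cout << " -" << p;
            std::cout << "\n";
            return 1;
        }
    }
}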
<?php if ( ! class_exists( 'GFForms' ) ) { die(); } class GF_Field_Checkbox extends GF_Field { public $type = 'checkbox'; public function get_form_editor_field_title() { return esc_attr__( 'Checkboxes', 'gravityforms' ); } public function get_form_editor_field_settings() { return array( 'conditional_logic_field_setting', 'prepopulate_field_setting', 'error_message_setting', 'label_setting', 'label_placement_setting', 'admin_label_setting', 'choices_setting', 'rules_setting', 'visibility_setting', 'description_setting', 'css_class_setting', ); } public function is_conditional_logic_supported() { return true; } public function get_field_input( $form, $value = '', $entry = null ) { $form_id = absint( $form['id'] ); $is_entry_detail = $this->is_entry_detail(); $is_form_editor = $this->is_form_editor(); $id = $this->id; $field_id = $is_entry_detail || $is_form_editor || $form_id == 0 ? "input_$id" : 'input_' . $form_id . "_$id"; $disabled_text = $is_form_editor ? 'disabled="disabled"' : ''; return sprintf( "<div class='ginput_container ginput_container_checkbox'><ul class='gfield_checkbox' id='%s'>%s</ul></div>", esc_attr( $field_id ), $this->get_checkbox_choices( $value, $disabled_text, $form_id ) ); } public function get_first_input_id( $form ) { return ''; } public function get_value_default() { return $this->is_form_editor() ? $this->defaultValue : GFCommon::replace_variables_prepopulate( $this->defaultValue ); } public function get_value_submission( $field_values, $get_from_post_global_var = true ) { $parameter_values = GFFormsModel::get_parameter_value( $this->inputName, $field_values, $this ); if ( ! empty( $parameter_values ) && ! is_array( $parameter_values ) ) { $parameter_values = explode( ',', $parameter_values ); } if ( ! is_array( $this->inputs ) ) { return ''; } $choice_index = 0; $value = array(); foreach ( $this->inputs as $input ) { if ( ! empty( $_POST[ 'is_submit_' . $this->formId ] ) && $get_from_post_global_var ) { $value[ strval( $input['id'] ) ] = rgpost( 'input_' . str_replace( '.', '_', strval( $input['id'] ) ) ); } else { if ( is_array( $parameter_values ) ) { foreach ( $parameter_values as $item ) { $item = trim( $item ); if ( GFFormsModel::choice_value_match( $this, $this->choices[ $choice_index ], $item ) ) { $value[ $input['id'] . '' ] = $item; break; } } } } $choice_index ++; } return $value; } public function get_value_entry_list( $value, $entry, $field_id, $columns, $form ) { //if this is the main checkbox field (not an input), display a comma separated list of all inputs if ( absint( $field_id ) == $field_id ) { $lead_field_keys = array_keys( $entry ); $items = array(); foreach ( $lead_field_keys as $input_id ) { if ( is_numeric( $input_id ) && absint( $input_id ) == $field_id ) { $items[] = GFCommon::selection_display( rgar( $entry, $input_id ), null, $entry['currency'], false ); } } $value = GFCommon::implode_non_blank( ', ', $items ); // special case for post category checkbox fields if ( $this->type == 'post_category' ) { $value = GFCommon::prepare_post_category_value( $value, $this, 'entry_list' ); } } else { $value = ''; if ( $this->is_checkbox_checked( $field_id, $columns[ $field_id ]['label'], $entry ) ) { $value = "<i class='fa fa-check gf_valid'></i>"; } } return $value; } public function get_value_entry_detail( $value, $currency = '', $use_text = false, $format = 'html', $media = 'screen' ) { if ( is_array( $value ) ) { $items = ''; foreach ( $value as $key => $item ) { if ( ! 
empty( $item ) ) { switch ( $format ) { case 'text' : $items .= GFCommon::selection_display( $item, $this, $currency, $use_text ) . ', '; break; default: $items .= '<li>' . GFCommon::selection_display( $item, $this, $currency, $use_text ) . '</li>'; break; } } } if ( empty( $items ) ) { return ''; } elseif ( $format == 'text' ) { return substr( $items, 0, strlen( $items ) - 2 ); //removing last comma } else { return "<ul class='bulleted'>$items</ul>"; } } else { return $value; } } public function get_value_merge_tag( $value, $input_id, $entry, $form, $modifier, $raw_value, $url_encode, $esc_html, $format, $nl2br ) { $use_value = $modifier == 'value'; $use_price = in_array( $modifier, array( 'price', 'currency' ) ); $format_currency = $modifier == 'currency'; if ( is_array( $raw_value ) && (string) intval( $input_id ) != $input_id ) { $items = array( $input_id => $value ); //float input Ids. (i.e. 4.1 ). Used when targeting specific checkbox items } elseif ( is_array( $raw_value ) ) { $items = $raw_value; } else { $items = array( $input_id => $raw_value ); } $ary = array(); foreach ( $items as $input_id => $item ) { if ( $use_value ) { list( $val, $price ) = rgexplode( '|', $item, 2 ); } elseif ( $use_price ) { list( $name, $val ) = rgexplode( '|', $item, 2 ); if ( $format_currency ) { $val = GFCommon::to_money( $val, rgar( $entry, 'currency' ) ); } } elseif ( $this->type == 'post_category' ) { $use_id = strtolower( $modifier ) == 'id'; $item_value = GFCommon::format_post_category( $item, $use_id ); $val = RGFormsModel::is_field_hidden( $form, $this, array(), $entry ) ? '' : $item_value; } else { $val = RGFormsModel::is_field_hidden( $form, $this, array(), $entry ) ? '' : RGFormsModel::get_choice_text( $this, $raw_value, $input_id ); } $ary[] = GFCommon::format_variable_value( $val, $url_encode, $esc_html, $format ); } return GFCommon::implode_non_blank( ', ', $ary ); } public function get_value_save_entry( $value, $form, $input_name, $lead_id, $lead ) { if ( empty( $value ) ){ return ''; } elseif ( is_array( $value ) ){ return implode( ',', $value ); } else { return $this->sanitize_entry_value( $value, $form['id'] ); } } public function get_checkbox_choices( $value, $disabled_text, $form_id = 0 ) { $choices = ''; $is_entry_detail = $this->is_entry_detail(); $is_form_editor = $this->is_form_editor(); if ( is_array( $this->choices ) ) { $choice_number = 1; $count = 1; foreach ( $this->choices as $choice ) { if ( $choice_number % 10 == 0 ) { //hack to skip numbers ending in 0. so that 5.1 doesn't conflict with 5.10 $choice_number ++; } $input_id = $this->id . '.' . $choice_number; if ( $is_entry_detail || $is_form_editor || $form_id == 0 ){ $id = $this->id . '_' . $choice_number ++; } else { $id = $form_id . '_' . $this->id . '_' . $choice_number ++; } if ( ! isset( $_GET['gf_token'] ) && empty( $_POST ) && rgar( $choice, 'isSelected' ) ) { $checked = "checked='checked'"; } elseif ( is_array( $value ) && RGFormsModel::choice_value_match( $this, $choice, rgget( $input_id, $value ) ) ) { $checked = "checked='checked'"; } elseif ( ! is_array( $value ) && RGFormsModel::choice_value_match( $this, $choice, $value ) ) { $checked = "checked='checked'"; } else { $checked = ''; } $logic_event = $this->get_conditional_logic_event( 'click' ); $tabindex = $this->get_tabindex(); $choice_value = $choice['value']; if ( $this->enablePrice ) { $price = rgempty( 'price', $choice ) ? 0 : GFCommon::to_number( rgar( $choice, 'price' ) ); $choice_value .= '|' . 
$price; } $choice_value = esc_attr( $choice_value ); $choice_markup = "<li class='gchoice_{$id}'> <input name='input_{$input_id}' type='checkbox' $logic_event value='{$choice_value}' {$checked} id='choice_{$id}' {$tabindex} {$disabled_text} /> <label for='choice_{$id}' id='label_{$id}'>{$choice['text']}</label> </li>"; $choices .= gf_apply_filters( 'gform_field_choice_markup_pre_render', array( $this->formId, $this->id ), $choice_markup, $choice, $this, $value ); $is_entry_detail = $this->is_entry_detail(); $is_form_editor = $this->is_form_editor(); $is_admin = $is_entry_detail || $is_form_editor; if ( $is_admin && RG_CURRENT_VIEW != 'entry' && $count >= 5 ) { break; } $count ++; } $total = sizeof( $this->choices ); if ( $count < $total ) { $choices .= "<li class='gchoice_total'>" . sprintf( esc_html__( '%d of %d items shown. Edit field to view all', 'gravityforms' ), $count, $total ) . '</li>'; } } return gf_apply_filters( 'gform_field_choices', $this->formId, $choices, $this ); } public function allow_html() { return true; } public function sanitize_settings() { parent::sanitize_settings(); if ( $this->type === 'option' ) { $this->productField = absint( $this->productField ); } if ( $this->type === 'post_category' ) { $this->displayAllCategories = (bool) $this->displayAllCategories; } } public function get_value_export( $entry, $input_id = '', $use_text = false, $is_csv = false ) { if ( empty( $input_id ) || absint( $input_id ) == $input_id ) { $selected = array(); foreach ( $this->inputs as $input ) { $index = (string) $input['id']; if ( ! rgempty( $index, $entry ) ) { $selected[] = GFCommon::selection_display( rgar( $entry, $index ), $this, rgar( $entry, 'currency' ), $use_text ); } } return implode( ', ', $selected ); } elseif ( $is_csv ) { $value = $this->is_checkbox_checked( $input_id, GFCommon::get_label( $this, $input_id ), $entry ); return empty( $value ) ? '' : $value; } else { return GFCommon::selection_display( rgar( $entry, $input_id ), $this, rgar( $entry, 'currency' ), $use_text ); } } public function is_checkbox_checked( $field_id, $field_label, $entry ) { //looping through lead detail values trying to find an item identical to the column label. Mark with a tick if found. $lead_field_keys = array_keys( $entry ); foreach ( $lead_field_keys as $input_id ) { //mark as a tick if input label (from form meta) is equal to submitted value (from lead) if ( is_numeric( $input_id ) && absint( $input_id ) == absint( $field_id ) ) { $sanitized_value = wp_kses( $entry[ $input_id ], wp_kses_allowed_html( 'post' ) ); if ( $sanitized_value == $field_label ) { return $entry[ $input_id ]; } else { if ( $this->enableChoiceValue || $this->enablePrice ) { foreach ( $this->choices as $choice ) { if ( $choice['value'] == $entry[ $field_id ] ) { return $choice['value']; } elseif ( $this->enablePrice ) { $ary = explode( '|', $entry[ $field_id ] ); $val = count( $ary ) > 0 ? $ary[0] : ''; $price = count( $ary ) > 1 ? $ary[1] : ''; if ( $val == $choice['value'] ) { return $choice['value']; } } } } } } } return false; } } GF_Fields::register( new GF_Field_Checkbox() );
AdamChlan/special-olympics-lancaster
wp-content/plugins/gravityforms/includes/fields/class-gf-field-checkbox.php
PHP
gpl-2.0
11,107
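The checkbox field class above routes every rendered choice through the gform_field_choice_markup_pre_render filter (see get_checkbox_choices()). A minimal sketch of hooking that filter from a theme or plugin is shown below; the extra CSS class name is an arbitrary example, not part of Gravity Forms.

<?php
// Sketch: tag pre-selected checkbox choices with an extra CSS class.
// The callback arguments mirror the gf_apply_filters() call in
// GF_Field_Checkbox::get_checkbox_choices() above.
add_filter( 'gform_field_choice_markup_pre_render', function ( $choice_markup, $choice, $field, $value ) {
	if ( $field->type === 'checkbox' && rgar( $choice, 'isSelected' ) ) {
		// Prepend a marker class to the <li> emitted for this choice.
		$choice_markup = str_replace( "<li class='", "<li class='gchoice-preselected ", $choice_markup );
	}
	return $choice_markup;
}, 10, 4 );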
<?php /************************************************************************************* * typoscript.php * -------- * Author: Jan-Philipp Halle (typo3@jphalle.de) * Copyright: (c) 2005 Jan-Philipp Halle (http://www.jphalle.de/) * Release Version: 1.0.9.0 * Date Started: 2005/07/29 * * TypoScript language file for GeSHi. * * CHANGES * ------- * 2008/07/11 (1.0.8) * - Michiel Roos <geshi@typofree.org> Complete rewrite * 2005/07/29 (1.0.0) * - First Release * * TODO (updated 2004/07/14) * ------------------------- * <things-to-do> * ************************************************************************************* * * This file is part of GeSHi. * * GeSHi is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GeSHi is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GeSHi; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * ************************************************************************************/ $language_data = array ( 'LANG_NAME' => 'TypoScript', 'COMMENT_SINGLE' => array(1 => '//'), 'COMMENT_MULTI' => array('/*' => '*/'), 'COMMENT_REGEXP' => array(2 => '/(?<!(#|\'|"))(?:#(?!(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3}))[^\n#]+|#{2}[^\n#]+|#{7,999}[^\n]+)/'), 'CASE_KEYWORDS' => GESHI_CAPS_NO_CHANGE, 'QUOTEMARKS' => array(), 'ESCAPE_CHAR' => '', 'KEYWORDS' => array( // Conditions: http://documentation.typo3.org/documentation/tsref/conditions/ 1 => array( 'browser', 'compatVersion', 'dayofmonth', 'dayofweek', 'device', 'globalString', 'globalVars', 'hostname', 'hour', 'ip', 'language', 'loginUser', 'loginuser', 'minute', 'month', 'PIDinRootline', 'PIDupinRootline', 'system', 'treelevel', 'useragent', 'userFunc', 'usergroup', 'version' ), // Functions: http://documentation.typo3.org/documentation/tsref/functions/ 2 => array( 'addParams', 'encapsLines', 'filelink', 'HTMLparser', 'HTMLparser_tags', 'if', 'imageLinkWrap', 'imgResource', 'makelinks', 'numRows', 'parseFunc', 'select', 'split', 'stdWrap', 'tableStyle', 'tags', 'textStyle', 'typolink' ), // Toplevel objects: http://documentation.typo3.org/documentation/tsref/tlo-objects/ 3 => array( 'CARRAY', 'CONFIG', 'CONSTANTS', 'FE_DATA', 'FE_TABLE', 'FRAME', 'FRAMESET', 'META', 'PAGE', 'plugin' ), // Content Objects (cObject) : http://documentation.typo3.org/documentation/tsref/cobjects/ 4 => array( 'CASE', 'CLEARGIF', 'COA', 'COA_INT', 'COBJ_ARRAY', 'COLUMNS', 'CONTENT', 'CTABLE', 'EDITPANEL', 'FILE', 'FORM', 'HMENU', 'HRULER', 'HTML', 'IMAGE', 'IMGTEXT', 'IMG_RESOURCE', 'LOAD_REGISTER', 'MULTIMEDIA', 'OTABLE', 'PHP_SCRIPT', 'PHP_SCRIPT_EXT', 'PHP_SCRIPT_INT', 'RECORDS', 'RESTORE_REGISTER', 'SEARCHRESULT', 'TEMPLATE', 'TEXT', 'USER', 'USER_INT' ), // GIFBUILDER toplevel link: http://documentation.typo3.org/documentation/tsref/gifbuilder/ 5 => array( 'GIFBUILDER', ), // GIFBUILDER: http://documentation.typo3.org/documentation/tsref/gifbuilder/ // skipped fields: IMAGE, TEXT // NOTE! the IMAGE and TEXT field already are linked in group 4, they // cannot be linked twice . . . . 
unfortunately 6 => array( 'ADJUST', 'BOX', 'CROP', 'EFFECT', 'EMBOSS', 'IMGMAP', 'OUTLINE', 'SCALE', 'SHADOW', 'WORKAREA' ), // MENU Objects: http://documentation.typo3.org/documentation/tsref/menu/ 7 => array( 'GMENU', 'GMENU_FOLDOUT', 'GMENU_LAYERS', 'IMGMENU', 'IMGMENUITEM', 'JSMENU', 'JSMENUITEM', 'TMENU', 'TMENUITEM', 'TMENU_LAYERS' ), // MENU common properties: http://documentation.typo3.org/documentation/tsref/menu/common-properties/ 8 => array( 'alternativeSortingField', 'begin', 'debugItemConf', 'imgNameNotRandom', 'imgNamePrefix', 'itemArrayProcFunc', 'JSWindow', 'maxItems', 'minItems', 'overrideId', 'sectionIndex', 'showAccessRestrictedPages', 'submenuObjSuffixes' ), // MENU item states: http://documentation.typo3.org/documentation/tsref/menu/item-states/ 9 => array( 'ACT', 'ACTIFSUB', 'ACTIFSUBRO', 'ACTRO', 'CUR', 'CURIFSUB', 'CURIFSUBRO', 'CURRO', 'IFSUB', 'IFSUBRO', 'NO', 'SPC', 'USERDEF1', 'USERDEF1RO', 'USERDEF2', 'USERDEF2RO', 'USR', 'USRRO' ), ), // Does not include '-' because of stuff like htmlTag_langKey = en-GB and // lib.nav-sub 'SYMBOLS' => array( 0 => array( '|', '+', '*', '/', '%', '!', '&&', '^', '<', '>', '=', '?', ':', '.' ), 1 => array( '(', ')', '{', '}', '[', ']' ) ), 'CASE_SENSITIVE' => array( GESHI_COMMENTS => false, 1 => true, 2 => true, 3 => true, 4 => true, 5 => true, 6 => true, 7 => true, 8 => true, 9 => true ), 'STYLES' => array( 'KEYWORDS' => array( 1 => 'color: #ed7d14;', 2 => 'font-weight: bold;', 3 => 'color: #990000; font-weight: bold;', 4 => 'color: #990000; font-weight: bold;', 5 => 'color: #990000; font-weight: bold;', 6 => 'color: #990000; font-weight: bold;', 7 => 'color: #990000; font-weight: bold;', 8 => 'font-weight: bold;', 9 => 'color: #990000; font-weight: bold;', ), 'COMMENTS' => array( 1 => 'color: #aaa; font-style: italic;', 2 => 'color: #aaa; font-style: italic;', 'MULTI' => 'color: #aaa; font-style: italic;' ), 'STRINGS' => array( 0 => 'color: #ac14aa;' ), 'NUMBERS' => array( 0 => 'color: #cc0000;' ), 'METHODS' => array( 1 => 'color: #0000e0; font-weight: bold;', 2 => 'color: #0000e0; font-weight: bold;' ), 'BRACKETS' => array( 0 => 'color: #009900;' ), 'SYMBOLS' => array( 0 => 'color: #339933; font-weight: bold;', // Set this to the same value as brackets above 1 => 'color: #009900; font-weight: bold;' ), 'REGEXPS' => array( 0 => 'color: #009900;', 1 => 'color: #009900; font-weight: bold;', 2 => 'color: #3366CC;', 3 => 'color: #000066; font-weight: bold;', 4 => 'color: #ed7d14;', 5 => 'color: #000066; font-weight: bold;', 6 => 'color: #009900;', 7 => 'color: #3366CC;' ), 'ESCAPE_CHAR' => array( ), 'SCRIPT' => array( ) ), 'URLS' => array( 1 => 'http://documentation.typo3.org/documentation/tsref/conditions/{FNAME}/', 2 => 'http://documentation.typo3.org/documentation/tsref/functions/{FNAME}/', 3 => 'http://documentation.typo3.org/documentation/tsref/tlo-objects/{FNAME}/', 4 => 'http://documentation.typo3.org/documentation/tsref/cobjects/{FNAME}/', 5 => 'http://documentation.typo3.org/documentation/tsref/gifbuilder/', 6 => 'http://documentation.typo3.org/documentation/tsref/gifbuilder/{FNAME}/', 7 => 'http://documentation.typo3.org/documentation/tsref/menu/{FNAME}/', 8 => 'http://documentation.typo3.org/documentation/tsref/menu/common-properties/', 9 => 'http://documentation.typo3.org/documentation/tsref/menu/item-states/' ), 'OOLANG' => false, 'OBJECT_SPLITTERS' => array( ), 'REGEXPS' => array( // xhtml tag 2 => array( GESHI_SEARCH => '(&lt;)([a-zA-Z\\/][^\\/\\\x7C]*?)(&gt;)', GESHI_REPLACE => '\\2', GESHI_MODIFIERS => 's', 
GESHI_BEFORE => '\\1', GESHI_AFTER => '\\3' ), // Constant 0 => array( GESHI_SEARCH => '(\{)(\$[a-zA-Z_\.]+[a-zA-Z0-9_\.]*)(\})', GESHI_REPLACE => '\\2', GESHI_MODIFIERS => '', GESHI_BEFORE => '\\1', GESHI_AFTER => '\\3' ), // Constant dollar sign 1 => array( GESHI_SEARCH => '(\$)([a-zA-Z_\.]+[a-zA-Z0-9_\.]*)', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => '', GESHI_BEFORE => '', GESHI_AFTER => '\\2' ), // extension keys / tables: (static|user|ttx|tx|tt|fe)_something[_something] 3 => array( GESHI_SEARCH => '(plugin\.|[^\.]\b)((?:static|user|ttx|tx|tt|fe)(?:_[0-9A-Za-z_]+?)\b)', GESHI_REPLACE => '\\2', GESHI_MODIFIERS => '', GESHI_BEFORE => '\\1', GESHI_AFTER => '' ), // conditions and controls 4 => array( GESHI_SEARCH => '(\[)(globalVar|global|end)\b', GESHI_REPLACE => '\\2', GESHI_MODIFIERS => 'i', GESHI_BEFORE => '\\1', GESHI_AFTER => '' ), // lowlevel setup and constant objects 5 => array( GESHI_SEARCH => '([^\.\$-\{]\b)(cObj|field|config|content|file|frameset|includeLibs|lib|page|plugin|register|resources|sitemap|sitetitle|styles|temp|tt_content|tt_news|types|xmlnews)\b', GESHI_REPLACE => '\\2', GESHI_MODIFIERS => '', GESHI_BEFORE => '\\1', GESHI_AFTER => '' ), // markers 6 => array( GESHI_SEARCH => '(###[^#]+###)', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => '', GESHI_BEFORE => '', GESHI_AFTER => '' ), // hex color codes 7 => array( GESHI_SEARCH => '(#[a-fA-F0-9]{6}\b|#[a-fA-F0-9]{3}\b)', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => '', GESHI_BEFORE => '', GESHI_AFTER => '' ) ), 'STRICT_MODE_APPLIES' => GESHI_NEVER, 'SCRIPT_DELIMITERS' => array( ), 'HIGHLIGHT_STRICT_BLOCK' => array( ) );
smboy86/zzing-wiki
vendor/geshi/geshi/src/geshi/typoscript.php
PHP
gpl-2.0
11,154
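Language definitions such as the TypoScript file above are consumed by the GeSHi highlighter through the language identifier that matches the file name. A minimal usage sketch follows; the require path is an assumption based on the vendor path recorded above, and the TypoScript snippet is arbitrary.

<?php
// Sketch: highlight a TypoScript snippet with GeSHi using the
// 'typoscript' language definition shown above.
require_once 'vendor/geshi/geshi/src/geshi.php';

$source = <<<'TS'
page = PAGE
page.10 = TEXT
page.10.value = Hello TYPO3 {$mysite.greeting}
TS;

$geshi = new GeSHi( $source, 'typoscript' );
$geshi->set_header_type( GESHI_HEADER_PRE ); // wrap output in <pre>
echo $geshi->parse_code();                   // returns highlighted HTML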
<?php /** * Abstract Order * * The WooCommerce order class handles order data. * * @class WC_Order * @version 2.2.0 * @package WooCommerce/Classes * @category Class * @author WooThemes */ abstract class WC_Abstract_Order { /** @public int Order (post) ID */ public $id; /** @public string Order type */ public $order_type = null; /** * Get the order if ID is passed, otherwise the order is new and empty. * This class should NOT be instantiated, but the get_order function or new WC_Order_Factory * should be used. It is possible, but the aforementioned are preferred and are the only * methods that will be maintained going forward. * */ public function __construct( $order = '' ) { $this->prices_include_tax = get_option('woocommerce_prices_include_tax') == 'yes' ? true : false; $this->tax_display_cart = get_option( 'woocommerce_tax_display_cart' ); $this->display_totals_ex_tax = $this->tax_display_cart == 'excl' ? true : false; $this->display_cart_ex_tax = $this->tax_display_cart == 'excl' ? true : false; $this->order_type = 'simple'; if ( is_numeric( $order ) ) { $this->id = absint( $order ); $this->post = get_post( $order ); $this->get_order( $this->id ); } elseif ( $order instanceof WC_Order ) { $this->id = absint( $order->id ); $this->post = $order->post; $this->get_order( $this->id ); } elseif ( $order instanceof WP_Post || isset( $order->ID ) ) { $this->id = absint( $order->ID ); $this->post = $order; $this->get_order( $this->id ); } } /** * Remove all line items (products, coupons, shipping, taxes) from the order. * * @param string $type Order item type. Default null. */ public function remove_order_items( $type = null ) { global $wpdb; if ( $type ) { $wpdb->query( $wpdb->prepare( "DELETE FROM {$wpdb->prefix}woocommerce_order_itemmeta WHERE order_item_id IN ( SELECT order_item_id FROM {$wpdb->prefix}woocommerce_order_items WHERE order_id = %d AND order_item_type = %s )", $this->id, $type ) ); $wpdb->query( $wpdb->prepare( "DELETE FROM {$wpdb->prefix}woocommerce_order_items WHERE order_id = %d AND order_item_type = %s", $this->id, $type ) ); } else { $wpdb->query( $wpdb->prepare( "DELETE FROM {$wpdb->prefix}woocommerce_order_itemmeta WHERE order_item_id IN ( SELECT order_item_id FROM {$wpdb->prefix}woocommerce_order_items WHERE order_id = %d )", $this->id ) ); $wpdb->query( $wpdb->prepare( "DELETE FROM {$wpdb->prefix}woocommerce_order_items WHERE order_id = %d", $this->id ) ); } } /** * Set the payment method for the order * * @param WC_Payment_Gateway * @param WC_Payment_Gateway $payment_method */ public function set_payment_method( $payment_method ) { if ( is_object( $payment_method ) ) { update_post_meta( $this->id, '_payment_method', $payment_method->id ); update_post_meta( $this->id, '_payment_method_title', $payment_method->get_title() ); } } /** * Set the customer address * * @param array $address Address data * @param string $type billing or shipping */ public function set_address( $address, $type = 'billing' ) { foreach ( $address as $key => $value ) { update_post_meta( $this->id, "_{$type}_" . 
$key, $value ); } } /** * Add a product line item to the order * * @since 2.2 * @param \WC_Product $product * @param int $qty Line item quantity * @param array $args * @return int|bool Item ID or false */ public function add_product( $product, $qty = 1, $args = array() ) { $default_args = array( 'variation' => array(), 'totals' => array() ); $args = wp_parse_args( $args, $default_args ); $item_id = wc_add_order_item( $this->id, array( 'order_item_name' => $product->get_title(), 'order_item_type' => 'line_item' ) ); if ( ! $item_id ) { return false; } wc_add_order_item_meta( $item_id, '_qty', wc_stock_amount( $qty ) ); wc_add_order_item_meta( $item_id, '_tax_class', $product->get_tax_class() ); wc_add_order_item_meta( $item_id, '_product_id', $product->id ); wc_add_order_item_meta( $item_id, '_variation_id', isset( $product->variation_id ) ? $product->variation_id : 0 ); // Set line item totals, either passed in or from the product wc_add_order_item_meta( $item_id, '_line_subtotal', wc_format_decimal( isset( $args['totals']['subtotal'] ) ? $args['totals']['subtotal'] : $product->get_price_excluding_tax( $qty ) ) ); wc_add_order_item_meta( $item_id, '_line_total', wc_format_decimal( isset( $args['totals']['total'] ) ? $args['totals']['total'] : $product->get_price_excluding_tax( $qty ) ) ); wc_add_order_item_meta( $item_id, '_line_subtotal_tax', wc_format_decimal( isset( $args['totals']['subtotal_tax'] ) ? $args['totals']['subtotal_tax'] : 0 ) ); wc_add_order_item_meta( $item_id, '_line_tax', wc_format_decimal( isset( $args['totals']['tax'] ) ? $args['totals']['tax'] : 0 ) ); // Save tax data - Since 2.2 if ( isset( $args['totals']['tax_data'] ) ) { $tax_data = array(); $tax_data['total'] = array_map( 'wc_format_decimal', $args['totals']['tax_data']['total'] ); $tax_data['subtotal'] = array_map( 'wc_format_decimal', $args['totals']['tax_data']['subtotal'] ); wc_add_order_item_meta( $item_id, '_line_tax_data', $tax_data ); } else { wc_add_order_item_meta( $item_id, '_line_tax_data', array( 'total' => array(), 'subtotal' => array() ) ); } // Add variation meta if ( ! empty( $args['variation'] ) ) { foreach ( $args['variation'] as $key => $value ) { wc_add_order_item_meta( $item_id, str_replace( 'attribute_', '', $key ), $value ); } } // Backorders if ( $product->backorders_require_notification() && $product->is_on_backorder( $qty ) ) { wc_add_order_item_meta( $item_id, apply_filters( 'woocommerce_backordered_item_meta_name', __( 'Backordered', 'woocommerce' ) ), $qty - max( 0, $product->get_total_stock() ) ); } do_action( 'woocommerce_order_add_product', $this->id, $item_id, $product, $qty, $args ); return $item_id; } /** * Update a line item for the order * * Note this does not update order totals * * @since 2.2 * @param int $item_id order item ID * @param array $args data to update * @return bool */ public function update_product( $item_id, $product, $args ) { if ( ! $item_id || ! is_object( $product ) ) { return false; } // quantity if ( isset( $args['qty'] ) ) { wc_update_order_item_meta( $item_id, '_qty', wc_stock_amount( $args['qty'] ) ); } // tax class if ( isset( $args['tax_class'] ) ) { wc_update_order_item_meta( $item_id, '_tax_class', $args['tax_class'] ); } // set item totals, either provided or from product if ( isset( $args['qty'] ) ) { wc_update_order_item_meta( $item_id, '_line_subtotal', wc_format_decimal( isset( $args['totals']['subtotal'] ) ? 
$args['totals']['subtotal'] : $product->get_price_excluding_tax( $args['qty'] ) ) ); wc_update_order_item_meta( $item_id, '_line_total', wc_format_decimal( isset( $args['totals']['total'] ) ? $args['totals']['total'] : $product->get_price_excluding_tax( $args['qty'] ) ) ); } // set item tax totals wc_update_order_item_meta( $item_id, '_line_subtotal_tax', wc_format_decimal( isset( $args['totals']['subtotal_tax'] ) ? $args['totals']['subtotal_tax'] : 0 ) ); wc_update_order_item_meta( $item_id, '_line_tax', wc_format_decimal( isset( $args['totals']['tax'] ) ? $args['totals']['tax'] : 0 ) ); // variation meta if ( isset( $args['variation'] ) && is_array( $args['variation'] ) ) { foreach ( $args['variation'] as $key => $value ) { wc_update_order_item_meta( $item_id, str_replace( 'attribute_', '', $key ), $value ); } } // backorders if ( isset( $args['qty'] ) && $product->backorders_require_notification() && $product->is_on_backorder( $args['qty'] ) ) { wc_update_order_item_meta( $item_id, apply_filters( 'woocommerce_backordered_item_meta_name', __( 'Backordered', 'woocommerce' ) ), $args['qty'] - max( 0, $product->get_total_stock() ) ); } do_action( 'woocommerce_order_edit_product', $this->id, $item_id, $args, $product ); return true; } /** * Add coupon code to the order * * @param string $code * @param integer $discount_amount * @return int|bool Item ID or false */ public function add_coupon( $code, $discount_amount = 0 ) { $item_id = wc_add_order_item( $this->id, array( 'order_item_name' => $code, 'order_item_type' => 'coupon' ) ); if ( ! $item_id ) { return false; } wc_add_order_item_meta( $item_id, 'discount_amount', $discount_amount ); do_action( 'woocommerce_order_add_coupon', $this->id, $item_id, $code, $discount_amount ); return $item_id; } /** * Update coupon for order * * Note this does not update order totals * * @since 2.2 * @param int $item_id * @param array $args * @return bool */ public function update_coupon( $item_id, $args ) { if ( ! $item_id ) { return false; } // code if ( isset( $args['code'] ) ) { wc_update_order_item( $item_id, array( 'order_item_name' => $args['code'] ) ); } // amount if ( isset( $args['discount_amount'] ) ) { wc_update_order_item_meta( $item_id, 'discount_amount', wc_format_decimal( $args['discount_amount'] ) ); } do_action( 'woocommerce_order_update_coupon', $this->id, $item_id, $args ); return true; } /** * Add a tax row to the order * * @since 2.2 * @param int tax_rate_id * @return int|bool Item ID or false */ public function add_tax( $tax_rate_id, $tax_amount = 0, $shipping_tax_amount = 0 ) { $code = WC_Tax::get_rate_code( $tax_rate_id ); if ( ! $code ) { return false; } $item_id = wc_add_order_item( $this->id, array( 'order_item_name' => $code, 'order_item_type' => 'tax' ) ); if ( ! $item_id ) { return false; } wc_add_order_item_meta( $item_id, 'rate_id', $tax_rate_id ); wc_add_order_item_meta( $item_id, 'label', WC_Tax::get_rate_label( $tax_rate_id ) ); wc_add_order_item_meta( $item_id, 'compound', WC_Tax::is_compound( $tax_rate_id ) ? 
1 : 0 ); wc_add_order_item_meta( $item_id, 'tax_amount', wc_format_decimal( $tax_amount ) ); wc_add_order_item_meta( $item_id, 'shipping_tax_amount', wc_format_decimal( $shipping_tax_amount ) ); do_action( 'woocommerce_order_add_tax', $this->id, $item_id, $tax_rate_id, $tax_amount, $shipping_tax_amount ); return $item_id; } /** * Add a shipping row to the order * * @param WC_Shipping_Rate shipping_rate * @return int|bool Item ID or false */ public function add_shipping( $shipping_rate ) { $item_id = wc_add_order_item( $this->id, array( 'order_item_name' => $shipping_rate->label, 'order_item_type' => 'shipping' ) ); if ( ! $item_id ) { return false; } wc_add_order_item_meta( $item_id, 'method_id', $shipping_rate->id ); wc_add_order_item_meta( $item_id, 'cost', wc_format_decimal( $shipping_rate->cost ) ); // Save shipping taxes - Since 2.2 $taxes = array_map( 'wc_format_decimal', $shipping_rate->taxes ); wc_add_order_item_meta( $item_id, 'taxes', $taxes ); do_action( 'woocommerce_order_add_shipping', $this->id, $item_id, $shipping_rate ); // Update total $this->set_total( $this->order_shipping + wc_format_decimal( $shipping_rate->cost ), 'shipping' ); return $item_id; } /** * Update shipping method for order * * Note this does not update the order total * * @since 2.2 * @param int $item_id * @param array $args * @return bool */ public function update_shipping( $item_id, $args ) { if ( ! $item_id ) { return false; } // method title if ( isset( $args['method_title'] ) ) { wc_update_order_item( $item_id, array( 'order_item_name' => $args['method_title'] ) ); } // method ID if ( isset( $args['method_id'] ) ) { wc_update_order_item_meta( $item_id, 'method_id', $args['method_id'] ); } // method cost if ( isset( $args['cost'] ) ) { wc_update_order_item_meta( $item_id, 'cost', wc_format_decimal( $args['cost'] ) ); } do_action( 'woocommerce_order_update_shipping', $this->id, $item_id, $args ); return true; } /** * Add a fee to the order * * @param object $fee * @return int|bool Item ID or false */ public function add_fee( $fee ) { $item_id = wc_add_order_item( $this->id, array( 'order_item_name' => $fee->name, 'order_item_type' => 'fee' ) ); if ( ! $item_id ) { return false; } if ( $fee->taxable ) { wc_add_order_item_meta( $item_id, '_tax_class', $fee->tax_class ); } else { wc_add_order_item_meta( $item_id, '_tax_class', '0' ); } wc_add_order_item_meta( $item_id, '_line_total', wc_format_decimal( $fee->amount ) ); wc_add_order_item_meta( $item_id, '_line_tax', wc_format_decimal( $fee->tax ) ); // Save tax data - Since 2.2 $tax_data = array_map( 'wc_format_decimal', $fee->tax_data ); wc_add_order_item_meta( $item_id, '_line_tax_data', array( 'total' => $tax_data ) ); do_action( 'woocommerce_order_add_fee', $this->id, $item_id, $fee ); return $item_id; } /** * Update fee for order * * Note this does not update order totals * * @since 2.2 * @param int $item_id * @param array $args * @return bool */ public function update_fee( $item_id, $args ) { if ( ! 
$item_id ) { return false; } // name if ( isset( $args['name'] ) ) { wc_update_order_item( $item_id, array( 'order_item_name' => $args['name'] ) ); } // tax class if ( isset( $args['tax_class'] ) ) { wc_update_order_item_meta( $item_id, '_tax_class', $args['tax_class'] ); } // total if ( isset( $args['line_total'] ) ) { wc_update_order_item_meta( $item_id, '_line_total', wc_format_decimal( $args['line_total'] ) ); } // total tax if ( isset( $args['line_tax'] ) ) { wc_update_order_item_meta( $item_id, '_line_tax', wc_format_decimal( $args['line_tax'] ) ); } do_action( 'woocommerce_order_update_fee', $this->id, $item_id, $args ); return true; } /** * Set an order total * * @param float $amount * @param string $total_type */ public function set_total( $amount, $total_type = 'total' ) { if ( ! in_array( $total_type, array( 'shipping', 'order_discount', 'tax', 'shipping_tax', 'total', 'cart_discount' ) ) ) { return false; } switch ( $total_type ) { case 'total' : $key = '_order_total'; $amount = wc_format_decimal( $amount, get_option( 'woocommerce_price_num_decimals' ) ); break; case 'order_discount' : case 'cart_discount' : $key = '_' . $total_type; $amount = wc_format_decimal( $amount ); break; default : $key = '_order_' . $total_type; $amount = wc_format_decimal( $amount ); break; } update_post_meta( $this->id, $key, $amount ); } /** * Calculate taxes for all line items and shipping, and store the totals and tax rows. * * Will use the base country unless customer addresses are set. * * @return bool success or fail */ public function calculate_taxes() { $shipping_tax_total = 0; $tax_total = 0; $taxes = array(); $tax_based_on = get_option( 'woocommerce_tax_based_on' ); if ( 'base' === $tax_based_on ) { $default = get_option( 'woocommerce_default_country' ); $postcode = ''; $city = ''; if ( strstr( $default, ':' ) ) { list( $country, $state ) = explode( ':', $default ); } else { $country = $default; $state = ''; } } elseif ( 'billing' === $tax_based_on ) { $country = $this->billing_country; $state = $this->billing_state; $postcode = $this->billing_postcode; $city = $this->billing_city; } else { $country = $this->shipping_country; $state = $this->shipping_state; $postcode = $this->shipping_postcode; $city = $this->shipping_city; } // Get items foreach ( $this->get_items( array( 'line_item', 'fee' ) ) as $item_id => $item ) { $product = $this->get_product_from_item( $item ); $line_total = isset( $item['line_total'] ) ? $item['line_total'] : 0; $line_subtotal = isset( $item['line_subtotal'] ) ? $item['line_subtotal'] : 0; $tax_class = $item['tax_class']; $item_tax_status = $product ? $product->get_tax_status() : 'taxable'; if ( '0' !== $tax_class && 'taxable' === $item_tax_status ) { $tax_rates = WC_Tax::find_rates( array( 'country' => $country, 'state' => $state, 'postcode' => $postcode, 'city' => $city, 'tax_class' => $tax_class ) ); $line_subtotal_taxes = WC_Tax::calc_tax( $line_subtotal, $tax_rates, false ); $line_taxes = WC_Tax::calc_tax( $line_total, $tax_rates, false ); $line_subtotal_tax = max( 0, array_sum( $line_subtotal_taxes ) ); $line_tax = max( 0, array_sum( $line_taxes ) ); $tax_total += $line_tax; wc_update_order_item_meta( $item_id, '_line_subtotal_tax', wc_format_decimal( $line_subtotal_tax ) ); wc_update_order_item_meta( $item_id, '_line_tax', wc_format_decimal( $line_tax ) ); // Sum the item taxes foreach ( array_keys( $taxes + $line_taxes ) as $key ) { $taxes[ $key ] = ( isset( $line_taxes[ $key ] ) ? $line_taxes[ $key ] : 0 ) + ( isset( $taxes[ $key ] ) ? 
$taxes[ $key ] : 0 ); } } } // Now calculate shipping tax $matched_tax_rates = array(); $tax_rates = WC_Tax::find_rates( array( 'country' => $country, 'state' => $state, 'postcode' => $postcode, 'city' => $city, 'tax_class' => '' ) ); if ( $tax_rates ) { foreach ( $tax_rates as $key => $rate ) { if ( isset( $rate['shipping'] ) && 'yes' === $rate['shipping'] ) { $matched_tax_rates[ $key ] = $rate; } } } $shipping_taxes = WC_Tax::calc_shipping_tax( $this->order_shipping, $matched_tax_rates ); $shipping_tax_total = WC_Tax::round( array_sum( $shipping_taxes ) ); // Save tax totals $this->set_total( $shipping_tax_total, 'shipping_tax' ); $this->set_total( $tax_total, 'tax' ); // Tax rows $this->remove_order_items( 'tax' ); // Now merge to keep tax rows foreach ( array_keys( $taxes + $shipping_taxes ) as $tax_rate_id ) { $this->add_tax( $tax_rate_id, isset( $taxes[ $tax_rate_id ] ) ? $taxes[ $tax_rate_id ] : 0, isset( $shipping_taxes[ $tax_rate_id ] ) ? $shipping_taxes[ $tax_rate_id ] : 0 ); } return true; } /** * Calculate shipping total * * @since 2.2 * @return float */ public function calculate_shipping() { $shipping_total = 0; foreach ( $this->get_shipping_methods() as $shipping ) { $shipping_total += $shipping['cost']; } $this->set_total( $shipping_total, 'shipping' ); return $this->get_total_shipping(); } /** * Update tax lines at order level by looking at the line item taxes themselves. * * @return bool success or fail */ public function update_taxes() { $order_taxes = array(); $order_shipping_taxes = array(); foreach ( $this->get_items( array( 'line_item', 'fee' ) ) as $item_id => $item ) { $line_tax_data = maybe_unserialize( $item['line_tax_data'] ); if ( isset( $line_tax_data['total'] ) ) { foreach ( $line_tax_data['total'] as $tax_rate_id => $tax ) { if ( ! isset( $order_taxes[ $tax_rate_id ] ) ) { $order_taxes[ $tax_rate_id ] = 0; } $order_taxes[ $tax_rate_id ] += $tax; } } } foreach ( $this->get_items( array( 'shipping' ) ) as $item_id => $item ) { $line_tax_data = maybe_unserialize( $item['taxes'] ); if ( isset( $line_tax_data['total'] ) ) { foreach ( $line_tax_data['total'] as $tax_rate_id => $tax ) { if ( ! isset( $order_shipping_taxes[ $tax_rate_id ] ) ) { $order_shipping_taxes[ $tax_rate_id ] = 0; } $order_shipping_taxes[ $tax_rate_id ] += $tax; } } } // Remove old existing tax rows $this->remove_order_items( 'tax' ); // Now merge to keep tax rows foreach ( array_keys( $order_taxes + $order_shipping_taxes ) as $tax_rate_id ) { $this->add_tax( $tax_rate_id, isset( $order_taxes[ $tax_rate_id ] ) ? $order_taxes[ $tax_rate_id ] : 0, isset( $order_shipping_taxes[ $tax_rate_id ] ) ? $order_shipping_taxes[ $tax_rate_id ] : 0 ); } // Save tax totals $this->set_total( WC_Tax::round( array_sum( $order_shipping_taxes ) ), 'shipping_tax' ); $this->set_total( WC_Tax::round( array_sum( $order_taxes ) ), 'tax' ); return true; } /** * Calculate totals by looking at the contents of the order. Stores the totals and returns the orders final total. * * @since 2.2 * @param $and_taxes bool Calc taxes if true * @return float calculated grand total */ public function calculate_totals( $and_taxes = true ) { $cart_subtotal = 0; $cart_total = 0; $fee_total = 0; if ( $and_taxes ) { $this->calculate_taxes(); } // line items foreach ( $this->get_items() as $item ) { $cart_subtotal += wc_format_decimal( isset( $item['line_subtotal'] ) ? $item['line_subtotal'] : 0 ); $cart_total += wc_format_decimal( isset( $item['line_total'] ) ? 
$item['line_total'] : 0 ); } $this->calculate_shipping(); foreach ( $this->get_fees() as $item ) { $fee_total += $item['line_total']; } $this->set_total( $cart_subtotal - $cart_total, 'cart_discount' ); $grand_total = round( $cart_total + $fee_total + $this->get_total_shipping() - $this->get_order_discount() + $this->get_cart_tax() + $this->get_shipping_tax(), absint( get_option( 'woocommerce_price_num_decimals' ) ) ); $this->set_total( $grand_total, 'total' ); return $grand_total; } /** * Gets an order from the database. * * @param int $id (default: 0) * @return bool */ public function get_order( $id = 0 ) { if ( ! $id ) { return false; } if ( $result = get_post( $id ) ) { $this->populate( $result ); return true; } return false; } /** * Populates an order from the loaded post data. * * @param mixed $result */ public function populate( $result ) { // Standard post data $this->id = $result->ID; $this->order_date = $result->post_date; $this->modified_date = $result->post_modified; $this->customer_message = $result->post_excerpt; $this->customer_note = $result->post_excerpt; $this->post_status = $result->post_status; // Billing email cam default to user if set if ( empty( $this->billing_email ) && ! empty( $this->customer_user ) ) { $user = get_user_by( 'id', $this->customer_user ); $this->billing_email = $user->user_email; } } /** * __isset function. * * @param mixed $key * @return bool */ public function __isset( $key ) { if ( ! $this->id ) { return false; } return metadata_exists( 'post', $this->id, '_' . $key ); } /** * __get function. * * @param mixed $key * @return mixed */ public function __get( $key ) { // Get values or default if not set if ( 'completed_date' === $key ) { $value = ( $value = get_post_meta( $this->id, '_completed_date', true ) ) ? $value : $this->modified_date; } elseif ( 'user_id' === $key ) { $value = ( $value = get_post_meta( $this->id, '_customer_user', true ) ) ? absint( $value ) : ''; } elseif ( 'status' === $key ) { $value = $this->get_status(); } else { $value = get_post_meta( $this->id, '_' . $key, true ); } return $value; } /** * Return the order statuses without wc- internal prefix * * @return string */ public function get_status() { return apply_filters( 'woocommerce_order_get_status', 'wc-' === substr( $this->post_status, 0, 3 ) ? substr( $this->post_status, 3 ) : $this->post_status, $this ); } /** * Checks the order status against a passed in status. * * @return bool */ public function has_status( $status ) { return apply_filters( 'woocommerce_order_has_status', ( is_array( $status ) && in_array( $this->get_status(), $status ) ) || $this->get_status() === $status ? true : false, $this, $status ); } /** * Gets the user ID associated with the order. Guests are 0. * * @since 2.2 * @return int|false */ public function get_user_id() { return $this->customer_user ? $this->customer_user : 0; } /** * Get the user associated with the order. False for guests. * * @since 2.2 * @return WP_User|false */ public function get_user() { return $this->get_user_id() ? get_user_by( 'id', $this->get_user_id() ) : false; } /** * Get transaction id for the order * * @return string */ public function get_transaction_id() { return get_post_meta( $this->id, '_transaction_id', true ); } /** * Check if an order key is valid. * * @param mixed $key * @return bool */ public function key_is_valid( $key ) { if ( $key == $this->order_key ) { return true; } return false; } /** * get_order_number function. 
* * Gets the order number for display (by default, order ID) * * @return string */ public function get_order_number() { return apply_filters( 'woocommerce_order_number', _x( '#', 'hash before order number', 'woocommerce' ) . $this->id, $this ); } /** * Get a formatted billing address for the order. * * @return string */ public function get_formatted_billing_address() { if ( ! $this->formatted_billing_address ) { // Formatted Addresses $address = apply_filters( 'woocommerce_order_formatted_billing_address', array( 'first_name' => $this->billing_first_name, 'last_name' => $this->billing_last_name, 'company' => $this->billing_company, 'address_1' => $this->billing_address_1, 'address_2' => $this->billing_address_2, 'city' => $this->billing_city, 'state' => $this->billing_state, 'postcode' => $this->billing_postcode, 'country' => $this->billing_country ), $this ); $this->formatted_billing_address = WC()->countries->get_formatted_address( $address ); } return $this->formatted_billing_address; } /** * Get the billing address in an array. * * @return string */ public function get_billing_address() { if ( ! $this->billing_address ) { // Formatted Addresses $address = array( 'address_1' => $this->billing_address_1, 'address_2' => $this->billing_address_2, 'city' => $this->billing_city, 'state' => $this->billing_state, 'postcode' => $this->billing_postcode, 'country' => $this->billing_country ); $joined_address = array(); foreach ( $address as $part ) { if ( ! empty( $part ) ) { $joined_address[] = $part; } } $this->billing_address = implode( ', ', $joined_address ); } return $this->billing_address; } /** * Get a formatted shipping address for the order. * * @return string */ public function get_formatted_shipping_address() { if ( ! $this->formatted_shipping_address ) { if ( $this->shipping_address_1 ) { // Formatted Addresses $address = apply_filters( 'woocommerce_order_formatted_shipping_address', array( 'first_name' => $this->shipping_first_name, 'last_name' => $this->shipping_last_name, 'company' => $this->shipping_company, 'address_1' => $this->shipping_address_1, 'address_2' => $this->shipping_address_2, 'city' => $this->shipping_city, 'state' => $this->shipping_state, 'postcode' => $this->shipping_postcode, 'country' => $this->shipping_country ), $this ); $this->formatted_shipping_address = WC()->countries->get_formatted_address( $address ); } } return $this->formatted_shipping_address; } /** * Get the shipping address in an array. * * @return array */ public function get_shipping_address() { if ( ! $this->shipping_address ) { if ( $this->shipping_address_1 ) { // Formatted Addresses $address = array( 'address_1' => $this->shipping_address_1, 'address_2' => $this->shipping_address_2, 'city' => $this->shipping_city, 'state' => $this->shipping_state, 'postcode' => $this->shipping_postcode, 'country' => $this->shipping_country ); $joined_address = array(); foreach ( $address as $part ) { if ( ! empty( $part ) ) { $joined_address[] = $part; } } $this->shipping_address = implode( ', ', $joined_address ); } } return $this->shipping_address; } /** * Return an array of items/products within this order. * * @param string|array $type Types of line items to get (array or string) * @return array */ public function get_items( $type = '' ) { global $wpdb; if ( empty( $type ) ) { $type = array( 'line_item' ); } if ( ! 
is_array( $type ) ) { $type = array( $type ); } $type = array_map( 'esc_attr', $type ); $line_items = $wpdb->get_results( $wpdb->prepare( " SELECT order_item_id, order_item_name, order_item_type FROM {$wpdb->prefix}woocommerce_order_items WHERE order_id = %d AND order_item_type IN ( '" . implode( "','", $type ) . "' ) ORDER BY order_item_id ", $this->id ) ); $items = array(); // Reserved meta keys $reserved_item_meta_keys = array( 'name', 'type', 'item_meta', 'qty', 'tax_class', 'product_id', 'variation_id', 'line_subtotal', 'line_total', 'line_tax', 'line_subtotal_tax' ); // Loop items foreach ( $line_items as $item ) { // Place line item into array to return $items[ $item->order_item_id ]['name'] = $item->order_item_name; $items[ $item->order_item_id ]['type'] = $item->order_item_type; $items[ $item->order_item_id ]['item_meta'] = $this->get_item_meta( $item->order_item_id ); // Expand meta data into the array foreach ( $items[ $item->order_item_id ]['item_meta'] as $name => $value ) { if ( in_array( $name, $reserved_item_meta_keys ) ) { continue; } if ( '_' === substr( $name, 0, 1 ) ) { $items[ $item->order_item_id ][ substr( $name, 1 ) ] = $value[0]; } elseif ( ! in_array( $name, $reserved_item_meta_keys ) ) { $items[ $item->order_item_id ][ $name ] = $value[0]; } } } return apply_filters( 'woocommerce_order_get_items', $items, $this ); } /** * Gets order total - formatted for display. * * @return string */ public function get_item_count( $type = '' ) { if ( empty( $type ) ) { $type = array( 'line_item' ); } if ( ! is_array( $type ) ) { $type = array( $type ); } $items = $this->get_items( $type ); $count = 0; foreach ( $items as $item ) { if ( ! empty( $item['qty'] ) ) { $count += $item['qty']; } else { $count ++; } } return apply_filters( 'woocommerce_get_item_count', $count, $type, $this ); } /** * Return an array of fees within this order. * * @return array */ public function get_fees() { return $this->get_items( 'fee' ); } /** * Return an array of taxes within this order. * * @return array */ public function get_taxes() { return $this->get_items( 'tax' ); } /** * Return an array of shipping costs within this order. * * @return array */ public function get_shipping_methods() { return $this->get_items( 'shipping' ); } /** * Check whether this order has a specific shipping method or not * * @param string $method_id */ public function has_shipping_method( $method_id ) { $shipping_methods = $this->get_shipping_methods(); $has_method = false; if ( ! $shipping_methods ) { return false; } foreach ( $shipping_methods as $shipping_method ) { if ( $shipping_method['method_id'] == $method_id ) { $has_method = true; } } return $has_method; } /** * Get taxes, merged by code, formatted ready for output. * * @return array */ public function get_tax_totals() { $taxes = $this->get_items( 'tax' ); $tax_totals = array(); foreach ( $taxes as $key => $tax ) { $code = $tax[ 'name' ]; if ( ! isset( $tax_totals[ $code ] ) ) { $tax_totals[ $code ] = new stdClass(); $tax_totals[ $code ]->amount = 0; } $tax_totals[ $code ]->id = $key; $tax_totals[ $code ]->rate_id = $tax['rate_id']; $tax_totals[ $code ]->is_compound = $tax[ 'compound' ]; $tax_totals[ $code ]->label = isset( $tax[ 'label' ] ) ? 
$tax[ 'label' ] : $tax[ 'name' ]; $tax_totals[ $code ]->amount += $tax[ 'tax_amount' ] + $tax[ 'shipping_tax_amount' ]; $tax_totals[ $code ]->formatted_amount = wc_price( wc_round_tax_total( $tax_totals[ $code ]->amount ), array('currency' => $this->get_order_currency()) ); } return apply_filters( 'woocommerce_order_tax_totals', $tax_totals, $this ); } /** * has_meta function for order items. * * @param string $order_item_id * @return array of meta data */ public function has_meta( $order_item_id ) { global $wpdb; return $wpdb->get_results( $wpdb->prepare( "SELECT meta_key, meta_value, meta_id, order_item_id FROM {$wpdb->prefix}woocommerce_order_itemmeta WHERE order_item_id = %d ORDER BY meta_id", absint( $order_item_id ) ), ARRAY_A ); } /** * Get order item meta. * * @param mixed $order_item_id * @param string $key (default: '') * @param bool $single (default: false) * @return array|string */ public function get_item_meta( $order_item_id, $key = '', $single = false ) { return get_metadata( 'order_item', $order_item_id, $key, $single ); } /** Total Getters *******************************************************/ /** * Gets the total (product) discount amount - these are applied before tax. * * @return float */ public function get_cart_discount() { return apply_filters( 'woocommerce_order_amount_cart_discount', (double) $this->cart_discount, $this ); } /** * Gets the total (product) discount amount - these are applied before tax. * * @return float */ public function get_order_discount() { return apply_filters( 'woocommerce_order_amount_order_discount', (double) $this->order_discount, $this ); } /** * Gets the total discount amount - both kinds * * @return float */ public function get_total_discount() { return apply_filters( 'woocommerce_order_amount_total_discount', $this->get_cart_discount() + $this->get_order_discount(), $this ); } /** * Gets cart tax amount. * * @return float */ public function get_cart_tax() { return apply_filters( 'woocommerce_order_amount_cart_tax', (double) $this->order_tax, $this ); } /** * Gets shipping tax amount. * * @return float */ public function get_shipping_tax() { return apply_filters( 'woocommerce_order_amount_shipping_tax', (double) $this->order_shipping_tax, $this ); } /** * Gets shipping and product tax. * * @return float */ public function get_total_tax() { return apply_filters( 'woocommerce_order_amount_total_tax', wc_round_tax_total( $this->get_cart_tax() + $this->get_shipping_tax() ), $this ); } /** * Gets shipping total. * * @return float */ public function get_total_shipping() { return apply_filters( 'woocommerce_order_amount_total_shipping', (double) $this->order_shipping, $this ); } /** * Gets order total. * * @return float */ public function get_total() { return apply_filters( 'woocommerce_order_amount_total', (double) $this->order_total, $this ); } /** * Gets order subtotal. * * @return mixed|void */ public function get_subtotal() { $subtotal = 0; foreach ( $this->get_items() as $item ) { $subtotal += ( isset( $item['line_subtotal'] ) ) ? $item['line_subtotal'] : 0; } return apply_filters( 'woocommerce_order_amount_subtotal', (double) $subtotal, $this ); } /** * Get item subtotal - this is the cost before discount. 
* * @param mixed $item * @param bool $inc_tax (default: false) * @param bool $round (default: true) * @return float */ public function get_item_subtotal( $item, $inc_tax = false, $round = true ) { if ( $inc_tax ) { $price = ( $item['line_subtotal'] + $item['line_subtotal_tax'] ) / max( 1, $item['qty'] ); } else { $price = ( $item['line_subtotal'] / $item['qty'] ); } $price = $round ? round( $price, 2 ) : $price; return apply_filters( 'woocommerce_order_amount_item_subtotal', $price, $this, $item ); } /** * Get line subtotal - this is the cost before discount. * * @param mixed $item * @param bool $inc_tax (default: false) * @param bool $round (default: true) * @return float */ public function get_line_subtotal( $item, $inc_tax = false, $round = true ) { if ( $inc_tax ) { $price = $item['line_subtotal'] + $item['line_subtotal_tax']; } else { $price = $item['line_subtotal']; } $price = $round ? round( $price, 2 ) : $price; return apply_filters( 'woocommerce_order_amount_line_subtotal', $price, $this, $item ); } /** * Calculate item cost - useful for gateways. * * @param mixed $item * @param bool $inc_tax (default: false) * @param bool $round (default: true) * @return float */ public function get_item_total( $item, $inc_tax = false, $round = true ) { $qty = ( ! empty( $item['qty'] ) ) ? $item['qty'] : 1; if ( $inc_tax ) { $price = ( $item['line_total'] + $item['line_tax'] ) / max( 1, $qty ); } else { $price = $item['line_total'] / $qty; } $price = $round ? round( $price, 2 ) : $price; return apply_filters( 'woocommerce_order_amount_item_total', $price, $this ); } /** * Calculate line total - useful for gateways. * * @param mixed $item * @param bool $inc_tax (default: false) * @return float */ public function get_line_total( $item, $inc_tax = false ) { $line_total = $inc_tax ? round( $item['line_total'] + $item['line_tax'], 2 ) : round( $item['line_total'], 2 ); return apply_filters( 'woocommerce_order_amount_line_total', $line_total, $this ); } /** * Calculate item tax - useful for gateways. * * @param mixed $item * @param bool $round (default: true) * @return float */ public function get_item_tax( $item, $round = true ) { $price = $item['line_tax'] / max( 1, $item['qty'] ); $price = $round ? wc_round_tax_total( $price ) : $price; return apply_filters( 'woocommerce_order_amount_item_tax', $price, $item, $round, $this ); } /** * Calculate line tax - useful for gateways. * * @param mixed $item * @return float */ public function get_line_tax( $item ) { return apply_filters( 'woocommerce_order_amount_line_tax', wc_round_tax_total( $item['line_tax'] ), $item, $this ); } /** * Gets shipping total. * * @deprecated As of 2.1, use of get_total_shipping() is preferred * @return float */ public function get_shipping() { _deprecated_function( 'get_shipping', '2.1', 'get_total_shipping' ); return $this->get_total_shipping(); } /** * get_order_total function. Alias for get_total() * * @deprecated As of 2.1, use of get_total() is preferred * @return float */ public function get_order_total() { _deprecated_function( 'get_order_total', '2.1', 'get_total' ); return $this->get_total(); } /** End Total Getters *******************************************************/ /** * Gets formatted shipping method title. 
* * @return string */ public function get_shipping_method() { $labels = array(); // Backwards compat < 2.1 - get shipping title stored in meta if ( $this->shipping_method_title ) { $labels[] = $this->shipping_method_title; } else { // 2.1+ get line items for shipping $shipping_methods = $this->get_shipping_methods(); foreach ( $shipping_methods as $shipping ) { $labels[] = $shipping['name']; } } return apply_filters( 'woocommerce_order_shipping_method', implode( ', ', $labels ), $this ); } /** * Gets line subtotal - formatted for display. * * @param array $item * @param string $tax_display * @return string */ public function get_formatted_line_subtotal( $item, $tax_display = '' ) { if ( ! $tax_display ) { $tax_display = $this->tax_display_cart; } if ( ! isset( $item['line_subtotal'] ) || ! isset( $item['line_subtotal_tax'] ) ) { return ''; } if ( 'excl' == $tax_display ) { $ex_tax_label = $this->prices_include_tax ? 1 : 0; $subtotal = wc_price( $this->get_line_subtotal( $item ), array( 'ex_tax_label' => $ex_tax_label, 'currency' => $this->get_order_currency() ) ); } else { $subtotal = wc_price( $this->get_line_subtotal( $item, true ), array('currency' => $this->get_order_currency()) ); } return apply_filters( 'woocommerce_order_formatted_line_subtotal', $subtotal, $item, $this ); } /** * Gets order currency * * @return string */ public function get_order_currency() { return apply_filters( 'woocommerce_get_order_currency', $this->order_currency, $this ); } /** * Gets order total - formatted for display. * * @return string */ public function get_formatted_order_total() { $formatted_total = wc_price( $this->get_total(), array( 'currency' => $this->get_order_currency() ) ); return apply_filters( 'woocommerce_get_formatted_order_total', $formatted_total, $this ); } /** * Gets subtotal - subtotal is shown before discounts, but with localised taxes. * * @param bool $compound (default: false) * @param string $tax_display (default: the tax_display_cart value) * @return string */ public function get_subtotal_to_display( $compound = false, $tax_display = '' ) { if ( ! $tax_display ) { $tax_display = $this->tax_display_cart; } $subtotal = 0; if ( ! $compound ) { foreach ( $this->get_items() as $item ) { if ( ! isset( $item['line_subtotal'] ) || ! isset( $item['line_subtotal_tax'] ) ) { return ''; } $subtotal += $item['line_subtotal']; if ( 'incl' == $tax_display ) { $subtotal += $item['line_subtotal_tax']; } } $subtotal = wc_price( $subtotal, array('currency' => $this->get_order_currency()) ); if ( $tax_display == 'excl' && $this->prices_include_tax ) { $subtotal .= ' <small>' . WC()->countries->ex_tax_or_vat() . '</small>'; } } else { if ( 'incl' == $tax_display ) { return ''; } foreach ( $this->get_items() as $item ) { $subtotal += $item['line_subtotal']; } // Add Shipping Costs $subtotal += $this->get_total_shipping(); // Remove non-compound taxes foreach ( $this->get_taxes() as $tax ) { if ( ! empty( $tax['compound'] ) ) { continue; } $subtotal = $subtotal + $tax['tax_amount'] + $tax['shipping_tax_amount']; } // Remove discounts $subtotal = $subtotal - $this->get_cart_discount(); $subtotal = wc_price( $subtotal, array('currency' => $this->get_order_currency()) ); } return apply_filters( 'woocommerce_order_subtotal_to_display', $subtotal, $compound, $this ); } /** * Gets shipping (formatted). * * @return string */ public function get_shipping_to_display( $tax_display = '' ) { if ( ! 
$tax_display ) { $tax_display = $this->tax_display_cart; } if ( $this->order_shipping > 0 ) { $tax_text = ''; if ( $tax_display == 'excl' ) { // Show shipping excluding tax $shipping = wc_price( $this->order_shipping, array('currency' => $this->get_order_currency()) ); if ( $this->order_shipping_tax > 0 && $this->prices_include_tax ) { $tax_text = WC()->countries->ex_tax_or_vat() . ' '; } } else { // Show shipping including tax $shipping = wc_price( $this->order_shipping + $this->order_shipping_tax, array('currency' => $this->get_order_currency()) ); if ( $this->order_shipping_tax > 0 && ! $this->prices_include_tax ) { $tax_text = WC()->countries->inc_tax_or_vat() . ' '; } } $shipping .= sprintf( __( '&nbsp;<small>%svia %s</small>', 'woocommerce' ), $tax_text, $this->get_shipping_method() ); } elseif ( $this->get_shipping_method() ) { $shipping = $this->get_shipping_method(); } else { $shipping = __( 'Free!', 'woocommerce' ); } return apply_filters( 'woocommerce_order_shipping_to_display', $shipping, $this ); } /** * Get cart discount (formatted). * * @return string. */ public function get_cart_discount_to_display() { return apply_filters( 'woocommerce_order_cart_discount_to_display', wc_price( $this->get_cart_discount(), array( 'currency' => $this->get_order_currency() ) ), $this ); } /** * Get cart discount (formatted). * * @return string */ public function get_order_discount_to_display() { return apply_filters( 'woocommerce_order_discount_to_display', wc_price( $this->get_order_discount(), array( 'currency' => $this->get_order_currency() ) ), $this ); } /** * Get a product (either product or variation). * * @param mixed $item * @return WC_Product */ public function get_product_from_item( $item ) { if ( ! empty( $item['variation_id'] ) && 'product_variation' === get_post_type( $item['variation_id'] ) ) { $_product = wc_get_product( $item['variation_id'] ); } elseif ( ! empty( $item['product_id'] ) ) { $_product = wc_get_product( $item['product_id'] ); } else { $_product = false; } return apply_filters( 'woocommerce_get_product_from_item', $_product, $item, $this ); } /** * Get totals for display on pages and in emails. * * @return array */ public function get_order_item_totals( $tax_display = '' ) { if ( ! $tax_display ) { $tax_display = $this->tax_display_cart; } $total_rows = array(); if ( $subtotal = $this->get_subtotal_to_display( false, $tax_display ) ) { $total_rows['cart_subtotal'] = array( 'label' => __( 'Cart Subtotal:', 'woocommerce' ), 'value' => $subtotal ); } if ( $this->get_cart_discount() > 0 ) { $total_rows['cart_discount'] = array( 'label' => __( 'Cart Discount:', 'woocommerce' ), 'value' => '-' . $this->get_cart_discount_to_display() ); } if ( $this->get_shipping_method() ) { $total_rows['shipping'] = array( 'label' => __( 'Shipping:', 'woocommerce' ), 'value' => $this->get_shipping_to_display() ); } if ( $fees = $this->get_fees() ) foreach( $fees as $id => $fee ) { if ( apply_filters( 'woocommerce_get_order_item_totals_excl_free_fees', $fee['line_total'] + $fee['line_tax'] == 0, $id ) ) { continue; } if ( 'excl' == $tax_display ) { $total_rows[ 'fee_' . $id ] = array( 'label' => $fee['name'] . ':', 'value' => wc_price( $fee['line_total'], array('currency' => $this->get_order_currency()) ) ); } else { $total_rows[ 'fee_' . $id ] = array( 'label' => $fee['name'] . 
':', 'value' => wc_price( $fee['line_total'] + $fee['line_tax'], array('currency' => $this->get_order_currency()) ) ); } } // Tax for tax exclusive prices if ( 'excl' == $tax_display ) { if ( get_option( 'woocommerce_tax_total_display' ) == 'itemized' ) { foreach ( $this->get_tax_totals() as $code => $tax ) { $total_rows[ sanitize_title( $code ) ] = array( 'label' => $tax->label . ':', 'value' => $tax->formatted_amount ); } } else { $total_rows['tax'] = array( 'label' => WC()->countries->tax_or_vat() . ':', 'value' => wc_price( $this->get_total_tax(), array('currency' => $this->get_order_currency()) ) ); } } if ( $this->get_order_discount() > 0 ) { $total_rows['order_discount'] = array( 'label' => __( 'Order Discount:', 'woocommerce' ), 'value' => '-' . $this->get_order_discount_to_display() ); } if ( $this->get_total() > 0 ) { $total_rows['payment_method'] = array( 'label' => __( 'Payment Method:', 'woocommerce' ), 'value' => $this->payment_method_title ); } $total_rows['order_total'] = array( 'label' => __( 'Order Total:', 'woocommerce' ), 'value' => $this->get_formatted_order_total() ); // Tax for inclusive prices if ( 'yes' == get_option( 'woocommerce_calc_taxes' ) && 'incl' == $tax_display ) { $tax_string_array = array(); if ( 'itemized' == get_option( 'woocommerce_tax_total_display' ) ) { foreach ( $this->get_tax_totals() as $code => $tax ) { $tax_string_array[] = sprintf( '%s %s', $tax->formatted_amount, $tax->label ); } } else { $tax_string_array[] = sprintf( '%s %s', wc_price( $this->get_total_tax(), array('currency' => $this->get_order_currency()) ), WC()->countries->tax_or_vat() ); } if ( ! empty( $tax_string_array ) ) { $total_rows['order_total']['value'] .= ' ' . sprintf( __( '(Includes %s)', 'woocommerce' ), implode( ', ', $tax_string_array ) ); } } return apply_filters( 'woocommerce_get_order_item_totals', $total_rows, $this ); } /** * Output items for display in html emails. * * @param bool $show_download_links (default: false) * @param bool $show_sku (default: false) * @param bool $show_purchase_note (default: false) * @param bool $show_image (default: false) * @param array $image_size (default: array( 32, 32 ) * @param bool plain text * @return string */ public function email_order_items_table( $show_download_links = false, $show_sku = false, $show_purchase_note = false, $show_image = false, $image_size = array( 32, 32 ), $plain_text = false ) { ob_start(); $template = $plain_text ? 'emails/plain/email-order-items.php' : 'emails/email-order-items.php'; wc_get_template( $template, array( 'order' => $this, 'items' => $this->get_items(), 'show_download_links' => $show_download_links, 'show_sku' => $show_sku, 'show_purchase_note' => $show_purchase_note, 'show_image' => $show_image, 'image_size' => $image_size ) ); $return = apply_filters( 'woocommerce_email_order_items_table', ob_get_clean(), $this ); return $return; } /** * Checks if product download is permitted * * @return bool */ public function is_download_permitted() { return apply_filters( 'woocommerce_order_is_download_permitted', $this->has_status( 'completed' ) || ( get_option( 'woocommerce_downloads_grant_access_after_payment' ) == 'yes' && $this->has_status( 'processing' ) ), $this ); } /** * Returns true if the order contains a downloadable product. 
* * @return bool */ public function has_downloadable_item() { $has_downloadable_item = false; foreach ( $this->get_items() as $item ) { $_product = $this->get_product_from_item( $item ); if ( $_product && $_product->exists() && $_product->is_downloadable() && $_product->has_file() ) { $has_downloadable_item = true; } } return $has_downloadable_item; } /** * Generates a URL so that a customer can pay for their (unpaid - pending) order. Pass 'true' for the checkout version which doesn't offer gateway choices. * * @param boolean $on_checkout * @return string */ public function get_checkout_payment_url( $on_checkout = false ) { $pay_url = wc_get_endpoint_url( 'order-pay', $this->id, get_permalink( wc_get_page_id( 'checkout' ) ) ); if ( 'yes' == get_option( 'woocommerce_force_ssl_checkout' ) || is_ssl() ) { $pay_url = str_replace( 'http:', 'https:', $pay_url ); } if ( $on_checkout ) { $pay_url = add_query_arg( 'key', $this->order_key, $pay_url ); } else { $pay_url = add_query_arg( array( 'pay_for_order' => 'true', 'key' => $this->order_key ), $pay_url ); } return apply_filters( 'woocommerce_get_checkout_payment_url', $pay_url, $this ); } /** * Generates a URL for the thanks page (order received) * * @return string */ public function get_checkout_order_received_url() { $order_received_url = wc_get_endpoint_url( 'order-received', $this->id, get_permalink( wc_get_page_id( 'checkout' ) ) ); if ( 'yes' == get_option( 'woocommerce_force_ssl_checkout' ) || is_ssl() ) { $order_received_url = str_replace( 'http:', 'https:', $order_received_url ); } $order_received_url = add_query_arg( 'key', $this->order_key, $order_received_url ); return apply_filters( 'woocommerce_get_checkout_order_received_url', $order_received_url, $this ); } /** * Generates a URL so that a customer can cancel their (unpaid - pending) order. * * @return string */ public function get_cancel_order_url( $redirect = '' ) { $cancel_endpoint = get_permalink( wc_get_page_id( 'cart' ) ); if ( ! $cancel_endpoint ) { $cancel_endpoint = home_url(); } if ( false === strpos( $cancel_endpoint, '?' ) ) { $cancel_endpoint = trailingslashit( $cancel_endpoint ); } return apply_filters('woocommerce_get_cancel_order_url', wp_nonce_url( add_query_arg( array( 'cancel_order' => 'true', 'order' => $this->order_key, 'order_id' => $this->id, 'redirect' => $redirect ), $cancel_endpoint ), 'woocommerce-cancel_order' ) ); } /** * Generates a URL to view an order from the my account page * * @return string */ public function get_view_order_url() { $view_order_url = wc_get_endpoint_url( 'view-order', $this->id, get_permalink( wc_get_page_id( 'myaccount' ) ) ); return apply_filters( 'woocommerce_get_view_order_url', $view_order_url, $this ); } /** * Gets any downloadable product file urls. * * @deprecated as of 2.1 get_item_downloads is preferred as downloads are more than just file urls * @param int $product_id product identifier * @param int $variation_id variation identifier, or null * @param array $item the item * @return array available downloadable file urls */ public function get_downloadable_file_urls( $product_id, $variation_id, $item ) { global $wpdb; _deprecated_function( 'get_downloadable_file_urls', '2.1', 'get_item_downloads' ); $download_file = $variation_id > 0 ? 
$variation_id : $product_id; $_product = wc_get_product( $download_file ); $user_email = $this->billing_email; $results = $wpdb->get_results( $wpdb->prepare(" SELECT download_id FROM {$wpdb->prefix}woocommerce_downloadable_product_permissions WHERE user_email = %s AND order_key = %s AND product_id = %s ", $user_email, $this->order_key, $download_file ) ); $file_urls = array(); foreach ( $results as $result ) { if ( $_product->has_file( $result->download_id ) ) { $file_urls[ $_product->get_file_download_path( $result->download_id ) ] = $this->get_download_url( $download_file, $result->download_id ); } } return apply_filters( 'woocommerce_get_downloadable_file_urls', $file_urls, $product_id, $variation_id, $item ); } /** * Get the downloadable files for an item in this order * * @param array $item * @return array */ public function get_item_downloads( $item ) { global $wpdb; $product_id = $item['variation_id'] > 0 ? $item['variation_id'] : $item['product_id']; $product = wc_get_product( $product_id ); $download_ids = $wpdb->get_col( $wpdb->prepare(" SELECT download_id FROM {$wpdb->prefix}woocommerce_downloadable_product_permissions WHERE user_email = %s AND order_key = %s AND product_id = %s ORDER BY permission_id ", $this->billing_email, $this->order_key, $product_id ) ); $files = array(); foreach ( $download_ids as $download_id ) { if ( $product->has_file( $download_id ) ) { $files[ $download_id ] = $product->get_file( $download_id ); $files[ $download_id ]['download_url'] = $this->get_download_url( $product_id, $download_id ); } } return apply_filters( 'woocommerce_get_item_downloads', $files, $item, $this ); } /** * Get the Download URL * * @param int $product_id * @param int $download_id * @return string */ public function get_download_url( $product_id, $download_id ) { return add_query_arg( array( 'download_file' => $product_id, 'order' => $this->order_key, 'email' => urlencode( $this->billing_email ), 'key' => $download_id ), trailingslashit( home_url() ) ); } /** * Adds a note (comment) to the order * * @param string $note Note to add * @param int $is_customer_note (default: 0) Is this a note for the customer? * @return id Comment ID */ public function add_order_note( $note, $is_customer_note = 0 ) { $is_customer_note = intval( $is_customer_note ); if ( is_user_logged_in() && current_user_can( 'edit_shop_order', $this->id ) ) { $user = get_user_by( 'id', get_current_user_id() ); $comment_author = $user->display_name; $comment_author_email = $user->user_email; } else { $comment_author = __( 'WooCommerce', 'woocommerce' ); $comment_author_email = strtolower( __( 'WooCommerce', 'woocommerce' ) ) . '@'; $comment_author_email .= isset( $_SERVER['HTTP_HOST'] ) ? 
str_replace( 'www.', '', $_SERVER['HTTP_HOST'] ) : 'noreply.com'; $comment_author_email = sanitize_email( $comment_author_email ); } $comment_post_ID = $this->id; $comment_author_url = ''; $comment_content = $note; $comment_agent = 'WooCommerce'; $comment_type = 'order_note'; $comment_parent = 0; $comment_approved = 1; $commentdata = apply_filters( 'woocommerce_new_order_note_data', compact( 'comment_post_ID', 'comment_author', 'comment_author_email', 'comment_author_url', 'comment_content', 'comment_agent', 'comment_type', 'comment_parent', 'comment_approved' ), array( 'order_id' => $this->id, 'is_customer_note' => $is_customer_note ) ); $comment_id = wp_insert_comment( $commentdata ); add_comment_meta( $comment_id, 'is_customer_note', $is_customer_note ); if ( $is_customer_note ) { do_action( 'woocommerce_new_customer_note', array( 'order_id' => $this->id, 'customer_note' => $note ) ); } return $comment_id; } /** * Updates status of order * * @param string $new_status Status to change the order to. No internal wc- prefix is required. * @param string $note (default: '') Optional note to add */ public function update_status( $new_status, $note = '' ) { if ( ! $this->id ) { return; } // Standardise status names. $new_status = 'wc-' === substr( $new_status, 0, 3 ) ? substr( $new_status, 3 ) : $new_status; $old_status = $this->get_status(); // Only update if they differ - and ensure post_status is a 'wc' status. if ( $new_status !== $old_status || ! in_array( $this->post_status, array_keys( wc_get_order_statuses() ) ) ) { // Update the order wp_update_post( array( 'ID' => $this->id, 'post_status' => 'wc-' . $new_status ) ); $this->post_status = 'wc-' . $new_status; $this->add_order_note( trim( $note . ' ' . sprintf( __( 'Order status changed from %s to %s.', 'woocommerce' ), wc_get_order_status_name( $old_status ), wc_get_order_status_name( $new_status ) ) ) ); // Status was changed do_action( 'woocommerce_order_status_' . $new_status, $this->id ); do_action( 'woocommerce_order_status_' . $old_status . '_to_' . 
$new_status, $this->id ); do_action( 'woocommerce_order_status_changed', $this->id, $old_status, $new_status ); switch ( $new_status ) { case 'completed' : // Record the sales $this->record_product_sales(); // Increase coupon usage counts $this->increase_coupon_usage_counts(); // Record the completed date of the order update_post_meta( $this->id, '_completed_date', current_time('mysql') ); // Update reports wc_delete_shop_order_transients( $this->id ); break; case 'processing' : case 'on-hold' : // Record the sales $this->record_product_sales(); // Increase coupon usage counts $this->increase_coupon_usage_counts(); // Update reports wc_delete_shop_order_transients( $this->id ); break; case 'cancelled' : // If the order is cancelled, restore used coupons $this->decrease_coupon_usage_counts(); // Update reports wc_delete_shop_order_transients( $this->id ); break; } } } /** * Cancel the order and restore the cart (before payment) * * @param string $note (default: '') Optional note to add */ public function cancel_order( $note = '' ) { unset( WC()->session->order_awaiting_payment ); $this->update_status( 'cancelled', $note ); } /** * When a payment is complete this function is called * * Most of the time this should mark an order as 'processing' so that admin can process/post the items * If the cart contains only downloadable items then the order is 'completed' since the admin needs to take no action * Stock levels are reduced at this point * Sales are also recorded for products * Finally, record the date of payment * * @param $transaction_id string Optional transaction id to store in post meta */ public function payment_complete( $transaction_id = '' ) { do_action( 'woocommerce_pre_payment_complete', $this->id ); if ( ! empty( WC()->session->order_awaiting_payment ) ) { unset( WC()->session->order_awaiting_payment ); } $valid_order_statuses = apply_filters( 'woocommerce_valid_order_statuses_for_payment_complete', array( 'on-hold', 'pending', 'failed' ), $this ); if ( $this->id && $this->has_status( $valid_order_statuses ) ) { $order_needs_processing = true; if ( sizeof( $this->get_items() ) > 0 ) { foreach ( $this->get_items() as $item ) { if ( $item['product_id'] > 0 ) { $_product = $this->get_product_from_item( $item ); if ( false !== $_product && ! apply_filters( 'woocommerce_order_item_needs_processing', ! ( $_product->is_downloadable() && $_product->is_virtual() ), $_product, $this->id ) ) { $order_needs_processing = false; continue; } } $order_needs_processing = true; break; } } $new_order_status = $order_needs_processing ? 'processing' : 'completed'; $new_order_status = apply_filters( 'woocommerce_payment_complete_order_status', $new_order_status, $this->id ); $this->update_status( $new_order_status ); add_post_meta( $this->id, '_paid_date', current_time('mysql'), true ); if ( ! empty( $transaction_id ) ) { add_post_meta( $this->id, '_transaction_id', $transaction_id, true ); } $this_order = array( 'ID' => $this->id, 'post_date' => current_time( 'mysql', 0 ), 'post_date_gmt' => current_time( 'mysql', 1 ) ); wp_update_post( $this_order ); if ( apply_filters( 'woocommerce_payment_complete_reduce_order_stock', true, $this->id ) ) { $this->reduce_order_stock(); // Payment is complete so reduce stock levels } do_action( 'woocommerce_payment_complete', $this->id ); } else { do_action( 'woocommerce_payment_complete_order_status_' . 
$this->get_status(), $this->id ); } } /** * Record sales */ public function record_product_sales() { if ( 'yes' == get_post_meta( $this->id, '_recorded_sales', true ) ) { return; } if ( sizeof( $this->get_items() ) > 0 ) { foreach ( $this->get_items() as $item ) { if ( $item['product_id'] > 0 ) { $sales = (int) get_post_meta( $item['product_id'], 'total_sales', true ); $sales += (int) $item['qty']; if ( $sales ) { update_post_meta( $item['product_id'], 'total_sales', $sales ); } } } } update_post_meta( $this->id, '_recorded_sales', 'yes' ); } /** * Get coupon codes only. * * @return array */ public function get_used_coupons() { $codes = array(); $coupons = $this->get_items( 'coupon' ); foreach ( $coupons as $item_id => $item ) { $codes[] = trim( $item['name'] ); } return $codes; } /** * Increase applied coupon counts */ public function increase_coupon_usage_counts() { if ( 'yes' == get_post_meta( $this->id, '_recorded_coupon_usage_counts', true ) ) { return; } if ( sizeof( $this->get_used_coupons() ) > 0 ) { foreach ( $this->get_used_coupons() as $code ) { if ( ! $code ) { continue; } $coupon = new WC_Coupon( $code ); $used_by = $this->user_id; if ( ! $used_by ) { $used_by = $this->billing_email; } $coupon->inc_usage_count( $used_by ); } } update_post_meta( $this->id, '_recorded_coupon_usage_counts', 'yes' ); } /** * Decrease applied coupon counts */ public function decrease_coupon_usage_counts() { if ( 'yes' != get_post_meta( $this->id, '_recorded_coupon_usage_counts', true ) ) { return; } if ( sizeof( $this->get_used_coupons() ) > 0 ) { foreach ( $this->get_used_coupons() as $code ) { if ( ! $code ) { continue; } $coupon = new WC_Coupon( $code ); $used_by = $this->user_id; if ( ! $used_by ) { $used_by = $this->billing_email; } $coupon->dcr_usage_count( $used_by ); } } delete_post_meta( $this->id, '_recorded_coupon_usage_counts' ); } /** * Reduce stock levels */ public function reduce_order_stock() { if ( 'yes' == get_option('woocommerce_manage_stock') && sizeof( $this->get_items() ) > 0 ) { // Reduce stock levels and do any other actions with products in the cart foreach ( $this->get_items() as $item ) { if ( $item['product_id'] > 0 ) { $_product = $this->get_product_from_item( $item ); if ( $_product && $_product->exists() && $_product->managing_stock() ) { $qty = apply_filters( 'woocommerce_order_item_quantity', $item['qty'], $this, $item ); $new_stock = $_product->reduce_stock( $qty ); $this->add_order_note( sprintf( __( 'Item #%s stock reduced from %s to %s.', 'woocommerce' ), $item['product_id'], $new_stock + $qty, $new_stock) ); $this->send_stock_notifications( $_product, $new_stock, $item['qty'] ); } } } do_action( 'woocommerce_reduce_order_stock', $this ); $this->add_order_note( __( 'Order item stock reduced successfully.', 'woocommerce' ) ); } } /** * send_stock_notifications function. * * @param WC_Product $product * @param int $new_stock * @param int $qty_ordered */ public function send_stock_notifications( $product, $new_stock, $qty_ordered ) { // Backorders if ( $new_stock < 0 ) { do_action( 'woocommerce_product_on_backorder', array( 'product' => $product, 'order_id' => $this->id, 'quantity' => $qty_ordered ) ); } // stock status notifications $notification_sent = false; if ( 'yes' == get_option( 'woocommerce_notify_no_stock' ) && get_option('woocommerce_notify_no_stock_amount') >= $new_stock ) { do_action( 'woocommerce_no_stock', $product ); $notification_sent = true; } if ( ! 
$notification_sent && 'yes' == get_option( 'woocommerce_notify_low_stock' ) && get_option('woocommerce_notify_low_stock_amount') >= $new_stock ) { do_action( 'woocommerce_low_stock', $product ); $notification_sent = true; } } /** * List order notes (public) for the customer * * @return array */ public function get_customer_order_notes() { $notes = array(); $args = array( 'post_id' => $this->id, 'approve' => 'approve', 'type' => '' ); remove_filter( 'comments_clauses', array( 'WC_Comments', 'exclude_order_comments' ) ); $comments = get_comments( $args ); foreach ( $comments as $comment ) { $is_customer_note = get_comment_meta( $comment->comment_ID, 'is_customer_note', true ); $comment->comment_content = make_clickable( $comment->comment_content ); if ( $is_customer_note ) { $notes[] = $comment; } } add_filter( 'comments_clauses', array( 'WC_Comments', 'exclude_order_comments' ) ); return (array) $notes; } /** * Checks if an order needs payment, based on status and order total * * @return bool */ public function needs_payment() { $valid_order_statuses = apply_filters( 'woocommerce_valid_order_statuses_for_payment', array( 'pending', 'failed' ), $this ); if ( $this->has_status( $valid_order_statuses ) && $this->get_total() > 0 ) { $needs_payment = true; } else { $needs_payment = false; } return apply_filters( 'woocommerce_order_needs_payment', $needs_payment, $this, $valid_order_statuses ); } /** * Checks if an order needs display the shipping address, based on shipping method * * @return boolean */ public function needs_shipping_address() { $hide = apply_filters( 'woocommerce_order_hide_shipping_address', array( 'local_pickup' ), $this ); $needs = false; foreach ( $this->get_shipping_methods() as $shipping_method ) { if ( ! in_array( $shipping_method['method_id'], $hide ) ) { $needs = true; break; } } return $needs; } /** * Checks if an order can be edited, specifically for use on the Edit Order screen * * @access public * @return bool */ public function is_editable() { if ( ! isset( $this->editable ) ) { $this->editable = in_array( $this->get_status(), array( 'pending', 'on-hold', 'auto-draft' ) ); } return apply_filters( 'wc_order_is_editable', $this->editable, $this ); } }
pknzghq10000/wp
wp-content/plugins/woocommerce/trunk/includes/abstracts/abstract-wc-order.php
PHP
gpl-2.0
69,079
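The WooCommerce abstract order class stored above drives the order lifecycle through public methods such as needs_payment(), get_checkout_payment_url(), payment_complete() and add_order_note(). A minimal, hedged usage sketch follows — illustrative only, not part of the stored file — assuming a WooCommerce 2.2-era install where wc_get_order() returns an instance of this class, and where $order_id, $transaction_id and the 'my-plugin' text domain are placeholders supplied by the calling plugin:

<?php
// Illustrative sketch only. Assumptions: WooCommerce is loaded, $order_id is an
// existing order ID, $transaction_id comes from the payment gateway, and the
// 'my-plugin' text domain is a placeholder.
$order = wc_get_order( $order_id );

if ( $order && $order->needs_payment() ) {
	// needs_payment() checks the status ('pending'/'failed') and a positive total,
	// exactly as implemented in the class above.
	wp_redirect( $order->get_checkout_payment_url() );
	exit;
}

// Once the gateway confirms payment, payment_complete() moves the order to
// 'processing' or 'completed', stores _paid_date/_transaction_id and reduces stock.
$order->payment_complete( $transaction_id );
$order->add_order_note( __( 'Payment confirmed by gateway.', 'my-plugin' ) );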
<?php
/*************************************************************************************
 * bf.php
 * ----------
 * Author: Benny Baumann (BenBE@geshi.org)
 * Copyright: (c) 2008 Benny Baumann (http://qbnz.com/highlighter/)
 * Release Version: 1.0.9.0
 * Date Started: 2009/10/31
 *
 * Brainfuck language file for GeSHi.
 *
 * CHANGES
 * -------
 * 2008/10/31 (1.0.8.1)
 *  - First Release
 *
 * TODO
 * ----
 *
 *************************************************************************************
 *
 *     This file is part of GeSHi.
 *
 *   GeSHi is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   GeSHi is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with GeSHi; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 ************************************************************************************/

$language_data = array (
    'LANG_NAME' => 'Brainfuck',
    'COMMENT_SINGLE' => array(),
    'COMMENT_MULTI' => array(),
    'COMMENT_REGEXP' => array(1 => '/[^\n+\-<>\[\]\.\,Y]+/s'),
    'CASE_KEYWORDS' => GESHI_CAPS_UPPER,
    'QUOTEMARKS' => array(),
    'ESCAPE_CHAR' => '',
    'KEYWORDS' => array(
        ),
    'SYMBOLS' => array(
        0 => array('+', '-'),
        1 => array('[', ']'),
        2 => array('<', '>'),
        3 => array('.', ','),
        4 => array('Y') //Brainfork Extension ;-)
        ),
    'CASE_SENSITIVE' => array(
        GESHI_COMMENTS => false,
        ),
    'STYLES' => array(
        'KEYWORDS' => array(
            ),
        'COMMENTS' => array(
            1 => 'color: #666666; font-style: italic;'
            ),
        'BRACKETS' => array(
            0 => 'color: #660000;'
            ),
        'STRINGS' => array(
            0 => 'color: #ff0000;'
            ),
        'NUMBERS' => array(
            ),
        'METHODS' => array(
            ),
        'SYMBOLS' => array(
            0 => 'color: #006600;',
            1 => 'color: #660000;',
            2 => 'color: #000066;',
            3 => 'color: #666600;',
            4 => 'color: #660066;'
            ),
        'ESCAPE_CHAR' => array(
            ),
        'SCRIPT' => array(
            ),
        'REGEXPS' => array(
            )
        ),
    'URLS' => array(
        ),
    'OOLANG' => false,
    'OBJECT_SPLITTERS' => array(
        ),
    'REGEXPS' => array(
        ),
    'STRICT_MODE_APPLIES' => GESHI_NEVER,
    'SCRIPT_DELIMITERS' => array(
        ),
    'HIGHLIGHT_STRICT_BLOCK' => array(
        ),
    'TAB_WIDTH' => 4,
    'PARSER_CONTROL' => array(
        'ENABLE_FLAGS' => array(
            'STRINGS' => GESHI_NEVER,
            'NUMBERS' => GESHI_NEVER,
            'BRACKETS' => GESHI_NEVER
            ),
        'KEYWORDS' => array(
            'DISALLOW_BEFORE' => '',
            'DISALLOW_AFTER' => ''
            )
        )
);
NewRoute/glfusion
private/vendor/geshi/geshi/src/geshi/bf.php
PHP
gpl-2.0
3,329
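The record above is a GeSHi language definition: it only supplies the $language_data array (symbol groups, styles, parser flags) for Brainfuck and does nothing on its own until GeSHi loads it by its short name. A minimal, hedged usage sketch follows — illustrative only, not one of the stored files — assuming a standard GeSHi 1.0.x install where geshi.php is on the include path and language files are resolved by file name ('bf'):

<?php
// Illustrative sketch only. Assumptions: GeSHi 1.0.x is installed and its
// language directory contains the bf.php definition shown above.
require_once 'geshi.php';

$source = '++++++++[>++++++++<-]>+.';  // small Brainfuck program (prints 'A')
$geshi  = new GeSHi( $source, 'bf' );  // 'bf' selects the language file above
$geshi->set_header_type( GESHI_HEADER_DIV );

echo $geshi->parse_code();             // emits the highlighted HTML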
/* Connection state tracking for netfilter. This is separated from, but required by, the NAT layer; it can also be used by an iptables extension. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> * (C) 2005-2012 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/jhash.h> #include <linux/err.h> #include <linux/percpu.h> #include <linux/moduleparam.h> #include <linux/notifier.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/mm.h> #include <linux/nsproxy.h> #include <linux/rculist_nulls.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l3proto.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_seqadj.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_acct.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_conntrack_timestamp.h> #include <net/netfilter/nf_conntrack_timeout.h> #include <net/netfilter/nf_conntrack_labels.h> #include <net/netfilter/nf_conntrack_synproxy.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_core.h> #include <net/netfilter/nf_nat_helper.h> #define NF_CONNTRACK_VERSION "0.5.0" int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr) __read_mostly; EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; EXPORT_SYMBOL_GPL(nf_conntrack_locks); __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); static __read_mostly spinlock_t nf_conntrack_locks_all_lock; static __read_mostly bool nf_conntrack_locks_all; void nf_conntrack_lock(spinlock_t *lock) __acquires(lock) { spin_lock(lock); while (unlikely(nf_conntrack_locks_all)) { spin_unlock(lock); spin_lock(&nf_conntrack_locks_all_lock); spin_unlock(&nf_conntrack_locks_all_lock); spin_lock(lock); } } EXPORT_SYMBOL_GPL(nf_conntrack_lock); static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2) { h1 %= CONNTRACK_LOCKS; h2 %= CONNTRACK_LOCKS; spin_unlock(&nf_conntrack_locks[h1]); if (h1 != h2) spin_unlock(&nf_conntrack_locks[h2]); } /* return true if we need to recompute hashes (in case hash table was resized) */ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1, unsigned int h2, unsigned int sequence) { h1 %= CONNTRACK_LOCKS; h2 %= CONNTRACK_LOCKS; if (h1 <= h2) { nf_conntrack_lock(&nf_conntrack_locks[h1]); if (h1 != h2) spin_lock_nested(&nf_conntrack_locks[h2], SINGLE_DEPTH_NESTING); } else { nf_conntrack_lock(&nf_conntrack_locks[h2]); spin_lock_nested(&nf_conntrack_locks[h1], SINGLE_DEPTH_NESTING); } if (read_seqcount_retry(&net->ct.generation, sequence)) { 
nf_conntrack_double_unlock(h1, h2); return true; } return false; } static void nf_conntrack_all_lock(void) { int i; spin_lock(&nf_conntrack_locks_all_lock); nf_conntrack_locks_all = true; for (i = 0; i < CONNTRACK_LOCKS; i++) { spin_lock(&nf_conntrack_locks[i]); spin_unlock(&nf_conntrack_locks[i]); } } static void nf_conntrack_all_unlock(void) { nf_conntrack_locks_all = false; spin_unlock(&nf_conntrack_locks_all_lock); } unsigned int nf_conntrack_htable_size __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); unsigned int nf_conntrack_max __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_max); DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); unsigned int nf_conntrack_hash_rnd __read_mostly; EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd); static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple) { unsigned int n; /* The direction must be ignored, so we hash everything up to the * destination ports (which is a multiple of 4) and treat the last * three bytes manually. */ n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^ (((__force __u16)tuple->dst.u.all << 16) | tuple->dst.protonum)); } static u32 __hash_bucket(u32 hash, unsigned int size) { return reciprocal_scale(hash, size); } static u32 hash_bucket(u32 hash, const struct net *net) { return __hash_bucket(hash, net->ct.htable_size); } static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, unsigned int size) { return __hash_bucket(hash_conntrack_raw(tuple), size); } static inline u_int32_t hash_conntrack(const struct net *net, const struct nf_conntrack_tuple *tuple) { return __hash_conntrack(tuple, net->ct.htable_size); } bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, struct net *net, struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto) { memset(tuple, 0, sizeof(*tuple)); tuple->src.l3num = l3num; if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0) return false; tuple->dst.protonum = protonum; tuple->dst.dir = IP_CT_DIR_ORIGINAL; return l4proto->pkt_to_tuple(skb, dataoff, net, tuple); } EXPORT_SYMBOL_GPL(nf_ct_get_tuple); bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, u_int16_t l3num, struct net *net, struct nf_conntrack_tuple *tuple) { struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; unsigned int protoff; u_int8_t protonum; int ret; rcu_read_lock(); l3proto = __nf_ct_l3proto_find(l3num); ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum); if (ret != NF_ACCEPT) { rcu_read_unlock(); return false; } l4proto = __nf_ct_l4proto_find(l3num, protonum); ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple, l3proto, l4proto); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto) { memset(inverse, 0, sizeof(*inverse)); inverse->src.l3num = orig->src.l3num; if (l3proto->invert_tuple(inverse, orig) == 0) return false; inverse->dst.dir = !orig->dst.dir; inverse->dst.protonum = orig->dst.protonum; return l4proto->invert_tuple(inverse, orig); } EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); static void clean_from_lists(struct nf_conn *ct) { pr_debug("clean_from_lists(%p)\n", ct); 
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); /* Destroy all pending expectations */ nf_ct_remove_expectations(ct); } /* must be called with local_bh_disable */ static void nf_ct_add_to_dying_list(struct nf_conn *ct) { struct ct_pcpu *pcpu; /* add this conntrack to the (per cpu) dying list */ ct->cpu = smp_processor_id(); pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); spin_lock(&pcpu->lock); hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &pcpu->dying); spin_unlock(&pcpu->lock); } /* must be called with local_bh_disable */ static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct) { struct ct_pcpu *pcpu; /* add this conntrack to the (per cpu) unconfirmed list */ ct->cpu = smp_processor_id(); pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); spin_lock(&pcpu->lock); hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &pcpu->unconfirmed); spin_unlock(&pcpu->lock); } /* must be called with local_bh_disable */ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct) { struct ct_pcpu *pcpu; /* We overload first tuple to link into unconfirmed or dying list.*/ pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); spin_lock(&pcpu->lock); BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); spin_unlock(&pcpu->lock); } /* Released via destroy_conntrack() */ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, const struct nf_conntrack_zone *zone, gfp_t flags) { struct nf_conn *tmpl; tmpl = kzalloc(sizeof(*tmpl), flags); if (tmpl == NULL) return NULL; tmpl->status = IPS_TEMPLATE; write_pnet(&tmpl->ct_net, net); if (nf_ct_zone_add(tmpl, flags, zone) < 0) goto out_free; atomic_set(&tmpl->ct_general.use, 0); return tmpl; out_free: kfree(tmpl); return NULL; } EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc); void nf_ct_tmpl_free(struct nf_conn *tmpl) { nf_ct_ext_destroy(tmpl); nf_ct_ext_free(tmpl); kfree(tmpl); } EXPORT_SYMBOL_GPL(nf_ct_tmpl_free); static void destroy_conntrack(struct nf_conntrack *nfct) { struct nf_conn *ct = (struct nf_conn *)nfct; struct net *net = nf_ct_net(ct); struct nf_conntrack_l4proto *l4proto; pr_debug("destroy_conntrack(%p)\n", ct); NF_CT_ASSERT(atomic_read(&nfct->use) == 0); NF_CT_ASSERT(!timer_pending(&ct->timeout)); if (unlikely(nf_ct_is_template(ct))) { nf_ct_tmpl_free(ct); return; } rcu_read_lock(); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto && l4proto->destroy) l4proto->destroy(ct); rcu_read_unlock(); local_bh_disable(); /* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, * before connection is in the list, so we need to clean here, * too. 
*/ nf_ct_remove_expectations(ct); nf_ct_del_from_dying_or_unconfirmed_list(ct); NF_CT_STAT_INC(net, delete); local_bh_enable(); if (ct->master) nf_ct_put(ct->master); pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct); nf_conntrack_free(ct); } static void nf_ct_delete_from_lists(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); unsigned int hash, reply_hash; unsigned int sequence; nf_ct_helper_destroy(ct); local_bh_disable(); do { sequence = read_seqcount_begin(&net->ct.generation); hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); reply_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); clean_from_lists(ct); nf_conntrack_double_unlock(hash, reply_hash); nf_ct_add_to_dying_list(ct); NF_CT_STAT_INC(net, delete_list); local_bh_enable(); } bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) { struct nf_conn_tstamp *tstamp; tstamp = nf_conn_tstamp_find(ct); if (tstamp && tstamp->stop == 0) tstamp->stop = ktime_get_real_ns(); if (nf_ct_is_dying(ct)) goto delete; if (nf_conntrack_event_report(IPCT_DESTROY, ct, portid, report) < 0) { /* destroy event was not delivered */ nf_ct_delete_from_lists(ct); nf_conntrack_ecache_delayed_work(nf_ct_net(ct)); return false; } nf_conntrack_ecache_work(nf_ct_net(ct)); set_bit(IPS_DYING_BIT, &ct->status); delete: nf_ct_delete_from_lists(ct); nf_ct_put(ct); return true; } EXPORT_SYMBOL_GPL(nf_ct_delete); static void death_by_timeout(unsigned long ul_conntrack) { nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0); } static inline bool nf_ct_key_equal(struct nf_conntrack_tuple_hash *h, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone) { struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); /* A conntrack can be recreated with the equal tuple, * so we need to check that the conntrack is confirmed */ return nf_ct_tuple_equal(tuple, &h->tuple) && nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) && nf_ct_is_confirmed(ct); } /* * Warning : * - Caller must take a reference on returned object * and recheck nf_ct_tuple_equal(tuple, &h->tuple) */ static struct nf_conntrack_tuple_hash * ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple, u32 hash) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; unsigned int bucket = hash_bucket(hash, net); /* Disable BHs the entire time since we normally need to disable them * at least once for the stats anyway. */ local_bh_disable(); begin: hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { if (nf_ct_key_equal(h, tuple, zone)) { NF_CT_STAT_INC(net, found); local_bh_enable(); return h; } NF_CT_STAT_INC(net, searched); } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(n) != bucket) { NF_CT_STAT_INC(net, search_restart); goto begin; } local_bh_enable(); return NULL; } /* Find a connection corresponding to a tuple. 
*/ static struct nf_conntrack_tuple_hash * __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple, u32 hash) { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; rcu_read_lock(); begin: h = ____nf_conntrack_find(net, zone, tuple, hash); if (h) { ct = nf_ct_tuplehash_to_ctrack(h); if (unlikely(nf_ct_is_dying(ct) || !atomic_inc_not_zero(&ct->ct_general.use))) h = NULL; else { if (unlikely(!nf_ct_key_equal(h, tuple, zone))) { nf_ct_put(ct); goto begin; } } } rcu_read_unlock(); return h; } struct nf_conntrack_tuple_hash * nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *tuple) { return __nf_conntrack_find_get(net, zone, tuple, hash_conntrack_raw(tuple)); } EXPORT_SYMBOL_GPL(nf_conntrack_find_get); static void __nf_conntrack_hash_insert(struct nf_conn *ct, unsigned int hash, unsigned int reply_hash) { struct net *net = nf_ct_net(ct); hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &net->ct.hash[hash]); hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &net->ct.hash[reply_hash]); } int nf_conntrack_hash_check_insert(struct nf_conn *ct) { const struct nf_conntrack_zone *zone; struct net *net = nf_ct_net(ct); unsigned int hash, reply_hash; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; unsigned int sequence; zone = nf_ct_zone(ct); local_bh_disable(); do { sequence = read_seqcount_begin(&net->ct.generation); hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); reply_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); /* See if there's one in the list already, including reverse */ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, &h->tuple) && nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, NF_CT_DIRECTION(h))) goto out; hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, &h->tuple) && nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, NF_CT_DIRECTION(h))) goto out; add_timer(&ct->timeout); smp_wmb(); /* The caller holds a reference to this object */ atomic_set(&ct->ct_general.use, 2); __nf_conntrack_hash_insert(ct, hash, reply_hash); nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert); local_bh_enable(); return 0; out: nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert_failed); local_bh_enable(); return -EEXIST; } EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); /* Confirm a connection given skb; places it in hash table */ int __nf_conntrack_confirm(struct sk_buff *skb) { const struct nf_conntrack_zone *zone; unsigned int hash, reply_hash; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct nf_conn_help *help; struct nf_conn_tstamp *tstamp; struct hlist_nulls_node *n; enum ip_conntrack_info ctinfo; struct net *net; unsigned int sequence; ct = nf_ct_get(skb, &ctinfo); net = nf_ct_net(ct); /* ipt_REJECT uses nf_conntrack_attach to attach related ICMP/TCP RST packets in other direction. Actual packet which created connection will be IP_CT_NEW or for an expected connection, IP_CT_RELATED. 
*/ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) return NF_ACCEPT; zone = nf_ct_zone(ct); local_bh_disable(); do { sequence = read_seqcount_begin(&net->ct.generation); /* reuse the hash saved before */ hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; hash = hash_bucket(hash, net); reply_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); /* We're not in hash table, and we refuse to set up related * connections for unconfirmed conns. But packet copies and * REJECT will give spurious warnings here. */ /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ /* No external references means no one else could have * confirmed us. */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); pr_debug("Confirming conntrack %p\n", ct); /* We have to check the DYING flag after unlink to prevent * a race against nf_ct_get_next_corpse() possibly called from * user context, else we insert an already 'dead' hash, blocking * further use of that particular connection -JM. */ nf_ct_del_from_dying_or_unconfirmed_list(ct); if (unlikely(nf_ct_is_dying(ct))) goto out; /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're not in the hash. If there is, we lost race. */ hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, &h->tuple) && nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, NF_CT_DIRECTION(h))) goto out; hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, &h->tuple) && nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone, NF_CT_DIRECTION(h))) goto out; /* Timer relative to confirmation time, not original setting time, otherwise we'd get timer wrap in weird delay cases. */ ct->timeout.expires += jiffies; add_timer(&ct->timeout); atomic_inc(&ct->ct_general.use); ct->status |= IPS_CONFIRMED; /* set conntrack timestamp, if enabled. */ tstamp = nf_conn_tstamp_find(ct); if (tstamp) { if (skb->tstamp.tv64 == 0) __net_timestamp(skb); tstamp->start = ktime_to_ns(skb->tstamp); } /* Since the lookup is lockless, hash insertion must be done after * starting the timer and setting the CONFIRMED bit. The RCU barriers * guarantee that no other CPU can find the conntrack before the above * stores are visible. */ __nf_conntrack_hash_insert(ct, hash, reply_hash); nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert); local_bh_enable(); help = nfct_help(ct); if (help && help->helper) nf_conntrack_event_cache(IPCT_HELPER, ct); nf_conntrack_event_cache(master_ct(ct) ? IPCT_RELATED : IPCT_NEW, ct); return NF_ACCEPT; out: nf_ct_add_to_dying_list(ct); nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert_failed); local_bh_enable(); return NF_DROP; } EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); /* Returns true if a connection correspondings to the tuple (required for NAT). */ int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack) { struct net *net = nf_ct_net(ignored_conntrack); const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; struct nf_conn *ct; unsigned int hash; zone = nf_ct_zone(ignored_conntrack); hash = hash_conntrack(net, tuple); /* Disable BHs the entire time since we need to disable them at * least once for the stats anyway. 
*/ rcu_read_lock_bh(); hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (ct != ignored_conntrack && nf_ct_tuple_equal(tuple, &h->tuple) && nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) { NF_CT_STAT_INC(net, found); rcu_read_unlock_bh(); return 1; } NF_CT_STAT_INC(net, searched); } rcu_read_unlock_bh(); return 0; } EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); #define NF_CT_EVICTION_RANGE 8 /* There's a small race here where we may free a just-assured connection. Too bad: we're in trouble anyway. */ static noinline int early_drop(struct net *net, unsigned int _hash) { /* Use oldest entry, which is roughly LRU */ struct nf_conntrack_tuple_hash *h; struct nf_conn *ct = NULL, *tmp; struct hlist_nulls_node *n; unsigned int i = 0, cnt = 0; int dropped = 0; unsigned int hash, sequence; spinlock_t *lockp; local_bh_disable(); restart: sequence = read_seqcount_begin(&net->ct.generation); hash = hash_bucket(_hash, net); for (; i < net->ct.htable_size; i++) { lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; nf_conntrack_lock(lockp); if (read_seqcount_retry(&net->ct.generation, sequence)) { spin_unlock(lockp); goto restart; } hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); if (!test_bit(IPS_ASSURED_BIT, &tmp->status) && !nf_ct_is_dying(tmp) && atomic_inc_not_zero(&tmp->ct_general.use)) { ct = tmp; break; } cnt++; } hash = (hash + 1) % net->ct.htable_size; spin_unlock(lockp); if (ct || cnt >= NF_CT_EVICTION_RANGE) break; } local_bh_enable(); if (!ct) return dropped; if (del_timer(&ct->timeout)) { if (nf_ct_delete(ct, 0, 0)) { dropped = 1; NF_CT_STAT_INC_ATOMIC(net, early_drop); } } nf_ct_put(ct); return dropped; } void init_nf_conntrack_hash_rnd(void) { unsigned int rand; /* * Why not initialize nf_conntrack_rnd in a "init()" function ? * Because there isn't enough entropy when system initializing, * and we initialize it as late as possible. */ do { get_random_bytes(&rand, sizeof(rand)); } while (!rand); cmpxchg(&nf_conntrack_hash_rnd, 0, rand); } static struct nf_conn * __nf_conntrack_alloc(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_tuple *repl, gfp_t gfp, u32 hash) { struct nf_conn *ct; if (unlikely(!nf_conntrack_hash_rnd)) { init_nf_conntrack_hash_rnd(); /* recompute the hash as nf_conntrack_hash_rnd is initialized */ hash = hash_conntrack_raw(orig); } /* We don't want any race condition at early drop stage */ atomic_inc(&net->ct.count); if (nf_conntrack_max && unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { if (!early_drop(net, hash)) { atomic_dec(&net->ct.count); net_warn_ratelimited("nf_conntrack: table full, dropping packet\n"); return ERR_PTR(-ENOMEM); } } /* * Do not use kmem_cache_zalloc(), as this cache uses * SLAB_DESTROY_BY_RCU. 
*/ ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); if (ct == NULL) goto out; spin_lock_init(&ct->lock); ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL; ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; /* save hash for reusing when confirming */ *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; ct->status = 0; /* Don't set timer yet: wait for confirmation */ setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); write_pnet(&ct->ct_net, net); memset(&ct->__nfct_init_offset[0], 0, offsetof(struct nf_conn, proto) - offsetof(struct nf_conn, __nfct_init_offset[0])); if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0) goto out_free; /* Because we use RCU lookups, we set ct_general.use to zero before * this is inserted in any list. */ atomic_set(&ct->ct_general.use, 0); return ct; out_free: kmem_cache_free(net->ct.nf_conntrack_cachep, ct); out: atomic_dec(&net->ct.count); return ERR_PTR(-ENOMEM); } struct nf_conn *nf_conntrack_alloc(struct net *net, const struct nf_conntrack_zone *zone, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_tuple *repl, gfp_t gfp) { return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0); } EXPORT_SYMBOL_GPL(nf_conntrack_alloc); void nf_conntrack_free(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); /* A freed object has refcnt == 0, that's * the golden rule for SLAB_DESTROY_BY_RCU */ NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0); nf_ct_ext_destroy(ct); nf_ct_ext_free(ct); kmem_cache_free(net->ct.nf_conntrack_cachep, ct); smp_mb__before_atomic(); atomic_dec(&net->ct.count); } EXPORT_SYMBOL_GPL(nf_conntrack_free); /* Allocate a new conntrack: we return -ENOMEM if classification failed due to stress. Otherwise it really is unclassifiable. */ static struct nf_conntrack_tuple_hash * init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_l3proto *l3proto, struct nf_conntrack_l4proto *l4proto, struct sk_buff *skb, unsigned int dataoff, u32 hash) { struct nf_conn *ct; struct nf_conn_help *help; struct nf_conntrack_tuple repl_tuple; struct nf_conntrack_ecache *ecache; struct nf_conntrack_expect *exp = NULL; const struct nf_conntrack_zone *zone; struct nf_conn_timeout *timeout_ext; struct nf_conntrack_zone tmp; unsigned int *timeouts; if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { pr_debug("Can't invert tuple.\n"); return NULL; } zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC, hash); if (IS_ERR(ct)) return (struct nf_conntrack_tuple_hash *)ct; if (tmpl && nfct_synproxy(tmpl)) { nfct_seqadj_ext_add(ct); nfct_synproxy_ext_add(ct); } timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL; if (timeout_ext) { timeouts = nf_ct_timeout_data(timeout_ext); if (unlikely(!timeouts)) timeouts = l4proto->get_timeouts(net); } else { timeouts = l4proto->get_timeouts(net); } if (!l4proto->new(ct, skb, dataoff, timeouts)) { nf_conntrack_free(ct); pr_debug("init conntrack: can't track with proto module\n"); return NULL; } if (timeout_ext) nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout), GFP_ATOMIC); nf_ct_acct_ext_add(ct, GFP_ATOMIC); nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); nf_ct_labels_ext_add(ct); ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL; nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0, ecache ? 
ecache->expmask : 0, GFP_ATOMIC); local_bh_disable(); if (net->ct.expect_count) { spin_lock(&nf_conntrack_expect_lock); exp = nf_ct_find_expectation(net, zone, tuple); if (exp) { pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", ct, exp); /* Welcome, Mr. Bond. We've been expecting you... */ __set_bit(IPS_EXPECTED_BIT, &ct->status); /* exp->master safe, refcnt bumped in nf_ct_find_expectation */ ct->master = exp->master; if (exp->helper) { help = nf_ct_helper_ext_add(ct, exp->helper, GFP_ATOMIC); if (help) rcu_assign_pointer(help->helper, exp->helper); } #ifdef CONFIG_NF_CONNTRACK_MARK ct->mark = exp->master->mark; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK ct->secmark = exp->master->secmark; #endif NF_CT_STAT_INC(net, expect_new); } spin_unlock(&nf_conntrack_expect_lock); } if (!exp) { __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); NF_CT_STAT_INC(net, new); } /* Now it is inserted into the unconfirmed list, bump refcount */ nf_conntrack_get(&ct->ct_general); nf_ct_add_to_unconfirmed_list(ct); local_bh_enable(); if (exp) { if (exp->expectfn) exp->expectfn(ct, exp); nf_ct_expect_put(exp); } return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; } /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ static inline struct nf_conn * resolve_normal_ct(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, struct nf_conntrack_l3proto *l3proto, struct nf_conntrack_l4proto *l4proto, int *set_reply, enum ip_conntrack_info *ctinfo) { const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; struct nf_conntrack_zone tmp; struct nf_conn *ct; u32 hash; if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, protonum, net, &tuple, l3proto, l4proto)) { pr_debug("resolve_normal_ct: Can't get tuple\n"); return NULL; } /* look for tuple match */ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); hash = hash_conntrack_raw(&tuple); h = __nf_conntrack_find_get(net, zone, &tuple, hash); if (!h) { h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, skb, dataoff, hash); if (!h) return NULL; if (IS_ERR(h)) return (void *)h; } ct = nf_ct_tuplehash_to_ctrack(h); /* It exists; we have (non-exclusive) reference. */ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { *ctinfo = IP_CT_ESTABLISHED_REPLY; /* Please set reply bit if this packet OK */ *set_reply = 1; } else { /* Once we've had two way comms, always ESTABLISHED. */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { pr_debug("nf_conntrack_in: normal packet for %p\n", ct); *ctinfo = IP_CT_ESTABLISHED; } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { pr_debug("nf_conntrack_in: related packet for %p\n", ct); *ctinfo = IP_CT_RELATED; } else { pr_debug("nf_conntrack_in: new packet for %p\n", ct); *ctinfo = IP_CT_NEW; } *set_reply = 0; } skb->nfct = &ct->ct_general; skb->nfctinfo = *ctinfo; return ct; } unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, struct sk_buff *skb) { struct nf_conn *ct, *tmpl = NULL; enum ip_conntrack_info ctinfo; struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; unsigned int *timeouts; unsigned int dataoff; u_int8_t protonum; int set_reply = 0; int ret; if (skb->nfct) { /* Previously seen (loopback or untracked)? Ignore. 
*/ tmpl = (struct nf_conn *)skb->nfct; if (!nf_ct_is_template(tmpl)) { NF_CT_STAT_INC_ATOMIC(net, ignore); return NF_ACCEPT; } skb->nfct = NULL; } /* rcu_read_lock()ed by nf_hook_slow */ l3proto = __nf_ct_l3proto_find(pf); ret = l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff, &protonum); if (ret <= 0) { pr_debug("not prepared to track yet or error occurred\n"); NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); ret = -ret; goto out; } l4proto = __nf_ct_l4proto_find(pf, protonum); /* It may be an special packet, error, unclean... * inverse of the return code tells to the netfilter * core what to do with the packet. */ if (l4proto->error != NULL) { ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, pf, hooknum); if (ret <= 0) { NF_CT_STAT_INC_ATOMIC(net, error); NF_CT_STAT_INC_ATOMIC(net, invalid); ret = -ret; goto out; } /* ICMP[v6] protocol trackers may assign one conntrack. */ if (skb->nfct) goto out; } ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l3proto, l4proto, &set_reply, &ctinfo); if (!ct) { /* Not valid part of a connection */ NF_CT_STAT_INC_ATOMIC(net, invalid); ret = NF_ACCEPT; goto out; } if (IS_ERR(ct)) { /* Too stressed to deal. */ NF_CT_STAT_INC_ATOMIC(net, drop); ret = NF_DROP; goto out; } NF_CT_ASSERT(skb->nfct); /* Decide what timeout policy we want to apply to this flow. */ timeouts = nf_ct_timeout_lookup(net, ct, l4proto); ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts); if (ret <= 0) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ pr_debug("nf_conntrack_in: Can't track with proto module\n"); nf_conntrack_put(skb->nfct); skb->nfct = NULL; NF_CT_STAT_INC_ATOMIC(net, invalid); if (ret == -NF_DROP) NF_CT_STAT_INC_ATOMIC(net, drop); ret = -ret; goto out; } if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_REPLY, ct); out: if (tmpl) { /* Special case: we have to repeat this hook, assign the * template again to this packet. We assume that this packet * has no conntrack assigned. This is used by nf_ct_tcp. */ if (ret == NF_REPEAT) skb->nfct = (struct nf_conntrack *)tmpl; else nf_ct_put(tmpl); } return ret; } EXPORT_SYMBOL_GPL(nf_conntrack_in); bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig) { bool ret; rcu_read_lock(); ret = nf_ct_invert_tuple(inverse, orig, __nf_ct_l3proto_find(orig->src.l3num), __nf_ct_l4proto_find(orig->src.l3num, orig->dst.protonum)); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); /* Alter reply tuple (maybe alter helper). 
This is for NAT, and is implicitly racy: see __nf_conntrack_confirm */ void nf_conntrack_alter_reply(struct nf_conn *ct, const struct nf_conntrack_tuple *newreply) { struct nf_conn_help *help = nfct_help(ct); /* Should be unconfirmed, so not in hash table yet */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); pr_debug("Altering reply tuple of %p to ", ct); nf_ct_dump_tuple(newreply); ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; if (ct->master || (help && !hlist_empty(&help->expectations))) return; rcu_read_lock(); __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct sk_buff *skb, unsigned long extra_jiffies, int do_acct) { NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); NF_CT_ASSERT(skb); /* Only update if this is not a fixed timeout */ if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) goto acct; /* If not in hash table, timer will not be active yet */ if (!nf_ct_is_confirmed(ct)) { ct->timeout.expires = extra_jiffies; } else { unsigned long newtime = jiffies + extra_jiffies; /* Only update the timeout if the new timeout is at least HZ jiffies from the old timeout. Need del_timer for race avoidance (may already be dying). */ if (newtime - ct->timeout.expires >= HZ) mod_timer_pending(&ct->timeout, newtime); } acct: if (do_acct) { struct nf_conn_acct *acct; acct = nf_conn_acct_find(ct); if (acct) { struct nf_conn_counter *counter = acct->counter; atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes); } } } EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct sk_buff *skb, int do_acct) { if (do_acct) { struct nf_conn_acct *acct; acct = nf_conn_acct_find(ct); if (acct) { struct nf_conn_counter *counter = acct->counter; atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets); atomic64_add(skb->len - skb_network_offset(skb), &counter[CTINFO2DIR(ctinfo)].bytes); } } if (del_timer(&ct->timeout)) { ct->timeout.function((unsigned long)ct); return true; } return false; } EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); #ifdef CONFIG_NF_CONNTRACK_ZONES static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { .len = sizeof(struct nf_conntrack_zone), .align = __alignof__(struct nf_conntrack_zone), .id = NF_CT_EXT_ZONE, }; #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> #include <linux/mutex.h> /* Generic function for tcp/udp/sctp/dccp and alike. 
This needs to be * in ip_conntrack_core, since we don't want the protocols to autoload * or depend on ctnetlink */ int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) || nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port)) goto nla_put_failure; return 0; nla_put_failure: return -1; } EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr); const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = { [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 }, [CTA_PROTO_DST_PORT] = { .type = NLA_U16 }, }; EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy); int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t) { if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) return -EINVAL; t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]); t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]); return 0; } EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); int nf_ct_port_nlattr_tuple_size(void) { return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1); } EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size); #endif /* Used by ipt_REJECT and ip6t_REJECT. */ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; /* This ICMP is in reverse direction to the packet which caused it */ ct = nf_ct_get(skb, &ctinfo); if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ctinfo = IP_CT_RELATED_REPLY; else ctinfo = IP_CT_RELATED; /* Attach to new skbuff, and increment count */ nskb->nfct = &ct->ct_general; nskb->nfctinfo = ctinfo; nf_conntrack_get(nskb->nfct); } /* Bring out ya dead! */ static struct nf_conn * get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, unsigned int *bucket) { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct hlist_nulls_node *n; int cpu; spinlock_t *lockp; for (; *bucket < net->ct.htable_size; (*bucket)++) { lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; local_bh_disable(); nf_conntrack_lock(lockp); if (*bucket < net->ct.htable_size) { hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) goto found; } } spin_unlock(lockp); local_bh_enable(); cond_resched(); } for_each_possible_cpu(cpu) { struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); spin_lock_bh(&pcpu->lock); hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (iter(ct, data)) set_bit(IPS_DYING_BIT, &ct->status); } spin_unlock_bh(&pcpu->lock); cond_resched(); } return NULL; found: atomic_inc(&ct->ct_general.use); spin_unlock(lockp); local_bh_enable(); return ct; } void nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, u32 portid, int report) { struct nf_conn *ct; unsigned int bucket = 0; might_sleep(); while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { /* Time to push up daises... */ if (del_timer(&ct->timeout)) nf_ct_delete(ct, portid, report); /* ... else the timer will get him soon. 
*/ nf_ct_put(ct); cond_resched(); } } EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); static int kill_all(struct nf_conn *i, void *data) { return 1; } void nf_ct_free_hashtable(void *hash, unsigned int size) { if (is_vmalloc_addr(hash)) vfree(hash); else free_pages((unsigned long)hash, get_order(sizeof(struct hlist_head) * size)); } EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); static int untrack_refs(void) { int cnt = 0, cpu; for_each_possible_cpu(cpu) { struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); cnt += atomic_read(&ct->ct_general.use) - 1; } return cnt; } void nf_conntrack_cleanup_start(void) { RCU_INIT_POINTER(ip_ct_attach, NULL); } void nf_conntrack_cleanup_end(void) { RCU_INIT_POINTER(nf_ct_destroy, NULL); while (untrack_refs() > 0) schedule(); #ifdef CONFIG_NF_CONNTRACK_ZONES nf_ct_extend_unregister(&nf_ct_zone_extend); #endif nf_conntrack_proto_fini(); nf_conntrack_seqadj_fini(); nf_conntrack_labels_fini(); nf_conntrack_helper_fini(); nf_conntrack_timeout_fini(); nf_conntrack_ecache_fini(); nf_conntrack_tstamp_fini(); nf_conntrack_acct_fini(); nf_conntrack_expect_fini(); } /* * Mishearing the voices in his head, our hero wonders how he's * supposed to kill the mall. */ void nf_conntrack_cleanup_net(struct net *net) { LIST_HEAD(single); list_add(&net->exit_list, &single); nf_conntrack_cleanup_net_list(&single); } void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list) { int busy; struct net *net; /* * This makes sure all current packets have passed through * netfilter framework. Roll on, two-stage module * delete... */ synchronize_net(); i_see_dead_people: busy = 0; list_for_each_entry(net, net_exit_list, exit_list) { nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0); if (atomic_read(&net->ct.count) != 0) busy = 1; } if (busy) { schedule(); goto i_see_dead_people; } list_for_each_entry(net, net_exit_list, exit_list) { nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); nf_conntrack_proto_pernet_fini(net); nf_conntrack_helper_pernet_fini(net); nf_conntrack_ecache_pernet_fini(net); nf_conntrack_tstamp_pernet_fini(net); nf_conntrack_acct_pernet_fini(net); nf_conntrack_expect_pernet_fini(net); kmem_cache_destroy(net->ct.nf_conntrack_cachep); kfree(net->ct.slabname); free_percpu(net->ct.stat); free_percpu(net->ct.pcpu_lists); } } void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) { struct hlist_nulls_head *hash; unsigned int nr_slots, i; size_t sz; BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); sz = nr_slots * sizeof(struct hlist_nulls_head); hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, get_order(sz)); if (!hash) hash = vzalloc(sz); if (hash && nulls) for (i = 0; i < nr_slots; i++) INIT_HLIST_NULLS_HEAD(&hash[i], i); return hash; } EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) { int i, bucket, rc; unsigned int hashsize, old_size; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; if (current->nsproxy->net_ns != &init_net) return -EOPNOTSUPP; /* On boot, we can set this without any fancy locking. 
*/ if (!nf_conntrack_htable_size) return param_set_uint(val, kp); rc = kstrtouint(val, 0, &hashsize); if (rc) return rc; if (!hashsize) return -EINVAL; hash = nf_ct_alloc_hashtable(&hashsize, 1); if (!hash) return -ENOMEM; local_bh_disable(); nf_conntrack_all_lock(); write_seqcount_begin(&init_net.ct.generation); /* Lookups in the old hash might happen in parallel, which means we * might get false negatives during connection lookup. New connections * created because of a false negative won't make it into the hash * though since that required taking the locks. */ for (i = 0; i < init_net.ct.htable_size; i++) { while (!hlist_nulls_empty(&init_net.ct.hash[i])) { h = hlist_nulls_entry(init_net.ct.hash[i].first, struct nf_conntrack_tuple_hash, hnnode); ct = nf_ct_tuplehash_to_ctrack(h); hlist_nulls_del_rcu(&h->hnnode); bucket = __hash_conntrack(&h->tuple, hashsize); hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); } } old_size = init_net.ct.htable_size; old_hash = init_net.ct.hash; init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; init_net.ct.hash = hash; write_seqcount_end(&init_net.ct.generation); nf_conntrack_all_unlock(); local_bh_enable(); nf_ct_free_hashtable(old_hash, old_size); return 0; } EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, &nf_conntrack_htable_size, 0600); void nf_ct_untracked_status_or(unsigned long bits) { int cpu; for_each_possible_cpu(cpu) per_cpu(nf_conntrack_untracked, cpu).status |= bits; } EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or); int nf_conntrack_init_start(void) { int max_factor = 8; int i, ret, cpu; for (i = 0; i < CONNTRACK_LOCKS; i++) spin_lock_init(&nf_conntrack_locks[i]); if (!nf_conntrack_htable_size) { /* Idea from tcp.c: use 1/16384 of memory. * On i386: 32MB machine has 512 buckets. * >= 1GB machines have 16384 buckets. * >= 4GB machines have 65536 buckets. */ nf_conntrack_htable_size = (((totalram_pages << PAGE_SHIFT) / 16384) / sizeof(struct hlist_head)); if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE))) nf_conntrack_htable_size = 65536; else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) nf_conntrack_htable_size = 16384; if (nf_conntrack_htable_size < 32) nf_conntrack_htable_size = 32; /* Use a max. factor of four by default to get the same max as * with the old struct list_heads. When a table size is given * we use the old value of 8 to avoid reducing the max. * entries. 
*/ max_factor = 4; } nf_conntrack_max = max_factor * nf_conntrack_htable_size; printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n", NF_CONNTRACK_VERSION, nf_conntrack_htable_size, nf_conntrack_max); ret = nf_conntrack_expect_init(); if (ret < 0) goto err_expect; ret = nf_conntrack_acct_init(); if (ret < 0) goto err_acct; ret = nf_conntrack_tstamp_init(); if (ret < 0) goto err_tstamp; ret = nf_conntrack_ecache_init(); if (ret < 0) goto err_ecache; ret = nf_conntrack_timeout_init(); if (ret < 0) goto err_timeout; ret = nf_conntrack_helper_init(); if (ret < 0) goto err_helper; ret = nf_conntrack_labels_init(); if (ret < 0) goto err_labels; ret = nf_conntrack_seqadj_init(); if (ret < 0) goto err_seqadj; #ifdef CONFIG_NF_CONNTRACK_ZONES ret = nf_ct_extend_register(&nf_ct_zone_extend); if (ret < 0) goto err_extend; #endif ret = nf_conntrack_proto_init(); if (ret < 0) goto err_proto; /* Set up fake conntrack: to never be deleted, not in any hashes */ for_each_possible_cpu(cpu) { struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); write_pnet(&ct->ct_net, &init_net); atomic_set(&ct->ct_general.use, 1); } /* - and look it like as a confirmed connection */ nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); return 0; err_proto: #ifdef CONFIG_NF_CONNTRACK_ZONES nf_ct_extend_unregister(&nf_ct_zone_extend); err_extend: #endif nf_conntrack_seqadj_fini(); err_seqadj: nf_conntrack_labels_fini(); err_labels: nf_conntrack_helper_fini(); err_helper: nf_conntrack_timeout_fini(); err_timeout: nf_conntrack_ecache_fini(); err_ecache: nf_conntrack_tstamp_fini(); err_tstamp: nf_conntrack_acct_fini(); err_acct: nf_conntrack_expect_fini(); err_expect: return ret; } void nf_conntrack_init_end(void) { /* For use by REJECT target */ RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach); RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack); } /* * We need to use special "null" values, not used in hash table */ #define UNCONFIRMED_NULLS_VAL ((1<<30)+0) #define DYING_NULLS_VAL ((1<<30)+1) #define TEMPLATE_NULLS_VAL ((1<<30)+2) int nf_conntrack_init_net(struct net *net) { int ret = -ENOMEM; int cpu; atomic_set(&net->ct.count, 0); seqcount_init(&net->ct.generation); net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); if (!net->ct.pcpu_lists) goto err_stat; for_each_possible_cpu(cpu) { struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); spin_lock_init(&pcpu->lock); INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL); } net->ct.stat = alloc_percpu(struct ip_conntrack_stat); if (!net->ct.stat) goto err_pcpu_lists; net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); if (!net->ct.slabname) goto err_slabname; net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, sizeof(struct nf_conn), 0, SLAB_DESTROY_BY_RCU, NULL); if (!net->ct.nf_conntrack_cachep) { printk(KERN_ERR "Unable to create nf_conn slab cache\n"); goto err_cache; } net->ct.htable_size = nf_conntrack_htable_size; net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1); if (!net->ct.hash) { printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); goto err_hash; } ret = nf_conntrack_expect_pernet_init(net); if (ret < 0) goto err_expect; ret = nf_conntrack_acct_pernet_init(net); if (ret < 0) goto err_acct; ret = nf_conntrack_tstamp_pernet_init(net); if (ret < 0) goto err_tstamp; ret = nf_conntrack_ecache_pernet_init(net); if (ret < 0) goto err_ecache; ret = nf_conntrack_helper_pernet_init(net); if (ret < 0) goto err_helper; ret = 
	nf_conntrack_proto_pernet_init(net);
	if (ret < 0)
		goto err_proto;
	return 0;

err_proto:
	nf_conntrack_helper_pernet_fini(net);
err_helper:
	nf_conntrack_ecache_pernet_fini(net);
err_ecache:
	nf_conntrack_tstamp_pernet_fini(net);
err_tstamp:
	nf_conntrack_acct_pernet_fini(net);
err_acct:
	nf_conntrack_expect_pernet_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
	kfree(net->ct.slabname);
err_slabname:
	free_percpu(net->ct.stat);
err_pcpu_lists:
	free_percpu(net->ct.pcpu_lists);
err_stat:
	return ret;
}
jakew02/sp3-linux
net/netfilter/nf_conntrack_core.c
C
gpl-2.0
50,667
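The hash-table sizing heuristic in nf_conntrack_init_start() above (1/16384 of memory, clamped to the 32..65536 bucket range, with nf_conntrack_max defaulting to four times the bucket count when the size is auto-detected) can be checked in isolation. The sketch below is a plain userspace re-derivation rather than kernel code: the 4 KiB page size, the 4-byte struct hlist_head of an i386 build, and the names ram_pages and conntrack_buckets are assumptions made only for illustration.

#include <stdio.h>

/* Assumed constants: 4 KiB pages and a 4-byte struct hlist_head (i386 build). */
#define PAGE_SIZE_BYTES   4096UL
#define HLIST_HEAD_BYTES  4UL

/* Mirrors the auto-sizing branch of nf_conntrack_init_start(). */
static unsigned long conntrack_buckets(unsigned long ram_pages)
{
        /* "use 1/16384 of memory", expressed in hlist_head-sized slots */
        unsigned long buckets =
                (ram_pages * PAGE_SIZE_BYTES / 16384) / HLIST_HEAD_BYTES;

        if (ram_pages > 4UL * (1024UL * 1024UL * 1024UL / PAGE_SIZE_BYTES))
                buckets = 65536;        /* more than 4 GiB of RAM */
        else if (ram_pages > 1024UL * 1024UL * 1024UL / PAGE_SIZE_BYTES)
                buckets = 16384;        /* more than 1 GiB of RAM */
        if (buckets < 32)
                buckets = 32;
        return buckets;
}

int main(void)
{
        /* 32 MiB machine: 33554432 / 16384 / 4 = 512 buckets, matching the
         * "On i386: 32MB machine has 512 buckets" comment in the excerpt;
         * with the auto-sized max_factor of 4 that allows 2048 entries. */
        unsigned long pages = 32UL * 1024 * 1024 / PAGE_SIZE_BYTES;
        unsigned long buckets = conntrack_buckets(pages);

        printf("32 MiB of RAM -> %lu buckets, max %lu conntrack entries\n",
               buckets, 4 * buckets);
        return 0;
}

Resizing the table at runtime via the hashsize module parameter goes through nf_conntrack_set_hashsize() above instead, which allocates a new table and rehashes the existing entries, bypassing this heuristic.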
/* * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * * Copyright (C) 1999-2014, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * * $Id: siutils.c 434466 2013-11-06 12:34:26Z $ */ #include <bcm_cfg.h> #include <typedefs.h> #include <bcmdefs.h> #include <osl.h> #include <bcmutils.h> #include <siutils.h> #include <bcmdevs.h> #include <hndsoc.h> #include <sbchipc.h> #include <pcicfg.h> #include <sbpcmcia.h> #include <sbsocram.h> #include <bcmsdh.h> #include <sdio.h> #include <sbsdio.h> #include <sbhnddma.h> #include <sbsdpcmdev.h> #include <bcmsdpcm.h> #include <hndpmu.h> #ifdef BCMSPI #include <spid.h> #endif /* BCMSPI */ #ifdef BCM_SDRBL #include <hndcpu.h> #endif /* BCM_SDRBL */ #include "siutils_priv.h" /* local prototypes */ static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, uint bustype, void *sdh, char **vars, uint *varsz); static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh); static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, uint *origidx, void *regs); /* global variable to indicate reservation/release of gpio's */ static uint32 si_gpioreservation = 0; /* global flag to prevent shared resources from being initialized multiple times in si_attach() */ int do_4360_pcie2_war = 0; /* global kernel resource */ static si_info_t ksii; static si_cores_info_t ksii_cores_info; /** * Allocate an si handle. This function may be called multiple times. * * devid - pci device id (used to determine chip#) * osh - opaque OS handle * regs - virtual address of initial core registers * bustype - pci/pcmcia/sb/sdio/etc * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this * function set 'vars' to NULL, making dereferencing of this parameter undesired. * varsz - pointer to int to return the size of the vars */ si_t * si_attach(uint devid, osl_t *osh, void *regs, uint bustype, void *sdh, char **vars, uint *varsz) { si_info_t *sii; si_cores_info_t *cores_info; /* alloc si_info_t */ if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) { SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); return (NULL); } /* alloc si_cores_info_t */ if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) { SI_ERROR(("si_attach: malloc failed! 
malloced %d bytes\n", MALLOCED(osh))); MFREE(osh, sii, sizeof(si_info_t)); return (NULL); } sii->cores_info = cores_info; if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) { MFREE(osh, sii, sizeof(si_info_t)); MFREE(osh, cores_info, sizeof(si_cores_info_t)); return (NULL); } sii->vars = vars ? *vars : NULL; sii->varsz = varsz ? *varsz : 0; return (si_t *)sii; } static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */ /** generic kernel variant of si_attach() */ si_t * si_kattach(osl_t *osh) { static bool ksii_attached = FALSE; si_cores_info_t *cores_info; if (!ksii_attached) { void *regs = NULL; regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); cores_info = (si_cores_info_t *)&ksii_cores_info; ksii.cores_info = cores_info; ASSERT(osh); if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs, SI_BUS, NULL, osh != SI_OSH ? &(ksii.vars) : NULL, osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) { SI_ERROR(("si_kattach: si_doattach failed\n")); REG_UNMAP(regs); return NULL; } REG_UNMAP(regs); /* save ticks normalized to ms for si_watchdog_ms() */ if (PMUCTL_ENAB(&ksii.pub)) { /* based on 32KHz ILP clock */ wd_msticks = 32; } else { wd_msticks = ALP_CLOCK / 1000; } ksii_attached = TRUE; SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n", ksii.pub.ccrev, wd_msticks)); } return &ksii.pub; } static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh) { /* need to set memseg flag for CF card first before any sb registers access */ if (BUSTYPE(bustype) == PCMCIA_BUS) sii->memseg = TRUE; if (BUSTYPE(bustype) == SDIO_BUS) { int err; uint8 clkset; /* Try forcing SDIO core to do ALPAvail request only */ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); if (!err) { uint8 clkval; /* If register supported, wait for ALPAvail and then force ALP */ clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL); if ((clkval & ~SBSDIO_AVBITS) == clkset) { SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)), PMU_MAX_TRANSITION_DLY); if (!SBSDIO_ALPAV(clkval)) { SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n", clkval)); return FALSE; } clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); OSL_DELAY(65); } } /* Also, disable the extra SDIO pull-ups */ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); } #ifdef BCMSPI /* Avoid backplane accesses before wake-wlan (i.e. htavail) for spi. * F1 read accesses may return correct data but with data-not-available dstatus bit set. 
*/ if (BUSTYPE(bustype) == SPI_BUS) { int err; uint32 regdata; /* wake up wlan function :WAKE_UP goes as HT_AVAIL request in hardware */ regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL); SI_MSG(("F0 REG0 rd = 0x%x\n", regdata)); regdata |= WAKE_UP; bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err); OSL_DELAY(100000); } #endif /* BCMSPI */ return TRUE; } static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, uint *origidx, void *regs) { si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; bool pci, pcie, pcie_gen2 = FALSE; uint i; uint pciidx, pcieidx, pcirev, pcierev; cc = si_setcoreidx(&sii->pub, SI_CC_IDX); ASSERT((uintptr)cc); /* get chipcommon rev */ sii->pub.ccrev = (int)si_corerev(&sii->pub); /* get chipcommon chipstatus */ if (sii->pub.ccrev >= 11) sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus); /* get chipcommon capabilites */ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities); /* get chipcommon extended capabilities */ if (sii->pub.ccrev >= 35) sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext); /* get pmu rev and caps */ if (sii->pub.cccaps & CC_CAP_PMU) { sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities); sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; } SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n", sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev, sii->pub.pmucaps)); /* figure out bus/orignal core idx */ sii->pub.buscoretype = NODEV_CORE_ID; sii->pub.buscorerev = (uint)NOREV; sii->pub.buscoreidx = BADIDX; pci = pcie = FALSE; pcirev = pcierev = (uint)NOREV; pciidx = pcieidx = BADIDX; for (i = 0; i < sii->numcores; i++) { uint cid, crev; si_setcoreidx(&sii->pub, i); cid = si_coreid(&sii->pub); crev = si_corerev(&sii->pub); /* Display cores found */ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n", i, cid, crev, cores_info->coresba[i], cores_info->regs[i])); if (BUSTYPE(bustype) == SI_BUS) { /* now look at the chipstatus register to figure the pacakge */ /* for SDIO but downloaded on PCIE dev */ if (cid == PCIE2_CORE_ID) { if ((CHIPID(sii->pub.chip) == BCM43602_CHIP_ID) || ((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID) && CST4345_CHIPMODE_PCIE(sii->pub.chipst))) { pcieidx = i; pcierev = crev; pcie = TRUE; pcie_gen2 = TRUE; } } } else if (BUSTYPE(bustype) == PCI_BUS) { if (cid == PCI_CORE_ID) { pciidx = i; pcirev = crev; pci = TRUE; } else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) { pcieidx = i; pcierev = crev; pcie = TRUE; if (cid == PCIE2_CORE_ID) pcie_gen2 = TRUE; } } else if ((BUSTYPE(bustype) == PCMCIA_BUS) && (cid == PCMCIA_CORE_ID)) { sii->pub.buscorerev = crev; sii->pub.buscoretype = cid; sii->pub.buscoreidx = i; } else if (((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) && ((cid == PCMCIA_CORE_ID) || (cid == SDIOD_CORE_ID))) { sii->pub.buscorerev = crev; sii->pub.buscoretype = cid; sii->pub.buscoreidx = i; } /* find the core idx before entering this func. 
*/ if ((savewin && (savewin == cores_info->coresba[i])) || (regs == cores_info->regs[i])) *origidx = i; } #if defined(PCIE_FULL_DONGLE) pci = FALSE; #endif if (pci) { sii->pub.buscoretype = PCI_CORE_ID; sii->pub.buscorerev = pcirev; sii->pub.buscoreidx = pciidx; } else if (pcie) { if (pcie_gen2) sii->pub.buscoretype = PCIE2_CORE_ID; else sii->pub.buscoretype = PCIE_CORE_ID; sii->pub.buscorerev = pcierev; sii->pub.buscoreidx = pcieidx; } SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype, sii->pub.buscorerev)); if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) && (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3)) OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL); /* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was * already running. */ if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) { if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) || si_setcore(&sii->pub, ARMCM3_CORE_ID, 0)) si_core_disable(&sii->pub, 0); } /* return to the original core */ si_setcoreidx(&sii->pub, *origidx); return TRUE; } /** * Allocate an si handle. This function may be called multiple times. * * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this * function set 'vars' to NULL. */ static si_info_t * si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, uint bustype, void *sdh, char **vars, uint *varsz) { struct si_pub *sih = &sii->pub; uint32 w, savewin; chipcregs_t *cc; char *pvars = NULL; uint origidx; #if !defined(_CFEZ_) || defined(CFG_WL) #endif ASSERT(GOODREGS(regs)); savewin = 0; sih->buscoreidx = BADIDX; sii->curmap = regs; sii->sdh = sdh; sii->osh = osh; /* check to see if we are a si core mimic'ing a pci core */ if ((bustype == PCI_BUS) && (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) { SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI " "devid:0x%x\n", __FUNCTION__, devid)); bustype = SI_BUS; } /* find Chipcommon address */ if (bustype == PCI_BUS) { savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); if (!GOODCOREADDR(savewin, SI_ENUM_BASE)) savewin = SI_ENUM_BASE; OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE); if (!regs) return NULL; cc = (chipcregs_t *)regs; } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) { cc = (chipcregs_t *)sii->curmap; } else { cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); } sih->bustype = bustype; if (bustype != BUSTYPE(bustype)) { SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", bustype, BUSTYPE(bustype))); return NULL; } /* bus/core/clk setup for register access */ if (!si_buscore_prep(sii, bustype, devid, sdh)) { SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype)); return NULL; } /* ChipID recognition. * We assume we can read chipid at offset 0 from the regs arg. * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon), * some way of recognizing them needs to be added here. 
*/ if (!cc) { SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__)); return NULL; } w = R_REG(osh, &cc->chipid); if ((w & 0xfffff) == 148277) w -= 65532; sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; /* Might as wll fill in chip id rev & pkg */ sih->chip = w & CID_ID_MASK; sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) && (sih->chippkg != BCM4329_289PIN_PKG_ID)) { sih->chippkg = BCM4329_182PIN_PKG_ID; } sih->issim = IS_SIM(sih->chippkg); /* scan for cores */ if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) { SI_MSG(("Found chip type SB (0x%08x)\n", w)); sb_scan(&sii->pub, regs, devid); } else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) || (CHIPTYPE(sii->pub.socitype) == SOCI_NAI)) { if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) SI_MSG(("Found chip type AI (0x%08x)\n", w)); else SI_MSG(("Found chip type NAI (0x%08x)\n", w)); /* pass chipc address instead of original core base */ ai_scan(&sii->pub, (void *)(uintptr)cc, devid); } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) { SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip)); /* pass chipc address instead of original core base */ ub_scan(&sii->pub, (void *)(uintptr)cc, devid); } else { SI_ERROR(("Found chip of unknown type (0x%08x)\n", w)); return NULL; } /* no cores found, bail out */ if (sii->numcores == 0) { SI_ERROR(("si_doattach: could not find any cores\n")); return NULL; } /* bus/core/clk setup */ origidx = SI_CC_IDX; if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) { SI_ERROR(("si_doattach: si_buscore_setup failed\n")); goto exit; } #if !defined(_CFEZ_) || defined(CFG_WL) if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT | CST4322_SPROM_PRESENT))) { SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__)); return NULL; } /* assume current core is CC */ if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID || CHIPID(sih->chip) == BCM43235_CHIP_ID || CHIPID(sih->chip) == BCM43234_CHIP_ID || CHIPID(sih->chip) == BCM43238_CHIP_ID) && (CHIPREV(sii->pub.chiprev) <= 2))) { if ((cc->chipstatus & CST43236_BP_CLK) != 0) { uint clkdiv; clkdiv = R_REG(osh, &cc->clkdiv); /* otp_clk_div is even number, 120/14 < 9mhz */ clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT); W_REG(osh, &cc->clkdiv, clkdiv); SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv)); } OSL_DELAY(10); } if (bustype == PCI_BUS) { } #endif #ifdef BCM_SDRBL /* 4360 rom bootloader in PCIE case, if the SDR is enabled, But preotection is * not turned on, then we want to hold arm in reset. * Bottomline: In sdrenable case, we allow arm to boot only when protection is * turned on. */ if (CHIP_HOSTIF_PCIE(&(sii->pub))) { uint32 sflags = si_arm_sflags(&(sii->pub)); /* If SDR is enabled but protection is not turned on * then we want to force arm to WFI. 
*/ if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) { disable_arm_irq(); while (1) { hnd_cpu_wait(sih); } } } #endif /* BCM_SDRBL */ pvars = NULL; BCM_REFERENCE(pvars); if (sii->pub.ccrev >= 20) { uint32 gpiopullup = 0, gpiopulldown = 0; cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); ASSERT(cc != NULL); /* 4314/43142 has pin muxing, don't clear gpio bits */ if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) || (CHIPID(sih->chip) == BCM43142_CHIP_ID)) { gpiopullup |= 0x402e0; gpiopulldown |= 0x20500; } W_REG(osh, &cc->gpiopullup, gpiopullup); W_REG(osh, &cc->gpiopulldown, gpiopulldown); si_setcoreidx(sih, origidx); } /* clear any previous epidiag-induced target abort */ ASSERT(!si_taclear(sih, FALSE)); return (sii); exit: return NULL; } /** may be called with core in reset */ void si_detach(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint idx; if (BUSTYPE(sih->bustype) == SI_BUS) for (idx = 0; idx < SI_MAXCORES; idx++) if (cores_info->regs[idx]) { REG_UNMAP(cores_info->regs[idx]); cores_info->regs[idx] = NULL; } #if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) if (cores_info != &ksii_cores_info) #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ MFREE(sii->osh, cores_info, sizeof(si_cores_info_t)); #if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) if (sii != &ksii) #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ MFREE(sii->osh, sii, sizeof(si_info_t)); } void * si_osh(si_t *sih) { si_info_t *sii; sii = SI_INFO(sih); return sii->osh; } void si_setosh(si_t *sih, osl_t *osh) { si_info_t *sii; sii = SI_INFO(sih); if (sii->osh != NULL) { SI_ERROR(("osh is already set....\n")); ASSERT(!sii->osh); } sii->osh = osh; } /** register driver interrupt disabling and restoring callback functions */ void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, void *intrsenabled_fn, void *intr_arg) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; sii->intr_arg = intr_arg; sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn; sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn; sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn; /* save current core id. when this function called, the current core * must be the core which provides driver functions(il, et, wl, etc.) 
*/ sii->dev_coreid = cores_info->coreid[sii->curidx]; } void si_deregister_intr_callback(si_t *sih) { si_info_t *sii; sii = SI_INFO(sih); sii->intrsoff_fn = NULL; } uint si_intflag(si_t *sih) { si_info_t *sii = SI_INFO(sih); if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_intflag(sih); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return R_REG(sii->osh, ((uint32 *)(uintptr) (sii->oob_router + OOB_STATUSA))); else { ASSERT(0); return 0; } } uint si_flag(si_t *sih) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_flag(sih); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_flag(sih); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_flag(sih); else { ASSERT(0); return 0; } } uint si_flag_alt(si_t *sih) { if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_flag_alt(sih); else { ASSERT(0); return 0; } } void si_setint(si_t *sih, int siflag) { if (CHIPTYPE(sih->socitype) == SOCI_SB) sb_setint(sih, siflag); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) ai_setint(sih, siflag); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) ub_setint(sih, siflag); else ASSERT(0); } uint si_coreid(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; return cores_info->coreid[sii->curidx]; } uint si_coreidx(si_t *sih) { si_info_t *sii; sii = SI_INFO(sih); return sii->curidx; } /** return the core-type instantiation # of the current core */ uint si_coreunit(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint idx; uint coreid; uint coreunit; uint i; coreunit = 0; idx = sii->curidx; ASSERT(GOODREGS(sii->curmap)); coreid = si_coreid(sih); /* count the cores of our type */ for (i = 0; i < idx; i++) if (cores_info->coreid[i] == coreid) coreunit++; return (coreunit); } uint si_corevendor(si_t *sih) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_corevendor(sih); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_corevendor(sih); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_corevendor(sih); else { ASSERT(0); return 0; } } bool si_backplane64(si_t *sih) { return ((sih->cccaps & CC_CAP_BKPLN64) != 0); } uint si_corerev(si_t *sih) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_corerev(sih); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_corerev(sih); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_corerev(sih); else { ASSERT(0); return 0; } } /** return index of coreid or BADIDX if not found */ uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint found; uint i; found = 0; for (i = 0; i < sii->numcores; i++) if (cores_info->coreid[i] == coreid) { if (found == coreunit) return (i); found++; } return (BADIDX); } /** return total coreunit of coreid or zero if not found */ uint si_numcoreunits(si_t *sih, uint coreid) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint found; uint i; found = 0; for (i = 0; i < sii->numcores; i++) if (cores_info->coreid[i] == coreid) { found++; } return (found == 0? 
0:found); } /** return list of found cores */ uint si_corelist(si_t *sih, uint coreid[]) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint))); return (sii->numcores); } /** return current wrapper mapping */ void * si_wrapperregs(si_t *sih) { si_info_t *sii; sii = SI_INFO(sih); ASSERT(GOODREGS(sii->curwrap)); return (sii->curwrap); } /** return current register mapping */ void * si_coreregs(si_t *sih) { si_info_t *sii; sii = SI_INFO(sih); ASSERT(GOODREGS(sii->curmap)); return (sii->curmap); } /** * This function changes logical "focus" to the indicated core; * must be called with interrupts off. * Moreover, callers should keep interrupts off during switching out of and back to d11 core */ void * si_setcore(si_t *sih, uint coreid, uint coreunit) { uint idx; idx = si_findcoreidx(sih, coreid, coreunit); if (!GOODIDX(idx)) return (NULL); if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_setcoreidx(sih, idx); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_setcoreidx(sih, idx); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_setcoreidx(sih, idx); else { ASSERT(0); return NULL; } } void * si_setcoreidx(si_t *sih, uint coreidx) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_setcoreidx(sih, coreidx); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_setcoreidx(sih, coreidx); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_setcoreidx(sih, coreidx); else { ASSERT(0); return NULL; } } /** Turn off interrupt as required by sb_setcore, before switch core */ void * si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) { void *cc; si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; if (SI_FAST(sii)) { /* Overloading the origidx variable to remember the coreid, * this works because the core ids cannot be confused with * core indices. 
*/ *origidx = coreid; if (coreid == CC_CORE_ID) return (void *)CCREGS_FAST(sii); else if (coreid == sih->buscoretype) return (void *)PCIEREGS(sii); } INTR_OFF(sii, *intr_val); *origidx = sii->curidx; cc = si_setcore(sih, coreid, 0); ASSERT(cc != NULL); return cc; } /* restore coreidx and restore interrupt */ void si_restore_core(si_t *sih, uint coreid, uint intr_val) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype))) return; si_setcoreidx(sih, coreid); INTR_RESTORE(sii, intr_val); } int si_numaddrspaces(si_t *sih) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_numaddrspaces(sih); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_numaddrspaces(sih); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_numaddrspaces(sih); else { ASSERT(0); return 0; } } uint32 si_addrspace(si_t *sih, uint asidx) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_addrspace(sih, asidx); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_addrspace(sih, asidx); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_addrspace(sih, asidx); else { ASSERT(0); return 0; } } uint32 si_addrspacesize(si_t *sih, uint asidx) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_addrspacesize(sih, asidx); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_addrspacesize(sih, asidx); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_addrspacesize(sih, asidx); else { ASSERT(0); return 0; } } void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) { /* Only supported for SOCI_AI */ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) ai_coreaddrspaceX(sih, asidx, addr, size); else *size = 0; } uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_core_cflags(sih, mask, val); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_core_cflags(sih, mask, val); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_core_cflags(sih, mask, val); else { ASSERT(0); return 0; } } void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) { if (CHIPTYPE(sih->socitype) == SOCI_SB) sb_core_cflags_wo(sih, mask, val); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) ai_core_cflags_wo(sih, mask, val); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) ub_core_cflags_wo(sih, mask, val); else ASSERT(0); } uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_core_sflags(sih, mask, val); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_core_sflags(sih, mask, val); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_core_sflags(sih, mask, val); else { ASSERT(0); return 0; } } bool si_iscoreup(si_t *sih) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_iscoreup(sih); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_iscoreup(sih); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_iscoreup(sih); else { ASSERT(0); return FALSE; } } uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val) { /* only for AI back plane chips */ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) 
return (ai_wrap_reg(sih, offset, mask, val)); return 0; } uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_corereg(sih, coreidx, regoff, mask, val); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_corereg(sih, coreidx, regoff, mask, val); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) return ub_corereg(sih, coreidx, regoff, mask, val); else { ASSERT(0); return 0; } } /* * If there is no need for fiddling with interrupts or core switches (typically silicon * back plane registers, pci registers and chipcommon registers), this function * returns the register offset on this core to a mapped address. This address can * be used for W_REG/R_REG directly. * * For accessing registers that would need a core switch, this function will return * NULL. */ uint32 * si_corereg_addr(si_t *sih, uint coreidx, uint regoff) { if (CHIPTYPE(sih->socitype) == SOCI_SB) return sb_corereg_addr(sih, coreidx, regoff); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) return ai_corereg_addr(sih, coreidx, regoff); else { return 0; } } void si_core_disable(si_t *sih, uint32 bits) { if (CHIPTYPE(sih->socitype) == SOCI_SB) sb_core_disable(sih, bits); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) ai_core_disable(sih, bits); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) ub_core_disable(sih, bits); } void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits) { if (CHIPTYPE(sih->socitype) == SOCI_SB) sb_core_reset(sih, bits, resetbits); else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) ai_core_reset(sih, bits, resetbits); else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) ub_core_reset(sih, bits, resetbits); } /** Run bist on current core. 
Caller needs to take care of core-specific bist hazards */ int si_corebist(si_t *sih) { uint32 cflags; int result = 0; /* Read core control flags */ cflags = si_core_cflags(sih, 0, 0); /* Set bist & fgc */ si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC)); /* Wait for bist done */ SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000); if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR) result = BCME_ERROR; /* Reset core control flags */ si_core_cflags(sih, 0xffff, cflags); return result; } static uint32 factor6(uint32 x) { switch (x) { case CC_F6_2: return 2; case CC_F6_3: return 3; case CC_F6_4: return 4; case CC_F6_5: return 5; case CC_F6_6: return 6; case CC_F6_7: return 7; default: return 0; } } /** calculate the speed the SI would run at given a set of clockcontrol values */ uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m) { uint32 n1, n2, clock, m1, m2, m3, mc; n1 = n & CN_N1_MASK; n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT; if (pll_type == PLL_TYPE6) { if (m & CC_T6_MMASK) return CC_T6_M1; else return CC_T6_M0; } else if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3) || (pll_type == PLL_TYPE4) || (pll_type == PLL_TYPE7)) { n1 = factor6(n1); n2 += CC_F5_BIAS; } else if (pll_type == PLL_TYPE2) { n1 += CC_T2_BIAS; n2 += CC_T2_BIAS; ASSERT((n1 >= 2) && (n1 <= 7)); ASSERT((n2 >= 5) && (n2 <= 23)); } else if (pll_type == PLL_TYPE5) { return (100000000); } else ASSERT(0); /* PLL types 3 and 7 use BASE2 (25Mhz) */ if ((pll_type == PLL_TYPE3) || (pll_type == PLL_TYPE7)) { clock = CC_CLOCK_BASE2 * n1 * n2; } else clock = CC_CLOCK_BASE1 * n1 * n2; if (clock == 0) return 0; m1 = m & CC_M1_MASK; m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT; m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT; mc = (m & CC_MC_MASK) >> CC_MC_SHIFT; if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3) || (pll_type == PLL_TYPE4) || (pll_type == PLL_TYPE7)) { m1 = factor6(m1); if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3)) m2 += CC_F5_BIAS; else m2 = factor6(m2); m3 = factor6(m3); switch (mc) { case CC_MC_BYPASS: return (clock); case CC_MC_M1: return (clock / m1); case CC_MC_M1M2: return (clock / (m1 * m2)); case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3)); case CC_MC_M1M3: return (clock / (m1 * m3)); default: return (0); } } else { ASSERT(pll_type == PLL_TYPE2); m1 += CC_T2_BIAS; m2 += CC_T2M2_BIAS; m3 += CC_T2_BIAS; ASSERT((m1 >= 2) && (m1 <= 7)); ASSERT((m2 >= 3) && (m2 <= 10)); ASSERT((m3 >= 2) && (m3 <= 7)); if ((mc & CC_T2MC_M1BYP) == 0) clock /= m1; if ((mc & CC_T2MC_M2BYP) == 0) clock /= m2; if ((mc & CC_T2MC_M3BYP) == 0) clock /= m3; return (clock); } } /** * Some chips could have multiple host interfaces, however only one will be active. * For a given chip. Depending pkgopt and cc_chipst return the active host interface. */ uint si_chip_hostif(si_t *sih) { uint hosti = 0; switch (CHIPID(sih->chip)) { case BCM43602_CHIP_ID: hosti = CHIP_HOSTIF_PCIEMODE; break; case BCM4360_CHIP_ID: /* chippkg bit-0 == 0 is PCIE only pkgs * chippkg bit-0 == 1 has both PCIE and USB cores enabled */ if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB)) hosti = CHIP_HOSTIF_USBMODE; else hosti = CHIP_HOSTIF_PCIEMODE; break; case BCM4335_CHIP_ID: /* TBD: like in 4360, do we need to check pkg? 
*/ if (CST4335_CHIPMODE_USB20D(sih->chipst)) hosti = CHIP_HOSTIF_USBMODE; else if (CST4335_CHIPMODE_SDIOD(sih->chipst)) hosti = CHIP_HOSTIF_SDIOMODE; else hosti = CHIP_HOSTIF_PCIEMODE; break; case BCM4345_CHIP_ID: if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst)) hosti = CHIP_HOSTIF_USBMODE; else if (CST4345_CHIPMODE_SDIOD(sih->chipst)) hosti = CHIP_HOSTIF_SDIOMODE; else if (CST4345_CHIPMODE_PCIE(sih->chipst)) hosti = CHIP_HOSTIF_PCIEMODE; break; case BCM4350_CHIP_ID: case BCM4354_CHIP_ID: case BCM43556_CHIP_ID: case BCM43558_CHIP_ID: case BCM43566_CHIP_ID: case BCM43568_CHIP_ID: case BCM43569_CHIP_ID: if (CST4350_CHIPMODE_USB20D(sih->chipst) || CST4350_CHIPMODE_HSIC20D(sih->chipst) || CST4350_CHIPMODE_USB30D(sih->chipst) || CST4350_CHIPMODE_USB30D_WL(sih->chipst) || CST4350_CHIPMODE_HSIC30D(sih->chipst)) hosti = CHIP_HOSTIF_USBMODE; else if (CST4350_CHIPMODE_SDIOD(sih->chipst)) hosti = CHIP_HOSTIF_SDIOMODE; else if (CST4350_CHIPMODE_PCIE(sih->chipst)) hosti = CHIP_HOSTIF_PCIEMODE; break; default: break; } return hosti; } /** set chip watchdog reset timer to fire in 'ticks' */ void si_watchdog(si_t *sih, uint ticks) { uint nb, maxt; if (PMUCTL_ENAB(sih)) { #if !defined(_CFEZ_) || defined(CFG_WL) if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) && (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) { si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2); si_setcore(sih, USB20D_CORE_ID, 0); si_core_disable(sih, 1); si_setcore(sih, CC_CORE_ID, 0); } #endif nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24); /* The mips compiler uses the sllv instruction, * so we specially handle the 32-bit case. */ if (nb == 32) maxt = 0xffffffff; else maxt = ((1 << nb) - 1); if (ticks == 1) ticks = 2; else if (ticks > maxt) ticks = maxt; si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks); } else { maxt = (1 << 28) - 1; if (ticks > maxt) ticks = maxt; si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks); } } /** trigger watchdog reset after ms milliseconds */ void si_watchdog_ms(si_t *sih, uint32 ms) { si_watchdog(sih, wd_msticks * ms); } uint32 si_watchdog_msticks(void) { return wd_msticks; } bool si_taclear(si_t *sih, bool details) { return FALSE; } /** return the slow clock source - LPO, XTAL, or PCI */ static uint si_slowclk_src(si_info_t *sii) { chipcregs_t *cc; ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); if (sii->pub.ccrev < 6) { if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) && (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) & PCI_CFG_GPIO_SCS)) return (SCC_SS_PCI); else return (SCC_SS_XTAL); } else if (sii->pub.ccrev < 10) { cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx); ASSERT(cc); return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK); } else /* Insta-clock */ return (SCC_SS_XTAL); } /** return the ILP (slowclock) min or max frequency */ static uint si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc) { uint32 slowclk; uint div; ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); /* shouldn't be here unless we've established the chip has dynamic clk control */ ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL); slowclk = si_slowclk_src(sii); if (sii->pub.ccrev < 6) { if (slowclk == SCC_SS_PCI) return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64)); else return (max_freq ? 
(XTALMAXFREQ / 32) : (XTALMINFREQ / 32)); } else if (sii->pub.ccrev < 10) { div = 4 * (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); if (slowclk == SCC_SS_LPO) return (max_freq ? LPOMAXFREQ : LPOMINFREQ); else if (slowclk == SCC_SS_XTAL) return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div)); else if (slowclk == SCC_SS_PCI) return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div)); else ASSERT(0); } else { /* Chipc rev 10 is InstaClock */ div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT; div = 4 * (div + 1); return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div)); } return (0); } static void si_clkctl_setdelay(si_info_t *sii, void *chipcregs) { chipcregs_t *cc = (chipcregs_t *)chipcregs; uint slowmaxfreq, pll_delay, slowclk; uint pll_on_delay, fref_sel_delay; pll_delay = PLL_DELAY; /* If the slow clock is not sourced by the xtal then add the xtal_on_delay * since the xtal will also be powered down by dynamic clk control logic. */ slowclk = si_slowclk_src(sii); if (slowclk != SCC_SS_XTAL) pll_delay += XTAL_ON_DELAY; /* Starting with 4318 it is ILP that is used for the delays */ slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc); pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay); W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay); } /** initialize power control delay registers */ void si_clkctl_init(si_t *sih) { si_info_t *sii; uint origidx = 0; chipcregs_t *cc; bool fast; if (!CCCTL_ENAB(sih)) return; sii = SI_INFO(sih); fast = SI_FAST(sii); if (!fast) { origidx = sii->curidx; if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) return; } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL) return; ASSERT(cc != NULL); /* set all Instaclk chip ILP to 1 MHz */ if (sih->ccrev >= 10) SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK, (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); si_clkctl_setdelay(sii, (void *)(uintptr)cc); OSL_DELAY(20000); if (!fast) si_setcoreidx(sih, origidx); } /** change logical "focus" to the gpio core for optimized access */ void * si_gpiosetcore(si_t *sih) { return (si_setcoreidx(sih, SI_CC_IDX)); } /** * mask & set gpiocontrol bits. * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin. * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated * to some chip-specific purpose. */ uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority) { uint regoff; regoff = 0; /* gpios could be shared on router platforms * ignore reservation if it's high priority (e.g., test apps) */ if ((priority != GPIO_HI_PRIORITY) && (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { mask = priority ? (si_gpioreservation & mask) : ((si_gpioreservation | mask) & ~(si_gpioreservation)); val &= mask; } regoff = OFFSETOF(chipcregs_t, gpiocontrol); return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); } /** mask&set gpio output enable bits */ uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority) { uint regoff; regoff = 0; /* gpios could be shared on router platforms * ignore reservation if it's high priority (e.g., test apps) */ if ((priority != GPIO_HI_PRIORITY) && (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { mask = priority ? 
(si_gpioreservation & mask) : ((si_gpioreservation | mask) & ~(si_gpioreservation)); val &= mask; } regoff = OFFSETOF(chipcregs_t, gpioouten); return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); } /** mask&set gpio output bits */ uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority) { uint regoff; regoff = 0; /* gpios could be shared on router platforms * ignore reservation if it's high priority (e.g., test apps) */ if ((priority != GPIO_HI_PRIORITY) && (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { mask = priority ? (si_gpioreservation & mask) : ((si_gpioreservation | mask) & ~(si_gpioreservation)); val &= mask; } regoff = OFFSETOF(chipcregs_t, gpioout); return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); } /** reserve one gpio */ uint32 si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority) { /* only cores on SI_BUS share GPIO's and only applcation users need to * reserve/release GPIO */ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); return 0xffffffff; } /* make sure only one bit is set */ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); return 0xffffffff; } /* already reserved */ if (si_gpioreservation & gpio_bitmask) return 0xffffffff; /* set reservation */ si_gpioreservation |= gpio_bitmask; return si_gpioreservation; } /** * release one gpio. * * releasing the gpio doesn't change the current value on the GPIO last write value * persists till someone overwrites it. */ uint32 si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority) { /* only cores on SI_BUS share GPIO's and only applcation users need to * reserve/release GPIO */ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); return 0xffffffff; } /* make sure only one bit is set */ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); return 0xffffffff; } /* already released */ if (!(si_gpioreservation & gpio_bitmask)) return 0xffffffff; /* clear reservation */ si_gpioreservation &= ~gpio_bitmask; return si_gpioreservation; } /* return the current gpioin register value */ uint32 si_gpioin(si_t *sih) { uint regoff; regoff = OFFSETOF(chipcregs_t, gpioin); return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0)); } /* mask&set gpio interrupt polarity bits */ uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority) { uint regoff; /* gpios could be shared on router platforms */ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { mask = priority ? (si_gpioreservation & mask) : ((si_gpioreservation | mask) & ~(si_gpioreservation)); val &= mask; } regoff = OFFSETOF(chipcregs_t, gpiointpolarity); return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); } /* mask&set gpio interrupt mask bits */ uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority) { uint regoff; /* gpios could be shared on router platforms */ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { mask = priority ? 
(si_gpioreservation & mask) : ((si_gpioreservation | mask) & ~(si_gpioreservation)); val &= mask; } regoff = OFFSETOF(chipcregs_t, gpiointmask); return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); } /* assign the gpio to an led */ uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val) { if (sih->ccrev < 16) return 0xffffffff; /* gpio led powersave reg */ return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val)); } /* mask&set gpio timer val */ uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval) { if (sih->ccrev < 16) return 0xffffffff; return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval)); } uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val) { uint offs; if (sih->ccrev < 20) return 0xffffffff; offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup)); return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); } uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val) { uint offs; if (sih->ccrev < 11) return 0xffffffff; if (regtype == GPIO_REGEVT) offs = OFFSETOF(chipcregs_t, gpioevent); else if (regtype == GPIO_REGEVT_INTMSK) offs = OFFSETOF(chipcregs_t, gpioeventintmask); else if (regtype == GPIO_REGEVT_INTPOL) offs = OFFSETOF(chipcregs_t, gpioeventintpolarity); else return 0xffffffff; return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); } void * si_gpio_handler_register(si_t *sih, uint32 event, bool level, gpio_handler_t cb, void *arg) { si_info_t *sii = SI_INFO(sih); gpioh_item_t *gi; ASSERT(event); ASSERT(cb != NULL); if (sih->ccrev < 11) return NULL; if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL) return NULL; bzero(gi, sizeof(gpioh_item_t)); gi->event = event; gi->handler = cb; gi->arg = arg; gi->level = level; gi->next = sii->gpioh_head; sii->gpioh_head = gi; return (void *)(gi); } void si_gpio_handler_unregister(si_t *sih, void *gpioh) { si_info_t *sii = SI_INFO(sih); gpioh_item_t *p, *n; if (sih->ccrev < 11) return; ASSERT(sii->gpioh_head != NULL); if ((void*)sii->gpioh_head == gpioh) { sii->gpioh_head = sii->gpioh_head->next; MFREE(sii->osh, gpioh, sizeof(gpioh_item_t)); return; } else { p = sii->gpioh_head; n = p->next; while (n) { if ((void*)n == gpioh) { p->next = n->next; MFREE(sii->osh, gpioh, sizeof(gpioh_item_t)); return; } p = n; n = n->next; } } ASSERT(0); /* Not found in list */ } void si_gpio_handler_process(si_t *sih) { si_info_t *sii = SI_INFO(sih); gpioh_item_t *h; uint32 level = si_gpioin(sih); uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0); uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0); uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0); for (h = sii->gpioh_head; h != NULL; h = h->next) { if (h->handler) { uint32 status = (h->level ? level : edge) & h->event; uint32 polarity = (h->level ? levelp : edgep) & h->event; /* polarity bitval is opposite of status bitval */ if (status ^ polarity) h->handler(status, h->arg); } } si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */ } uint32 si_gpio_int_enable(si_t *sih, bool enable) { uint offs; if (sih->ccrev < 11) return 0xffffffff; offs = OFFSETOF(chipcregs_t, intmask); return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? 
CI_GPIO : 0))); } /** Return the size of the specified SOCRAM bank */ static uint socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type) { uint banksize, bankinfo; uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT); ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM); W_REG(sii->osh, &regs->bankidx, bankidx); bankinfo = R_REG(sii->osh, &regs->bankinfo); banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1); return banksize; } void si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; sbsocramregs_t *regs; bool wasup; uint corerev; /* Block ints and save current core */ INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); if (!set) *enable = *protect = *remap = 0; /* Switch to SOCRAM core */ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) goto done; /* Get info for determining size */ if (!(wasup = si_iscoreup(sih))) si_core_reset(sih, 0, 0); corerev = si_corerev(sih); if (corerev >= 10) { uint32 extcinfo; uint8 nb; uint8 i; uint32 bankidx, bankinfo; extcinfo = R_REG(sii->osh, &regs->extracoreinfo); nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT); for (i = 0; i < nb; i++) { bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); W_REG(sii->osh, &regs->bankidx, bankidx); bankinfo = R_REG(sii->osh, &regs->bankinfo); if (set) { bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK; bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK; bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK; if (*enable) { bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT); if (*protect) bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT); if ((corerev >= 16) && *remap) bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT); } W_REG(sii->osh, &regs->bankinfo, bankinfo); } else if (i == 0) { if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) { *enable = 1; if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK) *protect = 1; if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) *remap = 1; } } } } /* Return to previous state and core */ if (!wasup) si_core_disable(sih, 0); si_setcoreidx(sih, origidx); done: INTR_RESTORE(sii, intr_val); } bool si_socdevram_remap_isenb(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; sbsocramregs_t *regs; bool wasup, remap = FALSE; uint corerev; uint32 extcinfo; uint8 nb; uint8 i; uint32 bankidx, bankinfo; /* Block ints and save current core */ INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); /* Switch to SOCRAM core */ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) goto done; /* Get info for determining size */ if (!(wasup = si_iscoreup(sih))) si_core_reset(sih, 0, 0); corerev = si_corerev(sih); if (corerev >= 16) { extcinfo = R_REG(sii->osh, &regs->extracoreinfo); nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT); for (i = 0; i < nb; i++) { bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); W_REG(sii->osh, &regs->bankidx, bankidx); bankinfo = R_REG(sii->osh, &regs->bankinfo); if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) { remap = TRUE; break; } } } /* Return to previous state and core */ if (!wasup) si_core_disable(sih, 0); si_setcoreidx(sih, origidx); done: INTR_RESTORE(sii, intr_val); return remap; } bool si_socdevram_pkg(si_t *sih) { if (si_socdevram_size(sih) > 0) return TRUE; else return FALSE; } uint32 si_socdevram_size(si_t *sih) { 
si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; uint32 memsize = 0; sbsocramregs_t *regs; bool wasup; uint corerev; /* Block ints and save current core */ INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); /* Switch to SOCRAM core */ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) goto done; /* Get info for determining size */ if (!(wasup = si_iscoreup(sih))) si_core_reset(sih, 0, 0); corerev = si_corerev(sih); if (corerev >= 10) { uint32 extcinfo; uint8 nb; uint8 i; extcinfo = R_REG(sii->osh, &regs->extracoreinfo); nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT)); for (i = 0; i < nb; i++) memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM); } /* Return to previous state and core */ if (!wasup) si_core_disable(sih, 0); si_setcoreidx(sih, origidx); done: INTR_RESTORE(sii, intr_val); return memsize; } uint32 si_socdevram_remap_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; uint32 memsize = 0, banksz; sbsocramregs_t *regs; bool wasup; uint corerev; uint32 extcinfo; uint8 nb; uint8 i; uint32 bankidx, bankinfo; /* Block ints and save current core */ INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); /* Switch to SOCRAM core */ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) goto done; /* Get info for determining size */ if (!(wasup = si_iscoreup(sih))) si_core_reset(sih, 0, 0); corerev = si_corerev(sih); if (corerev >= 16) { extcinfo = R_REG(sii->osh, &regs->extracoreinfo); nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT)); /* * FIX: A0 Issue: Max addressable is 512KB, instead 640KB * Only four banks are accessible to ARM */ if ((corerev == 16) && (nb == 5)) nb = 4; for (i = 0; i < nb; i++) { bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); W_REG(sii->osh, &regs->bankidx, bankidx); bankinfo = R_REG(sii->osh, &regs->bankinfo); if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) { banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM); memsize += banksz; } else { /* Account only consecutive banks for now */ break; } } } /* Return to previous state and core */ if (!wasup) si_core_disable(sih, 0); si_setcoreidx(sih, origidx); done: INTR_RESTORE(sii, intr_val); return memsize; } /** Return the RAM size of the SOCRAM core */ uint32 si_socram_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; sbsocramregs_t *regs; bool wasup; uint corerev; uint32 coreinfo; uint memsize = 0; /* Block ints and save current core */ INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); /* Switch to SOCRAM core */ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) goto done; /* Get info for determining size */ if (!(wasup = si_iscoreup(sih))) si_core_reset(sih, 0, 0); corerev = si_corerev(sih); coreinfo = R_REG(sii->osh, &regs->coreinfo); /* Calculate size from coreinfo based on rev */ if (corerev == 0) memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK)); else if (corerev < 3) { memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK)); memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; } else if ((corerev <= 7) || (corerev == 12)) { uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; uint bsz = (coreinfo & SRCI_SRBSZ_MASK); uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT; if (lss != 0) nb --; memsize = nb * (1 << (bsz + SR_BSZ_BASE)); if (lss 
!= 0) memsize += (1 << ((lss - 1) + SR_BSZ_BASE)); } else { uint8 i; uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; for (i = 0; i < nb; i++) memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM); } /* Return to previous state and core */ if (!wasup) si_core_disable(sih, 0); si_setcoreidx(sih, origidx); done: INTR_RESTORE(sii, intr_val); return memsize; } /** Return the TCM-RAM size of the ARMCR4 core. */ uint32 si_tcm_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; uint8 *regs; bool wasup; uint32 corecap; uint memsize = 0; uint32 nab = 0; uint32 nbb = 0; uint32 totb = 0; uint32 bxinfo = 0; uint32 idx = 0; uint32 *arm_cap_reg; uint32 *arm_bidx; uint32 *arm_binfo; /* Block ints and save current core */ INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); /* Switch to CR4 core */ if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0))) goto done; /* Get info for determining size. If in reset, come out of reset, * but remain in halt */ if (!(wasup = si_iscoreup(sih))) si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT); arm_cap_reg = (uint32 *)(regs + SI_CR4_CAP); corecap = R_REG(sii->osh, arm_cap_reg); nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT; nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT; totb = nab + nbb; arm_bidx = (uint32 *)(regs + SI_CR4_BANKIDX); arm_binfo = (uint32 *)(regs + SI_CR4_BANKINFO); for (idx = 0; idx < totb; idx++) { W_REG(sii->osh, arm_bidx, idx); bxinfo = R_REG(sii->osh, arm_binfo); memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT; } /* Return to previous state and core */ if (!wasup) si_core_disable(sih, 0); si_setcoreidx(sih, origidx); done: INTR_RESTORE(sii, intr_val); return memsize; } bool si_has_flops(si_t *sih) { uint origidx, cr4_rev; /* Find out CR4 core revision */ origidx = si_coreidx(sih); if (si_setcore(sih, ARMCR4_CORE_ID, 0)) { cr4_rev = si_corerev(sih); si_setcoreidx(sih, origidx); if (cr4_rev == 1 || cr4_rev >= 3) return TRUE; } return FALSE; } uint32 si_socram_srmem_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; sbsocramregs_t *regs; bool wasup; uint corerev; uint32 coreinfo; uint memsize = 0; if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) { return (32 * 1024); } /* Block ints and save current core */ INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); /* Switch to SOCRAM core */ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) goto done; /* Get info for determining size */ if (!(wasup = si_iscoreup(sih))) si_core_reset(sih, 0, 0); corerev = si_corerev(sih); coreinfo = R_REG(sii->osh, &regs->coreinfo); /* Calculate size from coreinfo based on rev */ if (corerev >= 16) { uint8 i; uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; for (i = 0; i < nb; i++) { W_REG(sii->osh, &regs->bankidx, i); if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK) memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM); } } /* Return to previous state and core */ if (!wasup) si_core_disable(sih, 0); si_setcoreidx(sih, origidx); done: INTR_RESTORE(sii, intr_val); return memsize; } #if !defined(_CFEZ_) || defined(CFG_WL) void si_btcgpiowar(si_t *sih) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; chipcregs_t *cc; /* Make sure that there is ChipCommon core present && * UART_TX is strapped to 1 
*/ if (!(sih->cccaps & CC_CAP_UARTGPIO)) return; /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */ INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); ASSERT(cc != NULL); W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04); /* restore the original index */ si_setcoreidx(sih, origidx); INTR_RESTORE(sii, intr_val); } void si_chipcontrl_btshd0_4331(si_t *sih, bool on) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; chipcregs_t *cc; uint origidx; uint32 val; uint intr_val = 0; INTR_OFF(sii, intr_val); origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); val = R_REG(sii->osh, &cc->chipcontrol); /* bt_shd0 controls are same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */ if (on) { /* Enable bt_shd0 on gpio4: */ val |= (CCTRL4331_BT_SHD0_ON_GPIO4); W_REG(sii->osh, &cc->chipcontrol, val); } else { val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4); W_REG(sii->osh, &cc->chipcontrol, val); } /* restore the original index */ si_setcoreidx(sih, origidx); INTR_RESTORE(sii, intr_val); } void si_chipcontrl_restore(si_t *sih, uint32 val) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); W_REG(sii->osh, &cc->chipcontrol, val); si_setcoreidx(sih, origidx); } uint32 si_chipcontrl_read(si_t *sih) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint origidx = si_coreidx(sih); uint32 val; cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); val = R_REG(sii->osh, &cc->chipcontrol); si_setcoreidx(sih, origidx); return val; } void si_chipcontrl_epa4331(si_t *sih, bool on) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint origidx = si_coreidx(sih); uint32 val; cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); val = R_REG(sii->osh, &cc->chipcontrol); if (on) { if (sih->chippkg == 9 || sih->chippkg == 0xb) { val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); /* Ext PA Controls for 4331 12x9 Package */ W_REG(sii->osh, &cc->chipcontrol, val); } else { /* Ext PA Controls for 4331 12x12 Package */ if (sih->chiprev > 0) { W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2)); } else { W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN)); } } } else { val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5); W_REG(sii->osh, &cc->chipcontrol, val); } si_setcoreidx(sih, origidx); } /** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. 
*/ void si_chipcontrl_srom4360(si_t *sih, bool on) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint origidx = si_coreidx(sih); uint32 val; cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); val = R_REG(sii->osh, &cc->chipcontrol); if (on) { val &= ~(CCTRL4360_SECI_MODE | CCTRL4360_BTSWCTRL_MODE | CCTRL4360_EXTRA_FEMCTRL_MODE | CCTRL4360_BT_LGCY_MODE | CCTRL4360_CORE2FEMCTRL4_ON); W_REG(sii->osh, &cc->chipcontrol, val); } else { } si_setcoreidx(sih, origidx); } void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl) { si_info_t *sii; chipcregs_t *cc; uint origidx; uint32 val; bool sel_chip; sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) || (CHIPID(sih->chip) == BCM43431_CHIP_ID); sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb)); if (!sel_chip) return; sii = SI_INFO(sih); origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); val = R_REG(sii->osh, &cc->chipcontrol); if (enter_wowl) { val |= CCTRL4331_EXTPA_EN; W_REG(sii->osh, &cc->chipcontrol, val); } else { val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); W_REG(sii->osh, &cc->chipcontrol, val); } si_setcoreidx(sih, origidx); } #endif uint si_pll_reset(si_t *sih) { uint err = 0; return (err); } /** Enable BT-COEX & Ex-PA for 4313 */ void si_epa_4313war(si_t *sih) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); /* EPA Fix */ W_REG(sii->osh, &cc->gpiocontrol, R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK); si_setcoreidx(sih, origidx); } void si_clk_pmu_htavail_set(si_t *sih, bool set_clear) { } /** Re-enable synth_pwrsw resource in min_res_mask for 4313 */ void si_pmu_synth_pwrsw_4313_war(si_t *sih) { } /** WL/BT control for 4313 btcombo boards >= P250 */ void si_btcombo_p250_4313_war(si_t *sih) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); W_REG(sii->osh, &cc->gpiocontrol, R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK); W_REG(sii->osh, &cc->gpioouten, R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK); si_setcoreidx(sih, origidx); } void si_btc_enable_chipcontrol(si_t *sih) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); /* BT fix */ W_REG(sii->osh, &cc->chipcontrol, R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK); si_setcoreidx(sih, origidx); } void si_btcombo_43228_war(si_t *sih) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK); W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK); si_setcoreidx(sih, origidx); } /** check if the device is removed */ bool si_deviceremoved(si_t *sih) { uint32 w; switch (BUSTYPE(sih->bustype)) { case PCI_BUS: ASSERT(SI_INFO(sih)->osh != NULL); w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32)); if ((w & 0xFFFF) != VENDOR_BROADCOM) return TRUE; break; } return FALSE; } bool si_is_sprom_available(si_t *sih) { if (sih->ccrev >= 31) { si_info_t *sii; uint origidx; chipcregs_t *cc; uint32 sromctrl; if ((sih->cccaps & CC_CAP_SROM) == 0) return FALSE; sii = SI_INFO(sih); origidx = sii->curidx; cc = si_setcoreidx(sih, SI_CC_IDX); ASSERT(cc); sromctrl = R_REG(sii->osh, &cc->sromcontrol); si_setcoreidx(sih, origidx); return (sromctrl & SRC_PRESENT); } switch (CHIPID(sih->chip)) { case BCM4312_CHIP_ID: return 
((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL); case BCM4325_CHIP_ID: return (sih->chipst & CST4325_SPROM_SEL) != 0; case BCM4322_CHIP_ID: case BCM43221_CHIP_ID: case BCM43231_CHIP_ID: case BCM43222_CHIP_ID: case BCM43111_CHIP_ID: case BCM43112_CHIP_ID: case BCM4342_CHIP_ID: { uint32 spromotp; spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >> CST4322_SPROM_OTP_SEL_SHIFT; return (spromotp & CST4322_SPROM_PRESENT) != 0; } case BCM4329_CHIP_ID: return (sih->chipst & CST4329_SPROM_SEL) != 0; case BCM4315_CHIP_ID: return (sih->chipst & CST4315_SPROM_SEL) != 0; case BCM4319_CHIP_ID: return (sih->chipst & CST4319_SPROM_SEL) != 0; case BCM4336_CHIP_ID: case BCM43362_CHIP_ID: return (sih->chipst & CST4336_SPROM_PRESENT) != 0; case BCM4330_CHIP_ID: return (sih->chipst & CST4330_SPROM_PRESENT) != 0; case BCM4313_CHIP_ID: return (sih->chipst & CST4313_SPROM_PRESENT) != 0; case BCM4331_CHIP_ID: case BCM43431_CHIP_ID: return (sih->chipst & CST4331_SPROM_PRESENT) != 0; case BCM43239_CHIP_ID: return ((sih->chipst & CST43239_SPROM_MASK) && !(sih->chipst & CST43239_SFLASH_MASK)); case BCM4324_CHIP_ID: case BCM43242_CHIP_ID: return ((sih->chipst & CST4324_SPROM_MASK) && !(sih->chipst & CST4324_SFLASH_MASK)); case BCM4335_CHIP_ID: case BCM4345_CHIP_ID: return ((sih->chipst & CST4335_SPROM_MASK) && !(sih->chipst & CST4335_SFLASH_MASK)); case BCM4350_CHIP_ID: case BCM4354_CHIP_ID: case BCM43556_CHIP_ID: case BCM43558_CHIP_ID: case BCM43566_CHIP_ID: case BCM43568_CHIP_ID: case BCM43569_CHIP_ID: return (sih->chipst & CST4350_SPROM_PRESENT) != 0; case BCM43602_CHIP_ID: return (sih->chipst & CST43602_SPROM_PRESENT) != 0; case BCM43131_CHIP_ID: case BCM43217_CHIP_ID: case BCM43227_CHIP_ID: case BCM43228_CHIP_ID: case BCM43428_CHIP_ID: return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT; default: return TRUE; } } uint32 si_get_sromctl(si_t *sih) { chipcregs_t *cc; uint origidx = si_coreidx(sih); uint32 sromctl; osl_t *osh = si_osh(sih); cc = si_setcoreidx(sih, SI_CC_IDX); ASSERT((uintptr)cc); sromctl = R_REG(osh, &cc->sromcontrol); /* return to the original core */ si_setcoreidx(sih, origidx); return sromctl; } int si_set_sromctl(si_t *sih, uint32 value) { chipcregs_t *cc; uint origidx = si_coreidx(sih); osl_t *osh = si_osh(sih); cc = si_setcoreidx(sih, SI_CC_IDX); ASSERT((uintptr)cc); /* get chipcommon rev */ if (si_corerev(sih) < 32) return BCME_UNSUPPORTED; W_REG(osh, &cc->sromcontrol, value); /* return to the original core */ si_setcoreidx(sih, origidx); return BCME_OK; } uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val) { uint origidx; uint ret_val; origidx = si_coreidx(sih); si_setcoreidx(sih, coreidx); ret_val = si_wrapperreg(sih, offset, mask, val); /* return to the original core */ si_setcoreidx(sih, origidx); return ret_val; } /* cleanup the hndrte timer from the host when ARM is been halted * without a chance for ARM cleanup its resources * If left not cleanup, Intr from a software timer can still * request HT clk when ARM is halted. 
*/ uint32 si_pmu_res_req_timer_clr(si_t *sih) { uint32 mask; mask = PRRT_REQ_ACTIVE | PRRT_INTEN; if (CHIPID(sih->chip) != BCM4328_CHIP_ID) mask <<= 14; /* clear mask bits */ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, res_req_timer), mask, 0); /* readback to ensure write completes */ return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, res_req_timer), 0, 0); } /** turn on/off rfldo */ void si_pmu_rfldo(si_t *sih, bool on) { } #ifdef SURVIVE_PERST_ENAB static uint32 si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val) { si_info_t *sii; sii = SI_INFO(sih); if (!PCIE(sii)) return (0); return pcie_survive_perst(sii->pch, mask, val); } static void si_watchdog_reset(si_t *sih) { si_info_t *sii = SI_INFO(sih); chipcregs_t *cc; uint32 origidx, i; origidx = si_coreidx(sih); cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); /* issue a watchdog reset */ W_REG(sii->osh, &cc->pmuwatchdog, 2); /* do busy wait for 20ms */ for (i = 0; i < 2000; i++) { OSL_DELAY(10); } si_setcoreidx(sih, origidx); } #endif /* SURVIVE_PERST_ENAB */ void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val) { #ifdef SURVIVE_PERST_ENAB if (BUSTYPE(sih->bustype) != PCI_BUS) return; if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) || (CHIPREV(sih->chiprev) >= 4)) return; if (reset) { si_info_t *sii = SI_INFO(sih); uint32 bar0win, bar0win_after; /* save the bar0win */ bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); si_watchdog_reset(sih); bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); if (bar0win_after != bar0win) { SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n", __FUNCTION__, bar0win, bar0win_after)); OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win); } } if (sperst_mask) { /* enable survive perst */ si_pcie_survive_perst(sih, sperst_mask, sperst_val); } #endif /* SURVIVE_PERST_ENAB */ } void si_pcie_ltr_war(si_t *sih) { }
Y300-0100/android_kernel_samsung_SM-G355HN_XEC
drivers/net/wireless/bcmdhd/siutils.c
C
gpl-2.0
70,718
/* smc-mca.c: A SMC Ultra ethernet driver for linux. */ /* Most of this driver, except for ultramca_probe is nearly verbatim from smc-ultra.c by Donald Becker. The rest is written and copyright 1996 by David Weis, weisd3458@uni.edu This is a driver for the SMC Ultra and SMC EtherEZ ethercards. This driver uses the cards in the 8390-compatible, shared memory mode. Most of the run-time complexity is handled by the generic code in 8390.c. This driver enables the shared memory only when doing the actual data transfers to avoid a bug in early version of the card that corrupted data transferred by a AHA1542. This driver does not support the programmed-I/O data transfer mode of the EtherEZ. That support (if available) is smc-ez.c. Nor does it use the non-8390-compatible "Altego" mode. (No support currently planned.) Changelog: Paul Gortmaker : multiple card support for module users. David Weis : Micro Channel-ized it. Tom Sightler : Added support for IBM PS/2 Ethernet Adapter/A Christopher Turcksin : Changed MCA-probe so that multiple adapters are found correctly (Jul 16, 1997) Chris Beauregard : Tried to merge the two changes above (Dec 15, 1997) Tom Sightler : Fixed minor detection bug caused by above merge Tom Sightler : Added support for three more Western Digital MCA-adapters Tom Sightler : Added support for 2.2.x mca_find_unused_adapter Hartmut Schmidt : - Modified parameter detection to handle each card differently depending on a switch-list - 'card_ver' removed from the adapter list - Some minor bug fixes */ #include <linux/mca.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <asm/io.h> #include <asm/system.h> #include "8390.h" #define DRV_NAME "smc-mca" static int ultramca_open(struct net_device *dev); static void ultramca_reset_8390(struct net_device *dev); static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); static int ultramca_close_card(struct net_device *dev); #define START_PG 0x00 /* First page of TX buffer */ #define ULTRA_CMDREG 0 /* Offset to ASIC command register. */ #define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */ #define ULTRA_MEMENB 0x40 /* Enable the shared memory. */ #define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */ #define ULTRA_IO_EXTENT 32 #define EN0_ERWCNT 0x08 /* Early receive warning count. 
*/ #define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A 0 #define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A 1 #define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A 2 #define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A 3 #define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A 4 #define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A 5 #define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A 6 #define _efe5_IBM_PS2_Adapter_A_for_Ethernet 7 struct smc_mca_adapters_t { unsigned int id; char *name; }; #define MAX_ULTRAMCA_CARDS 4 /* Max number of Ultra cards per module */ static int ultra_io[MAX_ULTRAMCA_CARDS]; static int ultra_irq[MAX_ULTRAMCA_CARDS]; MODULE_LICENSE("GPL"); module_param_array(ultra_io, int, NULL, 0); module_param_array(ultra_irq, int, NULL, 0); MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)"); MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)"); static const struct { unsigned int base_addr; } addr_table[] = { { 0x0800 }, { 0x1800 }, { 0x2800 }, { 0x3800 }, { 0x4800 }, { 0x5800 }, { 0x6800 }, { 0x7800 }, { 0x8800 }, { 0x9800 }, { 0xa800 }, { 0xb800 }, { 0xc800 }, { 0xd800 }, { 0xe800 }, { 0xf800 } }; #define MEM_MASK 64 static const struct { unsigned char mem_index; unsigned long mem_start; unsigned char num_pages; } mem_table[] = { { 16, 0x0c0000, 40 }, { 18, 0x0c4000, 40 }, { 20, 0x0c8000, 40 }, { 22, 0x0cc000, 40 }, { 24, 0x0d0000, 40 }, { 26, 0x0d4000, 40 }, { 28, 0x0d8000, 40 }, { 30, 0x0dc000, 40 }, {144, 0xfc0000, 40 }, {148, 0xfc8000, 40 }, {154, 0xfd0000, 40 }, {156, 0xfd8000, 40 }, { 0, 0x0c0000, 20 }, { 1, 0x0c2000, 20 }, { 2, 0x0c4000, 20 }, { 3, 0x0c6000, 20 } }; #define IRQ_MASK 243 static const struct { unsigned char new_irq; unsigned char old_irq; } irq_table[] = { { 3, 3 }, { 4, 4 }, { 10, 10 }, { 14, 15 } }; static short smc_mca_adapter_ids[] __initdata = { 0x61c8, 0x61c9, 0x6fc0, 0x6fc1, 0x6fc2, 0xefd4, 0xefd5, 0xefe5, 0x0000 }; static char *smc_mca_adapter_names[] __initdata = { "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)", "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)", "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)", "WD Starcard PLUS/A (WD8003ST/A)", "WD Ethercard PLUS 10T/A (WD8003W/A)", "IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)", "IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)", "IBM PS/2 Adapter/A for Ethernet", NULL }; static int ultra_found = 0; static const struct net_device_ops ultramca_netdev_ops = { .ndo_open = ultramca_open, .ndo_stop = ultramca_close_card, .ndo_start_xmit = ei_start_xmit, .ndo_tx_timeout = ei_tx_timeout, .ndo_get_stats = ei_get_stats, .ndo_set_multicast_list = ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ei_poll, #endif }; static int __init ultramca_probe(struct device *gen_dev) { unsigned short ioaddr; struct net_device *dev; unsigned char reg4, num_pages; struct mca_device *mca_dev = to_mca_device(gen_dev); char slot = mca_dev->slot; unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff; int i, rc; int adapter = mca_dev->index; int tbase = 0; int tirq = 0; int base_addr = ultra_io[ultra_found]; int irq = ultra_irq[ultra_found]; if (base_addr || irq) { printk(KERN_INFO "Probing for SMC MCA adapter"); if (base_addr) { printk(KERN_INFO " at I/O address 0x%04x%c", base_addr, irq ? 
' ' : '\n'); } if (irq) { printk(KERN_INFO "using irq %d\n", irq); } } tirq = 0; tbase = 0; /* If we're trying to match a specificied irq or io address, * we'll reject the adapter found unless it's the one we're * looking for */ pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */ pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */ pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */ pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */ /* Test the following conditions: * - If an irq parameter is supplied, compare it * with the irq of the adapter we found * - If a base_addr paramater is given, compare it * with the base_addr of the adapter we found * - Check that the irq and the base_addr of the * adapter we found is not already in use by * this driver */ switch (mca_dev->index) { case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A: case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A: case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A: case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A: { tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr; tirq = irq_table[(pos5 & 0xc) >> 2].new_irq; break; } case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A: case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A: case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A: case _efe5_IBM_PS2_Adapter_A_for_Ethernet: { tbase = ((pos2 & 0x0fe) * 0x10); tirq = irq_table[(pos5 & 3)].old_irq; break; } } if(!tirq || !tbase || (irq && irq != tirq) || (base_addr && tbase != base_addr)) /* FIXME: we're trying to force the ordering of the * devices here, there should be a way of getting this * to happen */ return -ENXIO; /* Adapter found. */ dev = alloc_ei_netdev(); if(!dev) return -ENODEV; SET_NETDEV_DEV(dev, gen_dev); mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]); mca_device_set_claim(mca_dev, 1); printk(KERN_INFO "smc_mca: %s found in slot %d\n", smc_mca_adapter_names[adapter], slot + 1); ultra_found++; dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase); dev->irq = mca_device_transform_irq(mca_dev, tirq); dev->mem_start = 0; num_pages = 40; switch (adapter) { /* card-# in const array above [hs] */ case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A: case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A: { for (i = 0; i < 16; i++) { /* taking 16 counts * up to 15 [hs] */ if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) { dev->mem_start = (unsigned long) mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start); num_pages = mem_table[i].num_pages; } } break; } case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A: case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A: case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A: case _efe5_IBM_PS2_Adapter_A_for_Ethernet: { dev->mem_start = (unsigned long) mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000)); num_pages = 0x40; break; } case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A: case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A: { /* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates * the index of the 0x2000 step. 
* beware different number of pages [hs] */ dev->mem_start = (unsigned long) mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf)))); num_pages = 0x20 + (2 * (pos3 & 0x10)); break; } } /* sanity check, shouldn't happen */ if (dev->mem_start == 0) { rc = -ENODEV; goto err_unclaim; } if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) { rc = -ENODEV; goto err_unclaim; } reg4 = inb(ioaddr + 4) & 0x7f; outb(reg4, ioaddr + 4); for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(ioaddr + 8 + i); printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM", slot + 1, ioaddr, dev->dev_addr); /* Switch from the station address to the alternate register set * and read the useful registers there. */ outb(0x80 | reg4, ioaddr + 4); /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); /* Switch back to the station address register set so that * the MS-DOS driver can find the card after a warm boot. */ outb(reg4, ioaddr + 4); dev_set_drvdata(gen_dev, dev); /* The 8390 isn't at the base address, so fake the offset */ dev->base_addr = ioaddr + ULTRA_NIC_OFFSET; ei_status.name = "SMC Ultra MCA"; ei_status.word16 = 1; ei_status.tx_start_page = START_PG; ei_status.rx_start_page = START_PG + TX_PAGES; ei_status.stop_page = num_pages; ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256); if (!ei_status.mem) { rc = -ENOMEM; goto err_release_region; } dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256; printk(", IRQ %d memory %#lx-%#lx.\n", dev->irq, dev->mem_start, dev->mem_end - 1); ei_status.reset_8390 = &ultramca_reset_8390; ei_status.block_input = &ultramca_block_input; ei_status.block_output = &ultramca_block_output; ei_status.get_8390_hdr = &ultramca_get_8390_hdr; ei_status.priv = slot; dev->netdev_ops = &ultramca_netdev_ops; NS8390_init(dev, 0); rc = register_netdev(dev); if (rc) goto err_unmap; return 0; err_unmap: iounmap(ei_status.mem); err_release_region: release_region(ioaddr, ULTRA_IO_EXTENT); err_unclaim: mca_device_set_claim(mca_dev, 0); free_netdev(dev); return rc; } static int ultramca_open(struct net_device *dev) { int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ int retval; if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) return retval; outb(ULTRA_MEMENB, ioaddr); /* Enable memory */ outb(0x80, ioaddr + 5); /* ??? */ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ outb(0x04, ioaddr + 5); /* ??? */ /* Set the early receive warning level in window 0 high enough not * to receive ERW interrupts. */ /* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr); * outb(0xff, dev->base_addr + EN0_ERWCNT); */ ei_open(dev); return 0; } static void ultramca_reset_8390(struct net_device *dev) { int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ outb(ULTRA_RESET, ioaddr); if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies); ei_status.txing = 0; outb(0x80, ioaddr + 5); /* ??? */ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ if (ei_debug > 1) printk("reset done\n"); return; } /* Grab the 8390 specific header. Similar to the block_input routine, but * we don't need to be concerned with ring wrap as the header will be at * the start of a page, so we optimize accordingly. 
*/ static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8); #ifdef notdef /* Officially this is what we are doing, but the readl() is faster */ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); #else ((unsigned int*)hdr)[0] = readl(hdr_start); #endif } /* Block input and output are easy on shared memory ethercards, the only * complication is when the ring buffer wraps. */ static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256; if (ring_offset + count > ei_status.stop_page * 256) { /* We must wrap the input move. */ int semi_count = ei_status.stop_page * 256 - ring_offset; memcpy_fromio(skb->data, xfer_start, semi_count); count -= semi_count; memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); } else { memcpy_fromio(skb->data, xfer_start, count); } } static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8); memcpy_toio(shmem, buf, count); } static int ultramca_close_card(struct net_device *dev) { int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ netif_stop_queue(dev); if (ei_debug > 1) printk("%s: Shutting down ethercard.\n", dev->name); outb(0x00, ioaddr + 6); /* Disable interrupts. */ free_irq(dev->irq, dev); NS8390_init(dev, 0); /* We should someday disable shared memory and change to 8-bit mode * "just in case"... */ return 0; } static int ultramca_remove(struct device *gen_dev) { struct mca_device *mca_dev = to_mca_device(gen_dev); struct net_device *dev = dev_get_drvdata(gen_dev); if (dev) { /* NB: ultra_close_card() does free_irq */ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; unregister_netdev(dev); mca_device_set_claim(mca_dev, 0); release_region(ioaddr, ULTRA_IO_EXTENT); iounmap(ei_status.mem); free_netdev(dev); } return 0; } static struct mca_driver ultra_driver = { .id_table = smc_mca_adapter_ids, .driver = { .name = "smc-mca", .bus = &mca_bus_type, .probe = ultramca_probe, .remove = ultramca_remove, } }; static int __init ultramca_init_module(void) { if(!MCA_bus) return -ENXIO; mca_register_driver(&ultra_driver); return ultra_found ? 0 : -ENXIO; } static void __exit ultramca_cleanup_module(void) { mca_unregister_driver(&ultra_driver); } module_init(ultramca_init_module); module_exit(ultramca_cleanup_module);
JoeyJiao/huawei_kernel_2.6.32_9
drivers/net/smc-mca.c
C
gpl-2.0
16,499
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/sysctl.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#endif
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>

static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
						struct sk_buff *skb)
{
	u16 zone = NF_CT_DEFAULT_ZONE;

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (skb->nfct)
		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge &&
	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
		return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
#endif
	if (hooknum == NF_INET_PRE_ROUTING)
		return IP6_DEFRAG_CONNTRACK_IN + zone;
	else
		return IP6_DEFRAG_CONNTRACK_OUT + zone;
}

static unsigned int ipv6_defrag(unsigned int hooknum,
				struct sk_buff *skb,
				const struct net_device *in,
				const struct net_device *out,
				int (*okfn)(struct sk_buff *))
{
	struct sk_buff *reasm;

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	/* Previously seen (loopback)? */
	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
		return NF_ACCEPT;
#endif

	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
	/* queued */
	if (reasm == NULL)
		return NF_STOLEN;

	/* error occurred or not fragmented */
	if (reasm == skb)
		return NF_ACCEPT;

	nf_ct_frag6_consume_orig(reasm);

	NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
		       (struct net_device *) in,
		       (struct net_device *) out,
		       okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);

	return NF_STOLEN;
}

static struct nf_hook_ops ipv6_defrag_ops[] = {
	{
		.hook		= ipv6_defrag,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP6_PRI_CONNTRACK_DEFRAG,
	},
	{
		.hook		= ipv6_defrag,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP6_PRI_CONNTRACK_DEFRAG,
	},
};

static int __init nf_defrag_init(void)
{
	int ret = 0;

	ret = nf_ct_frag6_init();
	if (ret < 0) {
		pr_err("nf_defrag_ipv6: can't initialize frag6.\n");
		return ret;
	}
	ret = nf_register_hooks(ipv6_defrag_ops, ARRAY_SIZE(ipv6_defrag_ops));
	if (ret < 0) {
		pr_err("nf_defrag_ipv6: can't register hooks\n");
		goto cleanup_frag6;
	}
	return ret;

cleanup_frag6:
	nf_ct_frag6_cleanup();
	return ret;
}

static void __exit nf_defrag_fini(void)
{
	nf_unregister_hooks(ipv6_defrag_ops, ARRAY_SIZE(ipv6_defrag_ops));
	nf_ct_frag6_cleanup();
}

void nf_defrag_ipv6_enable(void)
{
}
EXPORT_SYMBOL_GPL(nf_defrag_ipv6_enable);

module_init(nf_defrag_init);
module_exit(nf_defrag_fini);

MODULE_LICENSE("GPL");
RealDigitalMediaAndroid/linux-imx6
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
C
gpl-2.0
3,428
/* * kvm asynchronous fault support * * Copyright 2010 Red Hat, Inc. * * Author: * Gleb Natapov <gleb@redhat.com> * * This file is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/mmu_context.h> #include "async_pf.h" #include <trace/events/kvm.h> static struct kmem_cache *async_pf_cache; int kvm_async_pf_init(void) { async_pf_cache = KMEM_CACHE(kvm_async_pf, 0); if (!async_pf_cache) return -ENOMEM; return 0; } void kvm_async_pf_deinit(void) { if (async_pf_cache) kmem_cache_destroy(async_pf_cache); async_pf_cache = NULL; } void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu) { INIT_LIST_HEAD(&vcpu->async_pf.done); INIT_LIST_HEAD(&vcpu->async_pf.queue); spin_lock_init(&vcpu->async_pf.lock); } static void async_pf_execute(struct work_struct *work) { struct page *page = NULL; struct kvm_async_pf *apf = container_of(work, struct kvm_async_pf, work); struct mm_struct *mm = apf->mm; struct kvm_vcpu *vcpu = apf->vcpu; unsigned long addr = apf->addr; gva_t gva = apf->gva; might_sleep(); use_mm(mm); down_read(&mm->mmap_sem); get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL); up_read(&mm->mmap_sem); unuse_mm(mm); spin_lock(&vcpu->async_pf.lock); list_add_tail(&apf->link, &vcpu->async_pf.done); apf->page = page; apf->done = true; spin_unlock(&vcpu->async_pf.lock); /* * apf may be freed by kvm_check_async_pf_completion() after * this point */ trace_kvm_async_pf_completed(addr, page, gva); if (waitqueue_active(&vcpu->wq)) wake_up_interruptible(&vcpu->wq); mmdrop(mm); kvm_put_kvm(vcpu->kvm); } void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) { /* cancel outstanding work queue item */ while (!list_empty(&vcpu->async_pf.queue)) { struct kvm_async_pf *work = list_entry(vcpu->async_pf.queue.next, typeof(*work), queue); cancel_work_sync(&work->work); list_del(&work->queue); if (!work->done) /* work was canceled */ kmem_cache_free(async_pf_cache, work); } spin_lock(&vcpu->async_pf.lock); while (!list_empty(&vcpu->async_pf.done)) { struct kvm_async_pf *work = list_entry(vcpu->async_pf.done.next, typeof(*work), link); list_del(&work->link); if (!is_error_page(work->page)) kvm_release_page_clean(work->page); kmem_cache_free(async_pf_cache, work); } spin_unlock(&vcpu->async_pf.lock); vcpu->async_pf.queued = 0; } void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) { struct kvm_async_pf *work; while (!list_empty_careful(&vcpu->async_pf.done) && kvm_arch_can_inject_async_page_present(vcpu)) { spin_lock(&vcpu->async_pf.lock); work = list_first_entry(&vcpu->async_pf.done, typeof(*work), link); list_del(&work->link); spin_unlock(&vcpu->async_pf.lock); if (work->page) kvm_arch_async_page_ready(vcpu, work); kvm_arch_async_page_present(vcpu, work); list_del(&work->queue); vcpu->async_pf.queued--; if (!is_error_page(work->page)) kvm_release_page_clean(work->page); kmem_cache_free(async_pf_cache, work); } } int kvm_setup_async_pf(struct 
kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, struct kvm_arch_async_pf *arch) { struct kvm_async_pf *work; if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU) return 0; /* setup delayed work */ /* * do alloc nowait since if we are going to sleep anyway we * may as well sleep faulting in page */ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); if (!work) return 0; work->page = NULL; work->done = false; work->vcpu = vcpu; work->gva = gva; work->addr = gfn_to_hva(vcpu->kvm, gfn); work->arch = *arch; work->mm = current->mm; atomic_inc(&work->mm->mm_count); kvm_get_kvm(work->vcpu->kvm); /* this can't really happen otherwise gfn_to_pfn_async would succeed */ if (unlikely(kvm_is_error_hva(work->addr))) goto retry_sync; INIT_WORK(&work->work, async_pf_execute); if (!schedule_work(&work->work)) goto retry_sync; list_add_tail(&work->queue, &vcpu->async_pf.queue); vcpu->async_pf.queued++; kvm_arch_async_page_not_present(vcpu, work); return 1; retry_sync: kvm_put_kvm(work->vcpu->kvm); mmdrop(work->mm); kmem_cache_free(async_pf_cache, work); return 0; } int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu) { struct kvm_async_pf *work; if (!list_empty_careful(&vcpu->async_pf.done)) return 0; work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC); if (!work) return -ENOMEM; work->page = KVM_ERR_PTR_BAD_PAGE; INIT_LIST_HEAD(&work->queue); /* for list_del to work */ spin_lock(&vcpu->async_pf.lock); list_add_tail(&work->link, &vcpu->async_pf.done); spin_unlock(&vcpu->async_pf.lock); vcpu->async_pf.queued++; return 0; }
childofthehorn/android_kernel_oneplus_msm8994
virt/kvm/async_pf.c
C
gpl-2.0
5,335
/* irq.c: FRV IRQ handling
 *
 * Copyright (C) 2003, 2004, 2006 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irc-regs.h>
#include <asm/gdb-stub.h>

#define set_IRR(N,A,B,C,D) __set_IRR(N, (A << 28) | (B << 24) | (C << 20) | (D << 16))

extern void __init fpga_init(void);
#ifdef CONFIG_FUJITSU_MB93493
extern void __init mb93493_init(void);
#endif

#define __reg16(ADDR) (*(volatile unsigned short *)(ADDR))

atomic_t irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}

/*
 * on-CPU PIC operations
 */
static void frv_cpupic_ack(struct irq_data *d)
{
	__clr_RC(d->irq);
	__clr_IRL();
}

static void frv_cpupic_mask(struct irq_data *d)
{
	__set_MASK(d->irq);
}

static void frv_cpupic_mask_ack(struct irq_data *d)
{
	__set_MASK(d->irq);
	__clr_RC(d->irq);
	__clr_IRL();
}

static void frv_cpupic_unmask(struct irq_data *d)
{
	__clr_MASK(d->irq);
}

static struct irq_chip frv_cpu_pic = {
	.name		= "cpu",
	.irq_ack	= frv_cpupic_ack,
	.irq_mask	= frv_cpupic_mask,
	.irq_mask_ack	= frv_cpupic_mask_ack,
	.irq_unmask	= frv_cpupic_unmask,
};

/*
 * handles all normal device IRQs
 * - registers are referred to by the __frame variable (GR28)
 * - IRQ distribution is complicated in this arch because of the many PICs, the
 *   way they work and the way they cascade
 */
asmlinkage void do_IRQ(void)
{
	irq_enter();
	generic_handle_irq(__get_IRL());
	irq_exit();
}

/*
 * handles all NMIs when not co-opted by the debugger
 * - registers are referred to by the __frame variable (GR28)
 */
asmlinkage void do_NMI(void)
{
}

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int level;

	for (level = 1; level <= 14; level++)
		irq_set_chip_and_handler(level, &frv_cpu_pic,
					 handle_level_irq);

	irq_set_handler(IRQ_CPU_TIMER0, handle_edge_irq);

	/* set the trigger levels for internal interrupt sources
	 * - timers all falling-edge
	 * - ERR0 is rising-edge
	 * - all others are high-level
	 */
	__set_IITMR(0, 0x003f0000);	/* DMA0-3, TIMER0-2 */
	__set_IITMR(1, 0x20000000);	/* ERR0-1, UART0-1, DMA4-7 */

	/* route internal interrupts */
	set_IRR(4, IRQ_DMA3_LEVEL, IRQ_DMA2_LEVEL, IRQ_DMA1_LEVEL, IRQ_DMA0_LEVEL);
	set_IRR(5, 0, IRQ_TIMER2_LEVEL, IRQ_TIMER1_LEVEL, IRQ_TIMER0_LEVEL);
	set_IRR(6, IRQ_GDBSTUB_LEVEL, IRQ_GDBSTUB_LEVEL, IRQ_UART1_LEVEL, IRQ_UART0_LEVEL);
	set_IRR(7, IRQ_DMA7_LEVEL, IRQ_DMA6_LEVEL, IRQ_DMA5_LEVEL, IRQ_DMA4_LEVEL);

	/* route external interrupts */
	set_IRR(2, IRQ_XIRQ7_LEVEL, IRQ_XIRQ6_LEVEL, IRQ_XIRQ5_LEVEL, IRQ_XIRQ4_LEVEL);
	set_IRR(3, IRQ_XIRQ3_LEVEL, IRQ_XIRQ2_LEVEL, IRQ_XIRQ1_LEVEL, IRQ_XIRQ0_LEVEL);

#if defined(CONFIG_MB93091_VDK)
	__set_TM1(0x55550000);		/* XIRQ7-0 all active low */
#elif defined(CONFIG_MB93093_PDK)
	__set_TM1(0x15550000);		/* XIRQ7 active high, 6-0 all active low */
#else
#error dont know external IRQ trigger levels for this setup
#endif

	fpga_init();
#ifdef CONFIG_FUJITSU_MB93493
	mb93493_init();
#endif
}
Dazzozo/android_kernel_huawei_u8815
arch/frv/kernel/irq.c
C
gpl-2.0
3,930
/*** -*- linux-c -*- ********************************************************** Driver for Atmel at76c502 at76c504 and at76c506 wireless cards. Copyright 2000-2001 ATMEL Corporation. Copyright 2003 Simon Kelley. This code was developed from version 2.1.1 of the Atmel drivers, released by Atmel corp. under the GPL in December 2002. It also includes code from the Linux aironet drivers (C) Benjamin Reed, and the Linux PCMCIA package, (C) David Hinds. For all queries about this code, please contact the current author, Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Atmel wireless lan drivers; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ******************************************************************************/ #ifdef __IN_PCMCIA_PACKAGE__ #include <pcmcia/k_compat.h> #endif #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include <pcmcia/ciscode.h> #include <asm/io.h> #include <linux/wireless.h> #include "atmel.h" /*====================================================================*/ MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards"); /*====================================================================*/ static int atmel_config(struct pcmcia_device *link); static void atmel_release(struct pcmcia_device *link); static void atmel_detach(struct pcmcia_device *p_dev); typedef struct local_info_t { struct net_device *eth_dev; } local_info_t; static int atmel_probe(struct pcmcia_device *p_dev) { local_info_t *local; dev_dbg(&p_dev->dev, "atmel_attach()\n"); /* Allocate space for private device-specific data */ local = kzalloc(sizeof(local_info_t), GFP_KERNEL); if (!local) return -ENOMEM; p_dev->priv = local; return atmel_config(p_dev); } /* atmel_attach */ static void atmel_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "atmel_detach\n"); atmel_release(link); kfree(link->priv); } /* Call-back function to interrogate PCMCIA-specific information about the current existence of the card */ static int card_present(void *arg) { struct pcmcia_device *link = (struct pcmcia_device *)arg; if (pcmcia_dev_present(link)) return 1; return 0; } static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data) { if (p_dev->config_index == 0) return -EINVAL; return pcmcia_request_io(p_dev); } static int atmel_config(struct pcmcia_device *link) { local_info_t *dev; int ret; const struct pcmcia_device_id *did; dev = link->priv; did = dev_get_drvdata(&link->dev); dev_dbg(&link->dev, "atmel_config\n"); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | 
CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; if (pcmcia_loop_config(link, atmel_config_check, NULL)) goto failed; if (!link->irq) { dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config."); goto failed; } ret = pcmcia_enable_device(link); if (ret) goto failed; ((local_info_t*)link->priv)->eth_dev = init_atmel_card(link->irq, link->resource[0]->start, did ? did->driver_info : ATMEL_FW_TYPE_NONE, &link->dev, card_present, link); if (!((local_info_t*)link->priv)->eth_dev) goto failed; return 0; failed: atmel_release(link); return -ENODEV; } static void atmel_release(struct pcmcia_device *link) { struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; dev_dbg(&link->dev, "atmel_release\n"); if (dev) stop_atmel_card(dev); ((local_info_t*)link->priv)->eth_dev = NULL; pcmcia_disable_device(link); } static int atmel_suspend(struct pcmcia_device *link) { local_info_t *local = link->priv; netif_device_detach(local->eth_dev); return 0; } static int atmel_resume(struct pcmcia_device *link) { local_info_t *local = link->priv; atmel_open(local->eth_dev); netif_device_attach(local->eth_dev); return 0; } /*====================================================================*/ /* We use the driver_info field to store the correct firmware type for a card. */ #define PCMCIA_DEVICE_MANF_CARD_INFO(manf, card, info) { \ .match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID| \ PCMCIA_DEV_ID_MATCH_CARD_ID, \ .manf_id = (manf), \ .card_id = (card), \ .driver_info = (kernel_ulong_t)(info), } #define PCMCIA_DEVICE_PROD_ID12_INFO(v1, v2, vh1, vh2, info) { \ .match_flags = PCMCIA_DEV_ID_MATCH_PROD_ID1| \ PCMCIA_DEV_ID_MATCH_PROD_ID2, \ .prod_id = { (v1), (v2), NULL, NULL }, \ .prod_id_hash = { (vh1), (vh2), 0, 0 }, \ .driver_info = (kernel_ulong_t)(info), } static const struct pcmcia_device_id atmel_ids[] = { PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0620, ATMEL_FW_TYPE_502_3COM), PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0696, ATMEL_FW_TYPE_502_3COM), PCMCIA_DEVICE_MANF_CARD_INFO(0x01bf, 0x3302, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_MANF_CARD_INFO(0xd601, 0x0007, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("11WAVE", "11WP611AL-E", 0x9eb2da1f, 0xc9a0d3f9, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR", 0xabda4164, 0x41b37e1f, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_D", 0xabda4164, 0x3675d704, ATMEL_FW_TYPE_502D), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_E", 0xabda4164, 0x4172e792, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504_R", 0xabda4164, 0x917f3d72, ATMEL_FW_TYPE_504_2958), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504", 0xabda4164, 0x5040670a, ATMEL_FW_TYPE_504), PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504A", 0xabda4164, 0xe15ed87f, ATMEL_FW_TYPE_504A_2958), PCMCIA_DEVICE_PROD_ID12_INFO("BT", "Voyager 1020 Laptop Adapter", 0xae49b86a, 0x1e957cd5, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("CNet", "CNWLC 11Mbps Wireless PC Card V-5", 0xbc477dde, 0x502fae6b, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN PC Card", 0x5b878724, 0x122f1df6, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN Card S", 0x5b878724, 0x5fba533a, ATMEL_FW_TYPE_504_2958), PCMCIA_DEVICE_PROD_ID12_INFO("OEM", "11Mbps Wireless LAN PC Card V-3", 0xfea54c90, 0x1c5b0f68, ATMEL_FW_TYPE_502), PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W", 0xc4f8b18b, 0x30f38774, ATMEL_FW_TYPE_502D), PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W-V2", 0xc4f8b18b, 0x172d1377, ATMEL_FW_TYPE_502), 
PCMCIA_DEVICE_PROD_ID12_INFO("Wireless", "PC_CARD", 0xa407ecdd, 0x119f6314, ATMEL_FW_TYPE_502D), PCMCIA_DEVICE_PROD_ID12_INFO("WLAN", "802.11b PC CARD", 0x575c516c, 0xb1f6dbc4, ATMEL_FW_TYPE_502D), PCMCIA_DEVICE_PROD_ID12_INFO("LG", "LW2100N", 0xb474d43a, 0x6b1fec94, ATMEL_FW_TYPE_502E), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, atmel_ids); static struct pcmcia_driver atmel_driver = { .owner = THIS_MODULE, .name = "atmel_cs", .probe = atmel_probe, .remove = atmel_detach, .id_table = atmel_ids, .suspend = atmel_suspend, .resume = atmel_resume, }; module_pcmcia_driver(atmel_driver); /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. In addition: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
yseras/SGS4-3.13
drivers/net/wireless/atmel_cs.c
C
gpl-2.0
9,967
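The atmel_cs ID table above keys each PCMCIA match entry to a firmware type through the driver_info payload. A minimal userspace sketch of that table-driven lookup follows; the enum values, struct layout, and IDs here are illustrative stand-ins, not the kernel's definitions:

```c
#include <stdio.h>

/* Hypothetical firmware types, standing in for ATMEL_FW_TYPE_*. */
enum fw_type { FW_NONE, FW_502, FW_502D, FW_502E };

struct id_entry {
	unsigned short manf_id;
	unsigned short card_id;
	enum fw_type fw;        /* plays the role of .driver_info */
};

static const struct id_entry ids[] = {
	{ 0x0101, 0x0620, FW_502  },
	{ 0x01bf, 0x3302, FW_502E },
	{ 0xd601, 0x0007, FW_502  },
	{ 0, 0, FW_NONE }       /* terminator, like PCMCIA_DEVICE_NULL */
};

static enum fw_type match_card(unsigned short manf, unsigned short card)
{
	const struct id_entry *e;

	/* walk the table until the all-zero terminator */
	for (e = ids; e->manf_id || e->card_id; e++)
		if (e->manf_id == manf && e->card_id == card)
			return e->fw;
	return FW_NONE;
}

int main(void)
{
	printf("fw type: %d\n", match_card(0x01bf, 0x3302));
	return 0;
}
```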
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hw.h" #include <linux/export.h> #define AR_BufLen 0x00000fff static void ar9002_hw_rx_enable(struct ath_hw *ah) { REG_WRITE(ah, AR_CR, AR_CR_RXE); } static void ar9002_hw_set_desc_link(void *ds, u32 ds_link) { ((struct ath_desc*) ds)->ds_link = ds_link; } static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) { u32 isr = 0; u32 mask2 = 0; struct ath9k_hw_capabilities *pCap = &ah->caps; u32 sync_cause = 0; bool fatal_int = false; struct ath_common *common = ath9k_hw_common(ah); if (!AR_SREV_9100(ah)) { if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) == AR_RTC_STATUS_ON) { isr = REG_READ(ah, AR_ISR); } } sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT; *masked = 0; if (!isr && !sync_cause) return false; } else { *masked = 0; isr = REG_READ(ah, AR_ISR); } if (isr) { if (isr & AR_ISR_BCNMISC) { u32 isr2; isr2 = REG_READ(ah, AR_ISR_S2); if (isr2 & AR_ISR_S2_TIM) mask2 |= ATH9K_INT_TIM; if (isr2 & AR_ISR_S2_DTIM) mask2 |= ATH9K_INT_DTIM; if (isr2 & AR_ISR_S2_DTIMSYNC) mask2 |= ATH9K_INT_DTIMSYNC; if (isr2 & (AR_ISR_S2_CABEND)) mask2 |= ATH9K_INT_CABEND; if (isr2 & AR_ISR_S2_GTT) mask2 |= ATH9K_INT_GTT; if (isr2 & AR_ISR_S2_CST) mask2 |= ATH9K_INT_CST; if (isr2 & AR_ISR_S2_TSFOOR) mask2 |= ATH9K_INT_TSFOOR; } isr = REG_READ(ah, AR_ISR_RAC); if (isr == 0xffffffff) { *masked = 0; return false; } *masked = isr & ATH9K_INT_COMMON; if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM | AR_ISR_RXOK | AR_ISR_RXERR)) *masked |= ATH9K_INT_RX; if (isr & (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR | AR_ISR_TXEOL)) { u32 s0_s, s1_s; *masked |= ATH9K_INT_TX; s0_s = REG_READ(ah, AR_ISR_S0_S); ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); s1_s = REG_READ(ah, AR_ISR_S1_S); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); } if (isr & AR_ISR_RXORN) { ath_dbg(common, INTERRUPT, "receive FIFO overrun interrupt\n"); } *masked |= mask2; } if (AR_SREV_9100(ah)) return true; if (isr & AR_ISR_GENTMR) { u32 s5_s; s5_s = REG_READ(ah, AR_ISR_S5_S); ah->intr_gen_timer_trigger = MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); ah->intr_gen_timer_thresh = MS(s5_s, AR_ISR_S5_GENTIMER_THRESH); if (ah->intr_gen_timer_trigger) *masked |= ATH9K_INT_GENTIMER; if ((s5_s & AR_ISR_S5_TIM_TIMER) && !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) *masked |= ATH9K_INT_TIM_TIMER; } if (sync_cause) { fatal_int = (sync_cause & (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR)) ? 
true : false; if (fatal_int) { if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { ath_dbg(common, ANY, "received PCI FATAL interrupt\n"); } if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { ath_dbg(common, ANY, "received PCI PERR interrupt\n"); } *masked |= ATH9K_INT_FATAL; } if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { ath_dbg(common, INTERRUPT, "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n"); REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); REG_WRITE(ah, AR_RC, 0); *masked |= ATH9K_INT_FATAL; } if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { ath_dbg(common, INTERRUPT, "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); } REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); } return true; } static void ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) { struct ar5416_desc *ads = AR5416DESC(ds); u32 ctl1, ctl6; ads->ds_txstatus0 = ads->ds_txstatus1 = 0; ads->ds_txstatus2 = ads->ds_txstatus3 = 0; ads->ds_txstatus4 = ads->ds_txstatus5 = 0; ads->ds_txstatus6 = ads->ds_txstatus7 = 0; ads->ds_txstatus8 = ads->ds_txstatus9 = 0; ACCESS_ONCE(ads->ds_link) = i->link; ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); ctl6 = SM(i->keytype, AR_EncrType); if (AR_SREV_9285(ah)) { ads->ds_ctl8 = 0; ads->ds_ctl9 = 0; ads->ds_ctl10 = 0; ads->ds_ctl11 = 0; } if ((i->is_first || i->is_last) && i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) | set11nTries(i->rates, 1) | set11nTries(i->rates, 2) | set11nTries(i->rates, 3) | (i->dur_update ? AR_DurUpdateEna : 0) | SM(0, AR_BurstDur); ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) | set11nRate(i->rates, 1) | set11nRate(i->rates, 2) | set11nRate(i->rates, 3); } else { ACCESS_ONCE(ads->ds_ctl2) = 0; ACCESS_ONCE(ads->ds_ctl3) = 0; } if (!i->is_first) { ACCESS_ONCE(ads->ds_ctl0) = 0; ACCESS_ONCE(ads->ds_ctl1) = ctl1; ACCESS_ONCE(ads->ds_ctl6) = ctl6; return; } ctl1 |= (i->keyix != ATH9K_TXKEYIX_INVALID ? SM(i->keyix, AR_DestIdx) : 0) | SM(i->type, AR_FrameType) | (i->flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0) | (i->flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0) | (i->flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0); switch (i->aggr) { case AGGR_BUF_FIRST: ctl6 |= SM(i->aggr_len, AR_AggrLen); /* fall through */ case AGGR_BUF_MIDDLE: ctl1 |= AR_IsAggr | AR_MoreAggr; ctl6 |= SM(i->ndelim, AR_PadDelim); break; case AGGR_BUF_LAST: ctl1 |= AR_IsAggr; break; case AGGR_BUF_NONE: break; } ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) | SM(i->txpower, AR_XmitPower) | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) | (i->flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0) | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0) | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0) | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : (i->flags & ATH9K_TXDESC_CTSENA ? 
AR_CTSEnable : 0)); ACCESS_ONCE(ads->ds_ctl1) = ctl1; ACCESS_ONCE(ads->ds_ctl6) = ctl6; if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) return; ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) | set11nPktDurRTSCTS(i->rates, 1); ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) | set11nPktDurRTSCTS(i->rates, 3); ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) | set11nRateFlags(i->rates, 1) | set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 3) | SM(i->rtscts_rate, AR_RTSCTSRate); } static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_status *ts) { struct ar5416_desc *ads = AR5416DESC(ds); u32 status; status = ACCESS_ONCE(ads->ds_txstatus9); if ((status & AR_TxDone) == 0) return -EINPROGRESS; ts->ts_tstamp = ads->AR_SendTimestamp; ts->ts_status = 0; ts->ts_flags = 0; if (status & AR_TxOpExceeded) ts->ts_status |= ATH9K_TXERR_XTXOP; ts->tid = MS(status, AR_TxTid); ts->ts_rateindex = MS(status, AR_FinalTxIdx); ts->ts_seqnum = MS(status, AR_SeqNum); status = ACCESS_ONCE(ads->ds_txstatus0); ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00); ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01); ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02); if (status & AR_TxBaStatus) { ts->ts_flags |= ATH9K_TX_BA; ts->ba_low = ads->AR_BaBitmapLow; ts->ba_high = ads->AR_BaBitmapHigh; } status = ACCESS_ONCE(ads->ds_txstatus1); if (status & AR_FrmXmitOK) ts->ts_status |= ATH9K_TX_ACKED; else { if (status & AR_ExcessiveRetries) ts->ts_status |= ATH9K_TXERR_XRETRY; if (status & AR_Filtered) ts->ts_status |= ATH9K_TXERR_FILT; if (status & AR_FIFOUnderrun) { ts->ts_status |= ATH9K_TXERR_FIFO; ath9k_hw_updatetxtriglevel(ah, true); } } if (status & AR_TxTimerExpired) ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED; if (status & AR_DescCfgErr) ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR; if (status & AR_TxDataUnderrun) { ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN; ath9k_hw_updatetxtriglevel(ah, true); } if (status & AR_TxDelimUnderrun) { ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN; ath9k_hw_updatetxtriglevel(ah, true); } ts->ts_shortretry = MS(status, AR_RTSFailCnt); ts->ts_longretry = MS(status, AR_DataFailCnt); ts->ts_virtcol = MS(status, AR_VirtRetryCnt); status = ACCESS_ONCE(ads->ds_txstatus5); ts->ts_rssi = MS(status, AR_TxRSSICombined); ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10); ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11); ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12); ts->evm0 = ads->AR_TxEVM0; ts->evm1 = ads->AR_TxEVM1; ts->evm2 = ads->AR_TxEVM2; return 0; } void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, u32 size, u32 flags) { struct ar5416_desc *ads = AR5416DESC(ds); ads->ds_ctl1 = size & AR_BufLen; if (flags & ATH9K_RXDESC_INTREQ) ads->ds_ctl1 |= AR_RxIntrReq; memset(&ads->u.rx, 0, sizeof(ads->u.rx)); } EXPORT_SYMBOL(ath9k_hw_setuprxdesc); void ar9002_hw_attach_mac_ops(struct ath_hw *ah) { struct ath_hw_ops *ops = ath9k_hw_ops(ah); ops->rx_enable = ar9002_hw_rx_enable; ops->set_desc_link = ar9002_hw_set_desc_link; ops->get_isr = ar9002_hw_get_isr; ops->set_txdesc = ar9002_set_txdesc; ops->proc_txdesc = ar9002_hw_proc_txdesc; }
kozmikkick/KozmiKKerneL-M7
drivers/net/wireless/ath/ath9k/ar9002_mac.c
C
gpl-2.0
10,018
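Throughout the ar9002 ISR and descriptor code, register fields are packed and unpacked with the SM()/MS() mask-and-shift macros. A small self-contained sketch of that convention, with made-up field names and layouts:

```c
#include <stdio.h>
#include <stdint.h>

/* Mask/shift field extraction in the style of the ath9k MS()/SM() macros.
 * The field names and bit layouts below are invented for illustration. */
#define FIELD_TXOK      0x000000ff
#define FIELD_TXOK_S    0
#define FIELD_TXERR     0x0000ff00
#define FIELD_TXERR_S   8

#define MS(val, field)  (((val) & (field)) >> (field##_S))
#define SM(val, field)  (((val) << (field##_S)) & (field))

int main(void)
{
	/* pack two fields into one register word, then pull them back out */
	uint32_t reg = SM(0x3, FIELD_TXOK) | SM(0x1, FIELD_TXERR);

	printf("txok=%u txerr=%u\n",
	       (unsigned)MS(reg, FIELD_TXOK),
	       (unsigned)MS(reg, FIELD_TXERR));
	return 0;
}
```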
/* NXP PCF50633 ADC Driver * * (C) 2006-2008 by Openmoko, Inc. * Author: Balaji Rao <balajirrao@openmoko.org> * All rights reserved. * * Broken down from monstrous PCF50633 driver mainly by * Harald Welte, Andy Green and Werner Almesberger * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * NOTE: This driver does not yet support subtractive ADC mode, which means * you can do only one measurement per read request. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/completion.h> #include <linux/mfd/pcf50633/core.h> #include <linux/mfd/pcf50633/adc.h> struct pcf50633_adc_request { int mux; int avg; void (*callback)(struct pcf50633 *, void *, int); void *callback_param; }; struct pcf50633_adc_sync_request { int result; struct completion completion; }; #define PCF50633_MAX_ADC_FIFO_DEPTH 8 struct pcf50633_adc { struct pcf50633 *pcf; /* Private stuff */ struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH]; int queue_head; int queue_tail; struct mutex queue_mutex; }; static inline struct pcf50633_adc *__to_adc(struct pcf50633 *pcf) { return platform_get_drvdata(pcf->adc_pdev); } static void adc_setup(struct pcf50633 *pcf, int channel, int avg) { channel &= PCF50633_ADCC1_ADCMUX_MASK; /* kill ratiometric, but enable ACCSW biasing */ pcf50633_reg_write(pcf, PCF50633_REG_ADCC2, 0x00); pcf50633_reg_write(pcf, PCF50633_REG_ADCC3, 0x01); /* start ADC conversion on selected channel */ pcf50633_reg_write(pcf, PCF50633_REG_ADCC1, channel | avg | PCF50633_ADCC1_ADCSTART | PCF50633_ADCC1_RES_10BIT); } static void trigger_next_adc_job_if_any(struct pcf50633 *pcf) { struct pcf50633_adc *adc = __to_adc(pcf); int head; head = adc->queue_head; if (!adc->queue[head]) return; adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); } static int adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req) { struct pcf50633_adc *adc = __to_adc(pcf); int head, tail; mutex_lock(&adc->queue_mutex); head = adc->queue_head; tail = adc->queue_tail; if (adc->queue[tail]) { mutex_unlock(&adc->queue_mutex); dev_err(pcf->dev, "ADC queue is full, dropping request\n"); return -EBUSY; } adc->queue[tail] = req; if (head == tail) trigger_next_adc_job_if_any(pcf); adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1); mutex_unlock(&adc->queue_mutex); return 0; } static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result) { struct pcf50633_adc_sync_request *req = param; req->result = result; complete(&req->completion); } int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg) { struct pcf50633_adc_sync_request req; int ret; init_completion(&req.completion); ret = pcf50633_adc_async_read(pcf, mux, avg, pcf50633_adc_sync_read_callback, &req); if (ret) return ret; wait_for_completion(&req.completion); return req.result; } EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read); int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg, void (*callback)(struct pcf50633 *, void *, int), void *callback_param) { struct pcf50633_adc_request *req; /* req is freed when the result is ready, in interrupt handler */ req = kmalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; req->mux = mux; req->avg = avg; req->callback = callback; 
req->callback_param = callback_param; return adc_enqueue_request(pcf, req); } EXPORT_SYMBOL_GPL(pcf50633_adc_async_read); static int adc_result(struct pcf50633 *pcf) { u8 adcs1, adcs3; u16 result; adcs1 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS1); adcs3 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS3); result = (adcs1 << 2) | (adcs3 & PCF50633_ADCS3_ADCDAT1L_MASK); dev_dbg(pcf->dev, "adc result = %d\n", result); return result; } static void pcf50633_adc_irq(int irq, void *data) { struct pcf50633_adc *adc = data; struct pcf50633 *pcf = adc->pcf; struct pcf50633_adc_request *req; int head, res; mutex_lock(&adc->queue_mutex); head = adc->queue_head; req = adc->queue[head]; if (WARN_ON(!req)) { dev_err(pcf->dev, "pcf50633-adc irq: ADC queue empty!\n"); mutex_unlock(&adc->queue_mutex); return; } adc->queue[head] = NULL; adc->queue_head = (head + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1); res = adc_result(pcf); trigger_next_adc_job_if_any(pcf); mutex_unlock(&adc->queue_mutex); req->callback(pcf, req->callback_param, res); kfree(req); } static int __devinit pcf50633_adc_probe(struct platform_device *pdev) { struct pcf50633_adc *adc; adc = kzalloc(sizeof(*adc), GFP_KERNEL); if (!adc) return -ENOMEM; adc->pcf = dev_to_pcf50633(pdev->dev.parent); platform_set_drvdata(pdev, adc); pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY, pcf50633_adc_irq, adc); mutex_init(&adc->queue_mutex); return 0; } static int __devexit pcf50633_adc_remove(struct platform_device *pdev) { struct pcf50633_adc *adc = platform_get_drvdata(pdev); int i, head; pcf50633_free_irq(adc->pcf, PCF50633_IRQ_ADCRDY); mutex_lock(&adc->queue_mutex); head = adc->queue_head; if (WARN_ON(adc->queue[head])) dev_err(adc->pcf->dev, "adc driver removed with request pending\n"); for (i = 0; i < PCF50633_MAX_ADC_FIFO_DEPTH; i++) kfree(adc->queue[i]); mutex_unlock(&adc->queue_mutex); kfree(adc); return 0; } static struct platform_driver pcf50633_adc_driver = { .driver = { .name = "pcf50633-adc", }, .probe = pcf50633_adc_probe, .remove = __devexit_p(pcf50633_adc_remove), }; static int __init pcf50633_adc_init(void) { return platform_driver_register(&pcf50633_adc_driver); } module_init(pcf50633_adc_init); static void __exit pcf50633_adc_exit(void) { platform_driver_unregister(&pcf50633_adc_driver); } module_exit(pcf50633_adc_exit); MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>"); MODULE_DESCRIPTION("PCF50633 adc driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pcf50633-adc");
SweetwaterBurns/E4GT-Kernel
drivers/mfd/pcf50633-adc.c
C
gpl-2.0
6,282
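The pcf50633 ADC driver serialises conversion requests through a fixed-depth FIFO whose head and tail wrap with `& (PCF50633_MAX_ADC_FIFO_DEPTH - 1)`. A compilable sketch of the same power-of-two ring layout, using non-zero ints in place of request pointers:

```c
#include <stdio.h>

#define DEPTH 8   /* must be a power of two for the & (DEPTH - 1) wrap */

struct queue {
	int slot[DEPTH];        /* 0 means empty, like a NULL request pointer */
	int head, tail;
};

/* Returns 0 on success, -1 when the tail slot is still occupied,
 * mirroring how adc_enqueue_request() reports -EBUSY on a full queue. */
static int enqueue(struct queue *q, int v)
{
	if (q->slot[q->tail])
		return -1;
	q->slot[q->tail] = v;
	q->tail = (q->tail + 1) & (DEPTH - 1);
	return 0;
}

static int dequeue(struct queue *q)
{
	int v = q->slot[q->head];

	q->slot[q->head] = 0;
	q->head = (q->head + 1) & (DEPTH - 1);
	return v;
}

int main(void)
{
	struct queue q = { {0}, 0, 0 };

	enqueue(&q, 42);
	enqueue(&q, 7);
	printf("%d %d\n", dequeue(&q), dequeue(&q));
	return 0;
}
```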
/* * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) * * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) * * Adapted from mac_scsinew.c: */ /* * Generic Macintosh NCR5380 driver * * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov> * * derived in part from: */ /* * Generic Generic NCR5380 driver * * Copyright 1995, Russell King * * ALPHA RELEASE 1. * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * This is from mac_scsi.h, but hey, maybe this is useful for Sun3 too! :) * * Options : * * PARITY - enable parity checking. Not supported. * * SCSI2 - enable support for SCSI-II tagged queueing. Untested. * * USLEEP - enable support for devices that don't disconnect. Untested. */ /* * $Log: sun3_NCR5380.c,v $ */ #define AUTOSENSE #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> #include <asm/io.h> #include <asm/sun3ints.h> #include <asm/dvma.h> #include <asm/idprom.h> #include <asm/machines.h> #define NDEBUG 0 #define NDEBUG_ABORT 0x00100000 #define NDEBUG_TAGS 0x00200000 #define NDEBUG_MERGING 0x00400000 /* dma on! */ #define REAL_DMA #include "scsi.h" #include "initio.h" #include <scsi/scsi_host.h> #include "sun3_scsi.h" static void NCR5380_print(struct Scsi_Host *instance); /* #define OLDDMA */ #define USE_WRAPPER /*#define RESET_BOOT */ #define DRIVER_SETUP /* * BUG can be used to trigger a strange code-size related hang on 2.1 kernels */ #ifdef BUG #undef RESET_BOOT #undef DRIVER_SETUP #endif /* #define SUPPORT_TAGS */ #define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); static irqreturn_t scsi_sun3_intr(int irq, void *dummy); static inline unsigned char sun3scsi_read(int reg); static inline void sun3scsi_write(int reg, int value); static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); #ifdef SUPPORT_TAGS static int setup_use_tagged_queuing = -1; module_param(setup_use_tagged_queuing, int, 0); #endif static int setup_hostid = -1; module_param(setup_hostid, int, 0); static struct scsi_cmnd *sun3_dma_setup_done = NULL; #define AFTER_RESET_DELAY (HZ/2) /* ms to wait after hitting dma regs */ #define SUN3_DMA_DELAY 10 /* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ #define SUN3_DVMA_BUFSIZE 0xe000 /* minimum number of bytes to do dma on */ #define SUN3_DMA_MINSIZE 128 static volatile unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; #ifdef OLDDMA static unsigned char *dmabuf = NULL; /* dma memory buffer */ #endif static struct sun3_udc_regs *udc_regs = NULL; static unsigned char *sun3_dma_orig_addr = NULL; static unsigned long sun3_dma_orig_count = 0; static int sun3_dma_active = 0; static unsigned long last_residual = 0; /* * NCR 5380 register access functions */ static inline unsigned char sun3scsi_read(int reg) { return( sun3_scsi_regp[reg] ); } static inline void sun3scsi_write(int reg, int value) { sun3_scsi_regp[reg] = value; } /* dma controller register access functions */ static inline unsigned short sun3_udc_read(unsigned char reg) { unsigned short ret; dregs->udc_addr = UDC_CSR; 
udelay(SUN3_DMA_DELAY); ret = dregs->udc_data; udelay(SUN3_DMA_DELAY); return ret; } static inline void sun3_udc_write(unsigned short val, unsigned char reg) { dregs->udc_addr = reg; udelay(SUN3_DMA_DELAY); dregs->udc_data = val; udelay(SUN3_DMA_DELAY); } /* * XXX: status debug */ static struct Scsi_Host *default_instance; /* * Function : int sun3scsi_detect(struct scsi_host_template * tpnt) * * Purpose : initializes mac NCR5380 driver based on the * command line / compile time port and irq definitions. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. * */ int __init sun3scsi_detect(struct scsi_host_template * tpnt) { unsigned long ioaddr; static int called = 0; struct Scsi_Host *instance; /* check that this machine has an onboard 5380 */ switch(idprom->id_machtype) { case SM_SUN3|SM_3_50: case SM_SUN3|SM_3_60: break; default: return 0; } if(called) return 0; tpnt->proc_name = "Sun3 5380 SCSI"; /* setup variables */ tpnt->can_queue = (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; tpnt->cmd_per_lun = (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; tpnt->sg_tablesize = (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; if (setup_hostid >= 0) tpnt->this_id = setup_hostid; else { /* use 7 as default */ tpnt->this_id = 7; } ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE); sun3_scsi_regp = (unsigned char *)ioaddr; dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); if((udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs))) == NULL) { printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); return 0; } #ifdef OLDDMA if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) { printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); return 0; } #endif #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) setup_use_tagged_queuing = USE_TAGGED_QUEUING; #endif instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) return 0; default_instance = instance; instance->io_port = (unsigned long) ioaddr; instance->irq = IRQ_SUN3_SCSI; NCR5380_init(instance, 0); instance->n_io_port = 32; ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (request_irq(instance->irq, scsi_sun3_intr, 0, "Sun3SCSI-5380", instance)) { #ifndef REAL_DMA printk("scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; #else printk("scsi%d: IRQ%d not free, bailing out\n", instance->host_no, instance->irq); return 0; #endif } printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk ("s disabled"); else printk (" %d", instance->irq); printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", instance->can_queue, instance->cmd_per_lun, SUN3SCSI_PUBLIC_RELEASE); printk("\nscsi%d:", instance->host_no); NCR5380_print_options(instance); printk("\n"); dregs->csr = 0; udelay(SUN3_DMA_DELAY); dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; udelay(SUN3_DMA_DELAY); dregs->fifo_count = 0; called = 1; #ifdef RESET_BOOT sun3_scsi_reset_boot(instance); #endif return 1; } int sun3scsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) free_irq(shpnt->irq, shpnt); iounmap((void *)sun3_scsi_regp); NCR5380_exit(shpnt); return 0; } #ifdef RESET_BOOT /* * Our 'bus reset on boot' function */ static void sun3_scsi_reset_boot(struct Scsi_Host *instance) { unsigned long end; NCR5380_local_declare(); NCR5380_setup(instance); /* * Do a SCSI reset to clean up the bus 
during initialization. No * messing with the queues, interrupts, or locks necessary here. */ printk( "Sun3 SCSI: resetting the SCSI bus..." ); /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ // sun3_disable_irq( IRQ_SUN3_SCSI ); /* get in phase */ NCR5380_write( TARGET_COMMAND_REG, PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); /* assert RST */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); /* The min. reset hold time is 25us, so 40us should be enough */ udelay( 50 ); /* reset RST and interrupt */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); NCR5380_read( RESET_PARITY_INTERRUPT_REG ); for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) barrier(); /* switch on SCSI IRQ again */ // sun3_enable_irq( IRQ_SUN3_SCSI ); printk( " done\n" ); } #endif const char * sun3scsi_info (struct Scsi_Host *spnt) { return ""; } // safe bits for the CSR #define CSR_GOOD 0x060f static irqreturn_t scsi_sun3_intr(int irq, void *dummy) { unsigned short csr = dregs->csr; int handled = 0; if(csr & ~CSR_GOOD) { if(csr & CSR_DMA_BUSERR) { printk("scsi%d: bus error in dma\n", default_instance->host_no); } if(csr & CSR_DMA_CONFLICT) { printk("scsi%d: dma conflict\n", default_instance->host_no); } handled = 1; } if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { NCR5380_intr(irq, dummy); handled = 1; } return IRQ_RETVAL(handled); } /* * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk; * reentering NCR5380_print_status seems to have ugly side effects */ /* this doesn't seem to get used at all -- sam */ #if 0 void sun3_sun3_debug (void) { unsigned long flags; NCR5380_local_declare(); if (default_instance) { local_irq_save(flags); NCR5380_print_status(default_instance); local_irq_restore(flags); } } #endif /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) { #ifdef OLDDMA if(write_flag) memcpy(dmabuf, data, count); else { sun3_dma_orig_addr = data; sun3_dma_orig_count = count; } #else void *addr; if(sun3_dma_orig_addr != NULL) dvma_unmap(sun3_dma_orig_addr); // addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); addr = (void *)dvma_map((unsigned long) data, count); sun3_dma_orig_addr = addr; sun3_dma_orig_count = count; #endif dregs->fifo_count = 0; sun3_udc_write(UDC_RESET, UDC_CSR); /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; /* set direction */ if(write_flag) dregs->csr |= CSR_SEND; else dregs->csr &= ~CSR_SEND; /* byte count for fifo */ dregs->fifo_count = count; sun3_udc_write(UDC_RESET, UDC_CSR); /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; if(dregs->fifo_count != count) { printk("scsi%d: fifo_mismatch %04x not %04x\n", default_instance->host_no, dregs->fifo_count, (unsigned int) count); NCR5380_print(default_instance); } /* setup udc */ #ifdef OLDDMA udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8); udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff); #else udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); #endif udc_regs->count = count/2; /* count in words */ udc_regs->mode_hi = UDC_MODE_HIWORD; if(write_flag) { if(count & 1) udc_regs->count++; udc_regs->mode_lo = UDC_MODE_LSEND; udc_regs->rsel = UDC_RSEL_SEND; } else { udc_regs->mode_lo = UDC_MODE_LRECV; udc_regs->rsel = UDC_RSEL_RECV; } /* announce location of regs block */ sun3_udc_write(((dvma_vtob(udc_regs) & 0xff0000) >> 8), UDC_CHN_HI); 
sun3_udc_write((dvma_vtob(udc_regs) & 0xffff), UDC_CHN_LO); /* set dma master on */ sun3_udc_write(0xd, UDC_MODE); /* interrupt enable */ sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); return count; } static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) { unsigned short resid; dregs->udc_addr = 0x32; udelay(SUN3_DMA_DELAY); resid = dregs->udc_data; udelay(SUN3_DMA_DELAY); resid *= 2; return (unsigned long) resid; } static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) { return last_residual; } static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, struct scsi_cmnd *cmd, int write_flag) { if (cmd->request->cmd_type == REQ_TYPE_FS) return wanted; else return 0; } static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) { sun3_udc_write(UDC_CHN_START, UDC_CSR); return 0; } /* clean up after our dma is done */ static int sun3scsi_dma_finish(int write_flag) { unsigned short count; unsigned short fifo; int ret = 0; sun3_dma_active = 0; #if 1 // check to empty the fifo on a read if(!write_flag) { int tmo = 20000; /* .2 sec */ while(1) { if(dregs->csr & CSR_FIFO_EMPTY) break; if(--tmo <= 0) { printk("sun3scsi: fifo failed to empty!\n"); return 1; } udelay(10); } } #endif count = sun3scsi_dma_count(default_instance); #ifdef OLDDMA /* if we've finished a read, copy out the data we read */ if(sun3_dma_orig_addr) { /* check for residual bytes after dma end */ if(count && (NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK))) { printk("scsi%d: sun3_scsi_finish: read overrun baby... ", default_instance->host_no); printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG)); ret = count; } /* copy in what we dma'd no matter what */ memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count); sun3_dma_orig_addr = NULL; } #else fifo = dregs->fifo_count; last_residual = fifo; /* empty bytes from the fifo which didn't make it */ if((!write_flag) && (count - fifo) == 2) { unsigned short data; unsigned char *vaddr; data = dregs->fifo_data; vaddr = (unsigned char *)dvma_btov(sun3_dma_orig_addr); vaddr += (sun3_dma_orig_count - fifo); vaddr[-2] = (data & 0xff00) >> 8; vaddr[-1] = (data & 0xff); } dvma_unmap(sun3_dma_orig_addr); sun3_dma_orig_addr = NULL; #endif sun3_udc_write(UDC_RESET, UDC_CSR); dregs->fifo_count = 0; dregs->csr &= ~CSR_SEND; /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; sun3_dma_setup_done = NULL; return ret; } #include "sun3_NCR5380.c" static struct scsi_host_template driver_template = { .name = SUN3_SCSI_NAME, .detect = sun3scsi_detect, .release = sun3scsi_release, .info = sun3scsi_info, .queuecommand = sun3scsi_queue_command, .eh_abort_handler = sun3scsi_abort, .eh_bus_reset_handler = sun3scsi_bus_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_TABLESIZE, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING }; #include "scsi_module.c" MODULE_LICENSE("GPL");
FenomenalSabderMOD/MOTOE
drivers/scsi/sun3_scsi.c
C
gpl-2.0
14,373
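sun3scsi_dma_finish() drains the FIFO by polling a status bit a bounded number of times before giving up. A generic userspace sketch of that retry-with-timeout shape; fifo_empty() merely counts down here and stands in for the CSR_FIFO_EMPTY test:

```c
#include <stdio.h>
#include <stdbool.h>

static int remaining = 5;

/* Stand-in for the hardware status read; becomes true after a few polls. */
static bool fifo_empty(void)
{
	return --remaining <= 0;
}

static int wait_fifo_empty(int tries)
{
	while (tries-- > 0) {
		if (fifo_empty())
			return 0;
		/* udelay(10) would go here in the real driver */
	}
	return -1;
}

int main(void)
{
	if (wait_fifo_empty(20000))
		printf("fifo failed to empty\n");
	else
		printf("fifo drained\n");
	return 0;
}
```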
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de) * * Most of this code is based on the SDL diagrams published in the 7th ARRL * Computer Networking Conference papers. The diagrams have mistakes in them, * but are mostly correct. Before you modify the code could you read the SDL * diagrams as the code is not obvious and probably very easy to break. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> /* * State machine for state 1, Awaiting Connection State. * The handling of the timer(s) is in file ax25_std_timer.c. * Handling of state 0 and connection release is in ax25.c. */ static int ax25_std_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) { switch (frametype) { case AX25_SABM: ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); break; case AX25_SABME: ax25->modulus = AX25_EMODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW]; ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); break; case AX25_DISC: ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE); break; case AX25_UA: if (pf) { ax25_calculate_rtt(ax25); ax25_stop_t1timer(ax25); ax25_start_t3timer(ax25); ax25_start_idletimer(ax25); ax25->vs = 0; ax25->va = 0; ax25->vr = 0; ax25->state = AX25_STATE_3; ax25->n2count = 0; if (ax25->sk != NULL) { bh_lock_sock(ax25->sk); ax25->sk->sk_state = TCP_ESTABLISHED; /* For WAIT_SABM connections we will produce an accept ready socket here */ if (!sock_flag(ax25->sk, SOCK_DEAD)) ax25->sk->sk_state_change(ax25->sk); bh_unlock_sock(ax25->sk); } } break; case AX25_DM: if (pf) { if (ax25->modulus == AX25_MODULUS) { ax25_disconnect(ax25, ECONNREFUSED); } else { ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; } } break; default: break; } return 0; } /* * State machine for state 2, Awaiting Release State. * The handling of the timer(s) is in file ax25_std_timer.c * Handling of state 0 and connection release is in ax25.c. */ static int ax25_std_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) { switch (frametype) { case AX25_SABM: case AX25_SABME: ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE); break; case AX25_DISC: ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_disconnect(ax25, 0); break; case AX25_DM: case AX25_UA: if (pf) ax25_disconnect(ax25, 0); break; case AX25_I: case AX25_REJ: case AX25_RNR: case AX25_RR: if (pf) ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE); break; default: break; } return 0; } /* * State machine for state 3, Connected State. 
* The handling of the timer(s) is in file ax25_std_timer.c * Handling of state 0 and connection release is in ax25.c. */ static int ax25_std_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) { int queued = 0; switch (frametype) { case AX25_SABM: case AX25_SABME: if (frametype == AX25_SABM) { ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; } else { ax25->modulus = AX25_EMODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW]; } ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_stop_t1timer(ax25); ax25_stop_t2timer(ax25); ax25_start_t3timer(ax25); ax25_start_idletimer(ax25); ax25->condition = 0x00; ax25->vs = 0; ax25->va = 0; ax25->vr = 0; ax25_requeue_frames(ax25); break; case AX25_DISC: ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_disconnect(ax25, 0); break; case AX25_DM: ax25_disconnect(ax25, ECONNRESET); break; case AX25_RR: case AX25_RNR: if (frametype == AX25_RR) ax25->condition &= ~AX25_COND_PEER_RX_BUSY; else ax25->condition |= AX25_COND_PEER_RX_BUSY; if (type == AX25_COMMAND && pf) ax25_std_enquiry_response(ax25); if (ax25_validate_nr(ax25, nr)) { ax25_check_iframes_acked(ax25, nr); } else { ax25_std_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; case AX25_REJ: ax25->condition &= ~AX25_COND_PEER_RX_BUSY; if (type == AX25_COMMAND && pf) ax25_std_enquiry_response(ax25); if (ax25_validate_nr(ax25, nr)) { ax25_frames_acked(ax25, nr); ax25_calculate_rtt(ax25); ax25_stop_t1timer(ax25); ax25_start_t3timer(ax25); ax25_requeue_frames(ax25); } else { ax25_std_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; case AX25_I: if (!ax25_validate_nr(ax25, nr)) { ax25_std_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; break; } if (ax25->condition & AX25_COND_PEER_RX_BUSY) { ax25_frames_acked(ax25, nr); } else { ax25_check_iframes_acked(ax25, nr); } if (ax25->condition & AX25_COND_OWN_RX_BUSY) { if (pf) ax25_std_enquiry_response(ax25); break; } if (ns == ax25->vr) { ax25->vr = (ax25->vr + 1) % ax25->modulus; queued = ax25_rx_iframe(ax25, skb); if (ax25->condition & AX25_COND_OWN_RX_BUSY) ax25->vr = ns; /* ax25->vr - 1 */ ax25->condition &= ~AX25_COND_REJECT; if (pf) { ax25_std_enquiry_response(ax25); } else { if (!(ax25->condition & AX25_COND_ACK_PENDING)) { ax25->condition |= AX25_COND_ACK_PENDING; ax25_start_t2timer(ax25); } } } else { if (ax25->condition & AX25_COND_REJECT) { if (pf) ax25_std_enquiry_response(ax25); } else { ax25->condition |= AX25_COND_REJECT; ax25_send_control(ax25, AX25_REJ, pf, AX25_RESPONSE); ax25->condition &= ~AX25_COND_ACK_PENDING; } } break; case AX25_FRMR: case AX25_ILLEGAL: ax25_std_establish_data_link(ax25); ax25->state = AX25_STATE_1; break; default: break; } return queued; } /* * State machine for state 4, Timer Recovery State. * The handling of the timer(s) is in file ax25_std_timer.c * Handling of state 0 and connection release is in ax25.c. 
*/ static int ax25_std_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) { int queued = 0; switch (frametype) { case AX25_SABM: case AX25_SABME: if (frametype == AX25_SABM) { ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; } else { ax25->modulus = AX25_EMODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW]; } ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_stop_t1timer(ax25); ax25_stop_t2timer(ax25); ax25_start_t3timer(ax25); ax25_start_idletimer(ax25); ax25->condition = 0x00; ax25->vs = 0; ax25->va = 0; ax25->vr = 0; ax25->state = AX25_STATE_3; ax25->n2count = 0; ax25_requeue_frames(ax25); break; case AX25_DISC: ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_disconnect(ax25, 0); break; case AX25_DM: ax25_disconnect(ax25, ECONNRESET); break; case AX25_RR: case AX25_RNR: if (frametype == AX25_RR) ax25->condition &= ~AX25_COND_PEER_RX_BUSY; else ax25->condition |= AX25_COND_PEER_RX_BUSY; if (type == AX25_RESPONSE && pf) { ax25_stop_t1timer(ax25); ax25->n2count = 0; if (ax25_validate_nr(ax25, nr)) { ax25_frames_acked(ax25, nr); if (ax25->vs == ax25->va) { ax25_start_t3timer(ax25); ax25->state = AX25_STATE_3; } else { ax25_requeue_frames(ax25); } } else { ax25_std_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; } if (type == AX25_COMMAND && pf) ax25_std_enquiry_response(ax25); if (ax25_validate_nr(ax25, nr)) { ax25_frames_acked(ax25, nr); } else { ax25_std_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; case AX25_REJ: ax25->condition &= ~AX25_COND_PEER_RX_BUSY; if (pf && type == AX25_RESPONSE) { ax25_stop_t1timer(ax25); ax25->n2count = 0; if (ax25_validate_nr(ax25, nr)) { ax25_frames_acked(ax25, nr); if (ax25->vs == ax25->va) { ax25_start_t3timer(ax25); ax25->state = AX25_STATE_3; } else { ax25_requeue_frames(ax25); } } else { ax25_std_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; } if (type == AX25_COMMAND && pf) ax25_std_enquiry_response(ax25); if (ax25_validate_nr(ax25, nr)) { ax25_frames_acked(ax25, nr); ax25_requeue_frames(ax25); } else { ax25_std_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; case AX25_I: if (!ax25_validate_nr(ax25, nr)) { ax25_std_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; break; } ax25_frames_acked(ax25, nr); if (ax25->condition & AX25_COND_OWN_RX_BUSY) { if (pf) ax25_std_enquiry_response(ax25); break; } if (ns == ax25->vr) { ax25->vr = (ax25->vr + 1) % ax25->modulus; queued = ax25_rx_iframe(ax25, skb); if (ax25->condition & AX25_COND_OWN_RX_BUSY) ax25->vr = ns; /* ax25->vr - 1 */ ax25->condition &= ~AX25_COND_REJECT; if (pf) { ax25_std_enquiry_response(ax25); } else { if (!(ax25->condition & AX25_COND_ACK_PENDING)) { ax25->condition |= AX25_COND_ACK_PENDING; ax25_start_t2timer(ax25); } } } else { if (ax25->condition & AX25_COND_REJECT) { if (pf) ax25_std_enquiry_response(ax25); } else { ax25->condition |= AX25_COND_REJECT; ax25_send_control(ax25, AX25_REJ, pf, AX25_RESPONSE); ax25->condition &= ~AX25_COND_ACK_PENDING; } } break; case AX25_FRMR: case AX25_ILLEGAL: ax25_std_establish_data_link(ax25); ax25->state = AX25_STATE_1; break; default: break; } return queued; } /* * Higher level upcall for a LAPB frame */ int ax25_std_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type) { int queued = 0, frametype, ns, nr, pf; frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); switch (ax25->state) { case AX25_STATE_1: queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type); 
break; case AX25_STATE_2: queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type); break; case AX25_STATE_3: queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type); break; case AX25_STATE_4: queued = ax25_std_state4_machine(ax25, skb, frametype, ns, nr, pf, type); break; } ax25_kick(ax25); return queued; }
rock12/ALPS.L0.MT6580.SMT.DEV.P1.4_K80_KERNEL
net/ax25/ax25_std_in.c
C
gpl-2.0
11,319
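The AX.25 receive path dispatches each decoded frame to a per-state handler and lets the handler pick the next state. A toy version of that dispatch structure, with invented states and frame types far simpler than the real protocol:

```c
#include <stdio.h>

enum state { ST_AWAIT_CONN, ST_CONNECTED, ST_DISCONNECTED };
enum frame { FR_UA, FR_DM, FR_DISC, FR_I };

static enum state await_conn(enum frame f)
{
	if (f == FR_UA)
		return ST_CONNECTED;     /* connect confirmed */
	if (f == FR_DM)
		return ST_DISCONNECTED;  /* connect refused   */
	return ST_AWAIT_CONN;
}

static enum state connected(enum frame f)
{
	if (f == FR_DISC || f == FR_DM)
		return ST_DISCONNECTED;
	return ST_CONNECTED;             /* I-frames keep the link up */
}

/* Dispatch on current state, like ax25_std_frame_in(). */
static enum state frame_in(enum state s, enum frame f)
{
	switch (s) {
	case ST_AWAIT_CONN:   return await_conn(f);
	case ST_CONNECTED:    return connected(f);
	case ST_DISCONNECTED: return ST_DISCONNECTED;
	}
	return s;
}

int main(void)
{
	enum state s = ST_AWAIT_CONN;

	s = frame_in(s, FR_UA);    /* connection comes up   */
	s = frame_in(s, FR_I);     /* data flows            */
	s = frame_in(s, FR_DISC);  /* peer tears it down    */
	printf("final state: %d\n", s);
	return 0;
}
```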
#include "cache.h" static const char *alias_key; static char *alias_val; static int alias_lookup_cb(const char *k, const char *v, void *cb __used) { if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { if (!v) return config_error_nonbool(k); alias_val = strdup(v); return 0; } return 0; } char *alias_lookup(const char *alias) { alias_key = alias; alias_val = NULL; perf_config(alias_lookup_cb, NULL); return alias_val; } int split_cmdline(char *cmdline, const char ***argv) { int src, dst, count = 0, size = 16; char quoted = 0; *argv = malloc(sizeof(char*) * size); /* split alias_string */ (*argv)[count++] = cmdline; for (src = dst = 0; cmdline[src];) { char c = cmdline[src]; if (!quoted && isspace(c)) { cmdline[dst++] = 0; while (cmdline[++src] && isspace(cmdline[src])) ; /* skip */ if (count >= size) { size += 16; *argv = realloc(*argv, sizeof(char*) * size); } (*argv)[count++] = cmdline + dst; } else if (!quoted && (c == '\'' || c == '"')) { quoted = c; src++; } else if (c == quoted) { quoted = 0; src++; } else { if (c == '\\' && quoted != '\'') { src++; c = cmdline[src]; if (!c) { free(*argv); *argv = NULL; return error("cmdline ends with \\"); } } cmdline[dst++] = c; src++; } } cmdline[dst] = 0; if (quoted) { free(*argv); *argv = NULL; return error("unclosed quote"); } return count; }
jamison904/kernel_m919_aokp
tools/perf/util/alias.c
C
gpl-2.0
1,454
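split_cmdline() above tokenises the alias string in place: it writes NUL bytes into the caller's buffer and records pointers into it rather than copying words out. A short sketch of the same in-place design, with quoting and backslash handling omitted:

```c
#include <stdio.h>
#include <ctype.h>

/* In-place tokenizer in the spirit of split_cmdline(): NUL-terminate each
 * word inside the original buffer and collect pointers into it. */
static int split_words(char *s, char **argv, int max)
{
	int argc = 0;

	while (*s && argc < max) {
		/* eat and NUL out leading whitespace */
		while (*s && isspace((unsigned char)*s))
			*s++ = '\0';
		if (!*s)
			break;
		argv[argc++] = s;
		/* skip over the word itself */
		while (*s && !isspace((unsigned char)*s))
			s++;
	}
	return argc;
}

int main(void)
{
	char line[] = "record -e cycles sleep 1";
	char *argv[8];
	int i, argc = split_words(line, argv, 8);

	for (i = 0; i < argc; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	return 0;
}
```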
/********************************************************************* * * Filename: ircomm_lmp.c * Version: 1.0 * Description: Interface between IrCOMM and IrLMP * Status: Stable * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun Jun 6 20:48:27 1999 * Modified at: Sun Dec 12 13:44:17 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * Sources: Previous IrLPT work by Thomas Davis * * Copyright (c) 1999 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/init.h> #include <linux/gfp.h> #include <net/irda/irda.h> #include <net/irda/irlmp.h> #include <net/irda/iriap.h> #include <net/irda/irda_device.h> /* struct irda_skb_cb */ #include <net/irda/ircomm_event.h> #include <net/irda/ircomm_lmp.h> /* * Function ircomm_lmp_connect_request (self, userdata) * * * */ static int ircomm_lmp_connect_request(struct ircomm_cb *self, struct sk_buff *userdata, struct ircomm_info *info) { int ret = 0; IRDA_DEBUG(0, "%s()\n", __func__ ); /* Don't forget to refcount it - should be NULL anyway */ if(userdata) skb_get(userdata); ret = irlmp_connect_request(self->lsap, info->dlsap_sel, info->saddr, info->daddr, NULL, userdata); return ret; } /* * Function ircomm_lmp_connect_response (self, skb) * * * */ static int ircomm_lmp_connect_response(struct ircomm_cb *self, struct sk_buff *userdata) { struct sk_buff *tx_skb; IRDA_DEBUG(0, "%s()\n", __func__ ); /* Any userdata supplied? */ if (userdata == NULL) { tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; /* Reserve space for MUX and LAP header */ skb_reserve(tx_skb, LMP_MAX_HEADER); } else { /* * Check that the client has reserved enough space for * headers */ IRDA_ASSERT(skb_headroom(userdata) >= LMP_MAX_HEADER, return -1;); /* Don't forget to refcount it - should be NULL anyway */ skb_get(userdata); tx_skb = userdata; } return irlmp_connect_response(self->lsap, tx_skb); } static int ircomm_lmp_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata, struct ircomm_info *info) { struct sk_buff *tx_skb; int ret; IRDA_DEBUG(0, "%s()\n", __func__ ); if (!userdata) { tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); if (!tx_skb) return -ENOMEM; /* Reserve space for MUX and LAP header */ skb_reserve(tx_skb, LMP_MAX_HEADER); userdata = tx_skb; } else { /* Don't forget to refcount it - should be NULL anyway */ skb_get(userdata); } ret = irlmp_disconnect_request(self->lsap, userdata); return ret; } /* * Function ircomm_lmp_flow_control (skb) * * This function is called when a data frame we have sent to IrLAP has * been deallocated. 
We do this to make sure we don't flood IrLAP with * frames, since we are not using the IrTTP flow control mechanism */ static void ircomm_lmp_flow_control(struct sk_buff *skb) { struct irda_skb_cb *cb; struct ircomm_cb *self; int line; IRDA_ASSERT(skb != NULL, return;); cb = (struct irda_skb_cb *) skb->cb; IRDA_DEBUG(2, "%s()\n", __func__ ); line = cb->line; self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL); if (!self) { IRDA_DEBUG(2, "%s(), didn't find myself\n", __func__ ); return; } IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); self->pkt_count--; if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) { IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __func__ ); self->flow_status = FLOW_START; if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, self, FLOW_START); } } /* * Function ircomm_lmp_data_request (self, userdata) * * Send data frame to peer device * */ static int ircomm_lmp_data_request(struct ircomm_cb *self, struct sk_buff *skb, int not_used) { struct irda_skb_cb *cb; int ret; IRDA_ASSERT(skb != NULL, return -1;); cb = (struct irda_skb_cb *) skb->cb; cb->line = self->line; IRDA_DEBUG(4, "%s(), sending frame\n", __func__ ); /* Don't forget to refcount it - see ircomm_tty_do_softint() */ skb_get(skb); skb_orphan(skb); skb->destructor = ircomm_lmp_flow_control; if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) { IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __func__ ); self->flow_status = FLOW_STOP; if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, self, FLOW_STOP); } ret = irlmp_data_request(self->lsap, skb); if (ret) { IRDA_ERROR("%s(), failed\n", __func__); /* irlmp_data_request already free the packet */ } return ret; } /* * Function ircomm_lmp_data_indication (instance, sap, skb) * * Incoming data which we must deliver to the state machine, to check * we are still connected. */ static int ircomm_lmp_data_indication(void *instance, void *sap, struct sk_buff *skb) { struct ircomm_cb *self = (struct ircomm_cb *) instance; IRDA_DEBUG(4, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); ircomm_do_event(self, IRCOMM_LMP_DATA_INDICATION, skb, NULL); /* Drop reference count - see ircomm_tty_data_indication(). */ dev_kfree_skb(skb); return 0; } /* * Function ircomm_lmp_connect_confirm (instance, sap, qos, max_sdu_size, * max_header_size, skb) * * Connection has been confirmed by peer device * */ static void ircomm_lmp_connect_confirm(void *instance, void *sap, struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size, struct sk_buff *skb) { struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(qos != NULL, return;); info.max_data_size = max_seg_size; info.max_header_size = max_header_size; info.qos = qos; ircomm_do_event(self, IRCOMM_LMP_CONNECT_CONFIRM, skb, &info); /* Drop reference count - see ircomm_tty_connect_confirm(). 
*/ dev_kfree_skb(skb); } /* * Function ircomm_lmp_connect_indication (instance, sap, qos, max_sdu_size, * max_header_size, skb) * * Peer device wants to make a connection with us * */ static void ircomm_lmp_connect_indication(void *instance, void *sap, struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size, struct sk_buff *skb) { struct ircomm_cb *self = (struct ircomm_cb *)instance; struct ircomm_info info; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(qos != NULL, return;); info.max_data_size = max_seg_size; info.max_header_size = max_header_size; info.qos = qos; ircomm_do_event(self, IRCOMM_LMP_CONNECT_INDICATION, skb, &info); /* Drop reference count - see ircomm_tty_connect_indication(). */ dev_kfree_skb(skb); } /* * Function ircomm_lmp_disconnect_indication (instance, sap, reason, skb) * * Peer device has closed the connection, or the link went down for some * other reason */ static void ircomm_lmp_disconnect_indication(void *instance, void *sap, LM_REASON reason, struct sk_buff *skb) { struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; IRDA_DEBUG(0, "%s()\n", __func__ ); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); info.reason = reason; ircomm_do_event(self, IRCOMM_LMP_DISCONNECT_INDICATION, skb, &info); /* Drop reference count - see ircomm_tty_disconnect_indication(). */ if(skb) dev_kfree_skb(skb); } /* * Function ircomm_open_lsap (self) * * Open LSAP. This function will only be used when using "raw" services * */ int ircomm_open_lsap(struct ircomm_cb *self) { notify_t notify; IRDA_DEBUG(0, "%s()\n", __func__ ); /* Register callbacks */ irda_notify_init(&notify); notify.data_indication = ircomm_lmp_data_indication; notify.connect_confirm = ircomm_lmp_connect_confirm; notify.connect_indication = ircomm_lmp_connect_indication; notify.disconnect_indication = ircomm_lmp_disconnect_indication; notify.instance = self; strlcpy(notify.name, "IrCOMM", sizeof(notify.name)); self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0); if (!self->lsap) { IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __func__ ); return -1; } self->slsap_sel = self->lsap->slsap_sel; /* * Initialize the call-table for issuing commands */ self->issue.data_request = ircomm_lmp_data_request; self->issue.connect_request = ircomm_lmp_connect_request; self->issue.connect_response = ircomm_lmp_connect_response; self->issue.disconnect_request = ircomm_lmp_disconnect_request; return 0; }
AdrianoMartins/android_kernel_lge_v500
net/irda/ircomm/ircomm_lmp.c
C
gpl-2.0
9,978
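ircomm_lmp_data_request() and its skb destructor implement watermark flow control: the producer is stopped once more than 7 buffers are in flight and restarted when completions bring the count under 2. The sketch below reuses those thresholds, but everything else is invented for illustration:

```c
#include <stdio.h>
#include <stdbool.h>

struct link {
	int in_flight;   /* plays the role of self->pkt_count */
	bool stopped;    /* plays the role of self->flow_status */
};

/* Called when a buffer is handed to the lower layer. */
static void tx_submit(struct link *l)
{
	if (++l->in_flight > 7 && !l->stopped) {
		l->stopped = true;
		printf("flow: stop producer (%d in flight)\n", l->in_flight);
	}
}

/* Called when the lower layer releases a buffer (the skb destructor). */
static void tx_complete(struct link *l)
{
	if (--l->in_flight < 2 && l->stopped) {
		l->stopped = false;
		printf("flow: start producer (%d in flight)\n", l->in_flight);
	}
}

int main(void)
{
	struct link l = { 0, false };
	int i;

	for (i = 0; i < 10; i++)
		tx_submit(&l);
	for (i = 0; i < 10; i++)
		tx_complete(&l);
	return 0;
}
```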
/* * Firmware loader for ETRAX FS IO-Processor * * Copyright (C) 2004 Axis Communications AB */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/firmware.h> #include <hwregs/reg_rdwr.h> #include <hwregs/reg_map.h> #include <hwregs/iop/iop_reg_space.h> #include <hwregs/iop/iop_mpu_macros.h> #include <hwregs/iop/iop_mpu_defs.h> #include <hwregs/iop/iop_spu_defs.h> #include <hwregs/iop/iop_sw_cpu_defs.h> #define IOP_TIMEOUT 100 #error "This driver is broken with regard to its driver core usage." #error "Please contact <greg@kroah.com> for details on how to fix it properly." static struct device iop_spu_device[2] = { { .init_name = "iop-spu0", }, { .init_name = "iop-spu1", }, }; static struct device iop_mpu_device = { .init_name = "iop-mpu", }; static int wait_mpu_idle(void) { reg_iop_mpu_r_stat mpu_stat; unsigned int timeout = IOP_TIMEOUT; do { mpu_stat = REG_RD(iop_mpu, regi_iop_mpu, r_stat); } while (mpu_stat.instr_reg_busy == regk_iop_mpu_yes && --timeout > 0); if (timeout == 0) { printk(KERN_ERR "Timeout waiting for MPU to be idle\n"); return -EBUSY; } return 0; } int iop_fw_load_spu(const unsigned char *fw_name, unsigned int spu_inst) { reg_iop_sw_cpu_rw_mc_ctrl mc_ctrl = { .wr_spu0_mem = regk_iop_sw_cpu_no, .wr_spu1_mem = regk_iop_sw_cpu_no, .size = 4, .cmd = regk_iop_sw_cpu_reg_copy, .keep_owner = regk_iop_sw_cpu_yes }; reg_iop_spu_rw_ctrl spu_ctrl = { .en = regk_iop_spu_no, .fsm = regk_iop_spu_no, }; reg_iop_sw_cpu_r_mc_stat mc_stat; const struct firmware *fw_entry; u32 *data; unsigned int timeout; int retval, i; if (spu_inst > 1) return -ENODEV; /* get firmware */ retval = request_firmware(&fw_entry, fw_name, &iop_spu_device[spu_inst]); if (retval != 0) { printk(KERN_ERR "iop_load_spu: Failed to load firmware \"%s\"\n", fw_name); return retval; } data = (u32 *) fw_entry->data; /* acquire ownership of memory controller */ switch (spu_inst) { case 0: mc_ctrl.wr_spu0_mem = regk_iop_sw_cpu_yes; REG_WR(iop_spu, regi_iop_spu0, rw_ctrl, spu_ctrl); break; case 1: mc_ctrl.wr_spu1_mem = regk_iop_sw_cpu_yes; REG_WR(iop_spu, regi_iop_spu1, rw_ctrl, spu_ctrl); break; } timeout = IOP_TIMEOUT; do { REG_WR(iop_sw_cpu, regi_iop_sw_cpu, rw_mc_ctrl, mc_ctrl); mc_stat = REG_RD(iop_sw_cpu, regi_iop_sw_cpu, r_mc_stat); } while (mc_stat.owned_by_cpu == regk_iop_sw_cpu_no && --timeout > 0); if (timeout == 0) { printk(KERN_ERR "Timeout waiting to acquire MC\n"); retval = -EBUSY; goto out; } /* write to SPU memory */ for (i = 0; i < (fw_entry->size/4); i++) { switch (spu_inst) { case 0: REG_WR_INT(iop_spu, regi_iop_spu0, rw_seq_pc, (i*4)); break; case 1: REG_WR_INT(iop_spu, regi_iop_spu1, rw_seq_pc, (i*4)); break; } REG_WR_INT(iop_sw_cpu, regi_iop_sw_cpu, rw_mc_data, *data); data++; } /* release ownership of memory controller */ (void) REG_RD(iop_sw_cpu, regi_iop_sw_cpu, rs_mc_data); out: release_firmware(fw_entry); return retval; } int iop_fw_load_mpu(unsigned char *fw_name) { const unsigned int start_addr = 0; reg_iop_mpu_rw_ctrl mpu_ctrl; const struct firmware *fw_entry; u32 *data; int retval, i; /* get firmware */ retval = request_firmware(&fw_entry, fw_name, &iop_mpu_device); if (retval != 0) { printk(KERN_ERR "iop_load_spu: Failed to load firmware \"%s\"\n", fw_name); return retval; } data = (u32 *) fw_entry->data; /* disable MPU */ mpu_ctrl.en = regk_iop_mpu_no; REG_WR(iop_mpu, regi_iop_mpu, rw_ctrl, mpu_ctrl); /* put start address in R0 */ REG_WR_VECT(iop_mpu, regi_iop_mpu, rw_r, 0, start_addr); /* write to memory by executing 'SWX 
i, 4, R0' for each word */ if ((retval = wait_mpu_idle()) != 0) goto out; REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_SWX_IIR_INSTR(0, 4, 0)); for (i = 0; i < (fw_entry->size / 4); i++) { REG_WR_INT(iop_mpu, regi_iop_mpu, rw_immediate, *data); if ((retval = wait_mpu_idle()) != 0) goto out; data++; } out: release_firmware(fw_entry); return retval; } int iop_start_mpu(unsigned int start_addr) { reg_iop_mpu_rw_ctrl mpu_ctrl = { .en = regk_iop_mpu_yes }; int retval; /* disable MPU */ if ((retval = wait_mpu_idle()) != 0) goto out; REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_HALT()); if ((retval = wait_mpu_idle()) != 0) goto out; /* set PC and wait for it to bite */ if ((retval = wait_mpu_idle()) != 0) goto out; REG_WR_INT(iop_mpu, regi_iop_mpu, rw_instr, MPU_BA_I(start_addr)); if ((retval = wait_mpu_idle()) != 0) goto out; /* make sure the MPU starts executing with interrupts disabled */ REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_DI()); if ((retval = wait_mpu_idle()) != 0) goto out; /* enable MPU */ REG_WR(iop_mpu, regi_iop_mpu, rw_ctrl, mpu_ctrl); out: return retval; } static int __init iop_fw_load_init(void) { #if 0 /* * static struct devices can not be added directly to sysfs by ignoring * the driver model infrastructure. To fix this properly, please use * the platform_bus to register these devices to be able to properly * use the firmware infrastructure. */ device_initialize(&iop_spu_device[0]); kobject_set_name(&iop_spu_device[0].kobj, "iop-spu0"); kobject_add(&iop_spu_device[0].kobj); device_initialize(&iop_spu_device[1]); kobject_set_name(&iop_spu_device[1].kobj, "iop-spu1"); kobject_add(&iop_spu_device[1].kobj); device_initialize(&iop_mpu_device); kobject_set_name(&iop_mpu_device.kobj, "iop-mpu"); kobject_add(&iop_mpu_device.kobj); #endif return 0; } static void __exit iop_fw_load_exit(void) { } module_init(iop_fw_load_init); module_exit(iop_fw_load_exit); MODULE_DESCRIPTION("ETRAX FS IO-Processor Firmware Loader"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(iop_fw_load_spu); EXPORT_SYMBOL(iop_fw_load_mpu); EXPORT_SYMBOL(iop_start_mpu);
zaclimon/android_kernel_samsung_kylepro
arch/cris/arch-v32/drivers/iop_fw_load.c
C
gpl-2.0
5,989
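
The loader above exposes three entry points (iop_fw_load_spu, iop_fw_load_mpu, iop_start_mpu), and their intended use is easiest to see from a caller. The sketch below is hypothetical: the demo module and the firmware blob names ("iop_spu.fw", "iop_mpu.fw") are assumptions and do not come from the repository; only the three iop_* functions and their signatures are taken from the file above.

/*
 * Hypothetical consumer of the IOP firmware loader; a minimal sketch only.
 * Firmware file names below are placeholders, not blobs from the tree.
 */
#include <linux/module.h>
#include <linux/kernel.h>

/* Normally provided by a header; repeated here so the sketch is self-contained. */
extern int iop_fw_load_spu(const unsigned char *fw_name, unsigned int spu_inst);
extern int iop_fw_load_mpu(unsigned char *fw_name);
extern int iop_start_mpu(unsigned int start_addr);

static const unsigned char spu_fw_name[] = "iop_spu.fw";  /* assumed blob name */
static unsigned char mpu_fw_name[] = "iop_mpu.fw";        /* assumed blob name */

static int __init iop_fw_demo_init(void)
{
        int err;

        /* load code into both SPU instances, then into the MPU */
        err = iop_fw_load_spu(spu_fw_name, 0);
        if (err)
                return err;
        err = iop_fw_load_spu(spu_fw_name, 1);
        if (err)
                return err;
        err = iop_fw_load_mpu(mpu_fw_name);
        if (err)
                return err;

        /* start the MPU executing at the beginning of its memory */
        return iop_start_mpu(0);
}

static void __exit iop_fw_demo_exit(void)
{
}

module_init(iop_fw_demo_init);
module_exit(iop_fw_demo_exit);
MODULE_LICENSE("GPL");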
/* * USB Serial Converter Generic functions * * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com) * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/uaccess.h> #include <linux/kfifo.h> #include <linux/serial.h> static int debug; #ifdef CONFIG_USB_SERIAL_GENERIC static int generic_probe(struct usb_interface *interface, const struct usb_device_id *id); static __u16 vendor = 0x05f9; static __u16 product = 0xffff; module_param(vendor, ushort, 0); MODULE_PARM_DESC(vendor, "User specified USB idVendor"); module_param(product, ushort, 0); MODULE_PARM_DESC(product, "User specified USB idProduct"); static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ /* we want to look at all devices, as the vendor/product id can change * depending on the command line argument */ static const struct usb_device_id generic_serial_ids[] = { {.driver_info = 42}, {} }; static struct usb_driver generic_driver = { .name = "usbserial_generic", .probe = generic_probe, .disconnect = usb_serial_disconnect, .id_table = generic_serial_ids, .no_dynamic_id = 1, }; /* All of the device info needed for the Generic Serial Converter */ struct usb_serial_driver usb_serial_generic_device = { .driver = { .owner = THIS_MODULE, .name = "generic", }, .id_table = generic_device_ids, .usb_driver = &generic_driver, .num_ports = 1, .disconnect = usb_serial_generic_disconnect, .release = usb_serial_generic_release, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .resume = usb_serial_generic_resume, }; static int generic_probe(struct usb_interface *interface, const struct usb_device_id *id) { const struct usb_device_id *id_pattern; id_pattern = usb_match_id(interface, generic_device_ids); if (id_pattern != NULL) return usb_serial_probe(interface, id); return -ENODEV; } #endif int usb_serial_generic_register(int _debug) { int retval = 0; debug = _debug; #ifdef CONFIG_USB_SERIAL_GENERIC generic_device_ids[0].idVendor = vendor; generic_device_ids[0].idProduct = product; generic_device_ids[0].match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT; /* register our generic driver with ourselves */ retval = usb_serial_register(&usb_serial_generic_device); if (retval) goto exit; retval = usb_register(&generic_driver); if (retval) usb_serial_deregister(&usb_serial_generic_device); exit: #endif return retval; } void usb_serial_generic_deregister(void) { #ifdef CONFIG_USB_SERIAL_GENERIC /* remove our generic driver */ usb_deregister(&generic_driver); usb_serial_deregister(&usb_serial_generic_device); #endif } int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port) { int result = 0; unsigned long flags; dbg("%s - port %d", __func__, port->number); /* clear the throttle flags */ spin_lock_irqsave(&port->lock, flags); port->throttled = 0; port->throttle_req = 0; spin_unlock_irqrestore(&port->lock, flags); /* if we have a bulk endpoint, start reading from it */ if (port->bulk_in_size) result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL); return result; } 
EXPORT_SYMBOL_GPL(usb_serial_generic_open); static void generic_cleanup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; unsigned long flags; int i; dbg("%s - port %d", __func__, port->number); if (serial->dev) { /* shutdown any bulk transfers that might be going on */ if (port->bulk_out_size) { usb_kill_urb(port->write_urb); for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) usb_kill_urb(port->write_urbs[i]); spin_lock_irqsave(&port->lock, flags); kfifo_reset_out(&port->write_fifo); spin_unlock_irqrestore(&port->lock, flags); } if (port->bulk_in_size) usb_kill_urb(port->read_urb); } } void usb_serial_generic_close(struct usb_serial_port *port) { dbg("%s - port %d", __func__, port->number); generic_cleanup(port); } EXPORT_SYMBOL_GPL(usb_serial_generic_close); int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size) { return kfifo_out_locked(&port->write_fifo, dest, size, &port->lock); } /** * usb_serial_generic_write_start - kick off an URB write * @port: Pointer to the &struct usb_serial_port data * * Returns zero on success, or a negative errno value */ static int usb_serial_generic_write_start(struct usb_serial_port *port) { struct urb *urb; int count, result; unsigned long flags; int i; if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags)) return 0; retry: spin_lock_irqsave(&port->lock, flags); if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) { clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); spin_unlock_irqrestore(&port->lock, flags); return 0; } i = (int)find_first_bit(&port->write_urbs_free, ARRAY_SIZE(port->write_urbs)); spin_unlock_irqrestore(&port->lock, flags); urb = port->write_urbs[i]; count = port->serial->type->prepare_write_buffer(port, urb->transfer_buffer, port->bulk_out_size); urb->transfer_buffer_length = count; usb_serial_debug_data(debug, &port->dev, __func__, count, urb->transfer_buffer); spin_lock_irqsave(&port->lock, flags); port->tx_bytes += count; spin_unlock_irqrestore(&port->lock, flags); clear_bit(i, &port->write_urbs_free); result = usb_submit_urb(urb, GFP_ATOMIC); if (result) { if (!port->port.console) { dev_err(&port->dev, "%s - error submitting urb: %d\n", __func__, result); } set_bit(i, &port->write_urbs_free); spin_lock_irqsave(&port->lock, flags); port->tx_bytes -= count; spin_unlock_irqrestore(&port->lock, flags); clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); return result; } goto retry; /* try sending off another urb */ } /** * usb_serial_generic_write - generic write function for serial USB devices * @tty: Pointer to &struct tty_struct for the device * @port: Pointer to the &usb_serial_port structure for the device * @buf: Pointer to the data to write * @count: Number of bytes to write * * Returns the number of characters actually written, which may be anything * from zero to @count. If an error occurs, it returns the negative errno * value. 
*/ int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { int result; dbg("%s - port %d", __func__, port->number); /* only do something if we have a bulk out endpoint */ if (!port->bulk_out_size) return -ENODEV; if (!count) return 0; count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock); result = usb_serial_generic_write_start(port); if (result) return result; return count; } EXPORT_SYMBOL_GPL(usb_serial_generic_write); int usb_serial_generic_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; unsigned long flags; int room; dbg("%s - port %d", __func__, port->number); if (!port->bulk_out_size) return 0; spin_lock_irqsave(&port->lock, flags); room = kfifo_avail(&port->write_fifo); spin_unlock_irqrestore(&port->lock, flags); dbg("%s - returns %d", __func__, room); return room; } int usb_serial_generic_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; unsigned long flags; int chars; dbg("%s - port %d", __func__, port->number); if (!port->bulk_out_size) return 0; spin_lock_irqsave(&port->lock, flags); chars = kfifo_len(&port->write_fifo) + port->tx_bytes; spin_unlock_irqrestore(&port->lock, flags); dbg("%s - returns %d", __func__, chars); return chars; } int usb_serial_generic_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags) { int result; result = usb_submit_urb(port->read_urb, mem_flags); if (result && result != -EPERM) { dev_err(&port->dev, "%s - error submitting urb: %d\n", __func__, result); } return result; } EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urb); void usb_serial_generic_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct tty_struct *tty; char *ch = (char *)urb->transfer_buffer; int i; if (!urb->actual_length) return; tty = tty_port_tty_get(&port->port); if (!tty) return; /* The per character mucking around with sysrq path it too slow for stuff like 3G modems, so shortcircuit it in the 99.9999999% of cases where the USB serial is not a console anyway */ if (!port->port.console || !port->sysrq) tty_insert_flip_string(tty, ch, urb->actual_length); else { for (i = 0; i < urb->actual_length; i++, ch++) { if (!usb_serial_handle_sysrq_char(port, *ch)) tty_insert_flip_char(tty, *ch, TTY_NORMAL); } } tty_flip_buffer_push(tty); tty_kref_put(tty); } EXPORT_SYMBOL_GPL(usb_serial_generic_process_read_urb); void usb_serial_generic_read_bulk_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; unsigned char *data = urb->transfer_buffer; int status = urb->status; unsigned long flags; dbg("%s - port %d", __func__, port->number); if (unlikely(status != 0)) { dbg("%s - nonzero read bulk status received: %d", __func__, status); return; } usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, data); port->serial->type->process_read_urb(urb); /* Throttle the device if requested by tty */ spin_lock_irqsave(&port->lock, flags); port->throttled = port->throttle_req; if (!port->throttled) { spin_unlock_irqrestore(&port->lock, flags); usb_serial_generic_submit_read_urb(port, GFP_ATOMIC); } else spin_unlock_irqrestore(&port->lock, flags); } EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback); void usb_serial_generic_write_bulk_callback(struct urb *urb) { unsigned long flags; struct usb_serial_port *port = urb->context; int status = urb->status; int i; dbg("%s - port %d", __func__, port->number); for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) if 
(port->write_urbs[i] == urb) break; spin_lock_irqsave(&port->lock, flags); port->tx_bytes -= urb->transfer_buffer_length; set_bit(i, &port->write_urbs_free); spin_unlock_irqrestore(&port->lock, flags); if (status) { dbg("%s - non-zero urb status: %d", __func__, status); spin_lock_irqsave(&port->lock, flags); kfifo_reset_out(&port->write_fifo); spin_unlock_irqrestore(&port->lock, flags); } else { usb_serial_generic_write_start(port); } usb_serial_port_softint(port); } EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback); void usb_serial_generic_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; unsigned long flags; dbg("%s - port %d", __func__, port->number); /* Set the throttle request flag. It will be picked up * by usb_serial_generic_read_bulk_callback(). */ spin_lock_irqsave(&port->lock, flags); port->throttle_req = 1; spin_unlock_irqrestore(&port->lock, flags); } EXPORT_SYMBOL_GPL(usb_serial_generic_throttle); void usb_serial_generic_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; int was_throttled; dbg("%s - port %d", __func__, port->number); /* Clear the throttle flags */ spin_lock_irq(&port->lock); was_throttled = port->throttled; port->throttled = port->throttle_req = 0; spin_unlock_irq(&port->lock); if (was_throttled) usb_serial_generic_submit_read_urb(port, GFP_KERNEL); } EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle); #ifdef CONFIG_MAGIC_SYSRQ int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) { if (port->sysrq && port->port.console) { if (ch && time_before(jiffies, port->sysrq)) { handle_sysrq(ch); port->sysrq = 0; return 1; } port->sysrq = 0; } return 0; } #else int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) { return 0; } #endif EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char); int usb_serial_handle_break(struct usb_serial_port *port) { if (!port->sysrq) { port->sysrq = jiffies + HZ*5; return 1; } port->sysrq = 0; return 0; } EXPORT_SYMBOL_GPL(usb_serial_handle_break); /** * usb_serial_handle_dcd_change - handle a change of carrier detect state * @port: usb_serial_port structure for the open port * @tty: tty_struct structure for the port * @status: new carrier detect status, nonzero if active */ void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, struct tty_struct *tty, unsigned int status) { struct tty_port *port = &usb_port->port; dbg("%s - port %d, status %d", __func__, usb_port->number, status); if (status) wake_up_interruptible(&port->open_wait); else if (tty && !C_CLOCAL(tty)) tty_hangup(tty); } EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change); int usb_serial_generic_resume(struct usb_serial *serial) { struct usb_serial_port *port; int i, c = 0, r; for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) continue; if (port->read_urb) { r = usb_submit_urb(port->read_urb, GFP_NOIO); if (r < 0) c++; } if (port->bulk_out_size) { r = usb_serial_generic_write_start(port); if (r < 0) c++; } } return c ? -EIO : 0; } EXPORT_SYMBOL_GPL(usb_serial_generic_resume); void usb_serial_generic_disconnect(struct usb_serial *serial) { int i; dbg("%s", __func__); /* stop reads and writes on all ports */ for (i = 0; i < serial->num_ports; ++i) generic_cleanup(serial->port[i]); } EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect); void usb_serial_generic_release(struct usb_serial *serial) { dbg("%s", __func__); }
ubuntustudio-kernel/ubuntu-precise-lowlatency
drivers/usb/serial/generic.c
C
gpl-2.0
14,100
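
The generic driver above also serves as the fallback implementation that device-specific usb-serial drivers build on. A hypothetical minimal driver using it might look like the sketch below: the vendor/product IDs and names are placeholders, the registration sequence mirrors the generic driver's own init path (usb_serial_register() followed by usb_register(), as used in this era of the API), and callbacks left unset are expected to be filled in by the usb-serial core with the usb_serial_generic_* functions shown above.

/*
 * Minimal vendor-specific usb-serial driver sketch built on the generic
 * helpers above.  IDs and names are illustrative placeholders.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static const struct usb_device_id example_ids[] = {
        { USB_DEVICE(0x1234, 0x5678) },         /* placeholder vendor/product */
        { }
};
MODULE_DEVICE_TABLE(usb, example_ids);

static struct usb_driver example_usb_driver = {
        .name           = "example_serial",
        .probe          = usb_serial_probe,
        .disconnect     = usb_serial_disconnect,
        .id_table       = example_ids,
        .no_dynamic_id  = 1,
};

/*
 * Callbacks not set here (open, close, write, URB handling, ...) are
 * expected to default to the usb_serial_generic_* implementations.
 */
static struct usb_serial_driver example_serial_device = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "example_serial",
        },
        .usb_driver     = &example_usb_driver,
        .id_table       = example_ids,
        .num_ports      = 1,
};

static int __init example_serial_init(void)
{
        int retval;

        retval = usb_serial_register(&example_serial_device);
        if (retval)
                return retval;
        retval = usb_register(&example_usb_driver);
        if (retval)
                usb_serial_deregister(&example_serial_device);
        return retval;
}

static void __exit example_serial_exit(void)
{
        usb_deregister(&example_usb_driver);
        usb_serial_deregister(&example_serial_device);
}

module_init(example_serial_init);
module_exit(example_serial_exit);
MODULE_LICENSE("GPL");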
/* * fs/nfs/nfs4state.c * * Client-side XDR for NFSv4. * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Implementation of the NFSv4 state model. For the time being, * this is minimal, but will be made much more complex in a * subsequent patch. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/nfs_fs.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/random.h> #include <linux/ratelimit.h> #include <linux/workqueue.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <linux/sunrpc/clnt.h> #include "nfs4_fs.h" #include "callback.h" #include "delegation.h" #include "internal.h" #include "nfs4idmap.h" #include "nfs4session.h" #include "pnfs.h" #include "netns.h" #define NFSDBG_FACILITY NFSDBG_STATE #define OPENOWNER_POOL_SIZE 8 const nfs4_stateid zero_stateid = { { .data = { 0 } }, .type = NFS4_SPECIAL_STATEID_TYPE, }; const nfs4_stateid invalid_stateid = { { /* Funky initialiser keeps older gcc versions happy */ .data = { 0xff, 0xff, 0xff, 0xff, 0 }, }, .type = NFS4_INVALID_STATEID_TYPE, }; static DEFINE_MUTEX(nfs_clid_init_mutex); int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) { struct nfs4_setclientid_res clid = { .clientid = clp->cl_clientid, .confirm = clp->cl_confirm, }; unsigned short port; int status; struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) goto do_confirm; port = nn->nfs_callback_tcpport; if (clp->cl_addr.ss_family == AF_INET6) port = nn->nfs_callback_tcpport6; status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid); if (status != 0) goto out; clp->cl_clientid = clid.clientid; clp->cl_confirm = clid.confirm; set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); do_confirm: status = nfs4_proc_setclientid_confirm(clp, &clid, cred); if (status != 0) goto out; clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); nfs4_schedule_state_renewal(clp); out: return status; } /** * nfs40_discover_server_trunking - Detect server IP address trunking 
(mv0) * * @clp: nfs_client under test * @result: OUT: found nfs_client, or clp * @cred: credential to use for trunking test * * Returns zero, a negative errno, or a negative NFS4ERR status. * If zero is returned, an nfs_client pointer is planted in * "result". * * Note: The returned client may not yet be marked ready. */ int nfs40_discover_server_trunking(struct nfs_client *clp, struct nfs_client **result, struct rpc_cred *cred) { struct nfs4_setclientid_res clid = { .clientid = clp->cl_clientid, .confirm = clp->cl_confirm, }; struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); unsigned short port; int status; port = nn->nfs_callback_tcpport; if (clp->cl_addr.ss_family == AF_INET6) port = nn->nfs_callback_tcpport6; status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid); if (status != 0) goto out; clp->cl_clientid = clid.clientid; clp->cl_confirm = clid.confirm; status = nfs40_walk_client_list(clp, result, cred); if (status == 0) { /* Sustain the lease, even if it's empty. If the clientid4 * goes stale it's of no use for trunking discovery. */ nfs4_schedule_state_renewal(*result); } out: return status; } struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp) { struct rpc_cred *cred = NULL; if (clp->cl_machine_cred != NULL) cred = get_rpccred(clp->cl_machine_cred); return cred; } static void nfs4_root_machine_cred(struct nfs_client *clp) { struct rpc_cred *cred, *new; new = rpc_lookup_machine_cred(NULL); spin_lock(&clp->cl_lock); cred = clp->cl_machine_cred; clp->cl_machine_cred = new; spin_unlock(&clp->cl_lock); if (cred != NULL) put_rpccred(cred); } static struct rpc_cred * nfs4_get_renew_cred_server_locked(struct nfs_server *server) { struct rpc_cred *cred = NULL; struct nfs4_state_owner *sp; struct rb_node *pos; for (pos = rb_first(&server->state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); if (list_empty(&sp->so_states)) continue; cred = get_rpccred(sp->so_cred); break; } return cred; } /** * nfs4_get_renew_cred_locked - Acquire credential for a renew operation * @clp: client state handle * * Returns an rpc_cred with reference count bumped, or NULL. * Caller must hold clp->cl_lock. 
*/ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) { struct rpc_cred *cred = NULL; struct nfs_server *server; /* Use machine credentials if available */ cred = nfs4_get_machine_cred_locked(clp); if (cred != NULL) goto out; rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { cred = nfs4_get_renew_cred_server_locked(server); if (cred != NULL) break; } rcu_read_unlock(); out: return cred; } static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl) { if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) { spin_lock(&tbl->slot_tbl_lock); nfs41_wake_slot_table(tbl); spin_unlock(&tbl->slot_tbl_lock); } } static void nfs4_end_drain_session(struct nfs_client *clp) { struct nfs4_session *ses = clp->cl_session; if (clp->cl_slot_tbl) { nfs4_end_drain_slot_table(clp->cl_slot_tbl); return; } if (ses != NULL) { nfs4_end_drain_slot_table(&ses->bc_slot_table); nfs4_end_drain_slot_table(&ses->fc_slot_table); } } static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl) { set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); spin_lock(&tbl->slot_tbl_lock); if (tbl->highest_used_slotid != NFS4_NO_SLOT) { reinit_completion(&tbl->complete); spin_unlock(&tbl->slot_tbl_lock); return wait_for_completion_interruptible(&tbl->complete); } spin_unlock(&tbl->slot_tbl_lock); return 0; } static int nfs4_begin_drain_session(struct nfs_client *clp) { struct nfs4_session *ses = clp->cl_session; int ret = 0; if (clp->cl_slot_tbl) return nfs4_drain_slot_tbl(clp->cl_slot_tbl); /* back channel */ ret = nfs4_drain_slot_tbl(&ses->bc_slot_table); if (ret) return ret; /* fore channel */ return nfs4_drain_slot_tbl(&ses->fc_slot_table); } #if defined(CONFIG_NFS_V4_1) static int nfs41_setup_state_renewal(struct nfs_client *clp) { int status; struct nfs_fsinfo fsinfo; unsigned long now; if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) { nfs4_schedule_state_renewal(clp); return 0; } now = jiffies; status = nfs4_proc_get_lease_time(clp, &fsinfo); if (status == 0) { nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now); nfs4_schedule_state_renewal(clp); } return status; } static void nfs41_finish_session_reset(struct nfs_client *clp) { clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); /* create_session negotiated new slot table */ clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); nfs41_setup_state_renewal(clp); } int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) { int status; if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) goto do_confirm; status = nfs4_proc_exchange_id(clp, cred); if (status != 0) goto out; set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); do_confirm: status = nfs4_proc_create_session(clp, cred); if (status != 0) goto out; nfs41_finish_session_reset(clp); nfs_mark_client_ready(clp, NFS_CS_READY); out: return status; } /** * nfs41_discover_server_trunking - Detect server IP address trunking (mv1) * * @clp: nfs_client under test * @result: OUT: found nfs_client, or clp * @cred: credential to use for trunking test * * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status. * If NFS4_OK is returned, an nfs_client pointer is planted in * "result". * * Note: The returned client may not yet be marked ready. 
*/ int nfs41_discover_server_trunking(struct nfs_client *clp, struct nfs_client **result, struct rpc_cred *cred) { int status; status = nfs4_proc_exchange_id(clp, cred); if (status != NFS4_OK) return status; status = nfs41_walk_client_list(clp, result, cred); if (status < 0) return status; if (clp != *result) return 0; /* * Purge state if the client id was established in a prior * instance and the client id could not have arrived on the * server via Transparent State Migration. */ if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R) { if (!test_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags)) set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state); else set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); } nfs4_schedule_state_manager(clp); status = nfs_wait_client_init_complete(clp); if (status < 0) nfs_put_client(clp); return status; } #endif /* CONFIG_NFS_V4_1 */ /** * nfs4_get_clid_cred - Acquire credential for a setclientid operation * @clp: client state handle * * Returns an rpc_cred with reference count bumped, or NULL. */ struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp) { struct rpc_cred *cred; spin_lock(&clp->cl_lock); cred = nfs4_get_machine_cred_locked(clp); spin_unlock(&clp->cl_lock); return cred; } static struct nfs4_state_owner * nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred) { struct rb_node **p = &server->state_owners.rb_node, *parent = NULL; struct nfs4_state_owner *sp; while (*p != NULL) { parent = *p; sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); if (cred < sp->so_cred) p = &parent->rb_left; else if (cred > sp->so_cred) p = &parent->rb_right; else { if (!list_empty(&sp->so_lru)) list_del_init(&sp->so_lru); atomic_inc(&sp->so_count); return sp; } } return NULL; } static struct nfs4_state_owner * nfs4_insert_state_owner_locked(struct nfs4_state_owner *new) { struct nfs_server *server = new->so_server; struct rb_node **p = &server->state_owners.rb_node, *parent = NULL; struct nfs4_state_owner *sp; int err; while (*p != NULL) { parent = *p; sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); if (new->so_cred < sp->so_cred) p = &parent->rb_left; else if (new->so_cred > sp->so_cred) p = &parent->rb_right; else { if (!list_empty(&sp->so_lru)) list_del_init(&sp->so_lru); atomic_inc(&sp->so_count); return sp; } } err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id); if (err) return ERR_PTR(err); rb_link_node(&new->so_server_node, parent, p); rb_insert_color(&new->so_server_node, &server->state_owners); return new; } static void nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) { struct nfs_server *server = sp->so_server; if (!RB_EMPTY_NODE(&sp->so_server_node)) rb_erase(&sp->so_server_node, &server->state_owners); ida_remove(&server->openowner_id, sp->so_seqid.owner_id); } static void nfs4_init_seqid_counter(struct nfs_seqid_counter *sc) { sc->create_time = ktime_get(); sc->flags = 0; sc->counter = 0; spin_lock_init(&sc->lock); INIT_LIST_HEAD(&sc->list); rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue"); } static void nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc) { rpc_destroy_wait_queue(&sc->wait); } /* * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to * create a new state_owner. 
* */ static struct nfs4_state_owner * nfs4_alloc_state_owner(struct nfs_server *server, struct rpc_cred *cred, gfp_t gfp_flags) { struct nfs4_state_owner *sp; sp = kzalloc(sizeof(*sp), gfp_flags); if (!sp) return NULL; sp->so_server = server; sp->so_cred = get_rpccred(cred); spin_lock_init(&sp->so_lock); INIT_LIST_HEAD(&sp->so_states); nfs4_init_seqid_counter(&sp->so_seqid); atomic_set(&sp->so_count, 1); INIT_LIST_HEAD(&sp->so_lru); seqcount_init(&sp->so_reclaim_seqcount); mutex_init(&sp->so_delegreturn_mutex); return sp; } static void nfs4_reset_state_owner(struct nfs4_state_owner *sp) { /* This state_owner is no longer usable, but must * remain in place so that state recovery can find it * and the opens associated with it. * It may also be used for new 'open' request to * return a delegation to the server. * So update the 'create_time' so that it looks like * a new state_owner. This will cause the server to * request an OPEN_CONFIRM to start a new sequence. */ sp->so_seqid.create_time = ktime_get(); } static void nfs4_free_state_owner(struct nfs4_state_owner *sp) { nfs4_destroy_seqid_counter(&sp->so_seqid); put_rpccred(sp->so_cred); kfree(sp); } static void nfs4_gc_state_owners(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp, *tmp; unsigned long time_min, time_max; LIST_HEAD(doomed); spin_lock(&clp->cl_lock); time_max = jiffies; time_min = (long)time_max - (long)clp->cl_lease_time; list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) { /* NB: LRU is sorted so that oldest is at the head */ if (time_in_range(sp->so_expires, time_min, time_max)) break; list_move(&sp->so_lru, &doomed); nfs4_remove_state_owner_locked(sp); } spin_unlock(&clp->cl_lock); list_for_each_entry_safe(sp, tmp, &doomed, so_lru) { list_del(&sp->so_lru); nfs4_free_state_owner(sp); } } /** * nfs4_get_state_owner - Look up a state owner given a credential * @server: nfs_server to search * @cred: RPC credential to match * * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL. */ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred, gfp_t gfp_flags) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp, *new; spin_lock(&clp->cl_lock); sp = nfs4_find_state_owner_locked(server, cred); spin_unlock(&clp->cl_lock); if (sp != NULL) goto out; new = nfs4_alloc_state_owner(server, cred, gfp_flags); if (new == NULL) goto out; do { if (ida_pre_get(&server->openowner_id, gfp_flags) == 0) break; spin_lock(&clp->cl_lock); sp = nfs4_insert_state_owner_locked(new); spin_unlock(&clp->cl_lock); } while (sp == ERR_PTR(-EAGAIN)); if (sp != new) nfs4_free_state_owner(new); out: nfs4_gc_state_owners(server); return sp; } /** * nfs4_put_state_owner - Release a nfs4_state_owner * @sp: state owner data to release * * Note that we keep released state owners on an LRU * list. * This caches valid state owners so that they can be * reused, to avoid the OPEN_CONFIRM on minor version 0. * It also pins the uniquifier of dropped state owners for * a while, to ensure that those state owner names are * never reused. 
*/ void nfs4_put_state_owner(struct nfs4_state_owner *sp) { struct nfs_server *server = sp->so_server; struct nfs_client *clp = server->nfs_client; if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) return; sp->so_expires = jiffies; list_add_tail(&sp->so_lru, &server->state_owners_lru); spin_unlock(&clp->cl_lock); } /** * nfs4_purge_state_owners - Release all cached state owners * @server: nfs_server with cached state owners to release * * Called at umount time. Remaining state owners will be on * the LRU with ref count of zero. */ void nfs4_purge_state_owners(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp, *tmp; LIST_HEAD(doomed); spin_lock(&clp->cl_lock); list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) { list_move(&sp->so_lru, &doomed); nfs4_remove_state_owner_locked(sp); } spin_unlock(&clp->cl_lock); list_for_each_entry_safe(sp, tmp, &doomed, so_lru) { list_del(&sp->so_lru); nfs4_free_state_owner(sp); } } static struct nfs4_state * nfs4_alloc_open_state(void) { struct nfs4_state *state; state = kzalloc(sizeof(*state), GFP_NOFS); if (!state) return NULL; atomic_set(&state->count, 1); INIT_LIST_HEAD(&state->lock_states); spin_lock_init(&state->state_lock); seqlock_init(&state->seqlock); init_waitqueue_head(&state->waitq); return state; } void nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode) { if (state->state == fmode) return; /* NB! List reordering - see the reclaim code for why. */ if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) { if (fmode & FMODE_WRITE) list_move(&state->open_states, &state->owner->so_states); else list_move_tail(&state->open_states, &state->owner->so_states); } state->state = fmode; } static struct nfs4_state * __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs4_state *state; list_for_each_entry(state, &nfsi->open_states, inode_states) { if (state->owner != owner) continue; if (!nfs4_valid_open_stateid(state)) continue; if (atomic_inc_not_zero(&state->count)) return state; } return NULL; } static void nfs4_free_open_state(struct nfs4_state *state) { kfree(state); } struct nfs4_state * nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) { struct nfs4_state *state, *new; struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); state = __nfs4_find_state_byowner(inode, owner); spin_unlock(&inode->i_lock); if (state) goto out; new = nfs4_alloc_open_state(); spin_lock(&owner->so_lock); spin_lock(&inode->i_lock); state = __nfs4_find_state_byowner(inode, owner); if (state == NULL && new != NULL) { state = new; state->owner = owner; atomic_inc(&owner->so_count); list_add(&state->inode_states, &nfsi->open_states); ihold(inode); state->inode = inode; spin_unlock(&inode->i_lock); /* Note: The reclaim code dictates that we add stateless * and read-only stateids to the end of the list */ list_add_tail(&state->open_states, &owner->so_states); spin_unlock(&owner->so_lock); } else { spin_unlock(&inode->i_lock); spin_unlock(&owner->so_lock); if (new) nfs4_free_open_state(new); } out: return state; } void nfs4_put_open_state(struct nfs4_state *state) { struct inode *inode = state->inode; struct nfs4_state_owner *owner = state->owner; if (!atomic_dec_and_lock(&state->count, &owner->so_lock)) return; spin_lock(&inode->i_lock); list_del(&state->inode_states); list_del(&state->open_states); spin_unlock(&inode->i_lock); spin_unlock(&owner->so_lock); iput(inode); 
nfs4_free_open_state(state); nfs4_put_state_owner(owner); } /* * Close the current file. */ static void __nfs4_close(struct nfs4_state *state, fmode_t fmode, gfp_t gfp_mask, int wait) { struct nfs4_state_owner *owner = state->owner; int call_close = 0; fmode_t newstate; atomic_inc(&owner->so_count); /* Protect against nfs4_find_state() */ spin_lock(&owner->so_lock); switch (fmode & (FMODE_READ | FMODE_WRITE)) { case FMODE_READ: state->n_rdonly--; break; case FMODE_WRITE: state->n_wronly--; break; case FMODE_READ|FMODE_WRITE: state->n_rdwr--; } newstate = FMODE_READ|FMODE_WRITE; if (state->n_rdwr == 0) { if (state->n_rdonly == 0) { newstate &= ~FMODE_READ; call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); } if (state->n_wronly == 0) { newstate &= ~FMODE_WRITE; call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); } if (newstate == 0) clear_bit(NFS_DELEGATED_STATE, &state->flags); } nfs4_state_set_mode_locked(state, newstate); spin_unlock(&owner->so_lock); if (!call_close) { nfs4_put_open_state(state); nfs4_put_state_owner(owner); } else nfs4_do_close(state, gfp_mask, wait); } void nfs4_close_state(struct nfs4_state *state, fmode_t fmode) { __nfs4_close(state, fmode, GFP_NOFS, 0); } void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode) { __nfs4_close(state, fmode, GFP_KERNEL, 1); } /* * Search the state->lock_states for an existing lock_owner * that is compatible with either of the given owners. * If the second is non-zero, then the first refers to a Posix-lock * owner (current->files) and the second refers to a flock/OFD * owner (struct file*). In that case, prefer a match for the first * owner. * If both sorts of locks are held on the one file we cannot know * which stateid was intended to be used, so a "correct" choice cannot * be made. Failing that, a "consistent" choice is preferable. The * consistent choice we make is to prefer the first owner, that of a * Posix lock. */ static struct nfs4_lock_state * __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, fl_owner_t fl_owner2) { struct nfs4_lock_state *pos, *ret = NULL; list_for_each_entry(pos, &state->lock_states, ls_locks) { if (pos->ls_owner == fl_owner) { ret = pos; break; } if (pos->ls_owner == fl_owner2) ret = pos; } if (ret) refcount_inc(&ret->ls_count); return ret; } /* * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. * */ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) { struct nfs4_lock_state *lsp; struct nfs_server *server = state->owner->so_server; lsp = kzalloc(sizeof(*lsp), GFP_NOFS); if (lsp == NULL) return NULL; nfs4_init_seqid_counter(&lsp->ls_seqid); refcount_set(&lsp->ls_count, 1); lsp->ls_state = state; lsp->ls_owner = fl_owner; lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); if (lsp->ls_seqid.owner_id < 0) goto out_free; INIT_LIST_HEAD(&lsp->ls_locks); return lsp; out_free: kfree(lsp); return NULL; } void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) { ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id); nfs4_destroy_seqid_counter(&lsp->ls_seqid); kfree(lsp); } /* * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. 
* */ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) { struct nfs4_lock_state *lsp, *new = NULL; for(;;) { spin_lock(&state->state_lock); lsp = __nfs4_find_lock_state(state, owner, NULL); if (lsp != NULL) break; if (new != NULL) { list_add(&new->ls_locks, &state->lock_states); set_bit(LK_STATE_IN_USE, &state->flags); lsp = new; new = NULL; break; } spin_unlock(&state->state_lock); new = nfs4_alloc_lock_state(state, owner); if (new == NULL) return NULL; } spin_unlock(&state->state_lock); if (new != NULL) nfs4_free_lock_state(state->owner->so_server, new); return lsp; } /* * Release reference to lock_state, and free it if we see that * it is no longer in use */ void nfs4_put_lock_state(struct nfs4_lock_state *lsp) { struct nfs_server *server; struct nfs4_state *state; if (lsp == NULL) return; state = lsp->ls_state; if (!refcount_dec_and_lock(&lsp->ls_count, &state->state_lock)) return; list_del(&lsp->ls_locks); if (list_empty(&state->lock_states)) clear_bit(LK_STATE_IN_USE, &state->flags); spin_unlock(&state->state_lock); server = state->owner->so_server; if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { struct nfs_client *clp = server->nfs_client; clp->cl_mvops->free_lock_state(server, lsp); } else nfs4_free_lock_state(server, lsp); } static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) { struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner; dst->fl_u.nfs4_fl.owner = lsp; refcount_inc(&lsp->ls_count); } static void nfs4_fl_release_lock(struct file_lock *fl) { nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); } static const struct file_lock_operations nfs4_fl_lock_ops = { .fl_copy_lock = nfs4_fl_copy_lock, .fl_release_private = nfs4_fl_release_lock, }; int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) { struct nfs4_lock_state *lsp; if (fl->fl_ops != NULL) return 0; lsp = nfs4_get_lock_state(state, fl->fl_owner); if (lsp == NULL) return -ENOMEM; fl->fl_u.nfs4_fl.owner = lsp; fl->fl_ops = &nfs4_fl_lock_ops; return 0; } static int nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state, const struct nfs_lock_context *l_ctx) { struct nfs4_lock_state *lsp; fl_owner_t fl_owner, fl_flock_owner; int ret = -ENOENT; if (l_ctx == NULL) goto out; if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) goto out; fl_owner = l_ctx->lockowner; fl_flock_owner = l_ctx->open_context->flock_owner; spin_lock(&state->state_lock); lsp = __nfs4_find_lock_state(state, fl_owner, fl_flock_owner); if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) ret = -EIO; else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) { nfs4_stateid_copy(dst, &lsp->ls_stateid); ret = 0; } spin_unlock(&state->state_lock); nfs4_put_lock_state(lsp); out: return ret; } bool nfs4_refresh_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) { bool ret; int seq; do { ret = false; seq = read_seqbegin(&state->seqlock); if (nfs4_state_match_open_stateid_other(state, dst)) { dst->seqid = state->open_stateid.seqid; ret = true; } } while (read_seqretry(&state->seqlock, seq)); return ret; } bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) { bool ret; const nfs4_stateid *src; int seq; do { ret = false; src = &zero_stateid; seq = read_seqbegin(&state->seqlock); if (test_bit(NFS_OPEN_STATE, &state->flags)) { src = &state->open_stateid; ret = true; } nfs4_stateid_copy(dst, src); } while (read_seqretry(&state->seqlock, seq)); return ret; } /* * Byte-range lock aware utility to initialize the stateid of 
read/write * requests. */ int nfs4_select_rw_stateid(struct nfs4_state *state, fmode_t fmode, const struct nfs_lock_context *l_ctx, nfs4_stateid *dst, struct rpc_cred **cred) { int ret; if (!nfs4_valid_open_stateid(state)) return -EIO; if (cred != NULL) *cred = NULL; ret = nfs4_copy_lock_stateid(dst, state, l_ctx); if (ret == -EIO) /* A lost lock - don't even consider delegations */ goto out; /* returns true if delegation stateid found and copied */ if (nfs4_copy_delegation_stateid(state->inode, fmode, dst, cred)) { ret = 0; goto out; } if (ret != -ENOENT) /* nfs4_copy_delegation_stateid() didn't over-write * dst, so it still has the lock stateid which we now * choose to use. */ goto out; nfs4_copy_open_stateid(dst, state); ret = 0; out: if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41)) dst->seqid = 0; return ret; } struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask) { struct nfs_seqid *new; new = kmalloc(sizeof(*new), gfp_mask); if (new == NULL) return ERR_PTR(-ENOMEM); new->sequence = counter; INIT_LIST_HEAD(&new->list); new->task = NULL; return new; } void nfs_release_seqid(struct nfs_seqid *seqid) { struct nfs_seqid_counter *sequence; if (seqid == NULL || list_empty(&seqid->list)) return; sequence = seqid->sequence; spin_lock(&sequence->lock); list_del_init(&seqid->list); if (!list_empty(&sequence->list)) { struct nfs_seqid *next; next = list_first_entry(&sequence->list, struct nfs_seqid, list); rpc_wake_up_queued_task(&sequence->wait, next->task); } spin_unlock(&sequence->lock); } void nfs_free_seqid(struct nfs_seqid *seqid) { nfs_release_seqid(seqid); kfree(seqid); } /* * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or * failed with a seqid incrementing error - * see comments nfs4.h:seqid_mutating_error() */ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) { switch (status) { case 0: break; case -NFS4ERR_BAD_SEQID: if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) return; pr_warn_ratelimited("NFS: v4 server returned a bad" " sequence-id error on an" " unconfirmed sequence %p!\n", seqid->sequence); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_BADXDR: case -NFS4ERR_RESOURCE: case -NFS4ERR_NOFILEHANDLE: case -NFS4ERR_MOVED: /* Non-seqid mutating errors */ return; }; /* * Note: no locking needed as we are guaranteed to be first * on the sequence list */ seqid->sequence->counter++; } void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) { struct nfs4_state_owner *sp; if (seqid == NULL) return; sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid); if (status == -NFS4ERR_BAD_SEQID) nfs4_reset_state_owner(sp); if (!nfs4_has_session(sp->so_server->nfs_client)) nfs_increment_seqid(status, seqid); } /* * Increment the seqid if the LOCK/LOCKU succeeded, or * failed with a seqid incrementing error - * see comments nfs4.h:seqid_mutating_error() */ void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) { if (seqid != NULL) nfs_increment_seqid(status, seqid); } int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) { struct nfs_seqid_counter *sequence; int status = 0; if (seqid == NULL) goto out; sequence = seqid->sequence; spin_lock(&sequence->lock); seqid->task = task; if (list_empty(&seqid->list)) list_add_tail(&seqid->list, &sequence->list); if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid) goto unlock; rpc_sleep_on(&sequence->wait, task, NULL); status = -EAGAIN; unlock: 
spin_unlock(&sequence->lock); out: return status; } static int nfs4_run_state_manager(void *); static void nfs4_clear_state_manager_bit(struct nfs_client *clp) { smp_mb__before_atomic(); clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); smp_mb__after_atomic(); wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING); rpc_wake_up(&clp->cl_rpcwaitq); } /* * Schedule the nfs_client asynchronous state management routine */ void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) return; __module_get(THIS_MODULE); refcount_inc(&clp->cl_count); /* The rcu_read_lock() is not strictly necessary, as the state * manager is the only thread that ever changes the rpc_xprt * after it's initialized. At this point, we're single threaded. */ rcu_read_lock(); snprintf(buf, sizeof(buf), "%s-manager", rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); rcu_read_unlock(); task = kthread_run(nfs4_run_state_manager, clp, "%s", buf); if (IS_ERR(task)) { printk(KERN_ERR "%s: kthread_run: %ld\n", __func__, PTR_ERR(task)); nfs4_clear_state_manager_bit(clp); nfs_put_client(clp); module_put(THIS_MODULE); } } /* * Schedule a lease recovery attempt */ void nfs4_schedule_lease_recovery(struct nfs_client *clp) { if (!clp) return; if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); dprintk("%s: scheduling lease recovery for server %s\n", __func__, clp->cl_hostname); nfs4_schedule_state_manager(clp); } EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery); /** * nfs4_schedule_migration_recovery - trigger migration recovery * * @server: FSID that is migrating * * Returns zero if recovery has started, otherwise a negative NFS4ERR * value is returned. 
*/ int nfs4_schedule_migration_recovery(const struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; if (server->fh_expire_type != NFS4_FH_PERSISTENT) { pr_err("NFS: volatile file handles not supported (server %s)\n", clp->cl_hostname); return -NFS4ERR_IO; } if (test_bit(NFS_MIG_FAILED, &server->mig_status)) return -NFS4ERR_IO; dprintk("%s: scheduling migration recovery for (%llx:%llx) on %s\n", __func__, (unsigned long long)server->fsid.major, (unsigned long long)server->fsid.minor, clp->cl_hostname); set_bit(NFS_MIG_IN_TRANSITION, &((struct nfs_server *)server)->mig_status); set_bit(NFS4CLNT_MOVED, &clp->cl_state); nfs4_schedule_state_manager(clp); return 0; } EXPORT_SYMBOL_GPL(nfs4_schedule_migration_recovery); /** * nfs4_schedule_lease_moved_recovery - start lease-moved recovery * * @clp: server to check for moved leases * */ void nfs4_schedule_lease_moved_recovery(struct nfs_client *clp) { dprintk("%s: scheduling lease-moved recovery for client ID %llx on %s\n", __func__, clp->cl_clientid, clp->cl_hostname); set_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state); nfs4_schedule_state_manager(clp); } EXPORT_SYMBOL_GPL(nfs4_schedule_lease_moved_recovery); int nfs4_wait_clnt_recover(struct nfs_client *clp) { int res; might_sleep(); refcount_inc(&clp->cl_count); res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING, nfs_wait_bit_killable, TASK_KILLABLE); if (res) goto out; if (clp->cl_cons_state < 0) res = clp->cl_cons_state; out: nfs_put_client(clp); return res; } int nfs4_client_recover_expired_lease(struct nfs_client *clp) { unsigned int loop; int ret; for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { ret = nfs4_wait_clnt_recover(clp); if (ret != 0) break; if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) break; nfs4_schedule_state_manager(clp); ret = -EIO; } return ret; } /* * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN * @clp: client to process * * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a * resend of the SETCLIENTID and hence re-establish the * callback channel. Then return all existing delegations. 
*/ static void nfs40_handle_cb_pathdown(struct nfs_client *clp) { set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); nfs_expire_all_delegations(clp); dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__, clp->cl_hostname); } void nfs4_schedule_path_down_recovery(struct nfs_client *clp) { nfs40_handle_cb_pathdown(clp); nfs4_schedule_state_manager(clp); } static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) { if (!nfs4_valid_open_stateid(state)) return 0; set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); /* Don't recover state that expired before the reboot */ if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) { clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); return 0; } set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags); set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); return 1; } int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) { if (!nfs4_valid_open_stateid(state)) return 0; set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags); set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); return 1; } int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state) { struct nfs_client *clp = server->nfs_client; if (!nfs4_state_mark_reclaim_nograce(clp, state)) return -EBADF; dprintk("%s: scheduling stateid recovery for server %s\n", __func__, clp->cl_hostname); nfs4_schedule_state_manager(clp); return 0; } EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery); static struct nfs4_lock_state * nfs_state_find_lock_state_by_stateid(struct nfs4_state *state, const nfs4_stateid *stateid) { struct nfs4_lock_state *pos; list_for_each_entry(pos, &state->lock_states, ls_locks) { if (!test_bit(NFS_LOCK_INITIALIZED, &pos->ls_flags)) continue; if (nfs4_stateid_match_other(&pos->ls_stateid, stateid)) return pos; } return NULL; } static bool nfs_state_lock_state_matches_stateid(struct nfs4_state *state, const nfs4_stateid *stateid) { bool found = false; if (test_bit(LK_STATE_IN_USE, &state->flags)) { spin_lock(&state->state_lock); if (nfs_state_find_lock_state_by_stateid(state, stateid)) found = true; spin_unlock(&state->state_lock); } return found; } void nfs_inode_find_state_and_recover(struct inode *inode, const nfs4_stateid *stateid) { struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; struct nfs_inode *nfsi = NFS_I(inode); struct nfs_open_context *ctx; struct nfs4_state *state; bool found = false; spin_lock(&inode->i_lock); list_for_each_entry(ctx, &nfsi->open_files, list) { state = ctx->state; if (state == NULL) continue; if (nfs4_stateid_match_other(&state->stateid, stateid) && nfs4_state_mark_reclaim_nograce(clp, state)) { found = true; continue; } if (nfs4_stateid_match_other(&state->open_stateid, stateid) && nfs4_state_mark_reclaim_nograce(clp, state)) { found = true; continue; } if (nfs_state_lock_state_matches_stateid(state, stateid) && nfs4_state_mark_reclaim_nograce(clp, state)) found = true; } spin_unlock(&inode->i_lock); nfs_inode_find_delegation_state_and_recover(inode, stateid); if (found) nfs4_schedule_state_manager(clp); } static void nfs4_state_mark_open_context_bad(struct nfs4_state *state) { struct inode *inode = state->inode; struct nfs_inode *nfsi = NFS_I(inode); struct nfs_open_context *ctx; spin_lock(&inode->i_lock); list_for_each_entry(ctx, &nfsi->open_files, list) { if (ctx->state != state) continue; set_bit(NFS_CONTEXT_BAD, &ctx->flags); } 
spin_unlock(&inode->i_lock); } static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error) { set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags); nfs4_state_mark_open_context_bad(state); } static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) { struct inode *inode = state->inode; struct nfs_inode *nfsi = NFS_I(inode); struct file_lock *fl; struct nfs4_lock_state *lsp; int status = 0; struct file_lock_context *flctx = inode->i_flctx; struct list_head *list; if (flctx == NULL) return 0; list = &flctx->flc_posix; /* Guard against delegation returns and new lock/unlock calls */ down_write(&nfsi->rwsem); spin_lock(&flctx->flc_lock); restart: list_for_each_entry(fl, list, fl_list) { if (nfs_file_open_context(fl->fl_file)->state != state) continue; spin_unlock(&flctx->flc_lock); status = ops->recover_lock(state, fl); switch (status) { case 0: break; case -ESTALE: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: goto out; default: pr_err("NFS: %s: unhandled error %d\n", __func__, status); case -ENOMEM: case -NFS4ERR_DENIED: case -NFS4ERR_RECLAIM_BAD: case -NFS4ERR_RECLAIM_CONFLICT: lsp = fl->fl_u.nfs4_fl.owner; if (lsp) set_bit(NFS_LOCK_LOST, &lsp->ls_flags); status = 0; } spin_lock(&flctx->flc_lock); } if (list == &flctx->flc_posix) { list = &flctx->flc_flock; goto restart; } spin_unlock(&flctx->flc_lock); out: up_write(&nfsi->rwsem); return status; } static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops) { struct nfs4_state *state; struct nfs4_lock_state *lock; int status = 0; /* Note: we rely on the sp->so_states list being ordered * so that we always reclaim open(O_RDWR) and/or open(O_WRITE) * states first. * This is needed to ensure that the server won't give us any * read delegations that we have to return if, say, we are * recovering after a network partition or a reboot from a * server that doesn't support a grace period. 
*/ spin_lock(&sp->so_lock); raw_write_seqcount_begin(&sp->so_reclaim_seqcount); restart: list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) continue; if (!nfs4_valid_open_stateid(state)) continue; if (state->state == 0) continue; atomic_inc(&state->count); spin_unlock(&sp->so_lock); status = ops->recover_open(sp, state); if (status >= 0) { status = nfs4_reclaim_locks(state, ops); if (status >= 0) { if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) { spin_lock(&state->state_lock); list_for_each_entry(lock, &state->lock_states, ls_locks) { if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags)) pr_warn_ratelimited("NFS: " "%s: Lock reclaim " "failed!\n", __func__); } spin_unlock(&state->state_lock); } clear_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); nfs4_put_open_state(state); spin_lock(&sp->so_lock); goto restart; } } switch (status) { default: printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status); case -ENOENT: case -ENOMEM: case -EACCES: case -EROFS: case -EIO: case -ESTALE: /* Open state on this file cannot be recovered */ nfs4_state_mark_recovery_failed(state, status); break; case -EAGAIN: ssleep(1); case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_OLD_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_RECLAIM_BAD: case -NFS4ERR_RECLAIM_CONFLICT: nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); break; case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: goto out_err; } nfs4_put_open_state(state); spin_lock(&sp->so_lock); goto restart; } raw_write_seqcount_end(&sp->so_reclaim_seqcount); spin_unlock(&sp->so_lock); return 0; out_err: nfs4_put_open_state(state); spin_lock(&sp->so_lock); raw_write_seqcount_end(&sp->so_reclaim_seqcount); spin_unlock(&sp->so_lock); return status; } static void nfs4_clear_open_state(struct nfs4_state *state) { struct nfs4_lock_state *lock; clear_bit(NFS_DELEGATED_STATE, &state->flags); clear_bit(NFS_O_RDONLY_STATE, &state->flags); clear_bit(NFS_O_WRONLY_STATE, &state->flags); clear_bit(NFS_O_RDWR_STATE, &state->flags); spin_lock(&state->state_lock); list_for_each_entry(lock, &state->lock_states, ls_locks) { lock->ls_seqid.flags = 0; clear_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags); } spin_unlock(&state->state_lock); } static void nfs4_reset_seqids(struct nfs_server *server, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp; struct rb_node *pos; struct nfs4_state *state; spin_lock(&clp->cl_lock); for (pos = rb_first(&server->state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); sp->so_seqid.flags = 0; spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { if (mark_reclaim(clp, state)) nfs4_clear_open_state(state); } spin_unlock(&sp->so_lock); } spin_unlock(&clp->cl_lock); } static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) { struct nfs_server *server; rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) nfs4_reset_seqids(server, mark_reclaim); rcu_read_unlock(); } static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) { /* Mark 
all delegations for reclaim */ nfs_delegation_mark_reclaim(clp); nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); } static int nfs4_reclaim_complete(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops, struct rpc_cred *cred) { /* Notify the server we're done reclaiming our state */ if (ops->reclaim_complete) return ops->reclaim_complete(clp, cred); return 0; } static void nfs4_clear_reclaim_server(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp; struct rb_node *pos; struct nfs4_state *state; spin_lock(&clp->cl_lock); for (pos = rb_first(&server->state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags)) continue; nfs4_state_mark_reclaim_nograce(clp, state); } spin_unlock(&sp->so_lock); } spin_unlock(&clp->cl_lock); } static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp) { struct nfs_server *server; if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) return 0; rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) nfs4_clear_reclaim_server(server); rcu_read_unlock(); nfs_delegation_reap_unclaimed(clp); return 1; } static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) { const struct nfs4_state_recovery_ops *ops; struct rpc_cred *cred; int err; if (!nfs4_state_clear_reclaim_reboot(clp)) return; ops = clp->cl_mvops->reboot_recovery_ops; cred = nfs4_get_clid_cred(clp); err = nfs4_reclaim_complete(clp, ops, cred); put_rpccred(cred); if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION) set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); } static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp) { nfs_mark_test_expired_all_delegations(clp); nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce); } static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) { switch (error) { case 0: break; case -NFS4ERR_CB_PATH_DOWN: nfs40_handle_cb_pathdown(clp); break; case -NFS4ERR_NO_GRACE: nfs4_state_end_reclaim_reboot(clp); break; case -NFS4ERR_STALE_CLIENTID: set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); nfs4_state_start_reclaim_reboot(clp); break; case -NFS4ERR_EXPIRED: set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); nfs4_state_start_reclaim_nograce(clp); break; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_DEADSESSION: case -NFS4ERR_SEQ_FALSE_RETRY: case -NFS4ERR_SEQ_MISORDERED: set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); /* Zero session reset errors */ break; case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); break; default: dprintk("%s: failed to handle error %d for server %s\n", __func__, error, clp->cl_hostname); return error; } dprintk("%s: handled error %d for server %s\n", __func__, error, clp->cl_hostname); return 0; } static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) { struct nfs4_state_owner *sp; struct nfs_server *server; struct rb_node *pos; int status = 0; restart: rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { nfs4_purge_state_owners(server); spin_lock(&clp->cl_lock); for (pos = rb_first(&server->state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); if 
(!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags)) continue; if (!atomic_inc_not_zero(&sp->so_count)) continue; spin_unlock(&clp->cl_lock); rcu_read_unlock(); status = nfs4_reclaim_open_state(sp, ops); if (status < 0) { set_bit(ops->owner_flag_bit, &sp->so_flags); nfs4_put_state_owner(sp); status = nfs4_recovery_handle_error(clp, status); return (status != 0) ? status : -EAGAIN; } nfs4_put_state_owner(sp); goto restart; } spin_unlock(&clp->cl_lock); } rcu_read_unlock(); return 0; } static int nfs4_check_lease(struct nfs_client *clp) { struct rpc_cred *cred; const struct nfs4_state_maintenance_ops *ops = clp->cl_mvops->state_renewal_ops; int status; /* Is the client already known to have an expired lease? */ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) return 0; spin_lock(&clp->cl_lock); cred = ops->get_state_renewal_cred_locked(clp); spin_unlock(&clp->cl_lock); if (cred == NULL) { cred = nfs4_get_clid_cred(clp); status = -ENOKEY; if (cred == NULL) goto out; } status = ops->renew_lease(clp, cred); put_rpccred(cred); if (status == -ETIMEDOUT) { set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); return 0; } out: return nfs4_recovery_handle_error(clp, status); } /* Set NFS4CLNT_LEASE_EXPIRED and reclaim reboot state for all v4.0 errors * and for recoverable errors on EXCHANGE_ID for v4.1 */ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status) { switch (status) { case -NFS4ERR_SEQ_MISORDERED: if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) return -ESERVERFAULT; /* Lease confirmation error: retry after purging the lease */ ssleep(1); clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); break; case -NFS4ERR_STALE_CLIENTID: clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); nfs4_state_start_reclaim_reboot(clp); break; case -NFS4ERR_CLID_INUSE: pr_err("NFS: Server %s reports our clientid is in use\n", clp->cl_hostname); nfs_mark_client_ready(clp, -EPERM); clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); return -EPERM; case -EACCES: case -NFS4ERR_DELAY: case -ETIMEDOUT: case -EAGAIN: ssleep(1); break; case -NFS4ERR_MINOR_VERS_MISMATCH: if (clp->cl_cons_state == NFS_CS_SESSION_INITING) nfs_mark_client_ready(clp, -EPROTONOSUPPORT); dprintk("%s: exit with error %d for server %s\n", __func__, -EPROTONOSUPPORT, clp->cl_hostname); return -EPROTONOSUPPORT; case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery * in nfs4_exchange_id */ default: dprintk("%s: exit with error %d for server %s\n", __func__, status, clp->cl_hostname); return status; } set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); dprintk("%s: handled error %d for server %s\n", __func__, status, clp->cl_hostname); return 0; } static int nfs4_establish_lease(struct nfs_client *clp) { struct rpc_cred *cred; const struct nfs4_state_recovery_ops *ops = clp->cl_mvops->reboot_recovery_ops; int status; nfs4_begin_drain_session(clp); cred = nfs4_get_clid_cred(clp); if (cred == NULL) return -ENOENT; status = ops->establish_clid(clp, cred); put_rpccred(cred); if (status != 0) return status; pnfs_destroy_all_layouts(clp); return 0; } /* * Returns zero or a negative errno. NFS4ERR values are converted * to local errno values. 
*/ static int nfs4_reclaim_lease(struct nfs_client *clp) { int status; status = nfs4_establish_lease(clp); if (status < 0) return nfs4_handle_reclaim_lease_error(clp, status); if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state)) nfs4_state_start_reclaim_nograce(clp); if (!test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); return 0; } static int nfs4_purge_lease(struct nfs_client *clp) { int status; status = nfs4_establish_lease(clp); if (status < 0) return nfs4_handle_reclaim_lease_error(clp, status); clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state); set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); nfs4_state_start_reclaim_nograce(clp); return 0; } /* * Try remote migration of one FSID from a source server to a * destination server. The source server provides a list of * potential destinations. * * Returns zero or a negative NFS4ERR status code. */ static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred) { struct nfs_client *clp = server->nfs_client; struct nfs4_fs_locations *locations = NULL; struct inode *inode; struct page *page; int status, result; dprintk("--> %s: FSID %llx:%llx on \"%s\"\n", __func__, (unsigned long long)server->fsid.major, (unsigned long long)server->fsid.minor, clp->cl_hostname); result = 0; page = alloc_page(GFP_KERNEL); locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); if (page == NULL || locations == NULL) { dprintk("<-- %s: no memory\n", __func__); goto out; } inode = d_inode(server->super->s_root); result = nfs4_proc_get_locations(inode, locations, page, cred); if (result) { dprintk("<-- %s: failed to retrieve fs_locations: %d\n", __func__, result); goto out; } result = -NFS4ERR_NXIO; if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) { dprintk("<-- %s: No fs_locations data, migration skipped\n", __func__); goto out; } nfs4_begin_drain_session(clp); status = nfs4_replace_transport(server, locations); if (status != 0) { dprintk("<-- %s: failed to replace transport: %d\n", __func__, status); goto out; } result = 0; dprintk("<-- %s: migration succeeded\n", __func__); out: if (page != NULL) __free_page(page); kfree(locations); if (result) { pr_err("NFS: migration recovery failed (server %s)\n", clp->cl_hostname); set_bit(NFS_MIG_FAILED, &server->mig_status); } return result; } /* * Returns zero or a negative NFS4ERR status code. */ static int nfs4_handle_migration(struct nfs_client *clp) { const struct nfs4_state_maintenance_ops *ops = clp->cl_mvops->state_renewal_ops; struct nfs_server *server; struct rpc_cred *cred; dprintk("%s: migration reported on \"%s\"\n", __func__, clp->cl_hostname); spin_lock(&clp->cl_lock); cred = ops->get_state_renewal_cred_locked(clp); spin_unlock(&clp->cl_lock); if (cred == NULL) return -NFS4ERR_NOENT; clp->cl_mig_gen++; restart: rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { int status; if (server->mig_gen == clp->cl_mig_gen) continue; server->mig_gen = clp->cl_mig_gen; if (!test_and_clear_bit(NFS_MIG_IN_TRANSITION, &server->mig_status)) continue; rcu_read_unlock(); status = nfs4_try_migration(server, cred); if (status < 0) { put_rpccred(cred); return status; } goto restart; } rcu_read_unlock(); put_rpccred(cred); return 0; } /* * Test each nfs_server on the clp's cl_superblocks list to see * if it's moved to another server. 
Stop when the server no longer * returns NFS4ERR_LEASE_MOVED. */ static int nfs4_handle_lease_moved(struct nfs_client *clp) { const struct nfs4_state_maintenance_ops *ops = clp->cl_mvops->state_renewal_ops; struct nfs_server *server; struct rpc_cred *cred; dprintk("%s: lease moved reported on \"%s\"\n", __func__, clp->cl_hostname); spin_lock(&clp->cl_lock); cred = ops->get_state_renewal_cred_locked(clp); spin_unlock(&clp->cl_lock); if (cred == NULL) return -NFS4ERR_NOENT; clp->cl_mig_gen++; restart: rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { struct inode *inode; int status; if (server->mig_gen == clp->cl_mig_gen) continue; server->mig_gen = clp->cl_mig_gen; rcu_read_unlock(); inode = d_inode(server->super->s_root); status = nfs4_proc_fsid_present(inode, cred); if (status != -NFS4ERR_MOVED) goto restart; /* wasn't this one */ if (nfs4_try_migration(server, cred) == -NFS4ERR_LEASE_MOVED) goto restart; /* there are more */ goto out; } rcu_read_unlock(); out: put_rpccred(cred); return 0; } /** * nfs4_discover_server_trunking - Detect server IP address trunking * * @clp: nfs_client under test * @result: OUT: found nfs_client, or clp * * Returns zero or a negative errno. If zero is returned, * an nfs_client pointer is planted in "result". * * Note: since we are invoked in process context, and * not from inside the state manager, we cannot use * nfs4_handle_reclaim_lease_error(). */ int nfs4_discover_server_trunking(struct nfs_client *clp, struct nfs_client **result) { const struct nfs4_state_recovery_ops *ops = clp->cl_mvops->reboot_recovery_ops; struct rpc_clnt *clnt; struct rpc_cred *cred; int i, status; dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname); clnt = clp->cl_rpcclient; i = 0; mutex_lock(&nfs_clid_init_mutex); again: status = -ENOENT; cred = nfs4_get_clid_cred(clp); if (cred == NULL) goto out_unlock; status = ops->detect_trunking(clp, result, cred); put_rpccred(cred); switch (status) { case 0: case -EINTR: case -ERESTARTSYS: break; case -ETIMEDOUT: if (clnt->cl_softrtry) break; case -NFS4ERR_DELAY: case -EAGAIN: ssleep(1); case -NFS4ERR_STALE_CLIENTID: dprintk("NFS: %s after status %d, retrying\n", __func__, status); goto again; case -EACCES: if (i++ == 0) { nfs4_root_machine_cred(clp); goto again; } if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) break; case -NFS4ERR_CLID_INUSE: case -NFS4ERR_WRONGSEC: /* No point in retrying if we already used RPC_AUTH_UNIX */ if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) { status = -EPERM; break; } clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_UNIX); if (IS_ERR(clnt)) { status = PTR_ERR(clnt); break; } /* Note: this is safe because we haven't yet marked the * client as ready, so we are the only user of * clp->cl_rpcclient */ clnt = xchg(&clp->cl_rpcclient, clnt); rpc_shutdown_client(clnt); clnt = clp->cl_rpcclient; goto again; case -NFS4ERR_MINOR_VERS_MISMATCH: status = -EPROTONOSUPPORT; break; case -EKEYEXPIRED: case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery * in nfs4_exchange_id */ status = -EKEYEXPIRED; break; default: pr_warn("NFS: %s unhandled error %d. 
Exiting with error EIO\n", __func__, status); status = -EIO; } out_unlock: mutex_unlock(&nfs_clid_init_mutex); dprintk("NFS: %s: status = %d\n", __func__, status); return status; } #ifdef CONFIG_NFS_V4_1 void nfs4_schedule_session_recovery(struct nfs4_session *session, int err) { struct nfs_client *clp = session->clp; switch (err) { default: set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); break; case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); } nfs4_schedule_state_manager(clp); } EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery); void nfs41_notify_server(struct nfs_client *clp) { /* Use CHECK_LEASE to ping the server with a SEQUENCE */ set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); nfs4_schedule_state_manager(clp); } static void nfs4_reset_all_state(struct nfs_client *clp) { if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state); clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); nfs4_state_start_reclaim_nograce(clp); dprintk("%s: scheduling reset of all state for server %s!\n", __func__, clp->cl_hostname); nfs4_schedule_state_manager(clp); } } static void nfs41_handle_server_reboot(struct nfs_client *clp) { if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { nfs4_state_start_reclaim_reboot(clp); dprintk("%s: server %s rebooted!\n", __func__, clp->cl_hostname); nfs4_schedule_state_manager(clp); } } static void nfs41_handle_all_state_revoked(struct nfs_client *clp) { nfs4_reset_all_state(clp); dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); } static void nfs41_handle_some_state_revoked(struct nfs_client *clp) { nfs4_state_start_reclaim_nograce(clp); nfs4_schedule_state_manager(clp); dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); } static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) { /* FIXME: For now, we destroy all layouts. */ pnfs_destroy_all_layouts(clp); /* FIXME: For now, we test all delegations+open state+locks. */ nfs41_handle_some_state_revoked(clp); dprintk("%s: Recallable state revoked on server %s!\n", __func__, clp->cl_hostname); } static void nfs41_handle_backchannel_fault(struct nfs_client *clp) { set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); nfs4_schedule_state_manager(clp); dprintk("%s: server %s declared a backchannel fault\n", __func__, clp->cl_hostname); } static void nfs41_handle_cb_path_down(struct nfs_client *clp) { if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state) == 0) nfs4_schedule_state_manager(clp); } void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags, bool recovery) { if (!flags) return; dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n", __func__, clp->cl_hostname, clp->cl_clientid, flags); /* * If we're called from the state manager thread, then assume we're * already handling the RECLAIM_NEEDED and/or STATE_REVOKED. * Those flags are expected to remain set until we're done * recovering (see RFC5661, section 18.46.3). 
*/ if (recovery) goto out_recovery; if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) nfs41_handle_server_reboot(clp); if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED)) nfs41_handle_all_state_revoked(clp); if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | SEQ4_STATUS_ADMIN_STATE_REVOKED)) nfs41_handle_some_state_revoked(clp); if (flags & SEQ4_STATUS_LEASE_MOVED) nfs4_schedule_lease_moved_recovery(clp); if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) nfs41_handle_recallable_state_revoked(clp); out_recovery: if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT) nfs41_handle_backchannel_fault(clp); else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | SEQ4_STATUS_CB_PATH_DOWN_SESSION)) nfs41_handle_cb_path_down(clp); } static int nfs4_reset_session(struct nfs_client *clp) { struct rpc_cred *cred; int status; if (!nfs4_has_session(clp)) return 0; nfs4_begin_drain_session(clp); cred = nfs4_get_clid_cred(clp); status = nfs4_proc_destroy_session(clp->cl_session, cred); switch (status) { case 0: case -NFS4ERR_BADSESSION: case -NFS4ERR_DEADSESSION: break; case -NFS4ERR_BACK_CHAN_BUSY: case -NFS4ERR_DELAY: set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); status = 0; ssleep(1); goto out; default: status = nfs4_recovery_handle_error(clp, status); goto out; } memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); status = nfs4_proc_create_session(clp, cred); if (status) { dprintk("%s: session reset failed with status %d for server %s!\n", __func__, status, clp->cl_hostname); status = nfs4_handle_reclaim_lease_error(clp, status); goto out; } nfs41_finish_session_reset(clp); dprintk("%s: session reset was successful for server %s!\n", __func__, clp->cl_hostname); out: if (cred) put_rpccred(cred); return status; } static int nfs4_bind_conn_to_session(struct nfs_client *clp) { struct rpc_cred *cred; int ret; if (!nfs4_has_session(clp)) return 0; nfs4_begin_drain_session(clp); cred = nfs4_get_clid_cred(clp); ret = nfs4_proc_bind_conn_to_session(clp, cred); if (cred) put_rpccred(cred); clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); switch (ret) { case 0: dprintk("%s: bind_conn_to_session was successful for server %s!\n", __func__, clp->cl_hostname); break; case -NFS4ERR_DELAY: ssleep(1); set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); break; default: return nfs4_recovery_handle_error(clp, ret); } return 0; } #else /* CONFIG_NFS_V4_1 */ static int nfs4_reset_session(struct nfs_client *clp) { return 0; } static int nfs4_bind_conn_to_session(struct nfs_client *clp) { return 0; } #endif /* CONFIG_NFS_V4_1 */ static void nfs4_state_manager(struct nfs_client *clp) { int status = 0; const char *section = "", *section_sep = ""; /* Ensure exclusive access to NFSv4 state */ do { if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { section = "purge state"; status = nfs4_purge_lease(clp); if (status < 0) goto out_error; continue; } if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { section = "lease expired"; /* We're going to have to re-establish a clientid */ status = nfs4_reclaim_lease(clp); if (status < 0) goto out_error; continue; } /* Initialize or reset the session */ if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) { section = "reset session"; status = nfs4_reset_session(clp); if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) continue; if (status < 0) goto out_error; } /* Send BIND_CONN_TO_SESSION */ if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state)) { section = "bind conn to session"; status = nfs4_bind_conn_to_session(clp); if (status < 0) goto 
out_error; continue; } if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) { section = "check lease"; status = nfs4_check_lease(clp); if (status < 0) goto out_error; continue; } if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) { section = "migration"; status = nfs4_handle_migration(clp); if (status < 0) goto out_error; } if (test_and_clear_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state)) { section = "lease moved"; status = nfs4_handle_lease_moved(clp); if (status < 0) goto out_error; } /* First recover reboot state... */ if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) { section = "reclaim reboot"; status = nfs4_do_reclaim(clp, clp->cl_mvops->reboot_recovery_ops); if (status == -EAGAIN) continue; if (status < 0) goto out_error; nfs4_state_end_reclaim_reboot(clp); } /* Detect expired delegations... */ if (test_and_clear_bit(NFS4CLNT_DELEGATION_EXPIRED, &clp->cl_state)) { section = "detect expired delegations"; nfs_reap_expired_delegations(clp); continue; } /* Now recover expired state... */ if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { section = "reclaim nograce"; status = nfs4_do_reclaim(clp, clp->cl_mvops->nograce_recovery_ops); if (status == -EAGAIN) continue; if (status < 0) goto out_error; } nfs4_end_drain_session(clp); if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) { nfs_client_return_marked_delegations(clp); continue; } nfs4_clear_state_manager_bit(clp); /* Did we race with an attempt to give us more work? */ if (clp->cl_state == 0) break; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) break; } while (refcount_read(&clp->cl_count) > 1); return; out_error: if (strlen(section)) section_sep = ": "; pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s" " with error %d\n", section_sep, section, clp->cl_hostname, -status); ssleep(1); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); } static int nfs4_run_state_manager(void *ptr) { struct nfs_client *clp = ptr; allow_signal(SIGKILL); nfs4_state_manager(clp); nfs_put_client(clp); module_put_and_exit(0); return 0; } /* * Local variables: * c-basic-offset: 8 * End: */
zhiyisun/linux
fs/nfs/nfs4state.c
C
gpl-2.0
68,547
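A minimal, illustrative sketch only (not taken from the kernel file above): the nfs4_reclaim_open_state() and nfs4_do_reclaim() functions repeatedly take a spinlock, find one flagged entry, drop the lock to do slow recovery work, then retake the lock and restart the scan from the top because the list may have changed in the meantime. The user-space example below shows that same "drop the lock, do slow work, goto restart" shape with a pthread mutex; every name in it (node, list_head, process_slowly, process_all_pending) is invented for the example, and the reference counting the kernel uses (atomic_inc / atomic_inc_not_zero before dropping the lock) is omitted because the nodes here are stack-allocated and cannot disappear.

/* Build with: cc -pthread restart_scan.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	int pending;               /* roughly analogous to a "needs reclaim" flag */
	struct node *next;
};

static struct node *list_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Slow work that must not run while list_lock is held. */
static void process_slowly(struct node *n)
{
	printf("processing %d\n", n->value);
}

static void process_all_pending(void)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
restart:
	for (n = list_head; n != NULL; n = n->next) {
		if (!n->pending)
			continue;
		n->pending = 0;

		/* Drop the lock before doing anything slow. */
		pthread_mutex_unlock(&list_lock);
		process_slowly(n);

		/*
		 * The list may have changed while the lock was dropped, so
		 * retake the lock and rescan from the head, like the
		 * "goto restart" in the NFS code above.
		 */
		pthread_mutex_lock(&list_lock);
		goto restart;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct node a = { .value = 1, .pending = 1, .next = NULL };
	struct node b = { .value = 2, .pending = 1, .next = &a };

	list_head = &b;
	process_all_pending();
	return 0;
}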
/* This file is part of the ScriptDev2 Project. See AUTHORS file for Copyright information * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* ScriptData SDName: Boss_Netherspite SD%Complete: 75 SDComment: Nether portals partially implemented. Find spell ID for tail swipe added in patch 3.0.2 SDCategory: Karazhan EndScriptData */ #include "precompiled.h" #include "karazhan.h" enum { // netherspite spells SPELL_NETHERBURN = 30522, SPELL_VOID_ZONE = 37063, SPELL_NETHERBREATH = 38523, SPELL_EMPOWERMENT = 38549, SPELL_NETHER_INFUSION = 38688, // hard enrage spell SPELL_NETHERSPITE_ROAR = 38684, // on banish phase begin SPELL_SHADOWFORM = 38542, // banish visual spell SPELL_FACE_RANDOM_TARGET = 38546, // triggered by spell 38684 - currently not used SPELL_PORTAL_ATTUNEMENT = 30425, // void zone spells SPELL_CONSUMPTION = 28865, // ***** Netherspite portals spells ***** // // beam buffs SPELL_SERENITY_NS = 30467, SPELL_SERENITY_PLR = 30422, SPELL_DOMINANCE_NS = 30468, SPELL_DOMINANCE_PLR = 30423, SPELL_PERSEVERENCE_NS = 30466, SPELL_PERSEVERENCE_PLR = 30421, // beam debuffs (player with this aura cannot gain the same color buff) SPELL_EXHAUSTION_SER = 38638, SPELL_EXHAUSTION_DOM = 38639, SPELL_EXHAUSTION_PER = 38637, // spells which hit players (used only for visual - as seen from spell description) SPELL_BEAM_SER = 30401, SPELL_BEAM_DOM = 30402, SPELL_BEAM_PER = 30400, // spells which hit Netherspite SPELL_BEAM_GREEN = 30464, SPELL_BEAM_BLUE = 30463, SPELL_BEAM_RED = 30465, // portal visual spells SPELL_GREEN_PORTAL = 30490, SPELL_BLUE_PORTAL = 30491, SPELL_RED_PORTAL = 30487, // passive auras SPELL_SERENITY_PASSIVE = 30397, SPELL_DOMINANCE_PASSIVE = 30398, // note: for Perseverence, there isn't any passive spell - currently we use script timer SPELL_NETHER_BEAM = 30469, // spell triggered by the passive auras // SPELL_CLEAR_NETHER_BEAM = 37072, // not clear how to use this // emotes EMOTE_PHASE_BEAM = -1532089, EMOTE_PHASE_BANISH = -1532090, // npcs NPC_PORTAL_GREEN = 17367, NPC_PORTAL_BLUE = 17368, NPC_PORTAL_RED = 17369, NPC_VOID_ZONE = 16697, MAX_PORTALS = 3, }; struct SpawnLocation { float fX, fY, fZ, fO; }; // at first spawn portals got fixed coords, should be shuffled in subsequent beam phases static const SpawnLocation aPortalCoordinates[MAX_PORTALS] = { { -11195.14f, -1616.375f, 278.3217f, 6.230825f}, { -11108.13f, -1602.839f, 280.0323f, 3.717551f}, { -11139.78f, -1681.278f, 278.3217f, 1.396263f}, }; enum NetherspitePhases { BEAM_PHASE = 0, BANISH_PHASE = 1, }; static const uint32 auiPortals[MAX_PORTALS] = { NPC_PORTAL_GREEN, NPC_PORTAL_BLUE, NPC_PORTAL_RED, }; /*###### ## boss_netherspite ######*/ struct boss_netherspiteAI : public ScriptedAI { boss_netherspiteAI(Creature* pCreature) : ScriptedAI(pCreature) { m_pInstance = (ScriptedInstance*)pCreature->GetInstanceData(); Reset(); } ScriptedInstance* m_pInstance; NetherspitePhases m_uiActivePhase; 
uint32 m_uiEnrageTimer; uint32 m_uiVoidZoneTimer; uint32 m_uiPhaseSwitchTimer; uint32 m_uiNetherbreathTimer; uint32 m_uiEmpowermentTimer; std::vector<uint32> m_vPortalEntryList; void Reset() override { m_uiActivePhase = BEAM_PHASE; m_uiEmpowermentTimer = 10000; m_uiEnrageTimer = 9 * MINUTE * IN_MILLISECONDS; m_uiVoidZoneTimer = 15000; m_uiPhaseSwitchTimer = MINUTE * IN_MILLISECONDS; SetCombatMovement(true); // initialize the portal list m_vPortalEntryList.clear(); m_vPortalEntryList.resize(MAX_PORTALS); for (uint8 i = 0; i < MAX_PORTALS; ++i) m_vPortalEntryList[i] = auiPortals[i]; } void Aggro(Unit* /*pWho*/) override { if (m_pInstance) m_pInstance->SetData(TYPE_NETHERSPITE, IN_PROGRESS); DoSummonPortals(); DoCastSpellIfCan(m_creature, SPELL_NETHERBURN); } void JustDied(Unit* /*pKiller*/) override { if (m_pInstance) m_pInstance->SetData(TYPE_NETHERSPITE, DONE); } void JustReachedHome() override { if (m_pInstance) m_pInstance->SetData(TYPE_NETHERSPITE, FAIL); } void SwitchPhases() { if (m_uiActivePhase == BEAM_PHASE) { if (DoCastSpellIfCan(m_creature, SPELL_NETHERSPITE_ROAR) == CAST_OK) { DoCastSpellIfCan(m_creature, SPELL_SHADOWFORM, CAST_TRIGGERED); m_creature->RemoveAurasDueToSpell(SPELL_EMPOWERMENT); SetCombatMovement(false); m_creature->GetMotionMaster()->MoveIdle(); m_uiActivePhase = BANISH_PHASE; DoScriptText(EMOTE_PHASE_BANISH, m_creature); m_uiNetherbreathTimer = 2000; m_uiPhaseSwitchTimer = 30000; } } else { m_creature->RemoveAurasDueToSpell(SPELL_SHADOWFORM); SetCombatMovement(true); DoStartMovement(m_creature->getVictim()); m_uiActivePhase = BEAM_PHASE; DoScriptText(EMOTE_PHASE_BEAM, m_creature); DoSummonPortals(); m_uiEmpowermentTimer = 10000; m_uiPhaseSwitchTimer = MINUTE * IN_MILLISECONDS; } // reset threat every phase switch DoResetThreat(); } void DoSummonPortals() { for (uint8 i = 0; i < MAX_PORTALS; ++i) m_creature->SummonCreature(m_vPortalEntryList[i], aPortalCoordinates[i].fX, aPortalCoordinates[i].fY, aPortalCoordinates[i].fZ, aPortalCoordinates[i].fO, TEMPSUMMON_TIMED_DESPAWN, 60000); // randomize the portals after the first summon std::random_shuffle(m_vPortalEntryList.begin(), m_vPortalEntryList.end()); } void JustSummoned(Creature* pSummoned) override { switch (pSummoned->GetEntry()) { case NPC_VOID_ZONE: pSummoned->CastSpell(pSummoned, SPELL_CONSUMPTION, false); break; case NPC_PORTAL_RED: pSummoned->CastSpell(pSummoned, SPELL_RED_PORTAL, false); break; case NPC_PORTAL_GREEN: pSummoned->CastSpell(pSummoned, SPELL_GREEN_PORTAL, false); break; case NPC_PORTAL_BLUE: pSummoned->CastSpell(pSummoned, SPELL_BLUE_PORTAL, false); break; } } void UpdateAI(const uint32 uiDiff) override { if (!m_creature->SelectHostileTarget() || !m_creature->getVictim()) return; if (m_uiPhaseSwitchTimer <= uiDiff) SwitchPhases(); else m_uiPhaseSwitchTimer -= uiDiff; if (m_uiEnrageTimer) { if (m_uiEnrageTimer <= uiDiff) { if (DoCastSpellIfCan(m_creature, SPELL_NETHER_INFUSION) == CAST_OK) m_uiEnrageTimer = 0; } else m_uiEnrageTimer -= uiDiff; } if (m_uiActivePhase == BEAM_PHASE) { if (m_uiVoidZoneTimer < uiDiff) { if (Unit* pTarget = m_creature->SelectAttackingTarget(ATTACKING_TARGET_RANDOM, 0)) { if (DoCastSpellIfCan(pTarget, SPELL_VOID_ZONE) == CAST_OK) m_uiVoidZoneTimer = 15000; } } else m_uiVoidZoneTimer -= uiDiff; if (m_uiEmpowermentTimer) { if (m_uiEmpowermentTimer <= uiDiff) { if (DoCastSpellIfCan(m_creature, SPELL_EMPOWERMENT) == CAST_OK) { DoCastSpellIfCan(m_creature, SPELL_PORTAL_ATTUNEMENT, CAST_TRIGGERED); m_uiEmpowermentTimer = 0; } } else m_uiEmpowermentTimer -= uiDiff; } 
DoMeleeAttackIfReady(); } else { if (m_uiNetherbreathTimer < uiDiff) { if (DoCastSpellIfCan(m_creature, SPELL_NETHERBREATH) == CAST_OK) m_uiNetherbreathTimer = urand(4000, 5000); } else m_uiNetherbreathTimer -= uiDiff; } } }; CreatureAI* GetAI_boss_netherspite(Creature* pCreature) { return new boss_netherspiteAI(pCreature); } /*###### ## npc_netherspite_portal ######*/ struct npc_netherspite_portalAI : public Scripted_NoMovementAI { npc_netherspite_portalAI(Creature* pCreature) : Scripted_NoMovementAI(pCreature) { m_pInstance = (ScriptedInstance*)pCreature->GetInstanceData(); Reset(); } ScriptedInstance* m_pInstance; uint32 m_uiPassiveSpellTimer; uint32 m_uiOrientationTimer; void Reset() { m_uiPassiveSpellTimer = 0; m_uiOrientationTimer = 0; } void MoveInLineOfSight(Unit* pWho) { } void AttackStart(Unit* pWho) { } void ReceiveAIEvent(AIEventType eventType, Creature* /*pSender*/, Unit* pInvoker, uint32 /*uiMiscValue*/) override { if (eventType == AI_EVENT_CUSTOM_A) { if (pInvoker->GetEntry() != NPC_NETHERSPITE) return; // update orientation every second to focus on Netherspite m_uiOrientationTimer = 1000; m_creature->SetFacingToObject(pInvoker); switch (m_creature->GetEntry()) { case NPC_PORTAL_GREEN: if (!m_creature->HasAura(SPELL_SERENITY_PASSIVE)) DoCastSpellIfCan(m_creature, SPELL_SERENITY_PASSIVE, CAST_TRIGGERED); break; case NPC_PORTAL_BLUE: if (!m_creature->HasAura(SPELL_DOMINANCE_PASSIVE)) DoCastSpellIfCan(m_creature, SPELL_DOMINANCE_PASSIVE, CAST_TRIGGERED); break; case NPC_PORTAL_RED: // Red portal spell is missing - handled in script if (!m_uiPassiveSpellTimer) m_uiPassiveSpellTimer = 1000; break; } } } void UpdateAI(const uint32 uiDiff) { if (m_uiPassiveSpellTimer) { if (m_uiPassiveSpellTimer <= uiDiff) { if (DoCastSpellIfCan(m_creature, SPELL_NETHER_BEAM, CAST_TRIGGERED) == CAST_OK) m_uiPassiveSpellTimer = 1000; } else m_uiPassiveSpellTimer -= uiDiff; } if (m_uiOrientationTimer) { if (m_uiOrientationTimer <= uiDiff) { if (m_pInstance) { if (Creature* pNetherspite = m_pInstance->GetSingleCreatureFromStorage(NPC_NETHERSPITE)) m_creature->SetFacingToObject(pNetherspite); } m_uiOrientationTimer = 1000; } else m_uiOrientationTimer -= uiDiff; } } }; CreatureAI* GetAI_npc_netherspite_portal(Creature* pCreature) { return new npc_netherspite_portalAI(pCreature); } bool EffectScriptEffectCreature_spell_portal_attunement(Unit* pCaster, uint32 uiSpellId, SpellEffectIndex uiEffIndex, Creature* pCreatureTarget, ObjectGuid /*originalCasterGuid*/) { if (uiSpellId == SPELL_PORTAL_ATTUNEMENT && uiEffIndex == EFFECT_INDEX_0) { if (pCreatureTarget->GetEntry() == NPC_PORTAL_RED || pCreatureTarget->GetEntry() == NPC_PORTAL_GREEN || pCreatureTarget->GetEntry() == NPC_PORTAL_BLUE) pCreatureTarget->AI()->SendAIEvent(AI_EVENT_CUSTOM_A, pCaster, pCreatureTarget); return true; } return false; } void AddSC_boss_netherspite() { Script* pNewScript; pNewScript = new Script; pNewScript->Name = "boss_netherspite"; pNewScript->GetAI = &GetAI_boss_netherspite; pNewScript->RegisterSelf(); pNewScript = new Script; pNewScript->Name = "npc_netherspite_portal"; pNewScript->GetAI = &GetAI_npc_netherspite_portal; pNewScript->pEffectScriptEffectNPC = &EffectScriptEffectCreature_spell_portal_attunement; pNewScript->RegisterSelf(); }
scriptdev2/scriptdev2
scripts/eastern_kingdoms/karazhan/boss_netherspite.cpp
C++
gpl-2.0
13,668
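An illustrative sketch only (not part of the ScriptDev2 file above): UpdateAI() in the Netherspite script relies on the common countdown-timer idiom, where each ability keeps a millisecond timer that is either fired and re-armed when it drops to or below the per-tick diff, or decremented otherwise. The small stand-alone C example below shows just that idiom; the names (boss_timers, update_boss, fire_void_zone, switch_phase) and the simulated tick loop are invented for the example, while the 15000 ms and 60000 ms re-arm values mirror the script's void zone and phase-switch timers.

#include <stdint.h>
#include <stdio.h>

struct boss_timers {
	uint32_t void_zone_ms;      /* like m_uiVoidZoneTimer */
	uint32_t phase_switch_ms;   /* like m_uiPhaseSwitchTimer */
};

static void fire_void_zone(void) { puts("cast Void Zone"); }
static void switch_phase(void)   { puts("switch phase"); }

/* Called once per AI tick with the elapsed milliseconds since the last tick. */
static void update_boss(struct boss_timers *t, uint32_t diff_ms)
{
	if (t->void_zone_ms <= diff_ms) {
		fire_void_zone();
		t->void_zone_ms = 15000;    /* re-arm, as the script does */
	} else {
		t->void_zone_ms -= diff_ms;
	}

	if (t->phase_switch_ms <= diff_ms) {
		switch_phase();
		t->phase_switch_ms = 60000;
	} else {
		t->phase_switch_ms -= diff_ms;
	}
}

int main(void)
{
	struct boss_timers t = { .void_zone_ms = 15000, .phase_switch_ms = 60000 };
	uint32_t i;

	/* Simulate 70 one-second ticks. */
	for (i = 0; i < 70; ++i)
		update_boss(&t, 1000);
	return 0;
}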
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. */ #ifdef CONFIG_FELICA #include "felica.h" #include <linux/init.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/termios.h> #include <linux/serial_core.h> #include <linux/uaccess.h> #define F_WAKE_LOCK #ifdef F_WAKE_LOCK #include <linux/wakelock.h> #endif #include <linux/types.h> /* jmodel */ #if defined(CONFIG_ARCH_EXYNOS) #include <mach/smc.h> #elif defined(CONFIG_ARCH_APQ8064) #include <mach/scm.h> #endif #include <asm/system_info.h> /****************************************************************************** * log ******************************************************************************/ #ifdef FELICA_DEBUG #define FELICA_LOG_DEBUG(fmt, args...) printk(KERN_INFO fmt, ## args) #else #define FELICA_LOG_DEBUG(fmt, args...) #endif #define FELICA_LOG_ERR(fmt, args...) printk(KERN_ERR fmt, ## args) /****************************************************************************** * global variable ******************************************************************************/ #ifdef F_WAKE_LOCK struct wake_lock felica_wake_1; struct wake_lock felica_wake_2; #endif static struct class *felica_class; /* storages for communicate to netlink */ static int gfa_open_cnt; static int gfa_pid; static int gfa_connect_flag; static struct sock *gfanl_sk; static char gfa_send_str[FELICA_NL_MSG_SIZE]; static char gfa_rcv_str[FELICA_NL_MSG_SIZE]; static int gfa_wait_flag; /* R/W functions availability information storage */ static char gfelica_rw_status; /* IRQ data storage for INT terminal monitoring */ struct felica_int_irqdata { struct delayed_work work; wait_queue_head_t read_wait; int irq_done; int open_flag; }; static struct felica_int_irqdata gint_irq; static struct felica_int_irqdata *pgint_irq = &gint_irq; /* storages for access restriction */ static uid_t gmfc_uid = -1; static uid_t gmfl_uid = -1; static uid_t grwm_uid = -1; static uid_t gdiag_uid = -1; #ifdef CONFIG_NFC_FELICA static uid_t gnfc_uid = -1; struct file *pg_tty; static int gnfc_open_cnt; #endif /* CONFIG_NFC_FELICA */ /* package name's storage for access restriction */ static char gdiag_name[DIAG_NAME_MAXSIZE + 1]; static uid_t gant_uid = -1; static int gi2c_address; static char gi2c_antaddress; static char gi2c_lockaddress; static struct i2c_msg gread_msgs[] = { { .addr = 0, .flags = 0, .len = 0, .buf = NULL, }, { .addr = 0, .flags = 0, .len = 0, .buf = NULL, }, }; static struct i2c_msg gwrite_msgs[] = { { .addr = 0, .flags = 0, .len = 0, .buf = NULL, }, }; /* jmodel */ #if defined(CONFIG_ARCH_EXYNOS) #define FELICA_UART1RX EXYNOS5410_GPA0(4) #endif #ifdef F_WAKE_LOCK static int tmout_1 = 3*1000; #endif #ifdef CONFIG_NFC_FELICA /****************************************************************************** * Add global variable ******************************************************************************/ struct icc_poll_data { wait_queue_head_t read_wait; wait_queue_head_t rsp_wait; wait_queue_head_t dummy_wait; int handler_done; int rsp_done; struct delayed_work work; int device_status; int read_error; int open_flag; int 
available_flag; }; struct poll_data { wait_queue_head_t read_wait; int irq_handler_done; struct delayed_work work; int device_status; int read_error; int open_flag; }; static struct icc_poll_data gfelica_poll_data; static struct icc_poll_data g_available_data; static struct icc_poll_data* available_d = &g_available_data; static int guartcc_start_req = UARTCC_NFC_START_ENDPROC; static int guartcc_felica_status = UARTCC_FELICA_STATAUS_IDLE; static int g_cen_sts = 0; static int g_rfs_sts = 0; static int felica_varying_gpio_intu; #endif /* CONFIG_NFC_FELICA */ /****************************************************************************** * /dev/felica ******************************************************************************/ /* character device definition */ static int felica_uart_port; static dev_t devid_felica_uart; static struct cdev cdev_felica_uart; static const struct file_operations fops_felica_uart = { .owner = THIS_MODULE, .open = felica_uart_open, .release = felica_uart_close, .read = felica_uart_read, .write = felica_uart_write, .fsync = felica_uart_sync, .unlocked_ioctl = felica_uart_ioctl, }; struct felica_sem_data { struct semaphore felica_sem; }; static struct felica_sem_data *dev_sem; /* * initialize device */ static void felica_uart_init(void) { int ret; struct device *device_felica_uart; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_felica_uart = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_felica_uart, FELICA_BASEMINOR, FELICA_MINOR_COUNT, FELICA_UART_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_felica_uart, &fops_felica_uart); ret = cdev_add(&cdev_felica_uart, devid_felica_uart, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_felica_uart, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_felica_uart = device_create(felica_class, NULL, devid_felica_uart, NULL, FELICA_UART_NAME); if (IS_ERR(device_felica_uart)) { cdev_del(&cdev_felica_uart); unregister_chrdev_region(devid_felica_uart, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } dev_sem = kmalloc(sizeof(struct felica_sem_data), GFP_KERNEL); if (!dev_sem) { cdev_del(&cdev_felica_uart); unregister_chrdev_region(devid_felica_uart, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(dev_sem malloc)", __func__); return; } sema_init(&dev_sem->felica_sem, 1); #ifdef CONFIG_NFC_FELICA memset((void *)&gfelica_poll_data, 0x00, sizeof(struct icc_poll_data)); init_waitqueue_head(&gfelica_poll_data.rsp_wait); gfelica_poll_data.rsp_done = 0; gfelica_poll_data.open_flag = 0; #endif /* CONFIG_NFC_FELICA */ FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_felica_uart), MINOR(devid_felica_uart)); } /* * finalize device */ static void felica_uart_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); kfree(dev_sem); device_destroy(felica_class, devid_felica_uart); cdev_del(&cdev_felica_uart); unregister_chrdev_region(devid_felica_uart, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); } /* * open device */ static int felica_uart_open(struct inode *inode, struct file *file) { uid_t uid; int ret; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); // FELICA_LOG_DEBUG("[MFDD] %s system_rev: 0x%x uart port: %d ", // __func__, system_rev, felica_uart_port); uid = __task_cred(current)->uid; if ((uid != gmfc_uid) && (uid != gdiag_uid) && (uid != gant_uid)) { 
FELICA_LOG_DEBUG ("[MFDD] %s END, uid=[%d], gmfc_uid=[%d], gdiag_uid=[%d]", __func__, uid, gmfc_uid, gdiag_uid); return -EACCES; } if (down_interruptible(&dev_sem->felica_sem)) { FELICA_LOG_ERR("[MFDD] %s ERROR(down_interruptible)", \ __func__); return -ERESTARTSYS; } if (gfa_open_cnt == 0) { #ifdef CONFIG_NFC_FELICA switch (guartcc_start_req) { case UARTCC_NFC_START_AUTOPOLLING: /* AutoPoiing */ { /* felica open-waiting for nfc autopolling */ ret = felica_uart_open_wait_for_polling(); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s open-waiting fail=[%d]\n", \ __func__, ret); return ret; } FELICA_LOG_DEBUG("[MFDD] %s open wait release\n", \ __func__); } break; case UARTCC_NFC_START_ENDPROC: /* Unused NFC */ /* set felica status -> in use */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_IN_USE); FELICA_LOG_DEBUG("[MFDD] %s started using FeliCa=[%d]\n", \ __func__, guartcc_start_req); break; default: /* NFC in use */ FELICA_LOG_ERR("[MFDD] %s NFC in use=[%d]\n", \ __func__, guartcc_start_req); /* reset felica status -> idle */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_IDLE); up(&dev_sem->felica_sem); return -EFAULT; break; } #endif /* CONFIG_NFC_FELICA */ memset(gfa_send_str, 0, FELICA_NL_MSG_SIZE); memset(gfa_rcv_str, 0, FELICA_NL_MSG_SIZE); gfa_send_str[0] = FELICA_NL_REQ_OPEN; gfa_send_str[1] = felica_uart_port; ret = felica_nl_send_msg(2); if (ret == 0) { felica_nl_wait_ret_msg(); if (gfa_rcv_str[1] == FELICA_NL_EFAILED) { FELICA_LOG_ERR("[MFDD] %s Open Fail", __func__); #ifdef CONFIG_NFC_FELICA /* reset felica status -> idle */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_IDLE); #endif /* CONFIG_NFC_FELICA */ up(&dev_sem->felica_sem); return -EFAULT; } } else { FELICA_LOG_ERR("[MFDD] %s felica_nl_send_msg Fail", \ __func__); #ifdef CONFIG_NFC_FELICA /* reset felica status -> idle */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_IDLE); #endif /* CONFIG_NFC_FELICA */ up(&dev_sem->felica_sem); return -EFAULT; } #ifdef F_WAKE_LOCK wake_lock(&felica_wake_2); FELICA_LOG_DEBUG("[MFDD] %s Wake Lock(2)", __func__); #endif } gfa_open_cnt++; up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } #ifdef CONFIG_NFC_FELICA /* * felica open-waiting for nfc autopolling */ static int felica_uart_open_wait_for_polling(void) { int ret = 0; struct icc_poll_data *felica_poll = &gfelica_poll_data; FELICA_LOG_DEBUG("[MFDD] %s START\n", __func__); /* set felica status -> wait polling */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_WAIT_POLLING); up(&dev_sem->felica_sem); ret = wait_event_interruptible(felica_poll->rsp_wait, felica_poll->rsp_done == 1); if (ret < 0) { /* reset felica status -> idle */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_IDLE); FELICA_LOG_ERR ("[MFDD] %s ERROR(wait_event_interruptible), ret=[%d]\n", \ __func__, ret); return -EIO; } if (down_interruptible(&dev_sem->felica_sem)) { /* reset felica status -> idle */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_IDLE); FELICA_LOG_ERR("[MFDD] %s ERROR(down_interruptible)\n", \ __func__); return -ERESTARTSYS; } /* set felica status -> in use */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_IN_USE); felica_poll->rsp_done = 0; FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); return 0; } #endif /* CONFIG_NFC_FELICA */ /* * close device */ static int felica_uart_close(struct inode *inode, struct file *file) { int ret; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (down_interruptible(&dev_sem->felica_sem)) { FELICA_LOG_ERR("[MFDD] %s ERROR(down_interruptible)", \ __func__); return -ERESTARTSYS; } 
gfa_open_cnt--; if (gfa_open_cnt == 0) { memset(gfa_send_str, 0, FELICA_NL_MSG_SIZE); memset(gfa_rcv_str, 0, FELICA_NL_MSG_SIZE); gfa_send_str[0] = FELICA_NL_REQ_CLOSE; ret = felica_nl_send_msg(1); if (ret == 0) { felica_nl_wait_ret_msg(); if (gfa_rcv_str[1] == FELICA_NL_EFAILED) { FELICA_LOG_ERR("[MFDD] %s Close Fail",\ __func__); gfa_open_cnt++; up(&dev_sem->felica_sem); return -EFAULT; } #ifdef CONFIG_NFC_FELICA /* set felica status -> idle */ uartcc_set_felica_status(UARTCC_FELICA_STATAUS_IDLE); if( 1 == available_d->available_flag ){ if( 0 != g_cen_sts ){ if( GPIO_VALUE_LOW != g_rfs_sts ){ available_d->rsp_done = 1; FELICA_LOG_DEBUG("wake up available"); wake_up_interruptible(&available_d->read_wait); } } } #endif /* CONFIG_NFC_FELICA */ } else { FELICA_LOG_ERR("[MFDD] %s felica_nl_send_msg Fail", \ __func__); gfa_open_cnt++; up(&dev_sem->felica_sem); return -EFAULT; } #ifdef F_WAKE_LOCK wake_unlock(&felica_wake_2); FELICA_LOG_DEBUG("[MFDD] %s Wake UnLock(2)", __func__); #endif } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t felica_uart_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { int ret = 0; int nlret; size_t wk_len = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (down_interruptible(&dev_sem->felica_sem)) { FELICA_LOG_ERR("[MFDD] %s ERROR(down_interruptible)", \ __func__); return -ERESTARTSYS; } memset(gfa_send_str, 0, FELICA_NL_MSG_SIZE); memset(gfa_rcv_str, 0, FELICA_NL_MSG_SIZE); wk_len = len; if (FELICA_NL_MSG_DATA_SIZE < wk_len) { FELICA_LOG_DEBUG("[MFDD] %s read max size over [%d]", __func__, wk_len); wk_len = FELICA_NL_MSG_DATA_SIZE; } gfa_send_str[0] = FELICA_NL_REQ_READ; gfa_send_str[1] = (char)(wk_len >> 8); gfa_send_str[2] = (char)wk_len; nlret = felica_nl_send_msg(3); wk_len = 0; if (nlret == 0) { felica_nl_wait_ret_msg(); if (gfa_rcv_str[1] == FELICA_NL_SUCCESS) { wk_len = (((int)gfa_rcv_str[2] << 8) & 0xFF00) | \ (int)gfa_rcv_str[3]; ret = copy_to_user(buf, &gfa_rcv_str[4], wk_len); if (ret != 0) { FELICA_LOG_ERR ("[MFDD]%s ERROR(copy_from_user), ret=[%d]", __func__, ret); up(&dev_sem->felica_sem); return -EFAULT; } *ppos = *ppos + wk_len; } else { FELICA_LOG_DEBUG(" %s FAIL", __func__); up(&dev_sem->felica_sem); return -EFAULT; } } else { FELICA_LOG_DEBUG(" %s FAIL", __func__); up(&dev_sem->felica_sem); return -EFAULT; } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return (ssize_t) wk_len; } /* * write operation */ static ssize_t felica_uart_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { int ret = 0; int nlret; size_t wk_len = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (down_interruptible(&dev_sem->felica_sem)) { FELICA_LOG_ERR("[MFDD] %s ERROR(down_interruptible)", \ __func__); return -ERESTARTSYS; } memset(gfa_send_str, 0, FELICA_NL_MSG_SIZE); memset(gfa_rcv_str, 0, FELICA_NL_MSG_SIZE); wk_len = len; if (FELICA_NL_MSG_DATA_SIZE < wk_len) { FELICA_LOG_DEBUG("[MFDD] %s read max size over [%d]", __func__, wk_len); wk_len = FELICA_NL_MSG_DATA_SIZE; } gfa_send_str[0] = FELICA_NL_REQ_WRITE; gfa_send_str[1] = (char)(wk_len >> 8); gfa_send_str[2] = (char)wk_len; ret = copy_from_user(&gfa_send_str[3], data, wk_len); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user), ret=[%d]", __func__, ret); up(&dev_sem->felica_sem); return -EFAULT; } nlret = felica_nl_send_msg(3 + len); if (nlret == 0) { wk_len = 0; felica_nl_wait_ret_msg(); wk_len = (((int)gfa_rcv_str[2] << 8) & 0xFF00) | \ 
(int)gfa_rcv_str[3]; if (gfa_rcv_str[1] == FELICA_NL_EFAILED) { FELICA_LOG_ERR("[MFDD] %s Write Fail", __func__); up(&dev_sem->felica_sem); return -EINVAL; } } else { FELICA_LOG_ERR("[MFDD] %s felica_nl_send_msg Fail", __func__); up(&dev_sem->felica_sem); return -EINVAL; } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return wk_len; } /* * sync operation */ static int felica_uart_sync(struct file *file, loff_t start, loff_t len, int datasync) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * available operation */ static long felica_uart_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int ret_str = 0; int ret; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); FELICA_LOG_DEBUG("[MFDD] %s cmd = [0x%x]", __func__, cmd); /* Ignore [TCGETS:0x5401] and [TCSBRK:0x5409] */ if (TCGETS == cmd || TCSBRK == cmd) { FELICA_LOG_DEBUG("[MFDD] %s cmd throw [0x%x]", __func__, cmd); return 0; } if (down_interruptible(&dev_sem->felica_sem)) { FELICA_LOG_ERR("[MFDD] %s ERROR(down_interruptible)", \ __func__); return -ERESTARTSYS; } memset(gfa_send_str, 0, FELICA_NL_MSG_SIZE); memset(gfa_rcv_str, 0, FELICA_NL_MSG_SIZE); gfa_send_str[0] = FELICA_NL_REQ_AVAIABLE; ret = felica_nl_send_msg(1); if (ret == 0) { // 20130216 add for APQ support -S- /* 1ms sleep */ usleep_range(1000, 1000); // 20130216 add for APQ support -E- felica_nl_wait_ret_msg(); if (gfa_rcv_str[1] == FELICA_NL_SUCCESS) { /* create response data */ ret_str = (((unsigned int)gfa_rcv_str[2] << 8) & 0xFF00) | \ (unsigned int)gfa_rcv_str[3]; FELICA_LOG_DEBUG("Available Success data size [%d]", \ ret_str); } else { FELICA_LOG_ERR("[MFDD] %s Available Fail", __func__); up(&dev_sem->felica_sem); return -EINVAL; } } else { FELICA_LOG_ERR("[MFDD] %s felica_nl_send_msg Fail", __func__); up(&dev_sem->felica_sem); return -EINVAL; } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return put_user(ret_str, (unsigned int __user *)arg); } /* * create netlink socket */ static void felica_nl_init(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); gfa_connect_flag = 0; gfa_pid = 0; gfa_wait_flag = 0; memset(gfa_send_str, 0, FELICA_NL_MSG_SIZE); memset(gfa_rcv_str, 0, FELICA_NL_MSG_SIZE); gfanl_sk = netlink_kernel_create(&init_net, FELICA_NL_NETLINK_USER, 0, felica_nl_recv_msg, NULL, THIS_MODULE); if (!gfanl_sk) FELICA_LOG_ERR("Error creating socket. %s\n", __func__); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * release netlink socket */ static void felica_nl_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); netlink_kernel_release(gfanl_sk); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * send message to FeliCa-Serial-Connector */ static int felica_nl_send_msg(int len) { struct nlmsghdr *nlh; struct sk_buff *skb_out; int msg_size = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (!gfanl_sk) { FELICA_LOG_ERR("[MFDD]Error Not creating socket. 
%s\n", __func__); return 1; } if (gfa_pid == 0) { FELICA_LOG_ERR("[MFDD]Error Not Rcv Connect Msg %s\n", __func__); return 1; } msg_size = len; skb_out = nlmsg_new(msg_size, 0); if (!skb_out) { FELICA_LOG_ERR("Failed to allocate new skb_out %s\n", __func__); return 1; } nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0); NETLINK_CB(skb_out).dst_group = 0; memcpy(NLMSG_DATA(nlh), gfa_send_str, msg_size); /* "skb_out" will release by netlink.*/ nlmsg_unicast(gfanl_sk, skb_out, gfa_pid); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * receive message from FeliCa-Serial-Connector */ static void felica_nl_recv_msg(struct sk_buff *skb) { struct nlmsghdr *nlh; struct sk_buff *wskb; #if defined(CONFIG_MACH_T0) || defined(CONFIG_MACH_M3) int port_threshold = 0; #endif FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (!skb) { FELICA_LOG_ERR("[MFDD] %s ERROR(skb NULL)", __func__); return; } wskb = skb_get(skb); if (wskb && (wskb->len > NLMSG_SPACE(0))) { nlh = nlmsg_hdr(wskb); memcpy(gfa_rcv_str, NLMSG_DATA(nlh), sizeof(gfa_rcv_str)); if ((gfa_rcv_str[0] == FELICA_NL_CONNECT_MSG) && (gfa_connect_flag == 0)) { /* pid of sending process */ gfa_pid = nlh->nlmsg_pid; #if defined(CONFIG_MACH_T0) port_threshold = 0x0a; #elif defined(CONFIG_MACH_M3) port_threshold = 0x02; #endif if (felica_get_tamper_fuse_cmd() != 1) { /* jmodel */ #if defined(CONFIG_ARCH_EXYNOS) s3c_gpio_cfgall_range(FELICA_UART1RX, 2,\ S3C_GPIO_SFN(2), S3C_GPIO_PULL_DOWN); felica_uart_port = 1; #elif defined(CONFIG_ARCH_APQ8064) felica_uart_port = 2; #endif felica_set_felica_info(); felica_uart_init(); felica_pon_init(); felica_cen_init(); felica_rfs_init(); felica_rws_init(); felica_ant_init(); #ifdef CONFIG_NFC_FELICA snfc_uart_init(); cxd2235power_init(); hsel_init(); snfc_rfs_init(); available_poll_init(); #endif if (gdiag_name[0] != 0x00) felica_uid_init(); } gfa_connect_flag = 1; } else if ((gfa_rcv_str[0] == FELICA_NL_RESPONCE) && (gfa_pid == nlh->nlmsg_pid)) { /* wake up */ gfa_wait_flag = 1; } else { FELICA_LOG_ERR("[MFDD] %s ERROR(RCV Undefine MSG)", __func__); FELICA_LOG_ERR("RCV MSG [%d]", gfa_rcv_str[0]); FELICA_LOG_ERR("rcv pid [%d]", nlh->nlmsg_pid); FELICA_LOG_ERR("gfa_pid [%d]", gfa_pid); } } kfree_skb(skb); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } static void felica_set_felica_info(void) { FELICA_LOG_DEBUG("[MFDD] %s START ", __func__); memset(gdiag_name, 0x00, DIAG_NAME_MAXSIZE + 1); gread_msgs[0].flags = gfa_rcv_str[MSG_READ1_FLAGS_OFFSET]; gread_msgs[0].len = gfa_rcv_str[MSG_READ1_LEN_OFFSET]; gread_msgs[1].flags = gfa_rcv_str[MSG_READ2_FLAGS_OFFSET]; gread_msgs[1].len = gfa_rcv_str[MSG_READ2_LEN_OFFSET]; gwrite_msgs[0].flags = gfa_rcv_str[MSG_WRITE_FLAGS_OFFSET]; gwrite_msgs[0].len = gfa_rcv_str[MSG_WRITE_LEN_OFFSET]; gi2c_lockaddress = gfa_rcv_str[MSG_LOCK_ADDR_OFFSET]; gi2c_address = gfa_rcv_str[MSG_I2C_ADDR_OFFSET]; memcpy(gdiag_name, &gfa_rcv_str[MSG_DIAG_NAME_OFFSET], DIAG_NAME_MAXSIZE); gmfc_uid = (((int)gfa_rcv_str[MSG_MFC_UID_FRONT_OFFSET] << 8) & 0xFF00) | (int)gfa_rcv_str[MSG_MFC_UID_BACK_OFFSET]; gmfl_uid = (((int)gfa_rcv_str[MSG_MFL_UID_FRONT_OFFSET] << 8) & 0xFF00) | (int)gfa_rcv_str[MSG_MFL_UID_BACK_OFFSET]; gi2c_antaddress = gfa_rcv_str[MSG_ANT_ADDR_OFFSET]; gant_uid = (((int)gfa_rcv_str[MSG_ANT_UID_FRONT_OFFSET] << 8) & 0xFF00) | (int)gfa_rcv_str[MSG_ANT_UID_BACK_OFFSET]; #ifdef CONFIG_NFC_FELICA gnfc_uid = (((int)gfa_rcv_str[MSG_NFC_UID_FRONT_OFFSET] << 8) & 0xFF00) | (int)gfa_rcv_str[MSG_NFC_UID_BACK_OFFSET]; snfc_cen_sts_init(); #endif FELICA_LOG_DEBUG("[MFDD] %s END", 
__func__); } /* * waiting to receive messages from FeliCa-Serial-Connector */ static void felica_nl_wait_ret_msg(void) { unsigned int cnt = 0; FELICA_LOG_DEBUG("[MFDD] %s START ", __func__); while (1) { if (gfa_wait_flag == 1) { FELICA_LOG_DEBUG("[MFDD] %s sleep cnt [%d]", __func__, cnt); break; } mdelay(1); cnt++; } gfa_wait_flag = 0; FELICA_LOG_DEBUG("[MFDD] %s END ", __func__); } #if defined(CONFIG_ARCH_EXYNOS) static int felica_smc_read_oemflag(u32 ctrl_word, u32 *val) { register u32 reg0 __asm__("r0"); register u32 reg1 __asm__("r1"); register u32 reg2 __asm__("r2"); register u32 reg3 __asm__("r3"); u32 idx = 0; for (idx = 0; reg2 != ctrl_word; idx++) { reg0 = -202; reg1 = 1; reg2 = idx; __asm__ volatile ("smc 0\n" : "+r" (reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3) ); if (reg1) return -1; } reg0 = -202; reg1 = 1; reg2 = idx; __asm__ volatile ("smc 0\n" : "+r" (reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3) ); if (reg1) return -1; *val = reg2; return 0; } static int felica_Cpu0(void) { int ret = 0; struct cpumask mask = CPU_MASK_CPU0; FELICA_LOG_DEBUG("System has %d CPU's, we are on CPU #%d\n" "\tBinding this process to CPU #0.\n" "\tactive mask is %lx, setting it to mask=%lx\n", nr_cpu_ids, raw_smp_processor_id(), cpu_active_mask->bits[0], mask.bits[0]); ret = set_cpus_allowed_ptr(current, &mask); if (0 != ret) FELICA_LOG_DEBUG("set_cpus_allowed_ptr=%d.\n", ret); FELICA_LOG_DEBUG("And now we are on CPU #%d", raw_smp_processor_id()); return ret; } static int felica_CpuAll(void) { int ret = 0; struct cpumask mask = CPU_MASK_ALL; FELICA_LOG_DEBUG("System has %d CPU's, we are on CPU #%d\n" "\tBinding this process to CPU #0.\n" "\tactive mask is %lx, setting it to mask=%lx\n", nr_cpu_ids, raw_smp_processor_id(), cpu_active_mask->bits[0], mask.bits[0]); ret = set_cpus_allowed_ptr(current, &mask); if (0 != ret) FELICA_LOG_DEBUG("set_cpus_allowed_ptr=%d.\n", ret); FELICA_LOG_DEBUG("And now we are on CPU #%d", raw_smp_processor_id()); return ret; } static uint8_t felica_get_tamper_fuse_cmd(void) { u32 fuse_id = 0; int ret; ret = felica_Cpu0(); if (0 != ret) { FELICA_LOG_DEBUG("changing core failed!"); return -1; } FELICA_LOG_DEBUG("get_fuse"); if (felica_smc_read_oemflag(0x80010001, (u32 *)&fuse_id) < 0) { FELICA_LOG_ERR("[MFDD] %s read flag error", __func__); return -1; } FELICA_LOG_DEBUG("[MFDD] Kernel Status[%x]", fuse_id); felica_CpuAll(); return (uint8_t)fuse_id; } #elif defined(CONFIG_ARCH_APQ8064) static uint8_t felica_get_tamper_fuse_cmd(void) { uint32_t fuse_id = FELICA_HLOS_IMG_TAMPER_FUSE; void *cmd_buf; size_t cmd_len; size_t resp_len = 0; uint8_t resp_buf; FELICA_LOG_DEBUG("[MFDD] %s START ", __func__); resp_len = sizeof(resp_buf); cmd_buf = (void *)&fuse_id; cmd_len = sizeof(fuse_id); scm_call(FELICA_SCM_SVC_FUSE, FELICA_SCM_IS_SW_FUSE_BLOWN_ID, cmd_buf, cmd_len, &resp_buf, resp_len); FELICA_LOG_DEBUG("[MFDD] resp_buf = %d\n", resp_buf); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return resp_buf; } #endif /****************************************************************************** * /dev/felica_pon *****************************************************************************/ /* character device definition */ static dev_t devid_felica_pon; static struct cdev cdev_felica_pon; static const struct file_operations fops_felica_pon = { .owner = THIS_MODULE, .open = felica_pon_open, .release = felica_pon_close, .read = felica_pon_read, .write = felica_pon_write, }; /* * initialize device */ static void felica_pon_init(void) { int ret; struct device *device_felica_pon; 
FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_felica_pon = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_felica_pon, FELICA_BASEMINOR, FELICA_MINOR_COUNT, FELICA_PON_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_felica_pon, &fops_felica_pon); ret = cdev_add(&cdev_felica_pon, devid_felica_pon, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_felica_pon, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_felica_pon = device_create(felica_class, NULL, devid_felica_pon, NULL, FELICA_PON_NAME); if (IS_ERR(device_felica_pon)) { cdev_del(&cdev_felica_pon); unregister_chrdev_region(devid_felica_pon, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_felica_pon), MINOR(devid_felica_pon)); } /* * finalize device */ static void felica_pon_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_felica_pon); cdev_del(&cdev_felica_pon); unregister_chrdev_region(devid_felica_pon, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); } /* * open device */ static int felica_pon_open(struct inode *inode, struct file *file) { uid_t uid; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); uid = __task_cred(current)->uid; if ((uid != gmfc_uid) && (uid != gdiag_uid) && (uid != gant_uid)) { FELICA_LOG_DEBUG ("[MFDD] %s END, uid=[%d], gmfc_uid=[%d], gdiag_uid=[%d]", __func__, uid, gmfc_uid, gdiag_uid); return -EACCES; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int felica_pon_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); #if defined(CONFIG_ARCH_EXYNOS) gpio_set_value(GPIO_PINID_FELICA_PON, GPIO_VALUE_LOW); #elif defined(CONFIG_ARCH_APQ8064) ice_gpiox_set(GPIO_PINID_FELICA_PON, GPIO_VALUE_LOW); #endif FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t felica_pon_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { int ret; char retparam; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); #if defined(CONFIG_ARCH_EXYNOS) ret = gpio_get_value(GPIO_PINID_FELICA_PON); #elif defined(CONFIG_ARCH_APQ8064) ret = ice_gpiox_get(GPIO_PINID_FELICA_PON); #endif if (ret == GPIO_VALUE_HIGH) { retparam = FELICA_PON_WIRED; FELICA_LOG_DEBUG("Wired interface Status is [%d][start]", retparam); } else if (ret == GPIO_VALUE_LOW) { retparam = FELICA_PON_WIRELESS; FELICA_LOG_DEBUG("Wired interface Status is [%d][standby]", retparam); } else { FELICA_LOG_ERR("[MFDD] %s ERROR(gpio_get_value), ret=[%d]", __func__, ret); return -EIO; } ret = copy_to_user(buf, &retparam, FELICA_PON_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user), ret=[%d]", __func__, ret); return -EFAULT; } *ppos += 1; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_PON_DATA_LEN; } /* * write operation */ static ssize_t felica_pon_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { char pon; int ret; int setparam; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); ret = copy_from_user(&pon, data, FELICA_PON_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user), ret=[%d]", __func__, ret); return -EFAULT; } if (pon == FELICA_PON_WIRED) { setparam = GPIO_VALUE_HIGH; FELICA_LOG_DEBUG("Set wired interface to 
[%d][start]", setparam); } else if (pon == FELICA_PON_WIRELESS) { setparam = GPIO_VALUE_LOW; FELICA_LOG_DEBUG("Set wired interface to [%d][standby]", setparam); } else { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user), pon=[%d]", __func__, pon); return -EINVAL; } #if defined(CONFIG_ARCH_EXYNOS) gpio_set_value(GPIO_PINID_FELICA_PON, setparam); #elif defined(CONFIG_ARCH_APQ8064) ice_gpiox_set(GPIO_PINID_FELICA_PON, setparam); #endif FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_PON_DATA_LEN; } /****************************************************************************** * felica_i2c_driver ******************************************************************************/ static struct i2c_client *felica_i2c_client; static const struct i2c_device_id felica_i2c_idtable[] = { {FELICA_I2C_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, felica_i2c_idtable); static struct i2c_driver felica_i2c_driver = { .probe = felica_i2c_probe, .remove = felica_i2c_remove, .driver = { .name = FELICA_I2C_NAME, .owner = THIS_MODULE, }, .id_table = felica_i2c_idtable, }; /* * felica_i2c_init */ static void felica_i2c_init(void) { int ret; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); ret = i2c_add_driver(&felica_i2c_driver); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_add_driver), ret=[%d]", __func__, ret); return; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return; } /* * felica_i2c_exit */ static void felica_i2c_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); i2c_del_driver(&felica_i2c_driver); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return; } /* * probe device */ static int felica_i2c_probe(struct i2c_client *client, const struct i2c_device_id *devid) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); felica_i2c_client = client; if (!felica_i2c_client) { FELICA_LOG_ERR("[MFDD] %s ERROR(felica_i2c_client==NULL)", \ __func__); return -EINVAL; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * remove device */ static int felica_i2c_remove(struct i2c_client *client) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /****************************************************************************** * /dev/felica_cen ******************************************************************************/ /* character device definition */ static dev_t devid_felica_cen; static struct cdev cdev_felica_cen; static const struct file_operations fops_felica_cen = { .owner = THIS_MODULE, .open = felica_cen_open, .release = felica_cen_close, .read = felica_cen_read, .write = felica_cen_write, }; /* * felica_cen_init */ static void felica_cen_init(void) { int ret; struct device *device_felica_cen; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_felica_cen = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_felica_cen, FELICA_BASEMINOR, FELICA_MINOR_COUNT, FELICA_CEN_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_felica_cen, &fops_felica_cen); ret = cdev_add(&cdev_felica_cen, devid_felica_cen, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_felica_cen, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_felica_cen = device_create(felica_class, NULL, devid_felica_cen, NULL, FELICA_CEN_NAME); if (IS_ERR(device_felica_cen)) { cdev_del(&cdev_felica_cen); unregister_chrdev_region(devid_felica_cen, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s 
ERROR(device_create)", __func__); return; } FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_felica_cen), MINOR(devid_felica_cen)); } /* * felica_cen_exit */ static void felica_cen_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_felica_cen); cdev_del(&cdev_felica_cen); unregister_chrdev_region(devid_felica_cen, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * open device */ static int felica_cen_open(struct inode *inode, struct file *file) { uid_t uid; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); uid = __task_cred(current)->uid; if (file->f_mode & FMODE_WRITE) { if ((uid != gdiag_uid) && (uid != gmfl_uid)) { FELICA_LOG_DEBUG(\ "[MFDD] %s END, uid=[%d]\n", __func__, uid); FELICA_LOG_DEBUG(\ "[MFDD] %s END, gmfc_uid=[%d]\n", __func__, gmfc_uid); FELICA_LOG_DEBUG(\ "[MFDD] %s END, gdiag_uid=[%d]\n", __func__, gdiag_uid); FELICA_LOG_DEBUG(\ "[MFDD] %s END, gmfl_uid=[%d]\n", __func__, gmfl_uid); return -EACCES; } } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int felica_cen_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t felica_cen_read(struct file *file, char __user *buf, \ size_t len, loff_t *ppos) { int ret; unsigned char address = gi2c_lockaddress; unsigned char read_buff = 0; gread_msgs[0].addr = gi2c_address; gread_msgs[0].buf = &address; gread_msgs[1].addr = gi2c_address; gread_msgs[1].buf = &read_buff; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (felica_i2c_client == NULL) { FELICA_LOG_DEBUG("felica_i2c_client is NULL"); return -EIO; } ret = i2c_transfer(felica_i2c_client->adapter, &gread_msgs[0], 1); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_transfer[0]), ret=[%d]", __func__, ret); return -EIO; } ret = i2c_transfer(felica_i2c_client->adapter, &gread_msgs[1], 1); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_transfer[1]), ret=[%d]", __func__, ret); return -EIO; } FELICA_LOG_DEBUG("[MFDD] %s read_buff=[%d]", __func__, read_buff); read_buff &= FELICA_CONTROL_LOCK_MASK; FELICA_LOG_DEBUG("[MFDD] %s read_buff=[%d]", __func__, read_buff); #ifdef CONFIG_NFC_FELICA g_cen_sts = read_buff; #endif ret = copy_to_user(buf, &read_buff, FELICA_CEN_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user), ret=[%d]", __func__, ret); return -EFAULT; } *ppos += 1; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_CEN_DATA_LEN; } /* * write operation */ static ssize_t felica_cen_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { char cen; int ret; unsigned char write_buff[2]; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (felica_i2c_client == NULL) { FELICA_LOG_DEBUG("felica_i2c_client is NULL"); return -EIO; } gwrite_msgs[0].buf = &write_buff[0]; gwrite_msgs[0].addr = gi2c_address; write_buff[0] = gi2c_lockaddress; ret = copy_from_user(&cen, data, FELICA_CEN_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user), ret=[%d]", __func__, ret); return -EFAULT; } if (cen == FELICA_CEN_UNLOCK) { write_buff[1] = FELICA_CEN_SET_UNLOCK; FELICA_LOG_DEBUG("Set FeliCa-Lock status to [%d][UnLock]", write_buff[1]); } else if (cen == FELICA_CEN_LOCK) { write_buff[1] = FELICA_CEN_SET_LOCK; FELICA_LOG_DEBUG("Set FeliCa-Lock status to [%d][Lock]", write_buff[1]); } else { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user), cen=[%d]", 
__func__, cen); return -EINVAL; } ret = i2c_transfer(felica_i2c_client->adapter, gwrite_msgs, 1); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_transfer), ret=[%d]", __func__, ret); return -EIO; } #ifdef CONFIG_NFC_FELICA g_cen_sts = cen; if( 1 == available_d->available_flag ){ if( 0 != g_cen_sts ){ if( GPIO_VALUE_LOW != g_rfs_sts ){ if( 0 == guartcc_felica_status ){ available_d->rsp_done = 1; FELICA_LOG_DEBUG("wake up available"); wake_up_interruptible(&available_d->read_wait); } } } } #endif FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_CEN_DATA_LEN; } /****************************************************************************** * /dev/felica_rfs ******************************************************************************/ /* character device definition */ static dev_t devid_felica_rfs; static struct cdev cdev_felica_rfs; static const struct file_operations fops_felica_rfs = { .owner = THIS_MODULE, .open = felica_rfs_open, .release = felica_rfs_close, .read = felica_rfs_read, }; /* * initialize device */ static void felica_rfs_init(void) { int ret; struct device *device_felica_rfs; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_felica_rfs = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_felica_rfs, FELICA_BASEMINOR, FELICA_MINOR_COUNT, FELICA_RFS_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_felica_rfs, &fops_felica_rfs); ret = cdev_add(&cdev_felica_rfs, devid_felica_rfs, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_felica_rfs, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_felica_rfs = device_create(felica_class, NULL, devid_felica_rfs, NULL, FELICA_RFS_NAME); if (IS_ERR(device_felica_rfs)) { cdev_del(&cdev_felica_rfs); unregister_chrdev_region(devid_felica_rfs, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_felica_rfs), MINOR(devid_felica_rfs)); } /* * finalize device */ static void felica_rfs_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_felica_rfs); cdev_del(&cdev_felica_rfs); unregister_chrdev_region(devid_felica_rfs, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); } /* * open device */ static int felica_rfs_open(struct inode *inode, struct file *file) { uid_t uid; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); uid = __task_cred(current)->uid; if ((uid != gmfc_uid) && (uid != gdiag_uid)) { FELICA_LOG_DEBUG ("[MFDD] %s END, uid=[%d], gmfc_uid=[%d], gdiag_uid=[%d]", __func__, uid, gmfc_uid, gdiag_uid); return -EACCES; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int felica_rfs_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t felica_rfs_read(struct file *file, char __user *buf, \ size_t len, loff_t *ppos) { int ret; char retparam; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); ret = gpio_get_value(GPIO_PINID_FELICA_RFS); if (ret == GPIO_VALUE_HIGH) { retparam = FELICA_RFS_STANDBY; FELICA_LOG_DEBUG("Carrier-Wave Status is [%d][standby]", retparam); } else if (ret == GPIO_VALUE_LOW) { retparam = FELICA_RFS_DETECTED; FELICA_LOG_DEBUG("Carrier-Wave Status is [%d][detected]", retparam); } 
else { FELICA_LOG_ERR("[MFDD] %s ERROR(gpio_get_value), ret=[%d]", __func__, ret); return -EIO; } ret = copy_to_user(buf, &retparam, FELICA_RFS_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user), ret=[%d]", __func__, ret); return -EFAULT; } *ppos += 1; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_RFS_DATA_LEN; } /****************************************************************************** * /dev/felica_rws ******************************************************************************/ /* character device definition */ static dev_t devid_felica_rws; static struct cdev cdev_felica_rws; static const struct file_operations fops_felica_rws = { .owner = THIS_MODULE, .open = felica_rws_open, .release = felica_rws_close, .read = felica_rws_read, .write = felica_rws_write, }; /* * initialize device */ static void felica_rws_init(void) { int ret; struct device *device_felica_rws; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_felica_rws = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_felica_rws, FELICA_BASEMINOR, FELICA_MINOR_COUNT, FELICA_RWS_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_felica_rws, &fops_felica_rws); ret = cdev_add(&cdev_felica_rws, devid_felica_rws, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_felica_rws, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_felica_rws = device_create(felica_class, NULL, devid_felica_rws, NULL, FELICA_RWS_NAME); if (IS_ERR(device_felica_rws)) { cdev_del(&cdev_felica_rws); unregister_chrdev_region(devid_felica_rws, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } gfelica_rw_status = FELICA_RW_STATUS_INIT; FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_felica_rws), MINOR(devid_felica_rws)); } /* * finalize device */ static void felica_rws_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_felica_rws); cdev_del(&cdev_felica_rws); unregister_chrdev_region(devid_felica_rws, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); } /* * open device */ static int felica_rws_open(struct inode *inode, struct file *file) { uid_t uid; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); uid = __task_cred(current)->uid; if (file->f_mode & FMODE_WRITE) { if (uid != grwm_uid) { FELICA_LOG_DEBUG(\ "[MFDD] %s END, uid=[%d],gmfc_uid=[%d],gdiag_uid=[%d]", __func__, uid, gmfc_uid, gdiag_uid); return -EACCES; } } else { if ((uid != gmfc_uid) && (uid != grwm_uid)) { FELICA_LOG_DEBUG(\ "[MFDD] %s END, uid=[%d],gmfc_uid=[%d],gdiag_uid=[%d]", __func__, uid, gmfc_uid, gdiag_uid); return -EACCES; } } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int felica_rws_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t felica_rws_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { int ret; char retparam; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (gfelica_rw_status == FELICA_RW_STATUS_ENABLE) { retparam = FELICA_RW_STATUS_ENABLE; FELICA_LOG_DEBUG("ReaderWriterFunction status is [%d][enabled]", retparam); } else if (gfelica_rw_status == FELICA_RW_STATUS_DISABLE) { retparam = FELICA_RW_STATUS_DISABLE; FELICA_LOG_DEBUG 
("ReaderWriterFunction status is [%d][disabled]", retparam); } else { FELICA_LOG_ERR("[MFDD] %s ERROR(gfelica_rw_status), RWM=[%d]", __func__, gfelica_rw_status); return -EIO; } ret = copy_to_user(buf, &retparam, FELICA_RWS_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user), ret=[%d]", __func__, ret); return -EFAULT; } *ppos += 1; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_RWS_DATA_LEN; } /* * write operation */ static ssize_t felica_rws_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { char work; int ret; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); ret = copy_from_user(&work, data, FELICA_RWS_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user), ret=[%d]", __func__, ret); return -EFAULT; } if (work == FELICA_RW_STATUS_ENABLE) { FELICA_LOG_DEBUG ("Set ReaderWriterFunction status to [%d][enable]", work); } else if (work == FELICA_RW_STATUS_DISABLE) { FELICA_LOG_DEBUG ("Set ReaderWriterFunction status to s[%d][disable]", work); } else { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user), work=[%d]", __func__, work); return -EINVAL; } gfelica_rw_status = work; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_RWS_DATA_LEN; } /****************************************************************************** * /dev/felica_int_poll ******************************************************************************/ /* character device definition */ static dev_t devid_felica_int_poll; static struct cdev cdev_felica_int_poll; static const struct file_operations fops_felica_int_poll = { .owner = THIS_MODULE, .open = felica_int_poll_open, .release = felica_int_poll_close, .read = felica_int_poll_read, .poll = felica_int_poll_poll, }; /* * top half of irq_handler */ static irqreturn_t felica_int_irq_handler(int irq, void *dev_id) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); disable_irq_nosync(gpio_to_irq(GPIO_PINID_FELICA_INT_REV03)); schedule_delayed_work(&pgint_irq->work, msecs_to_jiffies(FELICA_INT_DELAY_TIME)); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return IRQ_HANDLED; } /* * bottom half of irq_handler */ static void felica_int_irq_work(struct work_struct *work) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); enable_irq(gpio_to_irq(GPIO_PINID_FELICA_INT_REV03)); pgint_irq->irq_done = 1; #ifdef F_WAKE_LOCK wake_lock_timeout(&felica_wake_1, msecs_to_jiffies(tmout_1)); FELICA_LOG_DEBUG("[MFDD] %s Wake Lock(1)[%d]", __func__, tmout_1); #endif wake_up_interruptible(&pgint_irq->read_wait); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * initialize device */ static void felica_int_poll_init(void) { int ret; struct device *device_felica_int_poll; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_felica_int_poll = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_felica_int_poll, FELICA_BASEMINOR, FELICA_MINOR_COUNT, FELICA_INT_POLL_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_felica_int_poll, &fops_felica_int_poll); ret = cdev_add(&cdev_felica_int_poll, devid_felica_int_poll, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_felica_int_poll, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_felica_int_poll = device_create(felica_class, NULL, devid_felica_int_poll, NULL, FELICA_INT_POLL_NAME); if (IS_ERR(device_felica_int_poll)) { cdev_del(&cdev_felica_int_poll); unregister_chrdev_region(devid_felica_int_poll, 
FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } memset(pgint_irq, 0x00, sizeof(struct felica_int_irqdata)); INIT_DELAYED_WORK(&pgint_irq->work, felica_int_irq_work); init_waitqueue_head(&pgint_irq->read_wait); ret = request_irq(gpio_to_irq(GPIO_PINID_FELICA_INT_REV03), felica_int_irq_handler, IRQF_TRIGGER_FALLING, FELICA_INT_POLL_NAME, (void *)pgint_irq); if (ret != 0) { device_destroy(felica_class, devid_felica_int_poll); cdev_del(&cdev_felica_int_poll); unregister_chrdev_region(devid_felica_int_poll, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(request_irq), ret=[%d]", __func__, ret); return; } ret = enable_irq_wake(gpio_to_irq(GPIO_PINID_FELICA_INT_REV03)); if (ret < 0) { free_irq(gpio_to_irq(GPIO_PINID_FELICA_INT_REV03), (void *)pgint_irq); device_destroy(felica_class, devid_felica_int_poll); cdev_del(&cdev_felica_int_poll); unregister_chrdev_region(devid_felica_int_poll, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(enable_irq_wake), ret=[%d]", __func__, ret); return; } pgint_irq->irq_done = 0; FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, \ MAJOR(devid_felica_int_poll), MINOR(devid_felica_int_poll)); } /* * finalize device */ static void felica_int_poll_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); disable_irq(gpio_to_irq(GPIO_PINID_FELICA_INT_REV03)); free_irq(gpio_to_irq(GPIO_PINID_FELICA_INT_REV03), (void *)pgint_irq); device_destroy(felica_class, devid_felica_int_poll); cdev_del(&cdev_felica_int_poll); unregister_chrdev_region(devid_felica_int_poll, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * open device */ static int felica_int_poll_open(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int felica_int_poll_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t felica_int_poll_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { int ret; char retparam; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (!pgint_irq->irq_done) { ret = wait_event_interruptible(pgint_irq->read_wait, pgint_irq->irq_done == 1); if (ret < 0) { FELICA_LOG_ERR ("[MFDD] %s ERROR(wait_event_interruptible),ret=[%d]",\ __func__, ret); return -EINTR; } } ret = gpio_get_value(GPIO_PINID_FELICA_INT_REV03); if (ret == GPIO_VALUE_HIGH) { retparam = FELICA_INT_HIGH; FELICA_LOG_DEBUG("INT-PIN value is [%d][HIGH]", retparam); } else if (ret == GPIO_VALUE_LOW) { retparam = FELICA_INT_LOW; FELICA_LOG_DEBUG("INT-PIN value is [%d][LOW]", retparam); } else { FELICA_LOG_ERR("[MFDD] %s ERROR(gpio_get_value), ret=[%d]", __func__, ret); return -EIO; } ret = copy_to_user(buf, &retparam, FELICA_INT_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user), ret=[%d]", __func__, ret); return -EFAULT; } *ppos += 1; pgint_irq->irq_done = 0; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_INT_DATA_LEN; } /* * poll operation */ static unsigned int felica_int_poll_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; FELICA_LOG_DEBUG("%s START", __func__); poll_wait(file, &pgint_irq->read_wait, wait); if (pgint_irq->irq_done) mask = POLLIN | POLLRDNORM; FELICA_LOG_DEBUG("%s END", __func__); return mask; } 
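/*
 * Note on the INT pin polling flow implemented above: the top-half IRQ
 * handler masks the interrupt and schedules delayed work; the bottom half
 * re-enables the IRQ, sets irq_done, optionally holds a timed wake lock
 * (F_WAKE_LOCK) and wakes any sleeping reader. felica_int_poll_read() then
 * blocks until irq_done is set and copies the current INT GPIO level
 * (FELICA_INT_HIGH / FELICA_INT_LOW) back to the caller.
 *
 * Illustrative userspace usage (assumed, not part of this driver; the
 * actual /dev node name depends on how udev exposes FELICA_INT_POLL_NAME):
 *
 *   int fd = open("/dev/felica_int_poll", O_RDONLY);
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *   char level;
 *   poll(&pfd, 1, -1);      // wait for an INT edge
 *   read(fd, &level, 1);    // fetch the latched pin level
 */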
/****************************************************************************** * /dev/felica_uid ******************************************************************************/ /* character device definition */ static dev_t devid_felica_uid; static struct cdev cdev_felica_uid; static const struct file_operations fops_felica_uid = { .owner = THIS_MODULE, .open = felica_uid_open, .release = felica_uid_close, .unlocked_ioctl = felica_uid_ioctl, }; /* * initialize device */ static void felica_uid_init(void) { int ret; struct device *device_felica_uid; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_felica_uid = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_felica_uid, FELICA_BASEMINOR, FELICA_MINOR_COUNT, FELICA_UID_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_felica_uid, &fops_felica_uid); ret = cdev_add(&cdev_felica_uid, devid_felica_uid, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_felica_uid, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_felica_uid = device_create(felica_class, NULL, devid_felica_uid, NULL, FELICA_UID_NAME); if (IS_ERR(device_felica_uid)) { cdev_del(&cdev_felica_uid); unregister_chrdev_region(devid_felica_uid, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_felica_uid), MINOR(devid_felica_uid)); } /* * finalize device */ static void felica_uid_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (gdiag_name[0] != 0x00) { device_destroy(felica_class, devid_felica_uid); cdev_del(&cdev_felica_uid); unregister_chrdev_region(devid_felica_uid, \ FELICA_MINOR_COUNT); } FELICA_LOG_DEBUG("[MFDD] %s START", __func__); } /* * open device */ static int felica_uid_open(struct inode *inode, struct file *file) { char *cmdpos; static char cmdline[1025]; static unsigned long start_adr, end_adr, leng; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); start_adr = current->mm->arg_start; end_adr = current->mm->arg_end; leng = end_adr - start_adr; if (1024 < leng) leng = 1024; cmdpos = (char *)(current->mm->arg_start); memcpy(cmdline, cmdpos, leng); cmdline[leng] = '\0'; if (strncmp(cmdline, gdiag_name, leng) != 0) { FELICA_LOG_DEBUG("[MFDD] %s ERROR, %s gdiag %s", \ __func__, cmdline, gdiag_name); return -EACCES; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int felica_uid_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * uid registration */ static long felica_uid_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { FELICA_LOG_DEBUG("[MFDD] %s START, cmd=[%d]", __func__, cmd); switch (cmd) { case SET_FELICA_UID_DIAG: gdiag_uid = *((int *)arg); FELICA_LOG_DEBUG("Set gdiag_uid to [%d]", gdiag_uid); break; default: FELICA_LOG_ERR("[MFDD] %s ERROR(unknown command)", __func__); break; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /****************************************************************************** * /dev/felica_ant ******************************************************************************/ /* character device definition */ static dev_t devid_felica_ant; static struct cdev cdev_felica_ant; static const struct file_operations fops_felica_ant = { .owner = THIS_MODULE, .open = 
felica_ant_open, .release = felica_ant_close, .read = felica_ant_read, .write = felica_ant_write, }; /* * felica_ant_init */ static void felica_ant_init(void) { int ret; struct device *device_felica_ant; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_felica_ant = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_felica_ant, FELICA_BASEMINOR, FELICA_MINOR_COUNT, FELICA_ANT_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_felica_ant, &fops_felica_ant); ret = cdev_add(&cdev_felica_ant, devid_felica_ant, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_felica_ant, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_felica_ant = device_create(felica_class, NULL, devid_felica_ant, NULL, FELICA_ANT_NAME); if (IS_ERR(device_felica_ant)) { cdev_del(&cdev_felica_ant); unregister_chrdev_region(devid_felica_ant, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_felica_ant), MINOR(devid_felica_ant)); } /* * felica_ant_exit */ static void felica_ant_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_felica_ant); cdev_del(&cdev_felica_ant); unregister_chrdev_region(devid_felica_ant, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * open device */ static int felica_ant_open(struct inode *inode, struct file *file) { uid_t uid; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); uid = __task_cred(current)->uid; if ((uid != gant_uid) && (uid != gdiag_uid)) { FELICA_LOG_DEBUG(\ "[MFDD] %s END, uid=[%d]\n", __func__, uid); FELICA_LOG_DEBUG(\ "[MFDD] %s END, gant_uid=[%d]\n", __func__, gant_uid); return -EACCES; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int felica_ant_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t felica_ant_read(struct file *file, char __user *buf, \ size_t len, loff_t *ppos) { int ret; unsigned char address = gi2c_antaddress; unsigned char read_buff = 0; gread_msgs[0].addr = gi2c_address; gread_msgs[0].buf = &address; gread_msgs[1].addr = gi2c_address; gread_msgs[1].buf = &read_buff; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (felica_i2c_client == NULL) { FELICA_LOG_DEBUG("[MFDD] %s felica_i2c_client is NULL", \ __func__); return -EIO; } ret = i2c_transfer(felica_i2c_client->adapter, &gread_msgs[0], 1); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_transfer[0]), ret=[%d]", __func__, ret); return -EIO; } ret = i2c_transfer(felica_i2c_client->adapter, &gread_msgs[1], 1); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_transfer[1]), ret=[%d]", __func__, ret); return -EIO; } FELICA_LOG_DEBUG("[MFDD] %s read_buff=[%d]", __func__, read_buff); ret = copy_to_user(buf, &read_buff, FELICA_ANT_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user), ret=[%d]", __func__, ret); return -EFAULT; } *ppos += 1; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_ANT_DATA_LEN; } /* * write operation */ static ssize_t felica_ant_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { char ant; int ret; unsigned char write_buff[2]; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if 
(felica_i2c_client == NULL) { FELICA_LOG_DEBUG("[MFDD] %s felica_i2c_client is NULL", \ __func__); return -EIO; } gwrite_msgs[0].buf = &write_buff[0]; gwrite_msgs[0].addr = gi2c_address; write_buff[0] = gi2c_antaddress; ret = copy_from_user(&ant, data, FELICA_ANT_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user), ret=[%d]", __func__, ret); return -EFAULT; } write_buff[1] = ant; ret = i2c_transfer(felica_i2c_client->adapter, gwrite_msgs, 1); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_transfer), ret=[%d]", __func__, ret); return -EIO; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_ANT_DATA_LEN; } /****************************************************************************** * Mobile FeliCa device driver initialization / termination function ******************************************************************************/ /* * to set initial value to each terminal */ static void felica_initialize_pin(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); #if defined(CONFIG_ARCH_EXYNOS) gpio_set_value(GPIO_PINID_FELICA_PON, GPIO_VALUE_LOW); #elif defined(CONFIG_ARCH_APQ8064) ice_gpiox_set(GPIO_PINID_FELICA_PON, GPIO_VALUE_LOW); #endif FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * to set final value to each terminal */ static void felica_finalize_pin(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); #if defined(CONFIG_ARCH_EXYNOS) gpio_set_value(GPIO_PINID_FELICA_PON, GPIO_VALUE_LOW); #elif defined(CONFIG_ARCH_APQ8064) ice_gpiox_set(GPIO_PINID_FELICA_PON, GPIO_VALUE_LOW); #endif FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * device driver registration */ static void felica_register_device(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); felica_int_poll_init(); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * device driver deregistration */ static void felica_deregister_device(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); felica_uid_exit(); felica_int_poll_exit(); felica_ant_exit(); felica_rws_exit(); felica_rfs_exit(); felica_cen_exit(); felica_pon_exit(); felica_uart_exit(); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * The entry point for initialization module */ static int __init felica_init(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); #ifdef F_WAKE_LOCK wake_lock_init(&felica_wake_1, WAKE_LOCK_SUSPEND, "felica-int-1"); wake_lock_init(&felica_wake_2, WAKE_LOCK_SUSPEND, "felica-int-2"); #endif felica_class = class_create(THIS_MODULE, "felica"); if (IS_ERR(felica_class)) { FELICA_LOG_ERR("[MFDD] %s ERROR(class_create)", __func__); return PTR_ERR(felica_class); } felica_initialize_pin(); felica_register_device(); felica_nl_init(); felica_i2c_init(); /* MFC UID registration */ schedule_delayed_work(&pgint_irq->work, msecs_to_jiffies(10)); #ifdef CONFIG_NFC_FELICA /* FELICA_INTU GPIO is changed Start*/ if(system_rev > 0 && system_rev < 15) felica_varying_gpio_intu = GPIO_PINID_NFC_INTU_REV03; else felica_varying_gpio_intu = GPIO_PINID_NFC_INTU_REV06; FELICA_LOG_DEBUG("[MFDD] %s , system_rev=[%d]", __func__, system_rev); /* FELICA_INTU GPIO is changed End */ snfc_register_device(); uartcc_init(); #endif /* CONFIG_NFC_FELICA */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * The entry point for the termination module */ static void __exit felica_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); #ifdef F_WAKE_LOCK wake_lock_destroy(&felica_wake_1); wake_lock_destroy(&felica_wake_2); #endif felica_i2c_exit(); felica_nl_exit(); felica_deregister_device(); #ifdef CONFIG_NFC_FELICA 
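	/* CXD2235-specific (NFC/FeliCa combo) devices are torn down only when
	 * CONFIG_NFC_FELICA is enabled, mirroring the registration path taken
	 * in felica_init(). */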
snfc_deregister_device(); snfc_uart_exit(); uartcc_exit(); #endif /* CONFIG_NFC_FELICA */ felica_finalize_pin(); class_destroy(felica_class); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /****************************************************************************** * * * * * Add driver code for CXD2235AGG NFC/Felica combo IC * * * * ******************************************************************************/ #ifdef CONFIG_NFC_FELICA /****************************************************************************** * /dev/snfc_hsel ******************************************************************************/ /* character device definition */ static dev_t devid_snfc_hsel; static struct cdev hsel_cdev; static const struct file_operations hsel_fileops = { .owner = THIS_MODULE, .read = hsel_read, .write = hsel_write, .open = hsel_open, .release = hsel_release, }; /* function prototype */ static int hsel_init(void) { int ret = 0; struct device* class_dev; devid_snfc_hsel = MKDEV(MISC_MAJOR, 0); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); ret = alloc_chrdev_region(&devid_snfc_hsel, 0, HSEL_DEV_COUNT, HSEL_DEV_NAME); if ( 0 != ret) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return ret; } cdev_init(&hsel_cdev, &hsel_fileops); hsel_cdev.owner = THIS_MODULE; ret = cdev_add(&hsel_cdev, devid_snfc_hsel, HSEL_DEV_COUNT); if ( 0 != ret) { unregister_chrdev_region(devid_snfc_hsel, HSEL_DEV_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return ret; } class_dev = device_create(felica_class, NULL, devid_snfc_hsel, NULL, HSEL_DEV_NAME); if ( 0 != IS_ERR(class_dev)) { cdev_del(&hsel_cdev); unregister_chrdev_region(devid_snfc_hsel, HSEL_DEV_COUNT); ret = PTR_ERR(class_dev); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create), ret=[%d]", __func__, ret); return ret; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * finalize device */ static void hsel_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_snfc_hsel); cdev_del(&hsel_cdev); unregister_chrdev_region(devid_snfc_hsel, HSEL_DEV_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * open device */ static int hsel_open(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int hsel_release(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t hsel_read(struct file *file, char __user * buf, size_t len, loff_t * ppos) { char hsel_val; int ret; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (NULL == buf) { FELICA_LOG_ERR("[MFDD] %s hsel_read param err", __func__); return -EIO; } if ( 1 > len ) { FELICA_LOG_ERR("[MFDD] %s length check len = %d",__func__, len); return -EIO; } #if defined(CONFIG_ARCH_EXYNOS) ret = gpio_get_value( GPIO_PINID_NFC_HSEL ); #elif defined(CONFIG_ARCH_APQ8064) ret = ice_gpiox_get( GPIO_PINID_NFC_HSEL ); #endif if ( 0 > ret ) { FELICA_LOG_ERR("[MFDD] %s ERROR(gpio_get_value), ret=[%d]", __func__, ret); return ret; } if ( 0 == ret){ hsel_val = 0; }else{ hsel_val = 1; } if (copy_to_user(buf, &hsel_val, 1)) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user)",__func__); return -EFAULT; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 1; } /* * write operation */ static ssize_t hsel_write(struct file *file, const char __user *data,\ size_t len, loff_t *ppos) 
{ char hsel_val; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (NULL == data) { FELICA_LOG_ERR("[MFDD] %s hsel_write param err", __func__); return -EIO; } if ( 1 > len ) { FELICA_LOG_ERR("[MFDD] %s length check len = %d", __func__, len); return -EIO; } if (copy_from_user(&hsel_val, data, 1)) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user)", __func__); return -EFAULT; } if( 0 == hsel_val ){ FELICA_LOG_DEBUG("[MFDD] %s HSEL = Low", __func__); }else if( 1 == hsel_val ){ FELICA_LOG_DEBUG("[MFDD] %s HSEL = High", __func__); }else{ FELICA_LOG_ERR("param err = %d", hsel_val); return -EIO; } #if defined(CONFIG_ARCH_EXYNOS) gpio_set_value(GPIO_PINID_NFC_HSEL , hsel_val ); #elif defined(CONFIG_ARCH_APQ8064) ice_gpiox_set(GPIO_PINID_NFC_HSEL , hsel_val ); #endif FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 1; } /****************************************************************************** * /dev/snfc_intu_poll ******************************************************************************/ /* character device definition */ static dev_t devid_snfc_intu_poll; static struct cdev intu_poll_cdev; static struct poll_data g_intu_data; static struct poll_data* g_intu_d = &g_intu_data; static const struct file_operations intu_poll_fileops = { .owner = THIS_MODULE, .read = intu_poll_read, .open = intu_poll_open, .release = intu_poll_release, /* .poll = intu_poll_poll,*/ }; /* * top half of irq_handler */ static irqreturn_t intu_poll_irq_handler(int irq, void *dev_id) { struct poll_data *intu_d = g_intu_d; FELICA_LOG_DEBUG("[MFDD] %s START irq = %d", __func__, irq); disable_irq_nosync(gpio_to_irq(felica_varying_gpio_intu)); schedule_delayed_work(&intu_d->work, msecs_to_jiffies(INTU_POLL_DELAY)); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return IRQ_HANDLED; } /* * bottom half of irq_handler */ static void intu_poll_work_func(struct work_struct *work) { struct poll_data *intu_d = g_intu_d; int read_value = 0; int old_value = 0; unsigned long irqflag = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); old_value = intu_d->device_status; read_value = gpio_get_value(felica_varying_gpio_intu); FELICA_LOG_DEBUG("[MFDD] %s read_value = %d old_value = %d", __func__, read_value, old_value); if (read_value < 0) { intu_d->read_error = read_value; } else if (read_value != old_value) { intu_d->device_status = read_value; intu_d->read_error = 0; if (intu_d->device_status == INTU_POLL_DEV_LOW){ irqflag = IRQF_TRIGGER_HIGH | IRQF_SHARED; }else{ irqflag = IRQF_TRIGGER_LOW | IRQF_SHARED; } if (irq_set_irq_type(gpio_to_irq(felica_varying_gpio_intu), irqflag)){ FELICA_LOG_ERR("[MFDD] %s ERROR(set_irq_type), irqflag=[%ld]", __func__, irqflag); } } enable_irq(gpio_to_irq(felica_varying_gpio_intu)); if (read_value != old_value || intu_d->read_error) { intu_d->irq_handler_done = 1; wake_up_interruptible(&intu_d->read_wait); } FELICA_LOG_DEBUG("[MFDD] %s END read_value = %d, old_value = %d, \ intu_d->read_error = %d", \ __func__, read_value, old_value, intu_d->read_error); } /* * initialize device */ static int intu_poll_init(void) { int ret = 0; struct device *class_dev; unsigned long irqflag = 0; devid_snfc_intu_poll = MKDEV(MISC_MAJOR, 0); FELICA_LOG_DEBUG("[MFDD] %s START",__func__); ret = alloc_chrdev_region(&devid_snfc_intu_poll , 0 , INTU_POLL_DEV_COUNT, INTU_POLL_DEV_NAME); if (ret) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return ret; } cdev_init(&intu_poll_cdev, &intu_poll_fileops); intu_poll_cdev.owner = THIS_MODULE; ret = cdev_add(&intu_poll_cdev, devid_snfc_intu_poll, 
INTU_POLL_DEV_COUNT); if (ret) { unregister_chrdev_region(devid_snfc_intu_poll, INTU_POLL_DEV_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return ret; } class_dev = device_create(felica_class, NULL, devid_snfc_intu_poll, NULL, INTU_POLL_DEV_NAME); if (IS_ERR(class_dev)) { cdev_del(&intu_poll_cdev); unregister_chrdev_region(devid_snfc_intu_poll, INTU_POLL_DEV_COUNT); ret = PTR_ERR(class_dev); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create), ret=[%d]", __func__, ret); return ret; } memset(g_intu_d, 0x00, sizeof(struct poll_data)); INIT_DELAYED_WORK(&g_intu_d->work, intu_poll_work_func); init_waitqueue_head(&g_intu_d->read_wait); g_intu_d->open_flag = 0; if (g_intu_d->open_flag) { FELICA_LOG_ERR("[MFDD] %s only one time", __func__); return 0; } g_intu_d->open_flag = 1; ret = gpio_get_value(felica_varying_gpio_intu); if (ret < 0) { g_intu_d->open_flag = 0; FELICA_LOG_ERR("[MFDD] %s ERROR(gpio_get_value), ret=[%d]", __func__, ret); return -EIO; } g_intu_d->device_status = ret; if (g_intu_d->device_status == INTU_POLL_DEV_LOW){ irqflag = IRQF_TRIGGER_HIGH | IRQF_SHARED; }else{ irqflag = IRQF_TRIGGER_LOW | IRQF_SHARED; } if (request_irq(gpio_to_irq(felica_varying_gpio_intu), intu_poll_irq_handler,irqflag, INTU_POLL_DEV_NAME, (void*)g_intu_d)) { g_intu_d->open_flag = 0; FELICA_LOG_ERR("[MFDD] %s ERROR(request_irq), irqflag=[%ld]", __func__, irqflag); return -EIO; } if(system_rev > 0 && system_rev < 15) { if (enable_irq_wake(gpio_to_irq(felica_varying_gpio_intu))){ FELICA_LOG_ERR("[MFDD] %s ERROR(enable_irq_wake)", __func__); free_irq(gpio_to_irq(felica_varying_gpio_intu), (void *)g_intu_d); return -EIO; } } g_intu_d->irq_handler_done = 0; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * finalize device */ static void intu_poll_exit(void) { struct poll_data *intu_d = g_intu_d; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); cancel_delayed_work(&intu_d->work); if(system_rev > 0 && system_rev < 15) { if (disable_irq_wake(gpio_to_irq(felica_varying_gpio_intu))){ FELICA_LOG_ERR("[MFDD] %s ERROR(disable_irq_wake)", __func__); } } free_irq(gpio_to_irq(felica_varying_gpio_intu), (void *)intu_d); intu_d->open_flag = 0; cdev_del(&intu_poll_cdev); unregister_chrdev_region(devid_snfc_intu_poll, INTU_POLL_DEV_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * open device */ static int intu_poll_open(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int intu_poll_release(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t intu_poll_read(struct file *file, char __user * buf, size_t len, loff_t * ppos) { struct poll_data *intu_d = g_intu_d; int ret; char cret; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if ( len < 1 ) { FELICA_LOG_ERR("[MFDD] %s length check len = %d", __func__, len); return -EIO; } if (!intu_d->irq_handler_done) { if (file->f_flags & O_NONBLOCK) { FELICA_LOG_ERR("NONBLOCK"); return -EAGAIN; } FELICA_LOG_DEBUG("NFC intu_poll wait irq"); ret = wait_event_interruptible(intu_d->read_wait, intu_d->irq_handler_done == 1); if (-ERESTARTSYS == ret) { FELICA_LOG_DEBUG("[MFDD] %s wait_event_interruptible ret = %d", __func__, ret); return -EINTR; } } if (intu_d->read_error) { intu_d->irq_handler_done = 0; intu_d->read_error = 0; FELICA_LOG_ERR("[MFDD] %s 
intu_d->read_error = %d", __func__, intu_d->read_error); return -EIO; } if (intu_d->device_status == INTU_POLL_DEV_HIGH){ cret = 1; }else{ cret = 0; } len = 1; if (copy_to_user(buf, &cret, len)) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user)",__func__); return -EFAULT; } intu_d->irq_handler_done = 0; FELICA_LOG_DEBUG("[MFDD] %s END len = %d, cret = %d",__func__, len, cret); return len; } /****************************************************************************** * /dev/snfc_available_poll ******************************************************************************/ /* character device definition */ static dev_t devid_snfc_abailable_poll; static struct cdev available_poll_cdev; static const struct file_operations available_poll_fileops = { .owner = THIS_MODULE, .unlocked_ioctl = available_poll_ioctl, .read = available_poll_read, .open = available_poll_open, .release = available_poll_release, }; /* * initialize device */ static int available_poll_init(void) { int ret = 0; struct device *class_dev; devid_snfc_abailable_poll = MKDEV(MISC_MAJOR, 0); FELICA_LOG_DEBUG("[MFDD] %s START",__func__); ret = alloc_chrdev_region(&devid_snfc_abailable_poll, 0, AVAILABLE_POLL_DEV_COUNT, AVAILABLE_POLL_DEV_NAME); if (ret) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return ret; } cdev_init(&available_poll_cdev, &available_poll_fileops); available_poll_cdev.owner = THIS_MODULE; ret = cdev_add(&available_poll_cdev, devid_snfc_abailable_poll, AVAILABLE_POLL_DEV_COUNT); if (ret) { unregister_chrdev_region(devid_snfc_abailable_poll, AVAILABLE_POLL_DEV_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return ret; } class_dev = device_create(felica_class, NULL, devid_snfc_abailable_poll, NULL, AVAILABLE_POLL_DEV_NAME); if (IS_ERR(class_dev)) { cdev_del(&available_poll_cdev); unregister_chrdev_region(devid_snfc_abailable_poll, AVAILABLE_POLL_DEV_COUNT); ret = PTR_ERR(class_dev); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create), ret=[%d]", __func__, ret); return ret; } memset((void*)&g_available_data, 0x00, sizeof(struct icc_poll_data)); init_waitqueue_head(&g_available_data.read_wait); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * finalize device */ static void available_poll_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START",__func__); device_destroy(felica_class, devid_snfc_abailable_poll); cdev_del(&available_poll_cdev); unregister_chrdev_region(devid_snfc_abailable_poll, AVAILABLE_POLL_DEV_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * open device */ static int available_poll_open(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int available_poll_release(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if( g_available_data.available_flag == 1 ){ g_available_data.rsp_done = 1; wake_up_interruptible(&g_available_data.read_wait); } g_available_data.rsp_done = 0; g_available_data.available_flag = 0; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t available_poll_read(struct file *file, char __user * buf, size_t len, loff_t * ppos) { char type = 0; int nRet = -1; struct icc_poll_data* available_d = &g_available_data; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if( ( !uartcc_is_idle_status() ) || ( 0 == g_cen_sts ) || ( GPIO_VALUE_LOW == g_rfs_sts )){ available_d->available_flag = 1; nRet 
= wait_event_interruptible(available_d->read_wait, available_d->rsp_done == 1); if( nRet < 0 ){ FELICA_LOG_ERR("[MFDD] %s ERROR(wait_event_interruptible), ret=[%d]" ,__func__, nRet); return -EIO; } type = 0x01; available_d->rsp_done = 0; available_d->available_flag = 0; }else{ type = 0x01; } if (copy_to_user(buf, &type, len)) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user)", __func__); return -EFAULT; } FELICA_LOG_DEBUG("[MFDD] %s END read size = %d ", __func__, len); return len; } /* * available operation */ static long available_poll_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long nRet = -EIO; FELICA_LOG_DEBUG("[MFDD] %s START cmd = %x",__func__, cmd); switch(cmd){ case AVAILABLE_POLL_DEV_SET_PRE_LOCK: g_cen_sts = 0; nRet = 0; break; default: break; } FELICA_LOG_DEBUG("[MFDD] %s END cmd = %x",__func__, cmd); return nRet; } /****************************************************************************** * /dev/snfc_rfs_poll ******************************************************************************/ /* character device definition */ static dev_t devid_snfc_rfs_poll; static struct cdev rfs_poll_cdev; static struct poll_data g_rfs_data; static struct poll_data* g_rfs_d = &g_rfs_data; static const struct file_operations rfs_poll_fileops = { .owner = THIS_MODULE, .read = rfs_poll_read, .open = rfs_poll_open, .release = rfs_poll_release, .poll = rfs_poll_poll, }; /* * top half of irq_handler */ static irqreturn_t rfs_poll_irq_handler(int irq, void *dev_id) { struct poll_data *rfs_d = g_rfs_d; FELICA_LOG_DEBUG("[MFDD] %s START irq = %d",__func__, irq); disable_irq_nosync(gpio_to_irq(GPIO_PINID_NFC_RFS_POLL)); schedule_delayed_work(&rfs_d->work, msecs_to_jiffies(RFS_POLL_DELAY)); FELICA_LOG_DEBUG("[MFDD] %s END",__func__); return IRQ_HANDLED; } /* * bottom half of irq_handler */ static void rfs_poll_work_func(struct work_struct *work) { struct poll_data *rfs_d = g_rfs_d; int read_value = 0; int old_value = 0; unsigned long irqflag = 0; struct icc_poll_data* available_d = &g_available_data; FELICA_LOG_DEBUG("[MFDD] %s START",__func__); old_value = rfs_d->device_status; read_value = gpio_get_value(GPIO_PINID_NFC_RFS_POLL); FELICA_LOG_DEBUG("[MFDD] %s read_value = %d old_value = %d", __func__ , read_value, old_value); if (read_value < 0) { rfs_d->read_error = read_value; } else if (read_value != old_value) { g_rfs_sts = read_value; if( 1 == available_d->available_flag ){ if( 0 != g_cen_sts ){ if( GPIO_VALUE_LOW != g_rfs_sts ){ if( uartcc_is_idle_status() ){ FELICA_LOG_DEBUG("[MFDD] %s wake up available", __func__); available_d->rsp_done = 1; wake_up_interruptible(&available_d->read_wait); } } } } rfs_d->device_status = read_value; rfs_d->read_error = 0; if (rfs_d->device_status == RFS_POLL_DEV_LOW){ irqflag = IRQF_TRIGGER_HIGH | IRQF_SHARED; }else{ irqflag = IRQF_TRIGGER_LOW | IRQF_SHARED; } if (irq_set_irq_type(gpio_to_irq(GPIO_PINID_NFC_RFS_POLL), irqflag)){ FELICA_LOG_ERR("[MFDD] %s ERROR(set_irq_type), irqflag=[%ld]", __func__, irqflag); } } enable_irq(gpio_to_irq(GPIO_PINID_NFC_RFS_POLL)); if (read_value != old_value || rfs_d->read_error) { rfs_d->irq_handler_done = 1; wake_up_interruptible(&rfs_d->read_wait); } FELICA_LOG_DEBUG("[MFDD] %s END read_value = %d, old_value = %d,\ rfs_d->read_error = %d", __func__, read_value, old_value, rfs_d->read_error); } /* * initialize device */ static int rfs_poll_init(void) { int ret = 0; struct device *class_dev; unsigned long irqflag = 0; devid_snfc_rfs_poll = MKDEV(MISC_MAJOR, 0); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); ret 
= alloc_chrdev_region(&devid_snfc_rfs_poll , 0 , RFS_POLL_DEV_COUNT, RFS_POLL_DEV_NAME); if (ret) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region) ret=[%d]", __func__, ret); return ret; } cdev_init(&rfs_poll_cdev, &rfs_poll_fileops); rfs_poll_cdev.owner = THIS_MODULE; ret = cdev_add(&rfs_poll_cdev, devid_snfc_rfs_poll, RFS_POLL_DEV_COUNT); if (ret) { unregister_chrdev_region(devid_snfc_rfs_poll, RFS_POLL_DEV_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return ret; } class_dev = device_create(felica_class, NULL, devid_snfc_rfs_poll, NULL, RFS_POLL_DEV_NAME); if (IS_ERR(class_dev)) { cdev_del(&rfs_poll_cdev); unregister_chrdev_region(devid_snfc_rfs_poll, RFS_POLL_DEV_COUNT); ret = PTR_ERR(class_dev); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create), ret=[%d]", __func__, ret); return ret; } memset(g_rfs_d, 0x00, sizeof(struct poll_data)); INIT_DELAYED_WORK(&g_rfs_d->work, rfs_poll_work_func); init_waitqueue_head(&g_rfs_d->read_wait); g_rfs_d->open_flag = 0; if (g_rfs_d->open_flag) { FELICA_LOG_ERR("[MFDD] %s only one time", __func__); return -EBUSY; } g_rfs_d->open_flag = 1; ret = gpio_get_value(GPIO_PINID_NFC_RFS_POLL); if (ret < 0) { g_rfs_d->open_flag = 0; FELICA_LOG_ERR("[MFDD] %s ERROR(gpio_get_value), ret=[%d]", __func__, ret); return -EIO; } g_rfs_d->device_status = ret; g_rfs_sts = ret; if (g_rfs_d->device_status == RFS_POLL_DEV_LOW){ irqflag = IRQF_TRIGGER_HIGH | IRQF_SHARED; }else{ irqflag = IRQF_TRIGGER_LOW | IRQF_SHARED; } if (request_irq(gpio_to_irq(GPIO_PINID_NFC_RFS_POLL), rfs_poll_irq_handler, irqflag, RFS_POLL_DEV_NAME, (void*)g_rfs_d)) { g_rfs_d->open_flag = 0; FELICA_LOG_ERR("[MFDD] %s ERROR(request_irq), irqflag=[%ld]", __func__, irqflag); return -EIO; } if (enable_irq_wake(gpio_to_irq(GPIO_PINID_NFC_RFS_POLL))){ FELICA_LOG_ERR("[MFDD] %s ERROR(enable_irq_wake)", __func__); free_irq(gpio_to_irq(GPIO_PINID_NFC_RFS_POLL), (void *)g_rfs_d); return -EIO; } g_rfs_d->irq_handler_done = 0; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * finalize device */ static void rfs_poll_exit(void) { struct poll_data *rfs_d = g_rfs_d; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); cancel_delayed_work(&rfs_d->work); if (disable_irq_wake(gpio_to_irq(GPIO_PINID_NFC_RFS_POLL))){ FELICA_LOG_ERR("[MFDD] %s ERROR(disable_irq_wake)", __func__); } free_irq(gpio_to_irq(GPIO_PINID_NFC_RFS_POLL), (void *)rfs_d); rfs_d->open_flag = 0; cdev_del(&rfs_poll_cdev); unregister_chrdev_region(devid_snfc_rfs_poll, RFS_POLL_DEV_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * open operation */ static int rfs_poll_open(struct inode *inode, struct file *file) { int uid_ret = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); uid_ret = snfc_uid_check(); if (uid_ret < 0) { FELICA_LOG_ERR ("[MFDD] %s open fail=[%d]", __func__, uid_ret); return -EACCES; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close operation */ static int rfs_poll_release(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * poll operation */ static unsigned int rfs_poll_poll(struct file *file, poll_table *wait) { struct poll_data *rfs_d = g_rfs_d; unsigned int mask = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); poll_wait(file, &rfs_d->read_wait, wait); if (rfs_d->irq_handler_done){ mask = POLLIN | POLLRDNORM; } FELICA_LOG_DEBUG("[MFDD] %s END mask = %d", __func__, mask); return (mask); } /* * read operation */ static ssize_t rfs_poll_read(struct 
file *file, char __user * buf, size_t len, loff_t * ppos) { struct poll_data *rfs_d = g_rfs_d; int ret; char cret; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if ( len < 1 ) { FELICA_LOG_ERR("[MFDD] %s length check len = %d", __func__, len); return -EIO; } if (!rfs_d->irq_handler_done) { if (file->f_flags & O_NONBLOCK) { FELICA_LOG_ERR("[MFDD] %s NONBLOCK", __func__); return -EAGAIN; } FELICA_LOG_DEBUG("[MFDD] %s FeliCa rfs_poll wait irq", __func__); ret = wait_event_interruptible(rfs_d->read_wait, rfs_d->irq_handler_done == 1); if (-ERESTARTSYS == ret) { FELICA_LOG_DEBUG("[MFDD] %s wait_event_interruptible ret=[%d]", __func__, ret); return -EINTR; } } if (rfs_d->read_error) { rfs_d->irq_handler_done = 0; rfs_d->read_error = 0; FELICA_LOG_ERR("[MFDD] %s rfs_d->read_error = %d", __func__, rfs_d->read_error); return -EIO; } if (rfs_d->device_status == RFS_POLL_DEV_HIGH){ cret = RFS_POLL_RET_STS_HIGH; }else{ cret = RFS_POLL_RET_STS_LOW; } len = 1; if (copy_to_user(buf, &cret, len)) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user)", __func__); return -EFAULT; } rfs_d->irq_handler_done = 0; FELICA_LOG_DEBUG("[MFDD] %s END len = %d, cret = %d", __func__, len, cret); return len; } /****************************************************************************** * /dev/snfc_pon ******************************************************************************/ /* character device definition */ static dev_t devid_snfc_pon; static struct cdev cxd2235power_cdev; static const struct file_operations cxd2235power_fileops = { .owner = THIS_MODULE, .write = cxd2235power_write, .open = cxd2235power_open, .release = cxd2235power_release, }; /* * initialize device */ static int cxd2235power_init(void) { int ret = 0; struct device* class_dev; devid_snfc_pon = MKDEV(MISC_MAJOR, 0); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); ret = alloc_chrdev_region(&devid_snfc_pon , 0 , CXD2235_POWER_DEV_COUNT, CXD2235_POWER_DEV_NAME); if (ret) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return ret; } cdev_init(&cxd2235power_cdev, &cxd2235power_fileops); cxd2235power_cdev.owner = THIS_MODULE; ret = cdev_add(&cxd2235power_cdev, devid_snfc_pon, CXD2235_POWER_DEV_COUNT); if (ret) { unregister_chrdev_region(devid_snfc_pon, CXD2235_POWER_DEV_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return ret; } class_dev = device_create(felica_class, NULL, devid_snfc_pon, NULL, CXD2235_POWER_DEV_NAME); if (IS_ERR(class_dev)) { cdev_del(&cxd2235power_cdev); unregister_chrdev_region(devid_snfc_pon, CXD2235_POWER_DEV_COUNT); ret = PTR_ERR(class_dev); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create), ret=[%d]", __func__, ret); return ret; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * finalize device */ static void cxd2235power_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_snfc_pon); cdev_del(&cxd2235power_cdev); unregister_chrdev_region(devid_snfc_pon, CXD2235_POWER_DEV_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * open operation */ static int cxd2235power_open(struct inode *inode, struct file *file) { int uid_ret = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); uid_ret = snfc_uid_check(); if (uid_ret < 0) { FELICA_LOG_ERR ("[MFDD] %s open fail=[%d]", __func__, uid_ret); return -EACCES; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close operation */ static int cxd2235power_release(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); #if 
defined(CONFIG_ARCH_EXYNOS) gpio_set_value(GPIO_PINID_NFC_PON , GPIO_VALUE_LOW); #elif defined(CONFIG_ARCH_APQ8064) ice_gpiox_set(GPIO_PINID_NFC_PON, GPIO_VALUE_LOW); #endif FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * write operation */ static ssize_t cxd2235power_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { char on; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if ( len < 1 ) { FELICA_LOG_ERR("[MFDD] %s length check len = %d", __func__, len); return -EIO; } len = 1; if (copy_from_user(&on, data, len)) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_from_user)", __func__); return -EFAULT; } #if defined(CONFIG_ARCH_EXYNOS) gpio_set_value(GPIO_PINID_NFC_PON , on ); #elif defined(CONFIG_ARCH_APQ8064) ice_gpiox_set(GPIO_PINID_NFC_PON, on); #endif FELICA_LOG_DEBUG("[MFDD] %s END on = %d", __func__, on); return len; } /****************************************************************************** * /dev/snfc_rfs ******************************************************************************/ /* character device definition */ static dev_t devid_snfc_rfs; static struct cdev cdev_snfc_rfs; static const struct file_operations fops_snfc_rfs = { .owner = THIS_MODULE, .open = snfc_rfs_open, .release = snfc_rfs_close, .read = snfc_rfs_read, }; /* * initialize device */ static void snfc_rfs_init(void) { int ret; struct device *device_snfc_rfs; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_snfc_rfs = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_snfc_rfs, FELICA_BASEMINOR, FELICA_MINOR_COUNT, SNFC_RFS_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_snfc_rfs, &fops_snfc_rfs); ret = cdev_add(&cdev_snfc_rfs, devid_snfc_rfs, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_snfc_rfs, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_snfc_rfs = device_create(felica_class, NULL, devid_snfc_rfs, NULL, SNFC_RFS_NAME); if (IS_ERR(device_snfc_rfs)) { cdev_del(&cdev_snfc_rfs); unregister_chrdev_region(devid_snfc_rfs, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_snfc_rfs), MINOR(devid_snfc_rfs)); } /* * finalize device */ static void snfc_rfs_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_snfc_rfs); cdev_del(&cdev_snfc_rfs); unregister_chrdev_region(devid_snfc_rfs, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); } /* * open device */ static int snfc_rfs_open(struct inode *inode, struct file *file) { int uid_ret = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); uid_ret = snfc_uid_check(); if (uid_ret < 0) { FELICA_LOG_ERR ("[MFDD] %s open fail=[%d]", __func__, uid_ret); return -EACCES; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * close device */ static int snfc_rfs_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t snfc_rfs_read(struct file *file, char __user *buf, \ size_t len, loff_t *ppos) { int ret; char retparam; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); ret = gpio_get_value(GPIO_PINID_SNFC_RFS); if (ret == GPIO_VALUE_HIGH) { retparam = SNFC_RFS_STANDBY; FELICA_LOG_DEBUG("Carrier-Wave Status is [%d][standby]", 
retparam); } else if (ret == GPIO_VALUE_LOW) { retparam = SNFC_RFS_DETECTED; FELICA_LOG_DEBUG("Carrier-Wave Status is [%d][detected]", retparam); } else { FELICA_LOG_ERR("[MFDD] %s ERROR(gpio_get_value), ret=[%d]", __func__, ret); return -EIO; } ret = copy_to_user(buf, &retparam, SNFC_RFS_DATA_LEN); if (ret != 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(copy_to_user), ret=[%d]", __func__, ret); return -EFAULT; } *ppos += 1; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return SNFC_RFS_DATA_LEN; } /****************************************************************************** * /dev/snfc_uart ******************************************************************************/ /* character device definition */ static dev_t devid_snfc; static struct cdev cdev_snfc; static const struct file_operations fops_snfc = { .owner = THIS_MODULE, .open = snfc_uart_open, .release = snfc_uart_close, .read = snfc_uart_read, .write = snfc_uart_write, .unlocked_ioctl = snfc_uart_ioctl, .poll = snfc_uart_poll, }; /* * initialize device */ static void snfc_uart_init(void) { int ret; struct device *device_snfc; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); devid_snfc = MKDEV(FELICA_MAJOR, FELICA_MINOR); ret = alloc_chrdev_region(&devid_snfc, FELICA_BASEMINOR, FELICA_MINOR_COUNT, SNFC_UART_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]", __func__, ret); return; } cdev_init(&cdev_snfc, &fops_snfc); ret = cdev_add(&cdev_snfc, devid_snfc, FELICA_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_snfc, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]", __func__, ret); return; } device_snfc = device_create(felica_class, NULL, devid_snfc, NULL, SNFC_UART_NAME); if (IS_ERR(device_snfc)) { cdev_del(&cdev_snfc); unregister_chrdev_region(devid_snfc, FELICA_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)", __func__); return; } FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]", __func__, MAJOR(devid_snfc), MINOR(devid_snfc)); } /* * finalize device */ static void snfc_uart_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); device_destroy(felica_class, devid_snfc); cdev_del(&cdev_snfc); unregister_chrdev_region(devid_snfc, FELICA_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s START", __func__); } /* * open device */ static int snfc_uart_open(struct inode *inode, struct file *file) { uid_t uid; /* struct termios termios;*/ struct file *tty; int ret=0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); /* check NFC uid */ uid = __task_cred(current)->uid; if(uid != gnfc_uid){ FELICA_LOG_DEBUG(\ "[MFDD] %s END, uid=[%d]\n", __func__, uid); FELICA_LOG_DEBUG(\ "[MFDD] %s END, gnfc_uid=[%d]\n", __func__, gnfc_uid); return -EACCES; } file->private_data = dev_sem; if(down_interruptible(&dev_sem->felica_sem)){ FELICA_LOG_DEBUG(\ "[MFDD] %s END, ERR[ERESTARTSYS]\n", __func__); return -ERESTARTSYS; } if(pg_tty != NULL){ FELICA_LOG_DEBUG("[MFDD] %s multi open result=%d",__func__,ret); gnfc_open_cnt++; up(&dev_sem->felica_sem); return ret; } tty = filp_open(UART_DEV_NAME, O_RDWR | O_NOCTTY | O_NONBLOCK, 0); if(IS_ERR(tty)){ pg_tty = NULL; ret = PTR_ERR(tty); up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s filp_open result = %d",__func__,ret); return ret; } pg_tty = tty; gnfc_open_cnt++; ret = 0; up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * close device */ static int snfc_uart_close(struct inode *inode, struct file *file) { struct file *tty; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); 
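	/* The underlying UART (UART_DEV_NAME) is opened once via filp_open()
	 * on the first open and shared between callers; gnfc_open_cnt tracks
	 * the users and the tty file held in pg_tty is released only when the
	 * last user closes, all serialized by dev_sem->felica_sem. */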
if(down_interruptible(&dev_sem->felica_sem)){ return -ERESTARTSYS; } gnfc_open_cnt--; tty = NULL; if(gnfc_open_cnt == 0){ tty = pg_tty; if(tty){ filp_close(tty,0); pg_tty = NULL; } } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } /* * read operation */ static ssize_t snfc_uart_read(struct file *file, char __user *buf,\ size_t len, loff_t *ppos) { struct file *tty; ssize_t ret = -ENOSYS; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if(down_interruptible(&dev_sem->felica_sem)){ return -ERESTARTSYS; } tty = pg_tty; if(tty && tty->f_op->read){ ret = tty->f_op->read(tty,buf,len,ppos); } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * write operation */ static ssize_t snfc_uart_write(struct file *file, const char __user *data, \ size_t len, loff_t *ppos) { struct file *tty; ssize_t ret = -ENOSYS; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if(down_interruptible(&dev_sem->felica_sem)){ return -ERESTARTSYS; } tty = pg_tty; if(tty && tty->f_op->write){ ret = tty->f_op->write(tty,data,len,ppos); } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * ioctl operation */ static long snfc_uart_ioctl(struct file *file, unsigned int cmd, \ unsigned long arg) { struct file *tty; ssize_t ret = -ENOSYS; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if(down_interruptible(&dev_sem->felica_sem)){ return -ERESTARTSYS; } tty = pg_tty; if(tty && tty->f_op->unlocked_ioctl){ ret = tty->f_op->unlocked_ioctl(tty,cmd,arg); } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /* * poll operation */ static unsigned int snfc_uart_poll(struct file *file, \ struct poll_table_struct *poll_table) { struct file *tty; unsigned int ret = 0; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if(down_interruptible(&dev_sem->felica_sem)){ return 0; } tty = pg_tty; if(tty && tty->f_op->poll){ ret = tty->f_op->poll(tty,poll_table); } up(&dev_sem->felica_sem); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return ret; } /****************************************************************************** * /dev/uartcc ******************************************************************************/ /* character device definition */ static struct felica_sem_data *guartcc_sem; static dev_t devid_uartcc; static struct cdev cdev_uartcc; static const struct file_operations fops_uartcc = { .owner = THIS_MODULE, .open = uartcc_open, .release = uartcc_close, .unlocked_ioctl = uartcc_ioctl, }; /* * uartcc_init */ static void uartcc_init(void) { int ret; struct device *device_uartcc; FELICA_LOG_DEBUG("[MFDD] %s START\n", __func__); devid_uartcc = MKDEV(UARTCC_MAJOR, UARTCC_MINOR); ret = alloc_chrdev_region(&devid_uartcc, UARTCC_BASEMINOR, UARTCC_MINOR_COUNT, UARTCC_NAME); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(alloc_chrdev_region), ret=[%d]\n", \ __func__, ret); return; } cdev_init(&cdev_uartcc, &fops_uartcc); ret = cdev_add(&cdev_uartcc, devid_uartcc, UARTCC_MINOR_COUNT); if (ret < 0) { unregister_chrdev_region(devid_uartcc, UARTCC_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(cdev_add), ret=[%d]\n", __func__, \ ret); return; } device_uartcc = device_create(felica_class, NULL, devid_uartcc, NULL, UARTCC_NAME); if (IS_ERR(device_uartcc)) { cdev_del(&cdev_uartcc); unregister_chrdev_region(devid_uartcc, UARTCC_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(device_create)\n", __func__); return; } guartcc_sem = kmalloc(sizeof(struct felica_sem_data), GFP_KERNEL); if (!guartcc_sem) { 
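/* allocation failed: roll back the char device and region registration */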
cdev_del(&cdev_uartcc); unregister_chrdev_region(devid_uartcc, UARTCC_MINOR_COUNT); FELICA_LOG_ERR("[MFDD] %s ERROR(guartcc_sem malloc)\n", __func__); return; } sema_init(&guartcc_sem->felica_sem, 1); FELICA_LOG_DEBUG("[MFDD] %s END, major=[%d], minor=[%d]\n", __func__, \ MAJOR(devid_uartcc), MINOR(devid_uartcc)); } /* * uartcc_exit */ static void uartcc_exit(void) { FELICA_LOG_DEBUG("[MFDD] %s START\n", __func__); kfree(guartcc_sem); device_destroy(felica_class, devid_uartcc); cdev_del(&cdev_uartcc); unregister_chrdev_region(devid_uartcc, UARTCC_MINOR_COUNT); FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); } /* * open device */ static int uartcc_open(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START\n", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); return 0; } /* * close device */ static int uartcc_close(struct inode *inode, struct file *file) { FELICA_LOG_DEBUG("[MFDD] %s START\n", __func__); /* no operation */ FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); return 0; } /* * available operation */ static long uartcc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = 0; int user_buf = 0; FELICA_LOG_DEBUG("[MFDD] %s START, cmd=[%d]\n", __func__, cmd); switch (cmd) { case UARTCC_SET_NFC_START_REQ: if (down_interruptible(&guartcc_sem->felica_sem)) { FELICA_LOG_ERR("[MFDD] %s ERROR(down_interruptible)\n", \ __func__); return -ERESTARTSYS; } ret = copy_from_user(&user_buf, (void __user *)arg, sizeof(user_buf)); if (ret != 0) { FELICA_LOG_ERR ("[MFDD] %s ERROR(copy_from_user), ret=[%d]\n", __func__, ret); up(&guartcc_sem->felica_sem); return -EFAULT; } guartcc_start_req = user_buf; /* felica start request */ uartcc_felica_start_request(); FELICA_LOG_DEBUG("[MFDD] %s Set nfc start request=[%d]\n", \ __func__, guartcc_start_req); up(&guartcc_sem->felica_sem); break; case UARTCC_GET_FELICA_STATUS: if (down_interruptible(&guartcc_sem->felica_sem)){ FELICA_LOG_ERR("[MFDD] %s ERROR(down_interruptible)\n", \ __func__); return -ERESTARTSYS; } user_buf = uartcc_get_felica_status(); ret = copy_to_user((int __user*)arg, &user_buf, sizeof(user_buf)); if (ret != 0) { FELICA_LOG_ERR ("[MFDD] %s ERROR(copy_to_user), ret=[%d]\n", \ __func__, ret); up(&guartcc_sem->felica_sem); return -EFAULT; } FELICA_LOG_DEBUG("[MFDD] %s Get felica status=[%d]\n", \ __func__, guartcc_felica_status); up(&guartcc_sem->felica_sem); break; case UARTCC_SET_FELICA_STATUS: case UARTCC_GET_NFC_START_REQ: FELICA_LOG_DEBUG("[MFDD] %s No operation cmd\n", __func__); break; default: FELICA_LOG_ERR("[MFDD] %s ERROR(unknown command)\n", __func__); ret = -EIO; break; } FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); return ret; } /* * felica start request */ static void uartcc_felica_start_request(void) { struct icc_poll_data *felica_poll = &gfelica_poll_data; FELICA_LOG_DEBUG("[MFDD] %s START\n", __func__); FELICA_LOG_DEBUG("[MFDD] %s start_req=[%d], felica_status=[%d]\n", \ __func__, guartcc_start_req, guartcc_felica_status); /* wake up condition * - nfc star req is ENDPROC * - felica usage is waiting AutoPolling */ if ((guartcc_start_req == UARTCC_NFC_START_ENDPROC) && (uartcc_get_felica_status() == UARTCC_FELICA_STATAUS_WAIT_POLLING)) { felica_poll->rsp_done = 1; wake_up_interruptible(&felica_poll->rsp_wait); FELICA_LOG_DEBUG("[MFDD] %s wake up felica start event\n", __func__); } FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); } /* * set felica status */ static void uartcc_set_felica_status(int status) { FELICA_LOG_DEBUG("[MFDD] %s START, status=[%d]\n", 
__func__, status); guartcc_felica_status = status; FELICA_LOG_DEBUG("[MFDD] %s guartcc_felica_status=[%d]\n", __func__, guartcc_felica_status); FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); } /* * get felica status */ static int uartcc_get_felica_status(void) { FELICA_LOG_DEBUG("[MFDD] %s START, guartcc_felica_status=[%d]\n", \ __func__, guartcc_felica_status); FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); return guartcc_felica_status; } /* * check for idle state felica */ static unsigned int uartcc_is_idle_status(void) { int ret = 0; int flc_cts = 0; FELICA_LOG_DEBUG("[MFDD] %s START\n", __func__); flc_cts = uartcc_get_felica_status(); if (!flc_cts) { /* idle */ FELICA_LOG_DEBUG("[MFDD] %s felica status is idle\n", __func__); ret = 1; } else { /* else state */ FELICA_LOG_DEBUG("[MFDD] %s felica status isn't idle=[%d]\n", \ __func__, flc_cts); } FELICA_LOG_DEBUG("[MFDD] %s END\n", __func__); return ret; } /****************************************************************************** * common ******************************************************************************/ /* * add CXD2235 additional pin registration */ static void snfc_register_device(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); intu_poll_init(); schedule_delayed_work(&g_intu_d->work, msecs_to_jiffies(10)); rfs_poll_init(); schedule_delayed_work(&g_rfs_d->work, msecs_to_jiffies(10)); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * add CXD2235 additional pin unregistration */ static void snfc_deregister_device(void) { FELICA_LOG_DEBUG("[MFDD] %s START", __func__); available_poll_exit(); intu_poll_exit(); snfc_rfs_exit(); rfs_poll_exit(); hsel_exit(); cxd2235power_exit(); FELICA_LOG_DEBUG("[MFDD] %s END", __func__); } /* * add read and store CEN status for snfc_available_poll device */ static ssize_t snfc_cen_sts_init(void) { int ret; unsigned char address = gi2c_lockaddress; unsigned char read_buff = 0; gread_msgs[0].addr = gi2c_address; gread_msgs[0].buf = &address; gread_msgs[1].addr = gi2c_address; gread_msgs[1].buf = &read_buff; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); if (felica_i2c_client == NULL) { FELICA_LOG_DEBUG("felica_i2c_client is NULL"); return -EIO; } ret = i2c_transfer(felica_i2c_client->adapter, &gread_msgs[0], 1); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_transfer[0]), ret=[%d]", __func__, ret); return -EIO; } ret = i2c_transfer(felica_i2c_client->adapter, &gread_msgs[1], 1); if (ret < 0) { FELICA_LOG_ERR("[MFDD] %s ERROR(i2c_transfer[1]), ret=[%d]", __func__, ret); return -EIO; } FELICA_LOG_DEBUG("[MFDD] %s read_buff=[%d]", __func__, read_buff); read_buff &= FELICA_CONTROL_LOCK_MASK; FELICA_LOG_DEBUG("[MFDD] %s read_buff=[%d]", __func__, read_buff); g_cen_sts = read_buff; FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return FELICA_CEN_DATA_LEN; } /* * uid check */ static int snfc_uid_check(void) { uid_t uid; uid = __task_cred(current)->uid; FELICA_LOG_DEBUG("[MFDD] %s START", __func__); FELICA_LOG_DEBUG("[MFDD] %s gnfc_uid=[%d]", __func__, (int)gnfc_uid); if ((uid != gnfc_uid) && (uid != gdiag_uid)) { FELICA_LOG_ERR ("[MFDD] %s END, uid=[%d], gnfc_uid=[%d], gdiag_uid=[%d]", \ __func__, uid, gnfc_uid, gdiag_uid); return -1; } FELICA_LOG_DEBUG("[MFDD] %s END", __func__); return 0; } #endif /* CONFIG_NFC_FELICA */ module_init(felica_init); module_exit(felica_exit); MODULE_DESCRIPTION("felica_dd"); MODULE_LICENSE("GPL v2"); #endif /* CONFIG_FELICA */
droidroidz/USCC_R970_kernel
drivers/felica/felica.c
C
gpl-2.0
108,582
/*****************************************************************************/ /* * devio.c -- User space communication with USB devices. * * Copyright (C) 1999-2000 Thomas Sailer (sailer@ife.ee.ethz.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * This file implements the usbfs/x/y files, where * x is the bus number and y the device number. * * It allows user space programs/"drivers" to communicate directly * with USB devices without intervening kernel driver. * * Revision history * 22.12.1999 0.1 Initial release (split from proc_usb.c) * 04.01.2000 0.2 Turned into its own filesystem * 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery * (CAN-2005-3055) */ /*****************************************************************************/ #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <linux/cdev.h> #include <linux/notifier.h> #include <linux/security.h> #include <linux/user_namespace.h> #include <linux/scatterlist.h> #include <asm/uaccess.h> #include <asm/byteorder.h> #include <linux/moduleparam.h> #include "usb.h" #define USB_MAXBUS 64 #define USB_DEVICE_MAX USB_MAXBUS * 128 #define USB_SG_SIZE 16384 /* split-size for large txs */ /* Mutual exclusion for removal, open, and release */ DEFINE_MUTEX(usbfs_mutex); struct dev_state { struct list_head list; /* state list */ struct usb_device *dev; struct file *file; spinlock_t lock; /* protects the async urb lists */ struct list_head async_pending; struct list_head async_completed; wait_queue_head_t wait; /* wake up if a request completed */ unsigned int discsignr; struct pid *disc_pid; const struct cred *cred; void __user *disccontext; unsigned long ifclaimed; u32 secid; u32 disabled_bulk_eps; }; struct async { struct list_head asynclist; struct dev_state *ps; struct pid *pid; const struct cred *cred; unsigned int signr; unsigned int ifnum; void __user *userbuffer; void __user *userurb; struct urb *urb; unsigned int mem_usage; int status; u32 secid; u8 bulk_addr; u8 bulk_status; }; static bool usbfs_snoop; module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic"); #define snoop(dev, format, arg...) 
\ do { \ if (usbfs_snoop) \ dev_info(dev , format , ## arg); \ } while (0) enum snoop_when { SUBMIT, COMPLETE }; #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0) /* Limit on the total amount of memory we can allocate for transfers */ static unsigned usbfs_memory_mb = 16; module_param(usbfs_memory_mb, uint, 0644); MODULE_PARM_DESC(usbfs_memory_mb, "maximum MB allowed for usbfs buffers (0 = no limit)"); /* Hard limit, necessary to avoid aithmetic overflow */ #define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) static atomic_t usbfs_memory_usage; /* Total memory currently allocated */ /* Check whether it's okay to allocate more memory for a transfer */ static int usbfs_increase_memory_usage(unsigned amount) { unsigned lim; /* * Convert usbfs_memory_mb to bytes, avoiding overflows. * 0 means use the hard limit (effectively unlimited). */ lim = ACCESS_ONCE(usbfs_memory_mb); if (lim == 0 || lim > (USBFS_XFER_MAX >> 20)) lim = USBFS_XFER_MAX; else lim <<= 20; atomic_add(amount, &usbfs_memory_usage); if (atomic_read(&usbfs_memory_usage) <= lim) return 0; atomic_sub(amount, &usbfs_memory_usage); return -ENOMEM; } /* Memory for a transfer is being deallocated */ static void usbfs_decrease_memory_usage(unsigned amount) { atomic_sub(amount, &usbfs_memory_usage); } static int connected(struct dev_state *ps) { return (!list_empty(&ps->list) && ps->dev->state != USB_STATE_NOTATTACHED); } static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; mutex_lock(&file_inode(file)->i_mutex); switch (orig) { case 0: file->f_pos = offset; ret = file->f_pos; break; case 1: file->f_pos += offset; ret = file->f_pos; break; case 2: default: ret = -EINVAL; } mutex_unlock(&file_inode(file)->i_mutex); return ret; } static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; ssize_t ret = 0; unsigned len; loff_t pos; int i; pos = *ppos; usb_lock_device(dev); if (!connected(ps)) { ret = -ENODEV; goto err; } else if (pos < 0) { ret = -EINVAL; goto err; } if (pos < sizeof(struct usb_device_descriptor)) { /* 18 bytes - fits on the stack */ struct usb_device_descriptor temp_desc; memcpy(&temp_desc, &dev->descriptor, sizeof(dev->descriptor)); le16_to_cpus(&temp_desc.bcdUSB); le16_to_cpus(&temp_desc.idVendor); le16_to_cpus(&temp_desc.idProduct); le16_to_cpus(&temp_desc.bcdDevice); len = sizeof(struct usb_device_descriptor) - pos; if (len > nbytes) len = nbytes; if (copy_to_user(buf, ((char *)&temp_desc) + pos, len)) { ret = -EFAULT; goto err; } *ppos += len; buf += len; nbytes -= len; ret += len; } pos = sizeof(struct usb_device_descriptor); for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) { struct usb_config_descriptor *config = (struct usb_config_descriptor *)dev->rawdescriptors[i]; unsigned int length = le16_to_cpu(config->wTotalLength); if (*ppos < pos + length) { /* The descriptor may claim to be longer than it * really is. Here is the actual allocated length. 
*/ unsigned alloclen = le16_to_cpu(dev->config[i].desc.wTotalLength); len = length - (*ppos - pos); if (len > nbytes) len = nbytes; /* Simply don't write (skip over) unallocated parts */ if (alloclen > (*ppos - pos)) { alloclen -= (*ppos - pos); if (copy_to_user(buf, dev->rawdescriptors[i] + (*ppos - pos), min(len, alloclen))) { ret = -EFAULT; goto err; } } *ppos += len; buf += len; nbytes -= len; ret += len; } pos += length; } err: usb_unlock_device(dev); return ret; } /* * async list handling */ static struct async *alloc_async(unsigned int numisoframes) { struct async *as; as = kzalloc(sizeof(struct async), GFP_KERNEL); if (!as) return NULL; as->urb = usb_alloc_urb(numisoframes, GFP_KERNEL); if (!as->urb) { kfree(as); return NULL; } return as; } static void free_async(struct async *as) { int i; put_pid(as->pid); if (as->cred) put_cred(as->cred); for (i = 0; i < as->urb->num_sgs; i++) { if (sg_page(&as->urb->sg[i])) kfree(sg_virt(&as->urb->sg[i])); } kfree(as->urb->sg); kfree(as->urb->transfer_buffer); kfree(as->urb->setup_packet); usb_free_urb(as->urb); usbfs_decrease_memory_usage(as->mem_usage); kfree(as); } static void async_newpending(struct async *as) { struct dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_add_tail(&as->asynclist, &ps->async_pending); spin_unlock_irqrestore(&ps->lock, flags); } static void async_removepending(struct async *as) { struct dev_state *ps = as->ps; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); list_del_init(&as->asynclist); spin_unlock_irqrestore(&ps->lock, flags); } static struct async *async_getcompleted(struct dev_state *ps) { unsigned long flags; struct async *as = NULL; spin_lock_irqsave(&ps->lock, flags); if (!list_empty(&ps->async_completed)) { as = list_entry(ps->async_completed.next, struct async, asynclist); list_del_init(&as->asynclist); } spin_unlock_irqrestore(&ps->lock, flags); return as; } static struct async *async_getpending(struct dev_state *ps, void __user *userurb) { struct async *as; list_for_each_entry(as, &ps->async_pending, asynclist) if (as->userurb == userurb) { list_del_init(&as->asynclist); return as; } return NULL; } static void snoop_urb(struct usb_device *udev, void __user *userurb, int pipe, unsigned length, int timeout_or_status, enum snoop_when when, unsigned char *data, unsigned data_len) { static const char *types[] = {"isoc", "int", "ctrl", "bulk"}; static const char *dirs[] = {"out", "in"}; int ep; const char *t, *d; if (!usbfs_snoop) return; ep = usb_pipeendpoint(pipe); t = types[usb_pipetype(pipe)]; d = dirs[!!usb_pipein(pipe)]; if (userurb) { /* Async */ if (when == SUBMIT) dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " "length %u\n", userurb, ep, t, d, length); else dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " "actual_length %u status %d\n", userurb, ep, t, d, length, timeout_or_status); } else { if (when == SUBMIT) dev_info(&udev->dev, "ep%d %s-%s, length %u, " "timeout %d\n", ep, t, d, length, timeout_or_status); else dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, " "status %d\n", ep, t, d, length, timeout_or_status); } if (data && data_len > 0) { print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, data, data_len, 1); } } static void snoop_urb_data(struct urb *urb, unsigned len) { int i, size; if (!usbfs_snoop) return; if (urb->num_sgs == 0) { print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, urb->transfer_buffer, len, 1); return; } for (i = 0; i < urb->num_sgs && len; i++) { size = (len > USB_SG_SIZE) ? 
USB_SG_SIZE : len; print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, sg_virt(&urb->sg[i]), size, 1); len -= size; } } static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb) { unsigned i, len, size; if (urb->number_of_packets > 0) /* Isochronous */ len = urb->transfer_buffer_length; else /* Non-Isoc */ len = urb->actual_length; if (urb->num_sgs == 0) { if (copy_to_user(userbuffer, urb->transfer_buffer, len)) return -EFAULT; return 0; } for (i = 0; i < urb->num_sgs && len; i++) { size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len; if (copy_to_user(userbuffer, sg_virt(&urb->sg[i]), size)) return -EFAULT; userbuffer += size; len -= size; } return 0; } #define AS_CONTINUATION 1 #define AS_UNLINK 2 static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr) __releases(ps->lock) __acquires(ps->lock) { struct urb *urb; struct async *as; /* Mark all the pending URBs that match bulk_addr, up to but not * including the first one without AS_CONTINUATION. If such an * URB is encountered then a new transfer has already started so * the endpoint doesn't need to be disabled; otherwise it does. */ list_for_each_entry(as, &ps->async_pending, asynclist) { if (as->bulk_addr == bulk_addr) { if (as->bulk_status != AS_CONTINUATION) goto rescan; as->bulk_status = AS_UNLINK; as->bulk_addr = 0; } } ps->disabled_bulk_eps |= (1 << bulk_addr); /* Now carefully unlink all the marked pending URBs */ rescan: list_for_each_entry(as, &ps->async_pending, asynclist) { if (as->bulk_status == AS_UNLINK) { as->bulk_status = 0; /* Only once */ urb = as->urb; usb_get_urb(urb); spin_unlock(&ps->lock); /* Allow completions */ usb_unlink_urb(urb); usb_put_urb(urb); spin_lock(&ps->lock); goto rescan; } } } static void async_completed(struct urb *urb) { struct async *as = urb->context; struct dev_state *ps = as->ps; struct siginfo sinfo; struct pid *pid = NULL; u32 secid = 0; const struct cred *cred = NULL; int signr; spin_lock(&ps->lock); list_move_tail(&as->asynclist, &ps->async_completed); as->status = urb->status; signr = as->signr; if (signr) { memset(&sinfo, 0, sizeof(sinfo)); sinfo.si_signo = as->signr; sinfo.si_errno = as->status; sinfo.si_code = SI_ASYNCIO; sinfo.si_addr = as->userurb; pid = get_pid(as->pid); cred = get_cred(as->cred); secid = as->secid; } snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, as->status, COMPLETE, NULL, 0); if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN) snoop_urb_data(urb, urb->actual_length); if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && as->status != -ENOENT) cancel_bulk_urbs(ps, as->bulk_addr); spin_unlock(&ps->lock); if (signr) { kill_pid_info_as_cred(sinfo.si_signo, &sinfo, pid, cred, secid); put_pid(pid); put_cred(cred); } wake_up(&ps->wait); } static void destroy_async(struct dev_state *ps, struct list_head *list) { struct urb *urb; struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); while (!list_empty(list)) { as = list_entry(list->next, struct async, asynclist); list_del_init(&as->asynclist); urb = as->urb; usb_get_urb(urb); /* drop the spinlock so the completion handler can run */ spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(urb); usb_put_urb(urb); spin_lock_irqsave(&ps->lock, flags); } spin_unlock_irqrestore(&ps->lock, flags); } static void destroy_async_on_interface(struct dev_state *ps, unsigned int ifnum) { struct list_head *p, *q, hitlist; unsigned long flags; INIT_LIST_HEAD(&hitlist); spin_lock_irqsave(&ps->lock, flags); 
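/* under the lock, move URBs pending on this interface onto a private hitlist; they are killed by destroy_async() after the lock is dropped */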
list_for_each_safe(p, q, &ps->async_pending) if (ifnum == list_entry(p, struct async, asynclist)->ifnum) list_move_tail(p, &hitlist); spin_unlock_irqrestore(&ps->lock, flags); destroy_async(ps, &hitlist); } static void destroy_all_async(struct dev_state *ps) { destroy_async(ps, &ps->async_pending); } /* * interface claims are made only at the request of user level code, * which can also release them (explicitly or by closing files). * they're also undone when devices disconnect. */ static int driver_probe(struct usb_interface *intf, const struct usb_device_id *id) { return -ENODEV; } static void driver_disconnect(struct usb_interface *intf) { struct dev_state *ps = usb_get_intfdata(intf); unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber; if (!ps) return; /* NOTE: this relies on usbcore having canceled and completed * all pending I/O requests; 2.6 does that. */ if (likely(ifnum < 8*sizeof(ps->ifclaimed))) clear_bit(ifnum, &ps->ifclaimed); else dev_warn(&intf->dev, "interface number %u out of range\n", ifnum); usb_set_intfdata(intf, NULL); /* force async requests to complete */ destroy_async_on_interface(ps, ifnum); } /* The following routines are merely placeholders. There is no way * to inform a user task about suspend or resumes. */ static int driver_suspend(struct usb_interface *intf, pm_message_t msg) { return 0; } static int driver_resume(struct usb_interface *intf) { return 0; } struct usb_driver usbfs_driver = { .name = "usbfs", .probe = driver_probe, .disconnect = driver_disconnect, .suspend = driver_suspend, .resume = driver_resume, }; static int claimintf(struct dev_state *ps, unsigned int ifnum) { struct usb_device *dev = ps->dev; struct usb_interface *intf; int err; if (ifnum >= 8*sizeof(ps->ifclaimed)) return -EINVAL; /* already claimed */ if (test_bit(ifnum, &ps->ifclaimed)) return 0; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else err = usb_driver_claim_interface(&usbfs_driver, intf, ps); if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; } static int releaseintf(struct dev_state *ps, unsigned int ifnum) { struct usb_device *dev; struct usb_interface *intf; int err; err = -EINVAL; if (ifnum >= 8*sizeof(ps->ifclaimed)) return err; dev = ps->dev; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) { usb_driver_release_interface(&usbfs_driver, intf); err = 0; } return err; } static int checkintf(struct dev_state *ps, unsigned int ifnum) { if (ps->dev->state != USB_STATE_CONFIGURED) return -EHOSTUNREACH; if (ifnum >= 8*sizeof(ps->ifclaimed)) return -EINVAL; if (test_bit(ifnum, &ps->ifclaimed)) return 0; /* if not yet claimed, claim it for the driver */ dev_warn(&ps->dev->dev, "usbfs: process %d (%s) did not claim " "interface %u before use\n", task_pid_nr(current), current->comm, ifnum); return claimintf(ps, ifnum); } static int findintfep(struct usb_device *dev, unsigned int ep) { unsigned int i, j, e; struct usb_interface *intf; struct usb_host_interface *alts; struct usb_endpoint_descriptor *endpt; if (ep & ~(USB_DIR_IN|0xf)) return -EINVAL; if (!dev->actconfig) return -ESRCH; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { intf = dev->actconfig->interface[i]; for (j = 0; j < intf->num_altsetting; j++) { alts = &intf->altsetting[j]; for (e = 0; e < alts->desc.bNumEndpoints; e++) { endpt = &alts->endpoint[e].desc; if (endpt->bEndpointAddress == ep) return alts->desc.bInterfaceNumber; } } } return -ENOENT; } static int check_ctrlrecip(struct dev_state *ps, unsigned int 
requesttype, unsigned int request, unsigned int index) { int ret = 0; struct usb_host_interface *alt_setting; if (ps->dev->state != USB_STATE_UNAUTHENTICATED && ps->dev->state != USB_STATE_ADDRESS && ps->dev->state != USB_STATE_CONFIGURED) return -EHOSTUNREACH; if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype)) return 0; /* * check for the special corner case 'get_device_id' in the printer * class specification, where wIndex is (interface << 8 | altsetting) * instead of just interface */ if (requesttype == 0xa1 && request == 0) { alt_setting = usb_find_alt_setting(ps->dev->actconfig, index >> 8, index & 0xff); if (alt_setting && alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER) index >>= 8; } index &= 0xff; switch (requesttype & USB_RECIP_MASK) { case USB_RECIP_ENDPOINT: if ((index & ~USB_DIR_IN) == 0) return 0; ret = findintfep(ps->dev, index); if (ret < 0) { /* * Some not fully compliant Win apps seem to get * index wrong and have the endpoint number here * rather than the endpoint address (with the * correct direction). Win does let this through, * so we'll not reject it here but leave it to * the device to not break KVM. But we warn. */ ret = findintfep(ps->dev, index ^ 0x80); if (ret >= 0) dev_info(&ps->dev->dev, "%s: process %i (%s) requesting ep %02x but needs %02x\n", __func__, task_pid_nr(current), current->comm, index, index ^ 0x80); } if (ret >= 0) ret = checkintf(ps, ret); break; case USB_RECIP_INTERFACE: ret = checkintf(ps, index); break; } return ret; } static int match_devt(struct device *dev, void *data) { return dev->devt == (dev_t) (unsigned long) data; } static struct usb_device *usbdev_lookup_by_devt(dev_t devt) { struct device *dev; dev = bus_find_device(&usb_bus_type, NULL, (void *) (unsigned long) devt, match_devt); if (!dev) return NULL; return container_of(dev, struct usb_device, dev); } /* * file operations */ static int usbdev_open(struct inode *inode, struct file *file) { struct usb_device *dev = NULL; struct dev_state *ps; int ret; ret = -ENOMEM; ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL); if (!ps) goto out_free_ps; ret = -ENODEV; /* Protect against simultaneous removal or release */ mutex_lock(&usbfs_mutex); /* usbdev device-node */ if (imajor(inode) == USB_DEVICE_MAJOR) dev = usbdev_lookup_by_devt(inode->i_rdev); mutex_unlock(&usbfs_mutex); if (!dev) goto out_free_ps; usb_lock_device(dev); if (dev->state == USB_STATE_NOTATTACHED) goto out_unlock_device; ret = usb_autoresume_device(dev); if (ret) goto out_unlock_device; ps->dev = dev; ps->file = file; spin_lock_init(&ps->lock); INIT_LIST_HEAD(&ps->list); INIT_LIST_HEAD(&ps->async_pending); INIT_LIST_HEAD(&ps->async_completed); init_waitqueue_head(&ps->wait); ps->discsignr = 0; ps->disc_pid = get_pid(task_pid(current)); ps->cred = get_current_cred(); ps->disccontext = NULL; ps->ifclaimed = 0; security_task_getsecid(current, &ps->secid); smp_wmb(); list_add_tail(&ps->list, &dev->filelist); file->private_data = ps; usb_unlock_device(dev); snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current), current->comm); MYDBG("opened by process %d: %s\n", task_pid_nr(current), current->comm); return ret; out_unlock_device: usb_unlock_device(dev); usb_put_dev(dev); out_free_ps: kfree(ps); return ret; } static int usbdev_release(struct inode *inode, struct file *file) { struct dev_state *ps = file->private_data; struct usb_device *dev = ps->dev; unsigned int ifnum; struct async *as; usb_lock_device(dev); usb_hub_release_all_ports(dev, ps); list_del_init(&ps->list); for (ifnum = 0; ps->ifclaimed 
&& ifnum < 8*sizeof(ps->ifclaimed); ifnum++) { if (test_bit(ifnum, &ps->ifclaimed)) releaseintf(ps, ifnum); } destroy_all_async(ps); usb_autosuspend_device(dev); usb_unlock_device(dev); usb_put_dev(dev); put_pid(ps->disc_pid); put_cred(ps->cred); as = async_getcompleted(ps); while (as) { free_async(as); as = async_getcompleted(ps); } kfree(ps); return 0; } static int proc_control(struct dev_state *ps, void __user *arg) { struct usb_device *dev = ps->dev; struct usbdevfs_ctrltransfer ctrl; unsigned int tmo; unsigned char *tbuf; unsigned wLength; int i, pipe, ret; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.bRequest, ctrl.wIndex); if (ret) return ret; wLength = ctrl.wLength; /* To suppress 64k PAGE_SIZE warning */ if (wLength > PAGE_SIZE) return -EINVAL; ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) + sizeof(struct usb_ctrlrequest)); if (ret) return ret; tbuf = (unsigned char *)__get_free_page(GFP_KERNEL); if (!tbuf) { ret = -ENOMEM; goto done; } tmo = ctrl.timeout; snoop(&dev->dev, "control urb: bRequestType=%02x " "bRequest=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", ctrl.bRequestType, ctrl.bRequest, __le16_to_cpup(&ctrl.wValue), __le16_to_cpup(&ctrl.wIndex), __le16_to_cpup(&ctrl.wLength)); if (ctrl.bRequestType & 0x80) { if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data, ctrl.wLength)) { ret = -EINVAL; goto done; } pipe = usb_rcvctrlpipe(dev, 0); snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usb_control_msg(dev, pipe, ctrl.bRequest, ctrl.bRequestType, ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, tbuf, max(i, 0)); if ((i > 0) && ctrl.wLength) { if (copy_to_user(ctrl.data, tbuf, i)) { ret = -EFAULT; goto done; } } } else { if (ctrl.wLength) { if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) { ret = -EFAULT; goto done; } } pipe = usb_sndctrlpipe(dev, 0); snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, tbuf, ctrl.wLength); usb_unlock_device(dev); i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest, ctrl.bRequestType, ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0); } if (i < 0 && i != -EPIPE) { dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL " "failed cmd %s rqt %u rq %u len %u ret %d\n", current->comm, ctrl.bRequestType, ctrl.bRequest, ctrl.wLength, i); } ret = i; done: free_page((unsigned long) tbuf); usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) + sizeof(struct usb_ctrlrequest)); return ret; } static int proc_bulk(struct dev_state *ps, void __user *arg) { struct usb_device *dev = ps->dev; struct usbdevfs_bulktransfer bulk; unsigned int tmo, len1, pipe; int len2; unsigned char *tbuf; int i, ret; if (copy_from_user(&bulk, arg, sizeof(bulk))) return -EFAULT; ret = findintfep(ps->dev, bulk.ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; if (bulk.ep & USB_DIR_IN) pipe = usb_rcvbulkpipe(dev, bulk.ep & 0x7f); else pipe = usb_sndbulkpipe(dev, bulk.ep & 0x7f); if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN))) return -EINVAL; len1 = bulk.len; if (len1 >= USBFS_XFER_MAX) return -EINVAL; ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb)); if (ret) return ret; if (!(tbuf = kmalloc(len1, GFP_KERNEL))) { ret = -ENOMEM; goto done; } tmo = bulk.timeout; if (bulk.ep & 0x80) { if (len1 && 
!access_ok(VERIFY_WRITE, bulk.data, len1)) { ret = -EINVAL; goto done; } snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2); if (!i && len2) { if (copy_to_user(bulk.data, tbuf, len2)) { ret = -EFAULT; goto done; } } } else { if (len1) { if (copy_from_user(tbuf, bulk.data, len1)) { ret = -EFAULT; goto done; } } snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1); usb_unlock_device(dev); i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0); } ret = (i < 0 ? i : len2); done: kfree(tbuf); usbfs_decrease_memory_usage(len1 + sizeof(struct urb)); return ret; } static int proc_resetep(struct dev_state *ps, void __user *arg) { unsigned int ep; int ret; if (get_user(ep, (unsigned int __user *)arg)) return -EFAULT; ret = findintfep(ps->dev, ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; usb_reset_endpoint(ps->dev, ep); return 0; } static int proc_clearhalt(struct dev_state *ps, void __user *arg) { unsigned int ep; int pipe; int ret; if (get_user(ep, (unsigned int __user *)arg)) return -EFAULT; ret = findintfep(ps->dev, ep); if (ret < 0) return ret; ret = checkintf(ps, ret); if (ret) return ret; if (ep & USB_DIR_IN) pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f); else pipe = usb_sndbulkpipe(ps->dev, ep & 0x7f); return usb_clear_halt(ps->dev, pipe); } static int proc_getdriver(struct dev_state *ps, void __user *arg) { struct usbdevfs_getdriver gd; struct usb_interface *intf; int ret; if (copy_from_user(&gd, arg, sizeof(gd))) return -EFAULT; intf = usb_ifnum_to_if(ps->dev, gd.interface); if (!intf || !intf->dev.driver) ret = -ENODATA; else { strlcpy(gd.driver, intf->dev.driver->name, sizeof(gd.driver)); ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0); } return ret; } static int proc_connectinfo(struct dev_state *ps, void __user *arg) { struct usbdevfs_connectinfo ci = { .devnum = ps->dev->devnum, .slow = ps->dev->speed == USB_SPEED_LOW }; if (copy_to_user(arg, &ci, sizeof(ci))) return -EFAULT; return 0; } static int proc_resetdevice(struct dev_state *ps) { return usb_reset_device(ps->dev); } static int proc_setintf(struct dev_state *ps, void __user *arg) { struct usbdevfs_setinterface setintf; int ret; if (copy_from_user(&setintf, arg, sizeof(setintf))) return -EFAULT; if ((ret = checkintf(ps, setintf.interface))) return ret; return usb_set_interface(ps->dev, setintf.interface, setintf.altsetting); } static int proc_setconfig(struct dev_state *ps, void __user *arg) { int u; int status = 0; struct usb_host_config *actconfig; if (get_user(u, (int __user *)arg)) return -EFAULT; actconfig = ps->dev->actconfig; /* Don't touch the device if any interfaces are claimed. * It could interfere with other drivers' operations, and if * an interface is claimed by usbfs it could easily deadlock. 
*/ if (actconfig) { int i; for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) { if (usb_interface_claimed(actconfig->interface[i])) { dev_warn(&ps->dev->dev, "usbfs: interface %d claimed by %s " "while '%s' sets config #%d\n", actconfig->interface[i] ->cur_altsetting ->desc.bInterfaceNumber, actconfig->interface[i] ->dev.driver->name, current->comm, u); status = -EBUSY; break; } } } /* SET_CONFIGURATION is often abused as a "cheap" driver reset, * so avoid usb_set_configuration()'s kick to sysfs */ if (status == 0) { if (actconfig && actconfig->desc.bConfigurationValue == u) status = usb_reset_configuration(ps->dev); else status = usb_set_configuration(ps->dev, u); } return status; } static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, struct usbdevfs_iso_packet_desc __user *iso_frame_desc, void __user *arg) { struct usbdevfs_iso_packet_desc *isopkt = NULL; struct usb_host_endpoint *ep; struct async *as = NULL; struct usb_ctrlrequest *dr = NULL; unsigned int u, totlen, isofrmlen; int i, ret, is_in, num_sgs = 0, ifnum = -1; void *buf; if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP | USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | USBDEVFS_URB_NO_INTERRUPT)) return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) return -EINVAL; if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL && (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) { ifnum = findintfep(ps->dev, uurb->endpoint); if (ifnum < 0) return ifnum; ret = checkintf(ps, ifnum); if (ret) return ret; } if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) { is_in = 1; ep = ps->dev->ep_in[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK]; } else { is_in = 0; ep = ps->dev->ep_out[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK]; } if (!ep) return -ENOENT; u = 0; switch(uurb->type) { case USBDEVFS_URB_TYPE_CONTROL: if (!usb_endpoint_xfer_control(&ep->desc)) return -EINVAL; /* min 8 byte setup packet */ if (uurb->buffer_length < 8) return -EINVAL; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!dr) return -ENOMEM; if (copy_from_user(dr, uurb->buffer, 8)) { ret = -EFAULT; goto error; } if (uurb->buffer_length < (le16_to_cpup(&dr->wLength) + 8)) { ret = -EINVAL; goto error; } ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest, le16_to_cpup(&dr->wIndex)); if (ret) goto error; uurb->number_of_packets = 0; uurb->buffer_length = le16_to_cpup(&dr->wLength); uurb->buffer += 8; if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) { is_in = 1; uurb->endpoint |= USB_DIR_IN; } else { is_in = 0; uurb->endpoint &= ~USB_DIR_IN; } snoop(&ps->dev->dev, "control urb: bRequestType=%02x " "bRequest=%02x wValue=%04x " "wIndex=%04x wLength=%04x\n", dr->bRequestType, dr->bRequest, __le16_to_cpup(&dr->wValue), __le16_to_cpup(&dr->wIndex), __le16_to_cpup(&dr->wLength)); u = sizeof(struct usb_ctrlrequest); break; case USBDEVFS_URB_TYPE_BULK: switch (usb_endpoint_type(&ep->desc)) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_ISOC: return -EINVAL; case USB_ENDPOINT_XFER_INT: /* allow single-shot interrupt transfers */ uurb->type = USBDEVFS_URB_TYPE_INTERRUPT; goto interrupt_urb; } uurb->number_of_packets = 0; num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE); if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize) num_sgs = 0; break; case USBDEVFS_URB_TYPE_INTERRUPT: if (!usb_endpoint_xfer_int(&ep->desc)) return -EINVAL; interrupt_urb: uurb->number_of_packets = 0; break; case USBDEVFS_URB_TYPE_ISO: /* arbitrary limit */ if (uurb->number_of_packets < 1 || 
uurb->number_of_packets > 128) return -EINVAL; if (!usb_endpoint_xfer_isoc(&ep->desc)) return -EINVAL; isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) * uurb->number_of_packets; if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL))) return -ENOMEM; if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) { ret = -EFAULT; goto error; } for (totlen = u = 0; u < uurb->number_of_packets; u++) { /* * arbitrary limit need for USB 3.0 * bMaxBurst (0~15 allowed, 1~16 packets) * bmAttributes (bit 1:0, mult 0~2, 1~3 packets) * sizemax: 1024 * 16 * 3 = 49152 */ if (isopkt[u].length > 49152) { ret = -EINVAL; goto error; } totlen += isopkt[u].length; } u *= sizeof(struct usb_iso_packet_descriptor); uurb->buffer_length = totlen; break; default: return -EINVAL; } if (uurb->buffer_length >= USBFS_XFER_MAX) { ret = -EINVAL; goto error; } if (uurb->buffer_length > 0 && !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } as = alloc_async(uurb->number_of_packets); if (!as) { ret = -ENOMEM; goto error; } u += sizeof(struct async) + sizeof(struct urb) + uurb->buffer_length + num_sgs * sizeof(struct scatterlist); ret = usbfs_increase_memory_usage(u); if (ret) goto error; as->mem_usage = u; if (num_sgs) { as->urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_KERNEL); if (!as->urb->sg) { ret = -ENOMEM; goto error; } as->urb->num_sgs = num_sgs; sg_init_table(as->urb->sg, as->urb->num_sgs); totlen = uurb->buffer_length; for (i = 0; i < as->urb->num_sgs; i++) { u = (totlen > USB_SG_SIZE) ? USB_SG_SIZE : totlen; buf = kmalloc(u, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto error; } sg_set_buf(&as->urb->sg[i], buf, u); if (!is_in) { if (copy_from_user(buf, uurb->buffer, u)) { ret = -EFAULT; goto error; } uurb->buffer += u; } totlen -= u; } } else if (uurb->buffer_length > 0) { as->urb->transfer_buffer = kmalloc(uurb->buffer_length, GFP_KERNEL); if (!as->urb->transfer_buffer) { ret = -ENOMEM; goto error; } if (!is_in) { if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, uurb->buffer_length)) { ret = -EFAULT; goto error; } } else if (uurb->type == USBDEVFS_URB_TYPE_ISO) { /* * Isochronous input data may end up being * discontiguous if some of the packets are short. * Clear the buffer so that the gaps don't leak * kernel data to userspace. */ memset(as->urb->transfer_buffer, 0, uurb->buffer_length); } } as->urb->dev = ps->dev; as->urb->pipe = (uurb->type << 30) | __create_pipe(ps->dev, uurb->endpoint & 0xf) | (uurb->endpoint & USB_DIR_IN); /* This tedious sequence is necessary because the URB_* flags * are internal to the kernel and subject to change, whereas * the USBDEVFS_URB_* flags are a user API and must not be changed. */ u = (is_in ? 
URB_DIR_IN : URB_DIR_OUT); if (uurb->flags & USBDEVFS_URB_ISO_ASAP) u |= URB_ISO_ASAP; if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) u |= URB_SHORT_NOT_OK; if (uurb->flags & USBDEVFS_URB_NO_FSBR) u |= URB_NO_FSBR; if (uurb->flags & USBDEVFS_URB_ZERO_PACKET) u |= URB_ZERO_PACKET; if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) u |= URB_NO_INTERRUPT; as->urb->transfer_flags = u; as->urb->transfer_buffer_length = uurb->buffer_length; as->urb->setup_packet = (unsigned char *)dr; dr = NULL; as->urb->start_frame = uurb->start_frame; as->urb->number_of_packets = uurb->number_of_packets; if (uurb->type == USBDEVFS_URB_TYPE_ISO || ps->dev->speed == USB_SPEED_HIGH) as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); else as->urb->interval = ep->desc.bInterval; as->urb->context = as; as->urb->complete = async_completed; for (totlen = u = 0; u < uurb->number_of_packets; u++) { as->urb->iso_frame_desc[u].offset = totlen; as->urb->iso_frame_desc[u].length = isopkt[u].length; totlen += isopkt[u].length; } kfree(isopkt); isopkt = NULL; as->ps = ps; as->userurb = arg; if (is_in && uurb->buffer_length > 0) as->userbuffer = uurb->buffer; else as->userbuffer = NULL; as->signr = uurb->signr; as->ifnum = ifnum; as->pid = get_pid(task_pid(current)); as->cred = get_current_cred(); security_task_getsecid(current, &as->secid); snoop_urb(ps->dev, as->userurb, as->urb->pipe, as->urb->transfer_buffer_length, 0, SUBMIT, NULL, 0); if (!is_in) snoop_urb_data(as->urb, as->urb->transfer_buffer_length); async_newpending(as); if (usb_endpoint_xfer_bulk(&ep->desc)) { spin_lock_irq(&ps->lock); /* Not exactly the endpoint address; the direction bit is * shifted to the 0x10 position so that the value will be * between 0 and 31. */ as->bulk_addr = usb_endpoint_num(&ep->desc) | ((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) >> 3); /* If this bulk URB is the start of a new transfer, re-enable * the endpoint. Otherwise mark it as a continuation URB. */ if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION) as->bulk_status = AS_CONTINUATION; else ps->disabled_bulk_eps &= ~(1 << as->bulk_addr); /* Don't accept continuation URBs if the endpoint is * disabled because of an earlier error. 
*/ if (ps->disabled_bulk_eps & (1 << as->bulk_addr)) ret = -EREMOTEIO; else ret = usb_submit_urb(as->urb, GFP_ATOMIC); spin_unlock_irq(&ps->lock); } else { ret = usb_submit_urb(as->urb, GFP_KERNEL); } if (ret) { dev_printk(KERN_DEBUG, &ps->dev->dev, "usbfs: usb_submit_urb returned %d\n", ret); snoop_urb(ps->dev, as->userurb, as->urb->pipe, 0, ret, COMPLETE, NULL, 0); async_removepending(as); goto error; } return 0; error: kfree(isopkt); kfree(dr); if (as) free_async(as); return ret; } static int proc_submiturb(struct dev_state *ps, void __user *arg) { struct usbdevfs_urb uurb; if (copy_from_user(&uurb, arg, sizeof(uurb))) return -EFAULT; return proc_do_submiturb(ps, &uurb, (((struct usbdevfs_urb __user *)arg)->iso_frame_desc), arg); } static int proc_unlinkurb(struct dev_state *ps, void __user *arg) { struct urb *urb; struct async *as; unsigned long flags; spin_lock_irqsave(&ps->lock, flags); as = async_getpending(ps, arg); if (!as) { spin_unlock_irqrestore(&ps->lock, flags); return -EINVAL; } urb = as->urb; usb_get_urb(urb); spin_unlock_irqrestore(&ps->lock, flags); usb_kill_urb(urb); usb_put_urb(urb); return 0; } static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; } if (put_user(as->status, &userurb->status)) goto err_out; if (put_user(urb->actual_length, &userurb->actual_length)) goto err_out; if (put_user(urb->error_count, &userurb->error_count)) goto err_out; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) goto err_out; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) goto err_out; } } if (put_user(addr, (void __user * __user *)arg)) return -EFAULT; return 0; err_out: return -EFAULT; } static struct async *reap_as(struct dev_state *ps) { DECLARE_WAITQUEUE(wait, current); struct async *as = NULL; struct usb_device *dev = ps->dev; add_wait_queue(&ps->wait, &wait); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); as = async_getcompleted(ps); if (as) break; if (signal_pending(current)) break; usb_unlock_device(dev); schedule(); usb_lock_device(dev); } remove_wait_queue(&ps->wait, &wait); set_current_state(TASK_RUNNING); return as; } static int proc_reapurb(struct dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); if (as) { int retval = processcompl(as, (void __user * __user *)arg); free_async(as); return retval; } if (signal_pending(current)) return -EINTR; return -EIO; } static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) { int retval; struct async *as; as = async_getcompleted(ps); retval = -EAGAIN; if (as) { retval = processcompl(as, (void __user * __user *)arg); free_async(as); } return retval; } #ifdef CONFIG_COMPAT static int proc_control_compat(struct dev_state *ps, struct usbdevfs_ctrltransfer32 __user *p32) { struct usbdevfs_ctrltransfer __user *p; __u32 udata; p = compat_alloc_user_space(sizeof(*p)); if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) || get_user(udata, &p32->data) || put_user(compat_ptr(udata), &p->data)) return -EFAULT; return proc_control(ps, p); } static int proc_bulk_compat(struct dev_state *ps, struct usbdevfs_bulktransfer32 __user *p32) { struct usbdevfs_bulktransfer __user *p; 
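/* rebuild a native usbdevfs_bulktransfer on the compat user stack from the 32-bit layout */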
compat_uint_t n; compat_caddr_t addr; p = compat_alloc_user_space(sizeof(*p)); if (get_user(n, &p32->ep) || put_user(n, &p->ep) || get_user(n, &p32->len) || put_user(n, &p->len) || get_user(n, &p32->timeout) || put_user(n, &p->timeout) || get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data)) return -EFAULT; return proc_bulk(ps, p); } static int proc_disconnectsignal_compat(struct dev_state *ps, void __user *arg) { struct usbdevfs_disconnectsignal32 ds; if (copy_from_user(&ds, arg, sizeof(ds))) return -EFAULT; ps->discsignr = ds.signr; ps->disccontext = compat_ptr(ds.context); return 0; } static int get_urb32(struct usbdevfs_urb *kurb, struct usbdevfs_urb32 __user *uurb) { __u32 uptr; if (!access_ok(VERIFY_READ, uurb, sizeof(*uurb)) || __get_user(kurb->type, &uurb->type) || __get_user(kurb->endpoint, &uurb->endpoint) || __get_user(kurb->status, &uurb->status) || __get_user(kurb->flags, &uurb->flags) || __get_user(kurb->buffer_length, &uurb->buffer_length) || __get_user(kurb->actual_length, &uurb->actual_length) || __get_user(kurb->start_frame, &uurb->start_frame) || __get_user(kurb->number_of_packets, &uurb->number_of_packets) || __get_user(kurb->error_count, &uurb->error_count) || __get_user(kurb->signr, &uurb->signr)) return -EFAULT; if (__get_user(uptr, &uurb->buffer)) return -EFAULT; kurb->buffer = compat_ptr(uptr); if (__get_user(uptr, &uurb->usercontext)) return -EFAULT; kurb->usercontext = compat_ptr(uptr); return 0; } static int proc_submiturb_compat(struct dev_state *ps, void __user *arg) { struct usbdevfs_urb uurb; if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg)) return -EFAULT; return proc_do_submiturb(ps, &uurb, ((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc, arg); } static int processcompl_compat(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; struct usbdevfs_urb32 __user *userurb = as->userurb; void __user *addr = as->userurb; unsigned int i; if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; } if (put_user(as->status, &userurb->status)) return -EFAULT; if (put_user(urb->actual_length, &userurb->actual_length)) return -EFAULT; if (put_user(urb->error_count, &userurb->error_count)) return -EFAULT; if (usb_endpoint_xfer_isoc(&urb->ep->desc)) { for (i = 0; i < urb->number_of_packets; i++) { if (put_user(urb->iso_frame_desc[i].actual_length, &userurb->iso_frame_desc[i].actual_length)) return -EFAULT; if (put_user(urb->iso_frame_desc[i].status, &userurb->iso_frame_desc[i].status)) return -EFAULT; } } if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) return -EFAULT; return 0; } static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) { struct async *as = reap_as(ps); if (as) { int retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); return retval; } if (signal_pending(current)) return -EINTR; return -EIO; } static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) { int retval; struct async *as; retval = -EAGAIN; as = async_getcompleted(ps); if (as) { retval = processcompl_compat(as, (void __user * __user *)arg); free_async(as); } return retval; } #endif static int proc_disconnectsignal(struct dev_state *ps, void __user *arg) { struct usbdevfs_disconnectsignal ds; if (copy_from_user(&ds, arg, sizeof(ds))) return -EFAULT; ps->discsignr = ds.signr; ps->disccontext = ds.context; return 0; } static int proc_claiminterface(struct dev_state *ps, void __user *arg) { unsigned int ifnum; if (get_user(ifnum, 
(unsigned int __user *)arg)) return -EFAULT; return claimintf(ps, ifnum); } static int proc_releaseinterface(struct dev_state *ps, void __user *arg) { unsigned int ifnum; int ret; if (get_user(ifnum, (unsigned int __user *)arg)) return -EFAULT; if ((ret = releaseintf(ps, ifnum)) < 0) return ret; destroy_async_on_interface (ps, ifnum); return 0; } static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl) { int size; void *buf = NULL; int retval = 0; struct usb_interface *intf = NULL; struct usb_driver *driver = NULL; /* alloc buffer */ if ((size = _IOC_SIZE(ctl->ioctl_code)) > 0) { if ((buf = kmalloc(size, GFP_KERNEL)) == NULL) return -ENOMEM; if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) { if (copy_from_user(buf, ctl->data, size)) { kfree(buf); return -EFAULT; } } else { memset(buf, 0, size); } } if (!connected(ps)) { kfree(buf); return -ENODEV; } if (ps->dev->state != USB_STATE_CONFIGURED) retval = -EHOSTUNREACH; else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno))) retval = -EINVAL; else switch (ctl->ioctl_code) { /* disconnect kernel driver from interface */ case USBDEVFS_DISCONNECT: if (intf->dev.driver) { driver = to_usb_driver(intf->dev.driver); dev_dbg(&intf->dev, "disconnect by usbfs\n"); usb_driver_release_interface(driver, intf); } else retval = -ENODATA; break; /* let kernel drivers try to (re)bind to the interface */ case USBDEVFS_CONNECT: if (!intf->dev.driver) retval = device_attach(&intf->dev); else retval = -EBUSY; break; /* talk directly to the interface's driver */ default: if (intf->dev.driver) driver = to_usb_driver(intf->dev.driver); if (driver == NULL || driver->unlocked_ioctl == NULL) { retval = -ENOTTY; } else { retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf); if (retval == -ENOIOCTLCMD) retval = -ENOTTY; } } /* cleanup and return */ if (retval >= 0 && (_IOC_DIR(ctl->ioctl_code) & _IOC_READ) != 0 && size > 0 && copy_to_user(ctl->data, buf, size) != 0) retval = -EFAULT; kfree(buf); return retval; } static int proc_ioctl_default(struct dev_state *ps, void __user *arg) { struct usbdevfs_ioctl ctrl; if (copy_from_user(&ctrl, arg, sizeof(ctrl))) return -EFAULT; return proc_ioctl(ps, &ctrl); } #ifdef CONFIG_COMPAT static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg) { struct usbdevfs_ioctl32 __user *uioc; struct usbdevfs_ioctl ctrl; u32 udata; uioc = compat_ptr((long)arg); if (!access_ok(VERIFY_READ, uioc, sizeof(*uioc)) || __get_user(ctrl.ifno, &uioc->ifno) || __get_user(ctrl.ioctl_code, &uioc->ioctl_code) || __get_user(udata, &uioc->data)) return -EFAULT; ctrl.data = compat_ptr(udata); return proc_ioctl(ps, &ctrl); } #endif static int proc_claim_port(struct dev_state *ps, void __user *arg) { unsigned portnum; int rc; if (get_user(portnum, (unsigned __user *) arg)) return -EFAULT; rc = usb_hub_claim_port(ps->dev, portnum, ps); if (rc == 0) snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n", portnum, task_pid_nr(current), current->comm); return rc; } static int proc_release_port(struct dev_state *ps, void __user *arg) { unsigned portnum; if (get_user(portnum, (unsigned __user *) arg)) return -EFAULT; return usb_hub_release_port(ps->dev, portnum, ps); } static int proc_get_capabilities(struct dev_state *ps, void __user *arg) { __u32 caps; caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM; if (!ps->dev->bus->no_stop_on_short) caps |= USBDEVFS_CAP_BULK_CONTINUATION; if (ps->dev->bus->sg_tablesize) caps |= USBDEVFS_CAP_BULK_SCATTER_GATHER; if (put_user(caps, (__u32 __user *)arg)) return -EFAULT; return 0; } 
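/* USBDEVFS_DISCONNECT_CLAIM: optionally match the bound driver's name, unbind it, and claim the interface for usbfs in one step */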
static int proc_disconnect_claim(struct dev_state *ps, void __user *arg) { struct usbdevfs_disconnect_claim dc; struct usb_interface *intf; if (copy_from_user(&dc, arg, sizeof(dc))) return -EFAULT; intf = usb_ifnum_to_if(ps->dev, dc.interface); if (!intf) return -EINVAL; if (intf->dev.driver) { struct usb_driver *driver = to_usb_driver(intf->dev.driver); if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER) && strncmp(dc.driver, intf->dev.driver->name, sizeof(dc.driver)) != 0) return -EBUSY; if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_EXCEPT_DRIVER) && strncmp(dc.driver, intf->dev.driver->name, sizeof(dc.driver)) == 0) return -EBUSY; dev_dbg(&intf->dev, "disconnect by usbfs\n"); usb_driver_release_interface(driver, intf); } return claimintf(ps, dc.interface); } /* * NOTE: All requests here that have interface numbers as parameters * are assuming that somehow the configuration has been prevented from * changing. But there's no mechanism to ensure that... */ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, void __user *p) { struct dev_state *ps = file->private_data; struct inode *inode = file_inode(file); struct usb_device *dev = ps->dev; int ret = -ENOTTY; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; usb_lock_device(dev); if (!connected(ps)) { usb_unlock_device(dev); return -ENODEV; } switch (cmd) { case USBDEVFS_CONTROL: snoop(&dev->dev, "%s: CONTROL\n", __func__); ret = proc_control(ps, p); if (ret >= 0) inode->i_mtime = CURRENT_TIME; break; case USBDEVFS_BULK: snoop(&dev->dev, "%s: BULK\n", __func__); ret = proc_bulk(ps, p); if (ret >= 0) inode->i_mtime = CURRENT_TIME; break; case USBDEVFS_RESETEP: snoop(&dev->dev, "%s: RESETEP\n", __func__); ret = proc_resetep(ps, p); if (ret >= 0) inode->i_mtime = CURRENT_TIME; break; case USBDEVFS_RESET: snoop(&dev->dev, "%s: RESET\n", __func__); ret = proc_resetdevice(ps); break; case USBDEVFS_CLEAR_HALT: snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__); ret = proc_clearhalt(ps, p); if (ret >= 0) inode->i_mtime = CURRENT_TIME; break; case USBDEVFS_GETDRIVER: snoop(&dev->dev, "%s: GETDRIVER\n", __func__); ret = proc_getdriver(ps, p); break; case USBDEVFS_CONNECTINFO: snoop(&dev->dev, "%s: CONNECTINFO\n", __func__); ret = proc_connectinfo(ps, p); break; case USBDEVFS_SETINTERFACE: snoop(&dev->dev, "%s: SETINTERFACE\n", __func__); ret = proc_setintf(ps, p); break; case USBDEVFS_SETCONFIGURATION: snoop(&dev->dev, "%s: SETCONFIGURATION\n", __func__); ret = proc_setconfig(ps, p); break; case USBDEVFS_SUBMITURB: snoop(&dev->dev, "%s: SUBMITURB\n", __func__); ret = proc_submiturb(ps, p); if (ret >= 0) inode->i_mtime = CURRENT_TIME; break; #ifdef CONFIG_COMPAT case USBDEVFS_CONTROL32: snoop(&dev->dev, "%s: CONTROL32\n", __func__); ret = proc_control_compat(ps, p); if (ret >= 0) inode->i_mtime = CURRENT_TIME; break; case USBDEVFS_BULK32: snoop(&dev->dev, "%s: BULK32\n", __func__); ret = proc_bulk_compat(ps, p); if (ret >= 0) inode->i_mtime = CURRENT_TIME; break; case USBDEVFS_DISCSIGNAL32: snoop(&dev->dev, "%s: DISCSIGNAL32\n", __func__); ret = proc_disconnectsignal_compat(ps, p); break; case USBDEVFS_SUBMITURB32: snoop(&dev->dev, "%s: SUBMITURB32\n", __func__); ret = proc_submiturb_compat(ps, p); if (ret >= 0) inode->i_mtime = CURRENT_TIME; break; case USBDEVFS_REAPURB32: snoop(&dev->dev, "%s: REAPURB32\n", __func__); ret = proc_reapurb_compat(ps, p); break; case USBDEVFS_REAPURBNDELAY32: snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__); ret = proc_reapurbnonblock_compat(ps, p); break; case USBDEVFS_IOCTL32: snoop(&dev->dev, "%s: 
IOCTL32\n", __func__); ret = proc_ioctl_compat(ps, ptr_to_compat(p)); break; #endif case USBDEVFS_DISCARDURB: snoop(&dev->dev, "%s: DISCARDURB\n", __func__); ret = proc_unlinkurb(ps, p); break; case USBDEVFS_REAPURB: snoop(&dev->dev, "%s: REAPURB\n", __func__); ret = proc_reapurb(ps, p); break; case USBDEVFS_REAPURBNDELAY: snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__); ret = proc_reapurbnonblock(ps, p); break; case USBDEVFS_DISCSIGNAL: snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__); ret = proc_disconnectsignal(ps, p); break; case USBDEVFS_CLAIMINTERFACE: snoop(&dev->dev, "%s: CLAIMINTERFACE\n", __func__); ret = proc_claiminterface(ps, p); break; case USBDEVFS_RELEASEINTERFACE: snoop(&dev->dev, "%s: RELEASEINTERFACE\n", __func__); ret = proc_releaseinterface(ps, p); break; case USBDEVFS_IOCTL: snoop(&dev->dev, "%s: IOCTL\n", __func__); ret = proc_ioctl_default(ps, p); break; case USBDEVFS_CLAIM_PORT: snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__); ret = proc_claim_port(ps, p); break; case USBDEVFS_RELEASE_PORT: snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__); ret = proc_release_port(ps, p); break; case USBDEVFS_GET_CAPABILITIES: ret = proc_get_capabilities(ps, p); break; case USBDEVFS_DISCONNECT_CLAIM: ret = proc_disconnect_claim(ps, p); break; } usb_unlock_device(dev); if (ret >= 0) inode->i_atime = CURRENT_TIME; return ret; } static long usbdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; ret = usbdev_do_ioctl(file, cmd, (void __user *)arg); return ret; } #ifdef CONFIG_COMPAT static long usbdev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg)); return ret; } #endif /* No kernel lock - fine */ static unsigned int usbdev_poll(struct file *file, struct poll_table_struct *wait) { struct dev_state *ps = file->private_data; unsigned int mask = 0; poll_wait(file, &ps->wait, wait); if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) mask |= POLLOUT | POLLWRNORM; if (!connected(ps)) mask |= POLLERR | POLLHUP; return mask; } const struct file_operations usbdev_file_operations = { .owner = THIS_MODULE, .llseek = usbdev_lseek, .read = usbdev_read, .poll = usbdev_poll, .unlocked_ioctl = usbdev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = usbdev_compat_ioctl, #endif .open = usbdev_open, .release = usbdev_release, }; static void usbdev_remove(struct usb_device *udev) { struct dev_state *ps; struct siginfo sinfo; while (!list_empty(&udev->filelist)) { ps = list_entry(udev->filelist.next, struct dev_state, list); destroy_all_async(ps); wake_up_all(&ps->wait); list_del_init(&ps->list); if (ps->discsignr) { memset(&sinfo, 0, sizeof(sinfo)); sinfo.si_signo = ps->discsignr; sinfo.si_errno = EPIPE; sinfo.si_code = SI_ASYNCIO; sinfo.si_addr = ps->disccontext; kill_pid_info_as_cred(ps->discsignr, &sinfo, ps->disc_pid, ps->cred, ps->secid); } } } static int usbdev_notify(struct notifier_block *self, unsigned long action, void *dev) { switch (action) { case USB_DEVICE_ADD: break; case USB_DEVICE_REMOVE: usbdev_remove(dev); break; } return NOTIFY_OK; } static struct notifier_block usbdev_nb = { .notifier_call = usbdev_notify, }; static struct cdev usb_device_cdev; int __init usb_devio_init(void) { int retval; retval = register_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX, "usb_device"); if (retval) { printk(KERN_ERR "Unable to register minors for usb_device\n"); goto out; } cdev_init(&usb_device_cdev, &usbdev_file_operations); retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, 
USB_DEVICE_MAX); if (retval) { printk(KERN_ERR "Unable to get usb_device major %d\n", USB_DEVICE_MAJOR); goto error_cdev; } usb_register_notify(&usbdev_nb); out: return retval; error_cdev: unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); goto out; } void usb_devio_cleanup(void) { usb_unregister_notify(&usbdev_nb); cdev_del(&usb_device_cdev); unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX); }
InnerFire/thunderzap_sprout
drivers/usb/core/devio.c
C
gpl-2.0
57,290
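The devio.c entry above implements the usbdevfs character-device ioctls (USBDEVFS_CLAIMINTERFACE, USBDEVFS_GET_CAPABILITIES, and friends). A minimal user-space sketch of how those entry points are typically exercised follows; the device node path and interface number are illustrative assumptions, not taken from the record.

/* Hedged sketch: drives the ioctls dispatched by usbdev_do_ioctl() above.
 * "/dev/bus/usb/001/002" and interface 0 are placeholders for illustration. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	int fd = open("/dev/bus/usb/001/002", O_RDWR);
	unsigned int ifnum = 0;
	__u32 caps = 0;

	if (fd < 0)
		return 1;

	/* served by proc_claiminterface()/claimintf() in the driver above */
	if (ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifnum) < 0)
		perror("USBDEVFS_CLAIMINTERFACE");

	/* served by proc_get_capabilities(); kernels without it return -ENOTTY */
	if (ioctl(fd, USBDEVFS_GET_CAPABILITIES, &caps) == 0)
		printf("capabilities: 0x%x\n", caps);

	ioctl(fd, USBDEVFS_RELEASEINTERFACE, &ifnum);
	close(fd);
	return 0;
}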
/* * * oFono - Open Source Telephony * * Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies). * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <glib.h> #include <gisi/client.h> #include <gisi/message.h> #include <gisi/iter.h> #include <ofono/log.h> #include <ofono/modem.h> #include <ofono/call-barring.h> #include "util.h" #include "isimodem.h" #include "isiutil.h" #include "ss.h" #include "debug.h" struct barr_data { GIsiClient *client; }; static int lock_code_to_mmi(const char *lock) { if (strcmp(lock, "AO") == 0) return SS_GSM_BARR_ALL_OUT; else if (strcmp(lock, "OI") == 0) return SS_GSM_BARR_OUT_INTER; else if (strcmp(lock, "OX") == 0) return SS_GSM_BARR_OUT_INTER_EXC_HOME; else if (strcmp(lock, "AI") == 0) return SS_GSM_BARR_ALL_IN; else if (strcmp(lock, "IR") == 0) return SS_GSM_BARR_ALL_IN_ROAM; else if (strcmp(lock, "AB") == 0) return SS_GSM_ALL_BARRINGS; else if (strcmp(lock, "AG") == 0) return SS_GSM_OUTGOING_BARR_SERV; else if (strcmp(lock, "AC") == 0) return SS_GSM_INCOMING_BARR_SERV; else return 0; } static void update_status_mask(uint32_t *mask, uint8_t bsc) { switch (bsc) { case SS_GSM_TELEPHONY: *mask |= 1; break; case SS_GSM_ALL_DATA_TELE: *mask |= 1 << 1; break; case SS_GSM_FACSIMILE: *mask |= 1 << 2; break; case SS_GSM_SMS: *mask |= 1 << 3; break; case SS_GSM_ALL_DATA_CIRCUIT_SYNC: *mask |= 1 << 4; break; case SS_GSM_ALL_DATA_CIRCUIT_ASYNC: *mask |= 1 << 5; break; case SS_GSM_ALL_DATA_PACKET_SYNC: *mask |= 1 << 6; break; case SS_GSM_ALL_PAD_ACCESS: *mask |= 1 << 7; break; default: DBG("Unknown BSC value %d, please report", bsc); break; } } static gboolean check_resp(const GIsiMessage *msg, uint8_t msgid, uint8_t type) { uint8_t service; if (g_isi_msg_error(msg) < 0) { DBG("Error: %s", g_isi_msg_strerror(msg)); return FALSE; } if (g_isi_msg_id(msg) != msgid) { DBG("Unexpected msg: %s", ss_message_id_name(g_isi_msg_id(msg))); return FALSE; } if (!g_isi_msg_data_get_byte(msg, 0, &service) || service != type) { DBG("Unexpected service type: 0x%02X", service); return FALSE; } return TRUE; } static gboolean decode_gsm_bsc_info(GIsiSubBlockIter *iter, uint32_t *mask) { uint8_t *bsc; uint8_t num, i; if (!g_isi_sb_iter_get_byte(iter, &num, 2)) return FALSE; if (!g_isi_sb_iter_get_struct(iter, (void **) &bsc, num, 3)) return FALSE; for (i = 0; i < num; i++) update_status_mask(mask, bsc[i]); return TRUE; } static gboolean decode_gsm_barring_info(GIsiSubBlockIter *outer, uint32_t *mask) { GIsiSubBlockIter iter; uint8_t status; uint8_t bsc; for (g_isi_sb_subiter_init(outer, &iter, 4); g_isi_sb_iter_is_valid(&iter); g_isi_sb_iter_next(&iter)) { if (g_isi_sb_iter_get_id(&iter) != SS_GSM_BARRING_FEATURE) continue; if (!g_isi_sb_iter_get_byte(&iter, &bsc, 2)) return FALSE; if (!g_isi_sb_iter_get_byte(&iter, &status, 3)) return FALSE; if 
(status & SS_GSM_ACTIVE) update_status_mask(mask, bsc); return TRUE; } return FALSE; } static void unset_resp_cb(const GIsiMessage *msg, void *data) { struct isi_cb_data *cbd = data; ofono_call_barring_set_cb_t cb = cbd->cb; if (check_resp(msg, SS_SERVICE_COMPLETED_RESP, SS_DEACTIVATION)) CALLBACK_WITH_SUCCESS(cb, cbd->data); else CALLBACK_WITH_FAILURE(cb, cbd->data); } static void set_resp_cb(const GIsiMessage *msg, void *data) { struct isi_cb_data *cbd = data; ofono_call_barring_set_cb_t cb = cbd->cb; if (check_resp(msg, SS_SERVICE_COMPLETED_RESP, SS_ACTIVATION)) CALLBACK_WITH_SUCCESS(cb, cbd->data); else CALLBACK_WITH_FAILURE(cb, cbd->data); } static void isi_set(struct ofono_call_barring *barr, const char *lock, int enable, const char *passwd, int cls, ofono_call_barring_set_cb_t cb, void *data) { struct barr_data *bd = ofono_call_barring_get_data(barr); struct isi_cb_data *cbd = isi_cb_data_new(barr, cb, data); int ss_code = lock_code_to_mmi(lock); const uint8_t msg[] = { SS_SERVICE_REQ, enable ? SS_ACTIVATION : SS_DEACTIVATION, SS_ALL_TELE_AND_BEARER, ss_code >> 8, ss_code & 0xFF, /* Service code */ SS_SEND_ADDITIONAL_INFO, 1, /* Subblock count */ SS_GSM_PASSWORD, 28, /* Subblock length */ 0, passwd[0], 0, passwd[1], 0, passwd[2], 0, passwd[3], 0, 0, 0, 0, 0, 0, 0, 0, /* Filler */ 0, 0, 0, 0, 0, 0, 0, 0, /* Filler */ 0, 0, /* Filler */ }; DBG("lock code %s enable %d class %d password %s", lock, enable, cls, passwd); if (cbd == NULL || bd == NULL) goto error; if (g_isi_client_send(bd->client, msg, sizeof(msg), enable ? set_resp_cb : unset_resp_cb, cbd, g_free)) return; error: CALLBACK_WITH_FAILURE(cb, data); g_free(cbd); } static void query_resp_cb(const GIsiMessage *msg, void *data) { struct isi_cb_data *cbd = data; ofono_call_barring_query_cb_t cb = cbd->cb; GIsiSubBlockIter iter; uint32_t mask = 0; uint8_t status; if (!check_resp(msg, SS_SERVICE_COMPLETED_RESP, SS_INTERROGATION)) goto error; for (g_isi_sb_iter_init(&iter, msg, 6); g_isi_sb_iter_is_valid(&iter); g_isi_sb_iter_next(&iter)) { switch (g_isi_sb_iter_get_id(&iter)) { case SS_STATUS_RESULT: if (!g_isi_sb_iter_get_byte(&iter, &status, 2)) goto error; if (status & SS_GSM_ACTIVE) mask = 1; break; case SS_GSM_BARRING_INFO: if (!decode_gsm_barring_info(&iter, &mask)) goto error; break; case SS_GSM_BSC_INFO: if (!decode_gsm_bsc_info(&iter, &mask)) goto error; break; case SS_GSM_ADDITIONAL_INFO: break; } } DBG("mask=0x%04X", mask); CALLBACK_WITH_SUCCESS(cb, mask, cbd->data); return; error: CALLBACK_WITH_FAILURE(cb, 0, cbd->data); } static void isi_query(struct ofono_call_barring *barr, const char *lock, int cls, ofono_call_barring_query_cb_t cb, void *data) { struct barr_data *bd = ofono_call_barring_get_data(barr); struct isi_cb_data *cbd = isi_cb_data_new(barr, cb, data); int ss_code = lock_code_to_mmi(lock); unsigned char msg[] = { SS_SERVICE_REQ, SS_INTERROGATION, SS_ALL_TELE_AND_BEARER, ss_code >> 8, ss_code & 0xFF, /* services code */ SS_SEND_ADDITIONAL_INFO, /* Get BER-encoded result */ 0 /* Subblock count */ }; DBG("barring query lock code %s", lock); if (cbd == NULL || bd == NULL) goto error; if (g_isi_client_send(bd->client, msg, sizeof(msg), query_resp_cb, cbd, g_free)) return; error: CALLBACK_WITH_FAILURE(cb, 0, data); g_free(cbd); } static void set_passwd_resp_cb(const GIsiMessage *msg, void *data) { struct isi_cb_data *cbd = data; ofono_call_barring_set_cb_t cb = cbd->cb; if (check_resp(msg, SS_SERVICE_COMPLETED_RESP, SS_GSM_PASSWORD_REGISTRATION)) CALLBACK_WITH_SUCCESS(cb, cbd->data); else 
CALLBACK_WITH_FAILURE(cb, cbd->data); } static void isi_set_passwd(struct ofono_call_barring *barr, const char *lock, const char *old_passwd, const char *new_passwd, ofono_call_barring_set_cb_t cb, void *data) { struct barr_data *bd = ofono_call_barring_get_data(barr); struct isi_cb_data *cbd = isi_cb_data_new(barr, cb, data); int ss_code = lock_code_to_mmi(lock); const uint8_t msg[] = { SS_SERVICE_REQ, SS_GSM_PASSWORD_REGISTRATION, SS_ALL_TELE_AND_BEARER, ss_code >> 8, ss_code & 0xFF, /* Service code */ SS_SEND_ADDITIONAL_INFO, 1, /* Subblock count */ SS_GSM_PASSWORD, 28, /* Subblock length */ 0, old_passwd[0], 0, old_passwd[1], 0, old_passwd[2], 0, old_passwd[3], 0, new_passwd[0], 0, new_passwd[1], 0, new_passwd[2], 0, new_passwd[3], 0, new_passwd[0], 0, new_passwd[1], 0, new_passwd[2], 0, new_passwd[3], 0, 0, /* Filler */ }; DBG("lock code %s (%u) old password %s new password %s", lock, ss_code, old_passwd, new_passwd); if (cbd == NULL || bd == NULL) goto error; if (g_isi_client_send(bd->client, msg, sizeof(msg), set_passwd_resp_cb, cbd, g_free)) return; error: CALLBACK_WITH_FAILURE(cb, data); g_free(cbd); } static void reachable_cb(const GIsiMessage *msg, void *data) { struct ofono_call_barring *barr = data; if (g_isi_msg_error(msg) < 0) { ofono_call_barring_remove(barr); return; } ISI_RESOURCE_DBG(msg); ofono_call_barring_register(barr); } static int isi_call_barring_probe(struct ofono_call_barring *barr, unsigned int vendor, void *user) { GIsiModem *modem = user; struct barr_data *bd; bd = g_try_new0(struct barr_data, 1); if (bd == NULL) return -ENOMEM; bd->client = g_isi_client_create(modem, PN_SS); if (bd->client == NULL) { g_free(bd); return -ENOMEM; } ofono_call_barring_set_data(barr, bd); g_isi_client_verify(bd->client, reachable_cb, barr, NULL); return 0; } static void isi_call_barring_remove(struct ofono_call_barring *barr) { struct barr_data *data = ofono_call_barring_get_data(barr); ofono_call_barring_set_data(barr, NULL); if (data == NULL) return; g_isi_client_destroy(data->client); g_free(data); } static struct ofono_call_barring_driver driver = { .name = "isimodem", .probe = isi_call_barring_probe, .remove = isi_call_barring_remove, .set = isi_set, .query = isi_query, .set_passwd = isi_set_passwd }; void isi_call_barring_init(void) { ofono_call_barring_driver_register(&driver); } void isi_call_barring_exit(void) { ofono_call_barring_driver_unregister(&driver); }
alfonsosanchezbeato/ofono
drivers/isimodem/call-barring.c
C
gpl-2.0
10,098
/* Copyright (C) 2000 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* strmov(dst, src) moves all the characters of src (including the closing NUL) to dst, and returns a pointer to the new closing NUL in dst. The similar UNIX routine strcpy returns the old value of dst, which I have never found useful. strmov(strmov(dst,a),b) moves a//b into dst, which seems useful. */ #include <my_global.h> #include "m_string.h" #ifdef strmov #undef strmov #define strmov strmov_overlapp #endif #if !defined(MC68000) && !defined(DS90) char *strmov(register char *dst, register const char *src) { while ((*dst++ = *src++)) ; return dst-1; } #else char *strmov(dst, src) char *dst, *src; { asm(" movl 4(a7),a1 "); asm(" movl 8(a7),a0 "); asm(".L4: movb (a0)+,(a1)+ "); asm(" jne .L4 "); asm(" movl a1,d0 "); asm(" subql #1,d0 "); } #endif
w5860363/wownemesis
dep/mysqllite/strings/strmov.c
C
gpl-2.0
1,506
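The strmov.c comment above explains that strmov() returns a pointer to the new terminating NUL, so calls can be chained to concatenate strings. A small hedged usage sketch, with the portable C branch of the function repeated locally so the example is self-contained (the buffer size is an arbitrary choice for illustration):

#include <stdio.h>

/* same semantics as the portable branch of strmov() in the record above */
static char *strmov(char *dst, const char *src)
{
	while ((*dst++ = *src++))
		;
	return dst - 1;
}

int main(void)
{
	char buf[32];                     /* arbitrary size for the example */
	char *end = strmov(buf, "Hello, ");
	end = strmov(end, "world");       /* chained call appends at the old NUL */
	printf("%s (%d chars)\n", buf, (int)(end - buf));
	return 0;
}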
/* * AMLOGIC Audio/Video streaming port driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the named License, * or any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. :* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA * * Author: Tim Yao <timyao@amlogic.com> * */ #include <linux/kernel.h> #include <linux/types.h> #include <mach/am_regs.h> #include "../vdec_reg.h" #include "../amports_config.h" #include "../vdec.h" #include "../vdec_clk.h" /* HHI_VDEC_CLK_CNTL 0x1078[11:9] (fclk = 2550MHz) 0: fclk_div4 1: fclk_div3 2: fclk_div5 3: fclk_div7 4: mpll_clk_out1 5: mpll_clk_out2 0x1078[6:0] devider 0x1078[8] enable */ //182.14M <-- (2550/7)/2 #define VDEC1_182M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (3 << 9) | (1), 0, 16) //212.50M <-- (2550/3)/4 #define VDEC1_212M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (1 << 9) | (3), 0, 16) //255.00M <-- (2550/5)/2 #define VDEC1_255M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (2 << 9) | (1), 0, 16) #define HCODEC_255M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (2 << 9) | (1), 16, 16) #define HEVC_255M() WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, (2 << 9) | (1), 16, 16) //283.33M <-- (2550/3)/3 #define VDEC1_283M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (1 << 9) | (2), 0, 16) //318.75M <-- (2550/4)/2 #define VDEC1_319M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (0 << 9) | (1), 0, 16) //364.29M <-- (2550/7)/1 #define VDEC1_364M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (3 << 9) | (0), 0, 16) //425.00M <-- (2550/3)/2 #define VDEC1_425M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (1 << 9) | (2), 0, 16) //510.00M <-- (2550/5)/1 #define VDEC1_510M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (2 << 9) | (0), 0, 16) #define HEVC_510M() WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, (2 << 9) | (0), 16, 16) //637.50M <-- (2550/4)/1 #define VDEC1_638M() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, (0 << 9) | (0), 0, 16) #define HEVC_638M() WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, (0 << 9) | (0), 16, 16) #define VDEC1_CLOCK_ON() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, 1, 8, 1); \ WRITE_MPEG_REG_BITS(HHI_VDEC3_CLK_CNTL, 0, 15, 1); \ WRITE_MPEG_REG_BITS(HHI_VDEC3_CLK_CNTL, 0, 8, 1); \ WRITE_VREG_BITS(DOS_GCLK_EN0, 0x3ff,0,10) #define HCODEC_CLOCK_ON() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, 1, 24, 1); \ WRITE_VREG_BITS(DOS_GCLK_EN0, 0x7fff, 12, 15) #define HEVC_CLOCK_ON() WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, 0, 24, 1); \ WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, 0, 31, 1); \ WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, 1, 24, 1); \ WRITE_VREG(DOS_GCLK_EN3, 0xffffffff) #define VDEC1_SAFE_CLOCK() WRITE_MPEG_REG_BITS(HHI_VDEC3_CLK_CNTL, READ_MPEG_REG(HHI_VDEC_CLK_CNTL) & 0x7f, 0, 7); \ WRITE_MPEG_REG_BITS(HHI_VDEC3_CLK_CNTL, 1, 8, 1); \ WRITE_MPEG_REG_BITS(HHI_VDEC3_CLK_CNTL, 1, 15, 1); #define VDEC1_CLOCK_OFF() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, 0, 8, 1) #define HCODEC_CLOCK_OFF() WRITE_MPEG_REG_BITS(HHI_VDEC_CLK_CNTL, 0, 24, 1) #define HEVC_SAFE_CLOCK() WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, (READ_MPEG_REG(HHI_VDEC2_CLK_CNTL) >> 16) & 0x7f, 16, 7); \ WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, 1, 
24, 1); \ WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, 1, 31, 1) #define HEVC_CLOCK_OFF() WRITE_MPEG_REG_BITS(HHI_VDEC2_CLK_CNTL, 0, 24, 1) static int clock_level[VDEC_MAX+1]; void vdec_clock_enable(void) { VDEC1_CLOCK_OFF(); VDEC1_255M(); VDEC1_CLOCK_ON(); clock_level[VDEC_1] = 0; } void vdec_clock_hi_enable(void) { VDEC1_CLOCK_OFF(); VDEC1_638M(); VDEC1_CLOCK_ON(); clock_level[VDEC_1] = 1; } void vdec_clock_on(void) { VDEC1_CLOCK_ON(); } void vdec_clock_off(void) { VDEC1_CLOCK_OFF(); } void hcodec_clock_enable(void) { HCODEC_CLOCK_OFF(); HCODEC_255M(); HCODEC_CLOCK_ON(); } void hcodec_clock_on(void) { HCODEC_CLOCK_ON(); } void hcodec_clock_off(void) { HCODEC_CLOCK_OFF(); } void hevc_clock_enable(void) { HEVC_CLOCK_OFF(); // HEVC_255M(); HEVC_638M(); HEVC_CLOCK_ON(); } void hevc_clock_hi_enable(void) { HEVC_CLOCK_OFF(); HEVC_638M(); HEVC_CLOCK_ON(); clock_level[VDEC_HEVC] = 1; } void hevc_clock_on(void) { HEVC_CLOCK_ON(); } void hevc_clock_off(void) { HEVC_CLOCK_OFF(); } void vdec_clock_prepare_switch(void) { VDEC1_SAFE_CLOCK(); } void hevc_clock_prepare_switch(void) { HEVC_SAFE_CLOCK(); } int vdec_clock_level(vdec_type_t core) { if (core >= VDEC_MAX) { return 0; } return clock_level[core]; }
ndufresne/linux-meson
drivers/amlogic/amports/m8m2/vdec_clk.c
C
gpl-2.0
5,297
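The comment block in the vdec_clk record documents HHI_VDEC_CLK_CNTL as a mux selector in bits [11:9] (fclk = 2550 MHz divided by 4, 3, 5 or 7) plus a divider field in bits [6:0]. The sketch below reproduces the frequencies quoted in those comments; the lookup table and helper name are illustrative, not part of the driver:

#include <stdio.h>

/* out_MHz = (2550 / mux_div[sel]) / (div_field + 1), per the register comments */
static const double FCLK_MHZ = 2550.0;
static const int mux_div[] = { 4, 3, 5, 7 };  /* sel 0..3: fclk_div4/3/5/7 */

static double vdec_clk_mhz(int sel, int div_field)
{
	return FCLK_MHZ / mux_div[sel] / (div_field + 1);
}

int main(void)
{
	/* VDEC1_182M(): (3 << 9) | 1  ->  (2550/7)/2 = 182.14 MHz */
	printf("%.2f MHz\n", vdec_clk_mhz(3, 1));
	/* VDEC1_638M(): (0 << 9) | 0  ->  (2550/4)/1 = 637.50 MHz */
	printf("%.2f MHz\n", vdec_clk_mhz(0, 0));
	return 0;
}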
/* * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ /* * This file is available under and governed by the GNU General Public * License version 2 only, as published by the Free Software Foundation. * However, the following notice accompanied the original version of this * file, and Oracle licenses the original version of this file under the BSD * license: */ /* Copyright 2009-2013 Attila Szegedi Licensed under both the Apache License, Version 2.0 (the "Apache License") and the BSD License (the "BSD License"), with licensee being free to choose either of the two at their discretion. You may not use this file except in compliance with either the Apache License or the BSD License. If you choose to use this file in compliance with the Apache License, the following notice applies to you: You may obtain a copy of the Apache License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. If you choose to use this file in compliance with the BSD License, the following notice applies to you: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package jdk.internal.dynalink.support; import java.util.LinkedList; import java.util.List; import java.util.ServiceLoader; import jdk.internal.dynalink.DynamicLinkerFactory; import jdk.internal.dynalink.linker.GuardingDynamicLinker; /** * Provides methods for automatic discovery of all guarding dynamic linkers listed in the * <tt>/META-INF/services/jdk.internal.dynalink.linker.GuardingDynamicLinker</tt> resources of all JAR files for a * particular class loader. Ordinarily, you will not use this class directly, but you will use a * {@link DynamicLinkerFactory} instead. */ public class AutoDiscovery { private AutoDiscovery() { } /** * Discovers all guarding dynamic linkers listed in JAR files of the context class loader of the current thread. * * @return a list of available linkers. Can be zero-length list but not null. */ public static List<GuardingDynamicLinker> loadLinkers() { return getLinkers(ServiceLoader.load(GuardingDynamicLinker.class)); } /** * Discovers all guarding dynamic linkers listed in JAR files of the specified class loader. * * @param cl the class loader to use * @return a list of guarding dynamic linkers available through the specified class loader. Can be zero-length list * but not null. */ public static List<GuardingDynamicLinker> loadLinkers(final ClassLoader cl) { return getLinkers(ServiceLoader.load(GuardingDynamicLinker.class, cl)); } /** * I can't believe there's no Collections API for making a List given an Iterator... */ private static <T> List<T> getLinkers(final ServiceLoader<T> loader) { final List<T> list = new LinkedList<>(); for(final T linker: loader) { list.add(linker); } return list; } }
shelan/jdk9-mirror
nashorn/src/jdk.scripting.nashorn/share/classes/jdk/internal/dynalink/support/AutoDiscovery.java
Java
gpl-2.0
5,945
/* * Handles the M-Systems DiskOnChip G3 chip * * Copyright (C) 2011 Robert Jarzmik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Taken from linux kernel. */ #include <common.h> #include <init.h> #include <driver.h> #include <errno.h> #include <malloc.h> #include <clock.h> #include <io.h> #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include <linux/err.h> #include <linux/bitrev.h> #include <linux/bch.h> #include "docg3.h" static unsigned reliable_mode; /** * struct docg3_bch - BCH engine */ static struct bch_control *docg3_bch; struct mtd_info *docg3_floors[DOC_MAX_NBFLOORS]; static inline u8 doc_readb(struct docg3 *docg3, u16 reg) { u8 val = readb(docg3->base + reg); doc_dbg("readb(%04x) -> %02x\n", reg, val); return val; } static inline u16 doc_readw(struct docg3 *docg3, u16 reg) { u16 val = readw(docg3->base + reg); doc_dbg("readb(%04x) -> %04x\n", reg, val); return val; } static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg) { doc_dbg("writeb(%02x into %04x)\n", val, reg); writeb(val, docg3->base + reg); } static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg) { doc_dbg("writew(%04x into %04x)\n", val, reg); writew(val, docg3->base + reg); } static inline void doc_flash_command(struct docg3 *docg3, u8 cmd) { doc_writeb(docg3, cmd, DOC_FLASHCOMMAND); } static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq) { doc_writeb(docg3, seq, DOC_FLASHSEQUENCE); } static inline void doc_flash_address(struct docg3 *docg3, u8 addr) { doc_writeb(docg3, addr, DOC_FLASHADDRESS); } static int doc_register_readb(struct docg3 *docg3, int reg) { u8 val; doc_writew(docg3, reg, DOC_READADDRESS); val = doc_readb(docg3, reg); doc_vdbg("Read register %04x : %02x\n", reg, val); return val; } static int doc_register_readw(struct docg3 *docg3, int reg) { u16 val; doc_writew(docg3, reg, DOC_READADDRESS); val = doc_readw(docg3, reg); doc_vdbg("Read register %04x : %04x\n", reg, val); return val; } static void doc_delay(struct docg3 *docg3, int nbNOPs) { int i; doc_vdbg("NOP x %d\n", nbNOPs); for (i = 0; i < nbNOPs; i++) doc_writeb(docg3, 0, DOC_NOP); } static int is_prot_seq_error(struct docg3 *docg3) { int ctrl; ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR); } static int doc_is_ready(struct docg3 *docg3) { int ctrl; ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); return ctrl & DOC_CTRL_FLASHREADY; } static int doc_wait_ready(struct docg3 *docg3) { int maxWaitCycles = 100; do { doc_delay(docg3, 4); } while (!doc_is_ready(docg3) && maxWaitCycles--); doc_delay(docg3, 2); if (maxWaitCycles > 0) return 0; else return -EIO; } static int doc_reset_seq(struct docg3 *docg3) { int ret; doc_writeb(docg3, 0x10, DOC_FLASHCONTROL); doc_flash_sequence(docg3, DOC_SEQ_RESET); doc_flash_command(docg3, DOC_CMD_RESET); doc_delay(docg3, 2); ret = doc_wait_ready(docg3); doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? 
"false" : "true"); return ret; } static void doc_read_data_area(struct docg3 *docg3, void *buf, int len, int first) { int i, cdr, len4; u16 data16, *dst16; u8 data8, *dst8; doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len); cdr = len & 0x1; len4 = len - cdr; if (first) doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS); dst16 = buf; for (i = 0; i < len4; i += 2) { data16 = doc_readw(docg3, DOC_IOSPACE_DATA); if (dst16) { *dst16 = data16; dst16++; } } if (cdr) { doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE, DOC_READADDRESS); doc_delay(docg3, 1); dst8 = (u8 *)dst16; for (i = 0; i < cdr; i++) { data8 = doc_readb(docg3, DOC_IOSPACE_DATA); if (dst8) { *dst8 = data8; dst8++; } } } } static void doc_set_reliable_mode(struct docg3 *docg3) { static char *strmode[] = { "normal", "fast", "reliable", "invalid" }; doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]); switch (docg3->reliable) { case 0: break; case 1: doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE); doc_flash_command(docg3, DOC_CMD_FAST_MODE); break; case 2: doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE); doc_flash_command(docg3, DOC_CMD_FAST_MODE); doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE); break; default: doc_err("doc_set_reliable_mode(): invalid mode\n"); break; } doc_delay(docg3, 2); } static void doc_set_asic_mode(struct docg3 *docg3, u8 mode) { int i; for (i = 0; i < 12; i++) doc_readb(docg3, DOC_IOSPACE_IPL); mode |= DOC_ASICMODE_MDWREN; doc_dbg("doc_set_asic_mode(%02x)\n", mode); doc_writeb(docg3, mode, DOC_ASICMODE); doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM); doc_delay(docg3, 1); } static void doc_set_device_id(struct docg3 *docg3, int id) { u8 ctrl; doc_dbg("doc_set_device_id(%d)\n", id); doc_writeb(docg3, id, DOC_DEVICESELECT); ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); ctrl &= ~DOC_CTRL_VIOLATION; ctrl |= DOC_CTRL_CE; doc_writeb(docg3, ctrl, DOC_FLASHCONTROL); } static int doc_set_extra_page_mode(struct docg3 *docg3) { int fctrl; doc_dbg("doc_set_extra_page_mode()\n"); doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532); doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532); doc_delay(docg3, 2); fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR)) return -EIO; else return 0; } static void doc_setup_addr_sector(struct docg3 *docg3, int sector) { doc_delay(docg3, 1); doc_flash_address(docg3, sector & 0xff); doc_flash_address(docg3, (sector >> 8) & 0xff); doc_flash_address(docg3, (sector >> 16) & 0xff); doc_delay(docg3, 1); } static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page, int wear, int ofs) { int sector, ret = 0; doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n", block0, block1, page, ofs, wear); if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) { doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1); doc_flash_command(docg3, DOC_CMD_READ_PLANE1); doc_delay(docg3, 2); } else { doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2); doc_flash_command(docg3, DOC_CMD_READ_PLANE2); doc_delay(docg3, 2); } doc_set_reliable_mode(docg3); if (wear) ret = doc_set_extra_page_mode(docg3); if (ret) goto out; doc_flash_sequence(docg3, DOC_SEQ_READ); sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); doc_setup_addr_sector(docg3, sector); sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); doc_setup_addr_sector(docg3, sector); doc_delay(docg3, 1); out: return 
ret; } static int doc_read_page_ecc_init(struct docg3 *docg3, int len) { doc_writew(docg3, DOC_ECCCONF0_READ_MODE | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE | (len & DOC_ECCCONF0_DATA_BYTES_MASK), DOC_ECCCONF0); doc_delay(docg3, 4); doc_register_readb(docg3, DOC_FLASHCONTROL); return doc_wait_ready(docg3); } static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes) { u8 ecc_conf1; ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1); ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK; ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK); doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1); } static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc) { u8 ecc[DOC_ECC_BCH_SIZE]; int errorpos[DOC_ECC_BCH_T], i, numerrs; for (i = 0; i < DOC_ECC_BCH_SIZE; i++) ecc[i] = bitrev8(hwecc[i]); numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES, NULL, ecc, NULL, errorpos); BUG_ON(numerrs == -EINVAL); if (numerrs < 0) goto out; for (i = 0; i < numerrs; i++) errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7)); for (i = 0; i < numerrs; i++) if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8) /* error is located in data, correct it */ change_bit(errorpos[i], buf); out: doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs); return numerrs; } static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1, int page, int offset) { int wear_area = 0, ret = 0; doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n", block0, block1, page, offset); if (offset >= DOC_LAYOUT_WEAR_OFFSET) wear_area = 1; if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2)) return -EINVAL; doc_set_device_id(docg3, docg3->device_id); ret = doc_reset_seq(docg3); if (ret) goto err; /* Program the flash address block and page */ ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset); if (ret) goto err; doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES); doc_delay(docg3, 2); doc_wait_ready(docg3); doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ); doc_delay(docg3, 1); if (offset >= DOC_LAYOUT_PAGE_SIZE * 2) offset -= 2 * DOC_LAYOUT_PAGE_SIZE; doc_flash_address(docg3, offset >> 2); doc_delay(docg3, 1); doc_wait_ready(docg3); doc_flash_command(docg3, DOC_CMD_READ_FLASH); return 0; err: doc_writeb(docg3, 0, DOC_DATAEND); doc_delay(docg3, 2); return -EIO; } static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf, int first, int last_odd) { if (last_odd && len > 0) { doc_read_data_area(docg3, buf, 1, first); doc_read_data_area(docg3, buf ? 
buf + 1 : buf, len - 1, 0); } else { doc_read_data_area(docg3, buf, len, first); } doc_delay(docg3, 2); return len; } static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc) { int i; for (i = 0; i < DOC_ECC_BCH_SIZE; i++) hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i)); } static void doc_page_finish(struct docg3 *docg3) { doc_writeb(docg3, 0, DOC_DATAEND); doc_delay(docg3, 2); } static void doc_read_page_finish(struct docg3 *docg3) { doc_page_finish(docg3); doc_set_device_id(docg3, 0); } static void calc_block_sector(loff_t from, int *block0, int *block1, int *page, int *ofs, int reliable) { uint sector, pages_biblock; pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES; if (reliable == 1 || reliable == 2) pages_biblock /= 2; sector = from / DOC_LAYOUT_PAGE_SIZE; *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES; *block1 = *block0 + 1; *page = sector % pages_biblock; *page /= DOC_LAYOUT_NBPLANES; if (reliable == 1 || reliable == 2) *page *= 2; if (sector % 2) *ofs = DOC_LAYOUT_PAGE_OOB_SIZE; else *ofs = 0; } static int doc_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { struct docg3 *docg3 = mtd->priv; int block0, block1, page, ret, skip, ofs = 0; u8 *oobbuf = ops->oobbuf; u8 *buf = ops->datbuf; size_t len, ooblen, nbdata, nboob; u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1; if (buf) len = ops->len; else len = 0; if (oobbuf) ooblen = ops->ooblen; else ooblen = 0; if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB) oobbuf += ops->ooboffs; doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n", from, ops->mode, buf, len, oobbuf, ooblen); if (ooblen % DOC_LAYOUT_OOB_SIZE) return -EINVAL; ret = -EINVAL; calc_block_sector(from + len, &block0, &block1, &page, &ofs, docg3->reliable); if (block1 > docg3->max_block) goto err; ops->oobretlen = 0; ops->retlen = 0; ret = 0; skip = from % DOC_LAYOUT_PAGE_SIZE; while (!ret && (len > 0 || ooblen > 0)) { calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable); nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip); nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE); ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); if (ret < 0) goto err; ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); if (ret < 0) goto err_in_read; ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0); if (ret < skip) goto err_in_read; ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2); if (ret < nbdata) goto err_in_read; doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata - skip, NULL, 0, (skip + nbdata) % 2); ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0); if (ret < nboob) goto err_in_read; doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob, NULL, 0, nboob % 2); doc_get_bch_hw_ecc(docg3, hwecc); eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); if (nboob >= DOC_LAYOUT_OOB_SIZE) { doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3], oobbuf[4], oobbuf[5], oobbuf[6]); doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]); doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11], oobbuf[12], oobbuf[13], oobbuf[14]); doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]); } doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1); doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4], hwecc[5], hwecc[6]); ret = -EIO; if (is_prot_seq_error(docg3)) goto err_in_read; ret = 0; if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) && 
(eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) && (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) && (ops->mode != MTD_OPS_RAW) && (nbdata == DOC_LAYOUT_PAGE_SIZE)) { ret = doc_ecc_bch_fix_data(docg3, buf, hwecc); if (ret < 0) { mtd->ecc_stats.failed++; ret = -EBADMSG; } if (ret > 0) { mtd->ecc_stats.corrected += ret; ret = -EUCLEAN; } } doc_read_page_finish(docg3); ops->retlen += nbdata; ops->oobretlen += nboob; buf += nbdata; oobbuf += nboob; len -= nbdata; ooblen -= nboob; from += DOC_LAYOUT_PAGE_SIZE; skip = 0; } return ret; err_in_read: doc_read_page_finish(docg3); err: return ret; } static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_oob_ops ops; size_t ret; memset(&ops, 0, sizeof(ops)); ops.datbuf = buf; ops.len = len; ops.mode = MTD_OPS_AUTO_OOB; ret = doc_read_oob(mtd, from, &ops); *retlen = ops.retlen; return ret; } static int doc_reload_bbt(struct docg3 *docg3) { int block = DOC_LAYOUT_BLOCK_BBT; int ret = 0, nbpages, page; u_char *buf = docg3->bbt; nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE); for (page = 0; !ret && (page < nbpages); page++) { ret = doc_read_page_prepare(docg3, block, block + 1, page + DOC_LAYOUT_PAGE_BBT, 0); if (!ret) ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_PAGE_SIZE); if (!ret) doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf, 1, 0); buf += DOC_LAYOUT_PAGE_SIZE; } doc_read_page_finish(docg3); return ret; } static int doc_block_isbad(struct mtd_info *mtd, loff_t from) { struct docg3 *docg3 = mtd->priv; int block0, block1, page, ofs, is_good; calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable); doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n", from, block0, block1, page, ofs); if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA) return 0; if (block1 > docg3->max_block) return -EINVAL; is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7)); return !is_good; } #ifdef CONFIG_MTD_WRITE static int doc_guess_autoecc(struct mtd_oob_ops *ops) { int autoecc; switch (ops->mode) { case MTD_OPS_PLACE_OOB: case MTD_OPS_AUTO_OOB: autoecc = 1; break; case MTD_OPS_RAW: autoecc = 0; break; default: autoecc = -EINVAL; } return autoecc; } static void doc_fill_autooob(u8 *dst, u8 *oobsrc) { memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ); dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ]; } static int doc_backup_oob(struct docg3 *docg3, loff_t to, struct mtd_oob_ops *ops) { int ooblen = ops->ooblen, autoecc; if (ooblen != DOC_LAYOUT_OOB_SIZE) return -EINVAL; autoecc = doc_guess_autoecc(ops); if (autoecc < 0) return autoecc; docg3->oob_write_ofs = to; docg3->oob_autoecc = autoecc; if (ops->mode == MTD_OPS_AUTO_OOB) { doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf); ops->oobretlen = 8; } else { memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE); ops->oobretlen = DOC_LAYOUT_OOB_SIZE; } return 0; } static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len) { int i, cdr, len4; u16 *src16; u8 *src8; doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len); cdr = len & 0x3; len4 = len - cdr; doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS); src16 = (u16 *)buf; for (i = 0; i < len4; i += 2) { doc_writew(docg3, *src16, DOC_IOSPACE_DATA); src16++; } src8 = (u8 *)src16; for (i = 0; i < cdr; i++) { doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE, DOC_READADDRESS); doc_writeb(docg3, *src8, DOC_IOSPACE_DATA); src8++; } } static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs) { 
ofs = ofs >> 2; doc_delay(docg3, 1); doc_flash_address(docg3, ofs & 0xff); doc_flash_address(docg3, sector & 0xff); doc_flash_address(docg3, (sector >> 8) & 0xff); doc_flash_address(docg3, (sector >> 16) & 0xff); doc_delay(docg3, 1); } static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page, int ofs) { int ret = 0, sector; doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n", block0, block1, page, ofs); doc_set_reliable_mode(docg3); if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) { doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1); doc_flash_command(docg3, DOC_CMD_READ_PLANE1); doc_delay(docg3, 2); } else { doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2); doc_flash_command(docg3, DOC_CMD_READ_PLANE2); doc_delay(docg3, 2); } doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP); doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1); sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); doc_setup_writeaddr_sector(docg3, sector, ofs); doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3); doc_delay(docg3, 2); ret = doc_wait_ready(docg3); if (ret) goto out; doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1); sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); doc_setup_writeaddr_sector(docg3, sector, ofs); doc_delay(docg3, 1); out: return ret; } static int doc_write_page_ecc_init(struct docg3 *docg3, int len) { doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE | (len & DOC_ECCCONF0_DATA_BYTES_MASK), DOC_ECCCONF0); doc_delay(docg3, 4); doc_register_readb(docg3, DOC_FLASHCONTROL); return doc_wait_ready(docg3); } static void doc_write_page_putbytes(struct docg3 *docg3, int len, const u_char *buf) { doc_write_data_area(docg3, buf, len); doc_delay(docg3, 2); } static void doc_ecc_disable(struct docg3 *docg3) { doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0); doc_delay(docg3, 4); } static int doc_get_op_status(struct docg3 *docg3) { u8 status; doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS); doc_flash_command(docg3, DOC_CMD_PLANES_STATUS); doc_delay(docg3, 5); doc_ecc_disable(docg3); doc_read_data_area(docg3, &status, 1, 1); return status; } static int doc_write_erase_wait_status(struct docg3 *docg3) { int status, ret = 0; uint64_t start = get_time_ns(); while (!is_timeout(start, 3000 * MSECOND) && !doc_is_ready(docg3)); if (!doc_is_ready(docg3)) { doc_dbg("Timeout reached and the chip is still not ready\n"); ret = -EAGAIN; goto out; } status = doc_get_op_status(docg3); if (status & DOC_PLANES_STATUS_FAIL) { doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n", status); ret = -EIO; } out: doc_page_finish(docg3); return ret; } static int doc_erase_block(struct docg3 *docg3, int block0, int block1) { int ret, sector; doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1); ret = doc_reset_seq(docg3); if (ret) return -EIO; doc_set_reliable_mode(docg3); doc_flash_sequence(docg3, DOC_SEQ_ERASE); sector = block0 << DOC_ADDR_BLOCK_SHIFT; doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); doc_setup_addr_sector(docg3, sector); sector = block1 << DOC_ADDR_BLOCK_SHIFT; doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR); doc_setup_addr_sector(docg3, sector); doc_delay(docg3, 1); doc_flash_command(docg3, DOC_CMD_ERASECYCLE2); doc_delay(docg3, 2); if (is_prot_seq_error(docg3)) { doc_err("Erase blocks %d,%d error\n", block0, block1); return -EIO; } return doc_write_erase_wait_status(docg3); } static int doc_erase(struct mtd_info *mtd, struct erase_info *info) { struct docg3 *docg3 = mtd->priv; uint64_t len; int block0, 
block1, page, ret, ofs = 0; doc_dbg("doc_erase(from=%d, len=%d\n", info->addr, info->len); doc_set_device_id(docg3, docg3->device_id); info->state = MTD_ERASE_PENDING; calc_block_sector(info->addr + info->len, &block0, &block1, &page, &ofs, docg3->reliable); ret = -EINVAL; if (block1 > docg3->max_block || page || ofs) goto reset_err; ret = 0; calc_block_sector(info->addr, &block0, &block1, &page, &ofs, docg3->reliable); doc_set_reliable_mode(docg3); for (len = info->len; !ret && len > 0; len -= mtd->erasesize) { info->state = MTD_ERASING; ret = doc_erase_block(docg3, block0, block1); block0 += 2; block1 += 2; } if (ret) goto reset_err; info->state = MTD_ERASE_DONE; return 0; reset_err: info->state = MTD_ERASE_FAILED; return ret; } static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf, const u_char *oob, int autoecc) { int block0, block1, page, ret, ofs = 0; u8 hwecc[DOC_ECC_BCH_SIZE], hamming; doc_dbg("doc_write_page(to=%lld)\n", to); calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable); doc_set_device_id(docg3, docg3->device_id); ret = doc_reset_seq(docg3); if (ret) goto err; /* Program the flash address block and page */ ret = doc_write_seek(docg3, block0, block1, page, ofs); if (ret) goto err; doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); doc_delay(docg3, 2); doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf); if (oob && autoecc) { doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob); doc_delay(docg3, 2); oob += DOC_LAYOUT_OOB_UNUSED_OFS; hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY); doc_delay(docg3, 2); doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ, &hamming); doc_delay(docg3, 2); doc_get_bch_hw_ecc(docg3, hwecc); doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc); doc_delay(docg3, 2); doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob); } if (oob && !autoecc) doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob); doc_delay(docg3, 2); doc_page_finish(docg3); doc_delay(docg3, 2); doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2); doc_delay(docg3, 2); /* * The wait status will perform another doc_page_finish() call, but that * seems to please the docg3, so leave it. 
*/ ret = doc_write_erase_wait_status(docg3); return ret; err: doc_read_page_finish(docg3); return ret; } static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, struct mtd_oob_ops *ops) { struct docg3 *docg3 = mtd->priv; int block0, block1, page, ret, pofs = 0, autoecc, oobdelta; u8 *oobbuf = ops->oobbuf; u8 *buf = ops->datbuf; size_t len, ooblen; u8 oob[DOC_LAYOUT_OOB_SIZE] __aligned(4); if (buf) len = ops->len; else len = 0; if (oobbuf) ooblen = ops->ooblen; else ooblen = 0; if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB) oobbuf += ops->ooboffs; doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n", ofs, ops->mode, buf, len, oobbuf, ooblen); switch (ops->mode) { case MTD_OPS_PLACE_OOB: case MTD_OPS_RAW: oobdelta = mtd->oobsize; break; case MTD_OPS_AUTO_OOB: oobdelta = mtd->ecclayout->oobavail; break; default: oobdelta = 0; } if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) || (ofs % DOC_LAYOUT_PAGE_SIZE)) return -EINVAL; if (len && ooblen && (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta)) return -EINVAL; ret = -EINVAL; calc_block_sector(ofs + len, &block0, &block1, &page, &pofs, docg3->reliable); if (block1 > docg3->max_block) goto err; ops->oobretlen = 0; ops->retlen = 0; ret = 0; if (len == 0 && ooblen == 0) return -EINVAL; if (len == 0 && ooblen > 0) return doc_backup_oob(docg3, ofs, ops); autoecc = doc_guess_autoecc(ops); if (autoecc < 0) return autoecc; while (!ret && len > 0) { memset(oob, 0, sizeof(oob)); if (ofs == docg3->oob_write_ofs) memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE); else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB) doc_fill_autooob(oob, oobbuf); else if (ooblen > 0) memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE); ret = doc_write_page(docg3, ofs, buf, oob, autoecc); ofs += DOC_LAYOUT_PAGE_SIZE; len -= DOC_LAYOUT_PAGE_SIZE; buf += DOC_LAYOUT_PAGE_SIZE; if (ooblen) { oobbuf += oobdelta; ooblen -= oobdelta; ops->oobretlen += oobdelta; } ops->retlen += DOC_LAYOUT_PAGE_SIZE; } err: doc_set_device_id(docg3, 0); return ret; } static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct docg3 *docg3 = mtd->priv; int ret; struct mtd_oob_ops ops; doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len); ops.datbuf = (char *)buf; ops.len = len; ops.mode = MTD_OPS_PLACE_OOB; ops.oobbuf = NULL; ops.ooblen = 0; ops.ooboffs = 0; ret = doc_write_oob(mtd, to, &ops); *retlen = ops.retlen; return ret; } #endif static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) { struct docg3 *docg3 = mtd->priv; int cfg; cfg = doc_register_readb(docg3, DOC_CONFIGURATION); docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 
1 : 0); docg3->reliable = reliable_mode; switch (chip_id) { case DOC_CHIPID_G3: mtd->name = asprintf("DiskOnChip G3 floor %d", docg3->device_id); docg3->max_block = 2047; break; } mtd->type = MTD_NANDFLASH; mtd->flags = MTD_CAP_NANDFLASH; mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE; if (docg3->reliable == 2) mtd->size /= 2; mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES; if (docg3->reliable == 2) mtd->erasesize /= 2; mtd->writesize = DOC_LAYOUT_PAGE_SIZE; mtd->oobsize = DOC_LAYOUT_OOB_SIZE; mtd->read = doc_read; mtd->read_oob = doc_read_oob; mtd->block_isbad = doc_block_isbad; #ifdef CONFIG_MTD_WRITE mtd->erase = doc_erase; mtd->write = doc_write; mtd->write_oob = doc_write_oob; #endif } static struct mtd_info *doc_probe_device(void __iomem *base, int floor, struct device_d *dev) { int ret, bbt_nbpages; u16 chip_id, chip_id_inv; struct docg3 *docg3; struct mtd_info *mtd; ret = -ENOMEM; docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL); if (!docg3) goto nomem1; mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); if (!mtd) goto nomem2; mtd->priv = docg3; bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE); docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL); if (!docg3->bbt) goto nomem3; docg3->dev = dev; docg3->device_id = floor; docg3->base = base; doc_set_device_id(docg3, docg3->device_id); if (!floor) doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL); chip_id = doc_register_readw(docg3, DOC_CHIPID); chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV); ret = 0; if (chip_id != (u16)(~chip_id_inv)) goto nomem3; switch (chip_id) { case DOC_CHIPID_G3: doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n", base, floor); break; default: doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); goto nomem3; } doc_set_driver_info(chip_id, mtd); doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ); doc_reload_bbt(docg3); return mtd; nomem3: kfree(mtd); nomem2: kfree(docg3); nomem1: return ERR_PTR(ret); } static int __init docg3_probe(struct device_d *dev) { struct mtd_info *mtd; void __iomem *base; int ret, floor, found = 0; base = dev_request_mem_region(dev, 0); ret = -ENOMEM; docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, DOC_ECC_BCH_PRIMPOLY); if (!docg3_bch) goto nomem2; /* for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { */ for (floor = 0; floor < 2; floor++) { mtd = doc_probe_device(base, floor, dev); if (IS_ERR(mtd)) { ret = PTR_ERR(mtd); goto err_probe; } if (!mtd) { if (floor == 0) goto notfound; else continue; } docg3_floors[floor] = mtd; mtd->parent = dev; ret = add_mtd_device(mtd, NULL, DEVICE_ID_DYNAMIC); if (ret) goto err_probe; found++; } if (ret) goto err_probe; if (!found) goto notfound; return 0; notfound: ret = -ENODEV; dev_info(dev, "No supported DiskOnChip found\n"); err_probe: free_bch(docg3_bch); nomem2: return ret; } static struct driver_d g3_driver = { .name = "docg3", .probe = docg3_probe, }; device_platform_driver(g3_driver);
toxxin/barebox-mpu
drivers/mtd/devices/docg3.c
C
gpl-2.0
29,622
/* arch/arm/mach-msm/qdsp5/audio_mp3.c * * mp3 audio output device * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <asm/atomic.h> #include <asm/ioctls.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/earlysuspend.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/msm_audio.h> #include <linux/memory_alloc.h> #include <linux/ion.h> #include <mach/msm_adsp.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/msm_memtypes.h> #include <mach/qdsp5/qdsp5audppcmdi.h> #include <mach/qdsp5/qdsp5audppmsg.h> #include <mach/qdsp5/qdsp5audpp.h> #include <mach/qdsp5/qdsp5audplaycmdi.h> #include <mach/qdsp5/qdsp5audplaymsg.h> #include <mach/qdsp5/qdsp5rmtcmdi.h> #include <mach/debug_mm.h> #include "audmgr.h" #define ADRV_STATUS_AIO_INTF 0x00000001 #define ADRV_STATUS_OBUF_GIVEN 0x00000002 #define ADRV_STATUS_IBUF_GIVEN 0x00000004 #define ADRV_STATUS_FSYNC 0x00000008 /* Size must be power of 2 */ #define BUFSZ_MAX 32768 #define BUFSZ_MIN 4096 #define DMASZ_MAX (BUFSZ_MAX * 2) #define DMASZ_MIN (BUFSZ_MIN * 2) #define AUDPLAY_INVALID_READ_PTR_OFFSET 0xFFFF #define AUDDEC_DEC_MP3 2 #define PCM_BUFSZ_MIN 4800 /* Hold one stereo MP3 frame */ #define PCM_BUF_MAX_COUNT 5 /* DSP only accepts 5 buffers at most but support 2 buffers currently */ #define ROUTING_MODE_FTRT 1 #define ROUTING_MODE_RT 2 /* Decoder status received from AUDPPTASK */ #define AUDPP_DEC_STATUS_SLEEP 0 #define AUDPP_DEC_STATUS_INIT 1 #define AUDPP_DEC_STATUS_CFG 2 #define AUDPP_DEC_STATUS_PLAY 3 #define AUDMP3_METAFIELD_MASK 0xFFFF0000 #define AUDMP3_EOS_FLG_OFFSET 0x0A /* Offset from beginning of buffer */ #define AUDMP3_EOS_FLG_MASK 0x01 #define AUDMP3_EOS_NONE 0x0 /* No EOS detected */ #define AUDMP3_EOS_SET 0x1 /* EOS set in meta field */ #define AUDMP3_EVENT_NUM 10 /* Default number of pre-allocated event packets */ #define __CONTAINS(r, v, l) ({ \ typeof(r) __r = r; \ typeof(v) __v = v; \ typeof(v) __e = __v + l; \ int res = ((__v >= __r->vaddr) && \ (__e <= __r->vaddr + __r->len)); \ res; \ }) #define CONTAINS(r1, r2) ({ \ typeof(r2) __r2 = r2; \ __CONTAINS(r1, __r2->vaddr, __r2->len); \ }) #define IN_RANGE(r, v) ({ \ typeof(r) __r = r; \ typeof(v) __vv = v; \ int res = ((__vv >= __r->vaddr) && \ (__vv < (__r->vaddr + __r->len))); \ res; \ }) #define OVERLAPS(r1, r2) ({ \ typeof(r1) __r1 = r1; \ typeof(r2) __r2 = r2; \ typeof(__r2->vaddr) __v = __r2->vaddr; \ typeof(__v) __e = __v + __r2->len - 1; \ int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ res; \ }) struct audio; struct buffer { void *data; unsigned size; unsigned used; /* Input usage actual DSP produced PCM size */ unsigned addr; unsigned short mfield_sz; /*only useful for data has meta field */ }; #ifdef CONFIG_HAS_EARLYSUSPEND struct audmp3_suspend_ctl { struct early_suspend node; 
struct audio *audio; }; #endif struct audmp3_event { struct list_head list; int event_type; union msm_audio_event_payload payload; }; struct audmp3_ion_region { struct list_head list; struct ion_handle *handle; int fd; void *vaddr; unsigned long paddr; unsigned long kvaddr; unsigned long len; unsigned ref_cnt; }; struct audmp3_buffer_node { struct list_head list; struct msm_audio_aio_buf buf; unsigned long paddr; }; struct audmp3_drv_operations { void (*pcm_buf_update)(struct audio *, uint32_t *); void (*buffer_refresh)(struct audio *); void (*send_data)(struct audio *, unsigned); void (*out_flush)(struct audio *); void (*in_flush)(struct audio *); int (*fsync)(struct audio *); }; struct audio { struct buffer out[2]; spinlock_t dsp_lock; uint8_t out_head; uint8_t out_tail; uint8_t out_needed; /* number of buffers the dsp is waiting for */ unsigned out_dma_sz; struct list_head out_queue; /* queue to retain output buffers */ atomic_t out_bytes; struct mutex lock; struct mutex write_lock; wait_queue_head_t write_wait; /* Host PCM section */ struct buffer in[PCM_BUF_MAX_COUNT]; struct mutex read_lock; wait_queue_head_t read_wait; /* Wait queue for read */ char *read_data; /* pointer to reader buffer */ int32_t read_phys; /* physical address of reader buffer */ uint8_t read_next; /* index to input buffers to be read next */ uint8_t fill_next; /* index to buffer that DSP should be filling */ uint8_t pcm_buf_count; /* number of pcm buffer allocated */ struct list_head in_queue; /* queue to retain input buffers */ /* ---- End of Host PCM section */ struct msm_adsp_module *audplay; /* configuration to use on next enable */ uint32_t out_sample_rate; uint32_t out_channel_mode; struct audmgr audmgr; /* data allocated for various buffers */ char *data; int32_t phys; /* physical address of write buffer */ void *map_v_read; void *map_v_write; uint32_t drv_status; int mfield; /* meta field embedded in data */ int rflush; /* Read flush */ int wflush; /* Write flush */ int opened; int enabled; int running; int stopped; /* set when stopped, cleared on flush */ int pcm_feedback; int buf_refresh; int rmt_resource_released; int teos; /* valid only if tunnel mode & no data left for decoder */ enum msm_aud_decoder_state dec_state; /* Represents decoder state */ int reserved; /* A byte is being reserved */ char rsv_byte; /* Handle odd length user data */ const char *module_name; unsigned queue_id; uint16_t dec_id; uint32_t read_ptr_offset; #ifdef CONFIG_HAS_EARLYSUSPEND struct audmp3_suspend_ctl suspend_ctl; #endif #ifdef CONFIG_DEBUG_FS struct dentry *dentry; #endif wait_queue_head_t wait; struct list_head free_event_queue; struct list_head event_queue; wait_queue_head_t event_wait; spinlock_t event_queue_lock; struct mutex get_event_lock; int event_abort; struct list_head ion_region_queue; /* protected by lock */ struct audmp3_drv_operations drv_ops; int eq_enable; int eq_needs_commit; audpp_cmd_cfg_object_params_eqalizer eq; audpp_cmd_cfg_object_params_volume vol_pan; struct ion_client *client; struct ion_handle *input_buff_handle; struct ion_handle *output_buff_handle; }; static int auddec_dsp_config(struct audio *audio, int enable); static void audpp_cmd_cfg_adec_params(struct audio *audio); static void audpp_cmd_cfg_routing_mode(struct audio *audio); static void audplay_send_data(struct audio *audio, unsigned needed); static void audplay_config_hostpcm(struct audio *audio); static void audplay_buffer_refresh(struct audio *audio); static void audio_dsp_event(void *private, unsigned id, uint16_t *msg); static 
void audmp3_post_event(struct audio *audio, int type, union msm_audio_event_payload payload); static unsigned long audmp3_ion_fixup(struct audio *audio, void *addr, unsigned long len, int ref_up); static int rmt_put_resource(struct audio *audio) { struct aud_codec_config_cmd cmd; unsigned short client_idx; cmd.cmd_id = RM_CMD_AUD_CODEC_CFG; cmd.client_id = RM_AUD_CLIENT_ID; cmd.task_id = audio->dec_id; cmd.enable = RMT_DISABLE; cmd.dec_type = AUDDEC_DEC_MP3; client_idx = ((cmd.client_id << 8) | cmd.task_id); return put_adsp_resource(client_idx, &cmd, sizeof(cmd)); } static int rmt_get_resource(struct audio *audio) { struct aud_codec_config_cmd cmd; unsigned short client_idx; cmd.cmd_id = RM_CMD_AUD_CODEC_CFG; cmd.client_id = RM_AUD_CLIENT_ID; cmd.task_id = audio->dec_id; cmd.enable = RMT_ENABLE; cmd.dec_type = AUDDEC_DEC_MP3; client_idx = ((cmd.client_id << 8) | cmd.task_id); return get_adsp_resource(client_idx, &cmd, sizeof(cmd)); } /* must be called with audio->lock held */ static int audio_enable(struct audio *audio) { struct audmgr_config cfg; int rc; MM_DBG("\n"); /* Macro prints the file name and function */ if (audio->enabled) return 0; if (audio->rmt_resource_released == 1) { audio->rmt_resource_released = 0; rc = rmt_get_resource(audio); if (rc) { MM_ERR("ADSP resources are not available for MP3 \ session 0x%08x on decoder: %d\n Ignoring \ error and going ahead with the playback\n", (int)audio, audio->dec_id); } } audio->dec_state = MSM_AUD_DECODER_STATE_NONE; audio->out_tail = 0; audio->out_needed = 0; if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) { cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK; cfg.codec = RPC_AUD_DEF_CODEC_MP3; cfg.snd_method = RPC_SND_METHOD_MIDI; rc = audmgr_enable(&audio->audmgr, &cfg); if (rc < 0) { msm_adsp_dump(audio->audplay); return rc; } } if (msm_adsp_enable(audio->audplay)) { MM_ERR("msm_adsp_enable(audplay) failed\n"); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_disable(&audio->audmgr); return -ENODEV; } if (audpp_enable(audio->dec_id, audio_dsp_event, audio)) { MM_ERR("audpp_enable() failed\n"); msm_adsp_disable(audio->audplay); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_disable(&audio->audmgr); return -ENODEV; } audio->enabled = 1; return 0; } /* must be called with audio->lock held */ static int audio_disable(struct audio *audio) { int rc = 0; MM_DBG("\n"); /* Macro prints the file name and function */ if (audio->enabled) { audio->enabled = 0; audio->dec_state = MSM_AUD_DECODER_STATE_NONE; auddec_dsp_config(audio, 0); rc = wait_event_interruptible_timeout(audio->wait, audio->dec_state != MSM_AUD_DECODER_STATE_NONE, msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS)); if (rc == 0) rc = -ETIMEDOUT; else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE) rc = -EFAULT; else rc = 0; audio->stopped = 1; wake_up(&audio->write_wait); wake_up(&audio->read_wait); msm_adsp_disable(audio->audplay); audpp_disable(audio->dec_id, audio); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) { rc = audmgr_disable(&audio->audmgr); if (rc < 0) msm_adsp_dump(audio->audplay); } audio->out_needed = 0; rmt_put_resource(audio); audio->rmt_resource_released = 1; } return rc; } /* ------------------- dsp --------------------- */ static void audmp3_async_pcm_buf_update(struct audio *audio, uint32_t *payload) { unsigned long flags; union msm_audio_event_payload event_payload; struct audmp3_buffer_node *filled_buf; uint8_t index; if (audio->rflush) return; 
spin_lock_irqsave(&audio->dsp_lock, flags); for (index = 0; index < payload[1]; index++) { BUG_ON(list_empty(&audio->in_queue)); filled_buf = list_first_entry(&audio->in_queue, struct audmp3_buffer_node, list); if (filled_buf->paddr == payload[2 + index * 2]) { list_del(&filled_buf->list); event_payload.aio_buf = filled_buf->buf; event_payload.aio_buf.data_len = payload[3 + index * 2]; MM_DBG("pcm buf %p data_len %d\n", filled_buf, event_payload.aio_buf.data_len); audmp3_post_event(audio, AUDIO_EVENT_READ_DONE, event_payload); kfree(filled_buf); } else { MM_ERR("expected=%lx ret=%x\n", filled_buf->paddr, payload[2 + index * 2]); break; } } audio->drv_status &= ~ADRV_STATUS_IBUF_GIVEN; audio->drv_ops.buffer_refresh(audio); spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audio_update_pcm_buf_entry(struct audio *audio, uint32_t *payload) { uint8_t index; unsigned long flags; if (audio->rflush) return; spin_lock_irqsave(&audio->dsp_lock, flags); for (index = 0; index < payload[1]; index++) { if (audio->in[audio->fill_next].addr == payload[2 + index * 2]) { MM_DBG("in[%d] ready\n", audio->fill_next); audio->in[audio->fill_next].used = payload[3 + index * 2]; if ((++audio->fill_next) == audio->pcm_buf_count) audio->fill_next = 0; } else { MM_ERR("expected=%x ret=%x\n", audio->in[audio->fill_next].addr, payload[2 + index * 2]); break; } } if (audio->in[audio->fill_next].used == 0) { audio->drv_ops.buffer_refresh(audio); } else { MM_DBG("read cannot keep up\n"); audio->buf_refresh = 1; } wake_up(&audio->read_wait); spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audplay_dsp_event(void *data, unsigned id, size_t len, void (*getevent) (void *ptr, size_t len)) { struct audio *audio = data; uint32_t msg[28]; getevent(msg, sizeof(msg)); MM_DBG("msg_id=%x\n", id); switch (id) { case AUDPLAY_MSG_DEC_NEEDS_DATA: audio->drv_ops.send_data(audio, 1); break; case AUDPLAY_MSG_BUFFER_UPDATE: audio->drv_ops.pcm_buf_update(audio, msg); break; case ADSP_MESSAGE_ID: MM_DBG("Received ADSP event: module enable(audplaytask)\n"); break; default: MM_ERR("unexpected message from decoder \n"); break; } } static void audio_dsp_event(void *private, unsigned id, uint16_t *msg) { struct audio *audio = private; switch (id) { case AUDPP_MSG_STATUS_MSG:{ unsigned status = msg[1]; switch (status) { case AUDPP_DEC_STATUS_SLEEP: { uint16_t reason = msg[2]; MM_DBG("decoder status: sleep reason=0x%04x\n", reason); if ((reason == AUDPP_MSG_REASON_MEM) || (reason == AUDPP_MSG_REASON_NODECODER)) { audio->dec_state = MSM_AUD_DECODER_STATE_FAILURE; wake_up(&audio->wait); } else if (reason == AUDPP_MSG_REASON_NONE) { /* decoder is in disable state */ audio->dec_state = MSM_AUD_DECODER_STATE_CLOSE; wake_up(&audio->wait); } break; } case AUDPP_DEC_STATUS_INIT: MM_DBG("decoder status: init \n"); if (audio->pcm_feedback) audpp_cmd_cfg_routing_mode(audio); else audpp_cmd_cfg_adec_params(audio); break; case AUDPP_DEC_STATUS_CFG: MM_DBG("decoder status: cfg \n"); break; case AUDPP_DEC_STATUS_PLAY: MM_DBG("decoder status: play \n"); if (audio->pcm_feedback) { audplay_config_hostpcm(audio); audio->drv_ops.buffer_refresh(audio); } audio->dec_state = MSM_AUD_DECODER_STATE_SUCCESS; wake_up(&audio->wait); break; default: MM_ERR("unknown decoder status \n"); break; } break; } case AUDPP_MSG_CFG_MSG: if (msg[0] == AUDPP_MSG_ENA_ENA) { MM_DBG("CFG_MSG ENABLE\n"); auddec_dsp_config(audio, 1); audio->out_needed = 0; audio->running = 1; audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan); audpp_dsp_set_eq(audio->dec_id, 
audio->eq_enable, &audio->eq); audpp_avsync(audio->dec_id, 22050); } else if (msg[0] == AUDPP_MSG_ENA_DIS) { MM_DBG("CFG_MSG DISABLE\n"); audpp_avsync(audio->dec_id, 0); audio->running = 0; } else { MM_DBG("CFG_MSG %d?\n", msg[0]); } break; case AUDPP_MSG_ROUTING_ACK: MM_DBG("ROUTING_ACK mode=%d\n", msg[1]); audpp_cmd_cfg_adec_params(audio); break; case AUDPP_MSG_FLUSH_ACK: MM_DBG("FLUSH_ACK\n"); audio->wflush = 0; audio->rflush = 0; wake_up(&audio->write_wait); if (audio->pcm_feedback) audio->drv_ops.buffer_refresh(audio); break; case AUDPP_MSG_PCMDMAMISSED: MM_DBG("PCMDMAMISSED\n"); audio->teos = 1; wake_up(&audio->write_wait); break; default: MM_ERR("UNKNOWN (%d)\n", id); } } struct msm_adsp_ops audplay_adsp_ops = { .event = audplay_dsp_event, }; #define audplay_send_queue0(audio, cmd, len) \ msm_adsp_write(audio->audplay, audio->queue_id, \ cmd, len) static int auddec_dsp_config(struct audio *audio, int enable) { u16 cfg_dec_cmd[AUDPP_CMD_CFG_DEC_TYPE_LEN / sizeof(unsigned short)]; memset(cfg_dec_cmd, 0, sizeof(cfg_dec_cmd)); cfg_dec_cmd[0] = AUDPP_CMD_CFG_DEC_TYPE; if (enable) cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_MP3; else cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V; return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd)); } static void audpp_cmd_cfg_adec_params(struct audio *audio) { audpp_cmd_cfg_adec_params_mp3 cmd; memset(&cmd, 0, sizeof(cmd)); cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_MP3_LEN; cmd.common.dec_id = audio->dec_id; cmd.common.input_sampling_frequency = audio->out_sample_rate; audpp_send_queue2(&cmd, sizeof(cmd)); } static void audpp_cmd_cfg_routing_mode(struct audio *audio) { struct audpp_cmd_routing_mode cmd; MM_DBG("\n"); /* Macro prints the file name and function */ memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; cmd.object_number = audio->dec_id; if (audio->pcm_feedback) cmd.routing_mode = ROUTING_MODE_FTRT; else cmd.routing_mode = ROUTING_MODE_RT; audpp_send_queue1(&cmd, sizeof(cmd)); } static int audplay_dsp_send_data_avail(struct audio *audio, unsigned idx, unsigned len) { struct audplay_cmd_bitstream_data_avail_nt2 cmd; cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2; if (audio->mfield) cmd.decoder_id = AUDMP3_METAFIELD_MASK | (audio->out[idx].mfield_sz >> 1); else cmd.decoder_id = audio->dec_id; cmd.buf_ptr = audio->out[idx].addr; cmd.buf_size = len/2; cmd.partition_number = 0; /* complete all the writes to the input buffer */ wmb(); return audplay_send_queue0(audio, &cmd, sizeof(cmd)); } /* Caller holds irq_lock */ static void audmp3_async_buffer_refresh(struct audio *audio) { struct audplay_cmd_buffer_refresh refresh_cmd; struct audmp3_buffer_node *next_buf; if (!audio->running || audio->drv_status & ADRV_STATUS_IBUF_GIVEN) return; if (!list_empty(&audio->in_queue)) { next_buf = list_first_entry(&audio->in_queue, struct audmp3_buffer_node, list); if (!next_buf) return; MM_DBG("next buf %p phy %lx len %d\n", next_buf, next_buf->paddr, next_buf->buf.buf_len); refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; refresh_cmd.num_buffers = 1; refresh_cmd.buf0_address = next_buf->paddr; refresh_cmd.buf0_length = next_buf->buf.buf_len - (next_buf->buf.buf_len % 576) + (audio->mfield ? 
24 : 0); /* Mp3 frame size */ refresh_cmd.buf_read_count = 0; audio->drv_status |= ADRV_STATUS_IBUF_GIVEN; (void) audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); } } static void audplay_buffer_refresh(struct audio *audio) { struct audplay_cmd_buffer_refresh refresh_cmd; refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; refresh_cmd.num_buffers = 1; refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; refresh_cmd.buf0_length = audio->in[audio->fill_next].size - (audio->in[audio->fill_next].size % 576) + (audio->mfield ? 24 : 0); /* Mp3 frame size */ refresh_cmd.buf_read_count = 0; MM_DBG("buf0_addr=%x buf0_len=%d\n", refresh_cmd.buf0_address, refresh_cmd.buf0_length); (void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); } static void audplay_config_hostpcm(struct audio *audio) { struct audplay_cmd_hpcm_buf_cfg cfg_cmd; MM_DBG("\n"); /* Macro prints the file name and function */ cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG; cfg_cmd.max_buffers = 1; cfg_cmd.byte_swap = 0; cfg_cmd.hostpcm_config = (0x8000) | (0x4000); cfg_cmd.feedback_frequency = 1; cfg_cmd.partition_number = 0; (void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd)); } static void audmp3_async_send_data(struct audio *audio, unsigned needed) { unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); if (!audio->running) goto done; if (needed && !audio->wflush) { audio->out_needed = 1; if (audio->drv_status & ADRV_STATUS_OBUF_GIVEN) { /* pop one node out of queue */ union msm_audio_event_payload payload; struct audmp3_buffer_node *used_buf; MM_DBG("consumed\n"); BUG_ON(list_empty(&audio->out_queue)); used_buf = list_first_entry(&audio->out_queue, struct audmp3_buffer_node, list); list_del(&used_buf->list); payload.aio_buf = used_buf->buf; audmp3_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload); kfree(used_buf); audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN; } } if (audio->out_needed) { struct audmp3_buffer_node *next_buf; struct audplay_cmd_bitstream_data_avail_nt2 cmd; if (!list_empty(&audio->out_queue)) { next_buf = list_first_entry(&audio->out_queue, struct audmp3_buffer_node, list); MM_DBG("next_buf %p\n", next_buf); if (next_buf) { MM_DBG("next buf phy %lx len %d\n", next_buf->paddr, next_buf->buf.data_len); cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2; if (audio->mfield) cmd.decoder_id = AUDMP3_METAFIELD_MASK | (next_buf->buf.mfield_sz >> 1); else cmd.decoder_id = audio->dec_id; cmd.buf_ptr = (unsigned) next_buf->paddr; cmd.buf_size = next_buf->buf.data_len >> 1; cmd.partition_number = 0; /* complete the writes to the input buffer */ wmb(); audplay_send_queue0(audio, &cmd, sizeof(cmd)); audio->out_needed = 0; audio->drv_status |= ADRV_STATUS_OBUF_GIVEN; } } } done: spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audplay_send_data(struct audio *audio, unsigned needed) { struct buffer *frame; unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); if (!audio->running) goto done; if (needed && !audio->wflush) { /* We were called from the callback because the DSP * requested more data. Note that the DSP does want * more data, and if a buffer was in-flight, mark it * as available (since the DSP must now be done with * it). 
*/ audio->out_needed = 1; frame = audio->out + audio->out_tail; if (frame->used == 0xffffffff) { MM_DBG("frame %d free\n", audio->out_tail); frame->used = 0; audio->out_tail ^= 1; wake_up(&audio->write_wait); } } if (audio->out_needed) { /* If the DSP currently wants data and we have a * buffer available, we will send it and reset * the needed flag. We'll mark the buffer as in-flight * so that it won't be recycled until the next buffer * is requested */ frame = audio->out + audio->out_tail; if (frame->used) { BUG_ON(frame->used == 0xffffffff); MM_DBG("frame %d busy\n", audio->out_tail); audplay_dsp_send_data_avail(audio, audio->out_tail, frame->used); frame->used = 0xffffffff; audio->out_needed = 0; } } done: spin_unlock_irqrestore(&audio->dsp_lock, flags); } /* ------------------- device --------------------- */ static void audmp3_async_flush(struct audio *audio) { struct audmp3_buffer_node *buf_node; struct list_head *ptr, *next; union msm_audio_event_payload payload; unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); MM_DBG("\n"); /* Macro prints the file name and function */ list_for_each_safe(ptr, next, &audio->out_queue) { buf_node = list_entry(ptr, struct audmp3_buffer_node, list); list_del(&buf_node->list); payload.aio_buf = buf_node->buf; audmp3_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload); kfree(buf_node); } audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN; audio->out_needed = 0; atomic_set(&audio->out_bytes, 0); spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audio_flush(struct audio *audio) { unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); audio->out[0].used = 0; audio->out[1].used = 0; audio->out_head = 0; audio->out_tail = 0; audio->reserved = 0; audio->out_needed = 0; spin_unlock_irqrestore(&audio->dsp_lock, flags); atomic_set(&audio->out_bytes, 0); } static void audmp3_async_flush_pcm_buf(struct audio *audio) { struct audmp3_buffer_node *buf_node; struct list_head *ptr, *next; union msm_audio_event_payload payload; MM_DBG("\n"); /* Macro prints the file name and function */ list_for_each_safe(ptr, next, &audio->in_queue) { buf_node = list_entry(ptr, struct audmp3_buffer_node, list); list_del(&buf_node->list); payload.aio_buf = buf_node->buf; payload.aio_buf.data_len = 0; audmp3_post_event(audio, AUDIO_EVENT_READ_DONE, payload); kfree(buf_node); } audio->drv_status &= ~ADRV_STATUS_IBUF_GIVEN; } static void audio_flush_pcm_buf(struct audio *audio) { uint8_t index; unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); for (index = 0; index < PCM_BUF_MAX_COUNT; index++) audio->in[index].used = 0; audio->buf_refresh = 0; audio->read_next = 0; audio->fill_next = 0; spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audio_ioport_reset(struct audio *audio) { if (audio->drv_status & ADRV_STATUS_AIO_INTF) { /* If fsync is in progress, make sure * return value of fsync indicates * abort due to flush */ if (audio->drv_status & ADRV_STATUS_FSYNC) { MM_DBG("fsync in progress\n"); wake_up(&audio->write_wait); mutex_lock(&audio->write_lock); audio->drv_ops.out_flush(audio); mutex_unlock(&audio->write_lock); } else audio->drv_ops.out_flush(audio); audio->drv_ops.in_flush(audio); } else { /* Make sure read/write thread are free from * sleep and knowing that system is not able * to process io request at the moment */ wake_up(&audio->write_wait); mutex_lock(&audio->write_lock); audio->drv_ops.out_flush(audio); mutex_unlock(&audio->write_lock); wake_up(&audio->read_wait); mutex_lock(&audio->read_lock); 
audio->drv_ops.in_flush(audio); mutex_unlock(&audio->read_lock); } } static int audmp3_events_pending(struct audio *audio) { unsigned long flags; int empty; spin_lock_irqsave(&audio->event_queue_lock, flags); empty = !list_empty(&audio->event_queue); spin_unlock_irqrestore(&audio->event_queue_lock, flags); return empty || audio->event_abort; } static void audmp3_reset_event_queue(struct audio *audio) { unsigned long flags; struct audmp3_event *drv_evt; struct list_head *ptr, *next; spin_lock_irqsave(&audio->event_queue_lock, flags); list_for_each_safe(ptr, next, &audio->event_queue) { drv_evt = list_first_entry(&audio->event_queue, struct audmp3_event, list); list_del(&drv_evt->list); kfree(drv_evt); } list_for_each_safe(ptr, next, &audio->free_event_queue) { drv_evt = list_first_entry(&audio->free_event_queue, struct audmp3_event, list); list_del(&drv_evt->list); kfree(drv_evt); } spin_unlock_irqrestore(&audio->event_queue_lock, flags); return; } static long audmp3_process_event_req(struct audio *audio, void __user *arg) { long rc; struct msm_audio_event usr_evt; struct audmp3_event *drv_evt = NULL; int timeout; unsigned long flags; if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) return -EFAULT; timeout = (int) usr_evt.timeout_ms; if (timeout > 0) { rc = wait_event_interruptible_timeout( audio->event_wait, audmp3_events_pending(audio), msecs_to_jiffies(timeout)); if (rc == 0) return -ETIMEDOUT; } else { rc = wait_event_interruptible( audio->event_wait, audmp3_events_pending(audio)); } if (rc < 0) return rc; if (audio->event_abort) { audio->event_abort = 0; return -ENODEV; } rc = 0; spin_lock_irqsave(&audio->event_queue_lock, flags); if (!list_empty(&audio->event_queue)) { drv_evt = list_first_entry(&audio->event_queue, struct audmp3_event, list); list_del(&drv_evt->list); } if (drv_evt) { usr_evt.event_type = drv_evt->event_type; usr_evt.event_payload = drv_evt->payload; list_add_tail(&drv_evt->list, &audio->free_event_queue); } else rc = -1; spin_unlock_irqrestore(&audio->event_queue_lock, flags); if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE || drv_evt->event_type == AUDIO_EVENT_READ_DONE) { mutex_lock(&audio->lock); audmp3_ion_fixup(audio, drv_evt->payload.aio_buf.buf_addr, drv_evt->payload.aio_buf.buf_len, 0); mutex_unlock(&audio->lock); } /* order reads from the output buffer */ if (drv_evt->event_type == AUDIO_EVENT_READ_DONE) rmb(); if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt))) rc = -EFAULT; return rc; } static int audmp3_ion_check(struct audio *audio, void *vaddr, unsigned long len) { struct audmp3_ion_region *region_elt; struct audmp3_ion_region t = { .vaddr = vaddr, .len = len }; list_for_each_entry(region_elt, &audio->ion_region_queue, list) { if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || OVERLAPS(region_elt, &t)) { MM_ERR("region (vaddr %p len %ld)" " clashes with registered region" " (vaddr %p paddr %p len %ld)\n", vaddr, len, region_elt->vaddr, (void *)region_elt->paddr, region_elt->len); return -EINVAL; } } return 0; } static int audmp3_ion_add(struct audio *audio, struct msm_audio_ion_info *info) { ion_phys_addr_t paddr; size_t len; unsigned long kvaddr; struct audmp3_ion_region *region; int rc = -EINVAL; struct ion_handle *handle; unsigned long ionflag; void *temp_ptr; MM_DBG("\n"); /* Macro prints the file name and function */ region = kmalloc(sizeof(*region), GFP_KERNEL); if (!region) { rc = -ENOMEM; goto end; } handle = ion_import_dma_buf(audio->client, info->fd); if (IS_ERR_OR_NULL(handle)) { pr_err("%s: could not get 
handle of the given fd\n", __func__); goto import_error; } rc = ion_handle_get_flags(audio->client, handle, &ionflag); if (rc) { pr_err("%s: could not get flags for the handle\n", __func__); goto flag_error; } temp_ptr = ion_map_kernel(audio->client, handle, ionflag); if (IS_ERR_OR_NULL(temp_ptr)) { pr_err("%s: could not get virtual address\n", __func__); goto map_error; } kvaddr = (unsigned long) temp_ptr; rc = ion_phys(audio->client, handle, &paddr, &len); if (rc) { pr_err("%s: could not get physical address\n", __func__); goto ion_error; } rc = audmp3_ion_check(audio, info->vaddr, len); if (rc < 0) { MM_ERR("audpcm_ion_check failed\n"); goto ion_error; } region->handle = handle; region->vaddr = info->vaddr; region->fd = info->fd; region->paddr = paddr; region->kvaddr = kvaddr; region->len = len; region->ref_cnt = 0; MM_DBG("[%p]:add region paddr %lx vaddr %p, len %lu kvaddr %lx\n", audio, region->paddr, region->vaddr, region->len, region->kvaddr); list_add_tail(&region->list, &audio->ion_region_queue); return rc; ion_error: ion_unmap_kernel(audio->client, handle); map_error: flag_error: ion_free(audio->client, handle); import_error: kfree(region); end: return rc; } static int audmp3_ion_remove(struct audio *audio, struct msm_audio_ion_info *info) { struct audmp3_ion_region *region; struct list_head *ptr, *next; int rc = -EINVAL; list_for_each_safe(ptr, next, &audio->ion_region_queue) { region = list_entry(ptr, struct audmp3_ion_region, list); if (region != NULL && (region->fd == info->fd) && (region->vaddr == info->vaddr)) { if (region->ref_cnt) { MM_DBG("%s[%p]:region %p in use ref_cnt %d\n", __func__, audio, region, region->ref_cnt); break; } MM_DBG("remove region fd %d vaddr %p\n", info->fd, info->vaddr); list_del(&region->list); ion_unmap_kernel(audio->client, region->handle); ion_free(audio->client, region->handle); kfree(region); rc = 0; break; } } return rc; } static int audmp3_ion_lookup_vaddr(struct audio *audio, void *addr, unsigned long len, struct audmp3_ion_region **region) { struct audmp3_ion_region *region_elt; int match_count = 0; *region = NULL; /* returns physical address or zero */ list_for_each_entry(region_elt, &audio->ion_region_queue, list) { if (addr >= region_elt->vaddr && addr < region_elt->vaddr + region_elt->len && addr + len <= region_elt->vaddr + region_elt->len) { /* offset since we could pass vaddr inside a registerd * ion buffer */ match_count++; if (!*region) *region = region_elt; } } if (match_count > 1) { MM_ERR("%s[%p]:multiple hits for vaddr %p, len %ld\n", __func__, audio, addr, len); list_for_each_entry(region_elt, &audio->ion_region_queue, list) { if (addr >= region_elt->vaddr && addr < region_elt->vaddr + region_elt->len && addr + len <= region_elt->vaddr + region_elt->len) MM_ERR("\t%s[%p]:%p, %ld --> %p\n", __func__, audio, region_elt->vaddr, region_elt->len, (void *)region_elt->paddr); } } return *region ? 
0 : -1; } unsigned long audmp3_ion_fixup(struct audio *audio, void *addr, unsigned long len, int ref_up) { struct audmp3_ion_region *region; unsigned long paddr; int ret; ret = audmp3_ion_lookup_vaddr(audio, addr, len, &region); if (ret) { MM_ERR("lookup (%p, %ld) failed\n", addr, len); return 0; } if (ref_up) region->ref_cnt++; else region->ref_cnt--; MM_DBG("found region %p ref_cnt %d\n", region, region->ref_cnt); paddr = region->paddr + (addr - region->vaddr); return paddr; } /* audio -> lock must be held at this point */ static int audmp3_aio_buf_add(struct audio *audio, unsigned dir, void __user *arg) { unsigned long flags; struct audmp3_buffer_node *buf_node; buf_node = kmalloc(sizeof(*buf_node), GFP_KERNEL); if (!buf_node) return -ENOMEM; if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) { kfree(buf_node); return -EFAULT; } MM_DBG("node %p dir %x buf_addr %p buf_len %d data_len \ %d\n", buf_node, dir, buf_node->buf.buf_addr, buf_node->buf.buf_len, buf_node->buf.data_len); buf_node->paddr = audmp3_ion_fixup( audio, buf_node->buf.buf_addr, buf_node->buf.buf_len, 1); if (dir) { /* write */ if (!buf_node->paddr || (buf_node->paddr & 0x1) || (buf_node->buf.data_len & 0x1) || (!audio->pcm_feedback && !buf_node->buf.data_len)) { kfree(buf_node); return -EINVAL; } spin_lock_irqsave(&audio->dsp_lock, flags); list_add_tail(&buf_node->list, &audio->out_queue); spin_unlock_irqrestore(&audio->dsp_lock, flags); audio->drv_ops.send_data(audio, 0); } else { /* read */ if (!buf_node->paddr || (buf_node->paddr & 0x1) || (buf_node->buf.buf_len < PCM_BUFSZ_MIN)) { kfree(buf_node); return -EINVAL; } spin_lock_irqsave(&audio->dsp_lock, flags); list_add_tail(&buf_node->list, &audio->in_queue); audio->drv_ops.buffer_refresh(audio); spin_unlock_irqrestore(&audio->dsp_lock, flags); } MM_DBG("Add buf_node %p paddr %lx\n", buf_node, buf_node->paddr); return 0; } static int audio_enable_eq(struct audio *audio, int enable) { if (audio->eq_enable == enable && !audio->eq_needs_commit) return 0; audio->eq_enable = enable; if (audio->running) { audpp_dsp_set_eq(audio->dec_id, enable, &audio->eq); audio->eq_needs_commit = 0; } return 0; } static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio *audio = file->private_data; int rc = -EINVAL; unsigned long flags = 0; uint16_t enable_mask; int enable; int prev_state; unsigned long ionflag = 0; ion_phys_addr_t addr = 0; struct ion_handle *handle = NULL; int len = 0; MM_DBG("cmd = %d\n", cmd); if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; stats.byte_count = audpp_avsync_byte_count(audio->dec_id); stats.sample_count = audpp_avsync_sample_count(audio->dec_id); if (copy_to_user((void *) arg, &stats, sizeof(stats))) return -EFAULT; return 0; } switch (cmd) { case AUDIO_ENABLE_AUDPP: if (copy_from_user(&enable_mask, (void *) arg, sizeof(enable_mask))) { rc = -EFAULT; break; } spin_lock_irqsave(&audio->dsp_lock, flags); enable = (enable_mask & EQ_ENABLE) ? 
1 : 0; audio_enable_eq(audio, enable); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_VOLUME: spin_lock_irqsave(&audio->dsp_lock, flags); audio->vol_pan.volume = arg; if (audio->running) audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_PAN: spin_lock_irqsave(&audio->dsp_lock, flags); audio->vol_pan.pan = arg; if (audio->running) audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_EQ: prev_state = audio->eq_enable; audio->eq_enable = 0; if (copy_from_user(&audio->eq.num_bands, (void *) arg, sizeof(audio->eq) - (AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN + 2))) { rc = -EFAULT; break; } audio->eq_enable = prev_state; audio->eq_needs_commit = 1; rc = 0; break; } if (-EINVAL != rc) return rc; if (cmd == AUDIO_GET_EVENT) { MM_DBG(" AUDIO_GET_EVENT\n"); if (mutex_trylock(&audio->get_event_lock)) { rc = audmp3_process_event_req(audio, (void __user *) arg); mutex_unlock(&audio->get_event_lock); } else rc = -EBUSY; return rc; } if (cmd == AUDIO_ABORT_GET_EVENT) { audio->event_abort = 1; wake_up(&audio->event_wait); return 0; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: MM_DBG("AUDIO_START\n"); rc = audio_enable(audio); if (!rc) { rc = wait_event_interruptible_timeout(audio->wait, audio->dec_state != MSM_AUD_DECODER_STATE_NONE, msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS)); MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc); if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS) rc = -ENODEV; else rc = 0; } break; case AUDIO_STOP: MM_DBG("AUDIO_STOP\n"); rc = audio_disable(audio); audio_ioport_reset(audio); audio->stopped = 0; break; case AUDIO_FLUSH: MM_DBG("AUDIO_FLUSH\n"); audio->rflush = 1; audio->wflush = 1; audio_ioport_reset(audio); if (audio->running) { audpp_flush(audio->dec_id); rc = wait_event_interruptible(audio->write_wait, !audio->wflush); if (rc < 0) { MM_ERR("AUDIO_FLUSH interrupted\n"); rc = -EINTR; } } else { audio->rflush = 0; audio->wflush = 0; } break; case AUDIO_SET_CONFIG: { struct msm_audio_config config; if (copy_from_user(&config, (void *) arg, sizeof(config))) { rc = -EFAULT; break; } if (config.channel_count == 1) { config.channel_count = AUDPP_CMD_PCM_INTF_MONO_V; } else if (config.channel_count == 2) { config.channel_count = AUDPP_CMD_PCM_INTF_STEREO_V; } else { rc = -EINVAL; break; } audio->mfield = config.meta_field; audio->out_sample_rate = config.sample_rate; audio->out_channel_mode = config.channel_count; rc = 0; break; } case AUDIO_GET_CONFIG: { struct msm_audio_config config; config.buffer_size = (audio->out_dma_sz >> 1); config.buffer_count = 2; config.sample_rate = audio->out_sample_rate; if (audio->out_channel_mode == AUDPP_CMD_PCM_INTF_MONO_V) { config.channel_count = 1; } else { config.channel_count = 2; } config.meta_field = 0; config.unused[0] = 0; config.unused[1] = 0; config.unused[2] = 0; if (copy_to_user((void *) arg, &config, sizeof(config))) { rc = -EFAULT; } else { rc = 0; } break; } case AUDIO_GET_PCM_CONFIG:{ struct msm_audio_pcm_config config; config.pcm_feedback = audio->pcm_feedback; config.buffer_count = PCM_BUF_MAX_COUNT; config.buffer_size = PCM_BUFSZ_MIN; if (copy_to_user((void *)arg, &config, sizeof(config))) rc = -EFAULT; else rc = 0; break; } case AUDIO_SET_PCM_CONFIG:{ struct msm_audio_pcm_config config; if (copy_from_user (&config, (void *)arg, sizeof(config))) { rc = -EFAULT; break; } if (config.pcm_feedback != 
audio->pcm_feedback) { MM_ERR("Not sufficient permission to" "change the playback mode\n"); rc = -EACCES; break; } if (audio->drv_status & ADRV_STATUS_AIO_INTF) { rc = 0; break; } if ((config.buffer_count > PCM_BUF_MAX_COUNT) || (config.buffer_count == 1)) config.buffer_count = PCM_BUF_MAX_COUNT; if (config.buffer_size < PCM_BUFSZ_MIN) config.buffer_size = PCM_BUFSZ_MIN; /* Check if pcm feedback is required */ if ((config.pcm_feedback) && (!audio->read_data)) { MM_DBG("allocate PCM buffer %d\n", config.buffer_count * config.buffer_size); handle = ion_alloc(audio->client, (config.buffer_size * config.buffer_count), SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID)); if (IS_ERR_OR_NULL(handle)) { MM_ERR("Unable to alloc I/P buffs\n"); rc = -ENOMEM; break; } audio->input_buff_handle = handle; rc = ion_phys(audio->client , handle, &addr, &len); if (rc) { MM_ERR("Invalid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); rc = -ENOMEM; break; } else { MM_INFO("Got valid phy: %x sz: %x\n", (unsigned int) audio->read_phys, (unsigned int) len); } audio->read_phys = (int32_t)addr; rc = ion_handle_get_flags(audio->client, handle, &ionflag); if (rc) { MM_ERR("could not get flags\n"); rc = -ENOMEM; break; } audio->map_v_read = ion_map_kernel( audio->client, handle, ionflag); if (IS_ERR(audio->map_v_read)) { MM_ERR("map of read buf failed\n"); rc = -ENOMEM; ion_free(audio->client, handle); } else { uint8_t index; uint32_t offset = 0; audio->read_data = audio->map_v_read; audio->buf_refresh = 0; audio->pcm_buf_count = config.buffer_count; audio->read_next = 0; audio->fill_next = 0; for (index = 0; index < config.buffer_count; index++) { audio->in[index].data = audio->read_data + offset; audio->in[index].addr = audio->read_phys + offset; audio->in[index].size = config.buffer_size; audio->in[index].used = 0; offset += config.buffer_size; } rc = 0; MM_DBG("read buf: phy addr \ 0x%08x kernel addr 0x%08x\n", audio->read_phys, (int)audio->read_data); } } else { rc = 0; } break; } case AUDIO_PAUSE: MM_DBG("AUDIO_PAUSE %ld\n", arg); rc = audpp_pause(audio->dec_id, (int) arg); break; case AUDIO_REGISTER_ION: { struct msm_audio_ion_info info; MM_DBG("AUDIO_REGISTER_ION\n"); if (copy_from_user(&info, (void *) arg, sizeof(info))) rc = -EFAULT; else rc = audmp3_ion_add(audio, &info); break; } case AUDIO_DEREGISTER_ION: { struct msm_audio_ion_info info; MM_DBG("AUDIO_DEREGISTER_ION\n"); if (copy_from_user(&info, (void *) arg, sizeof(info))) rc = -EFAULT; else rc = audmp3_ion_remove(audio, &info); break; } case AUDIO_ASYNC_WRITE: if (audio->drv_status & ADRV_STATUS_FSYNC) rc = -EBUSY; else rc = audmp3_aio_buf_add(audio, 1, (void __user *) arg); break; case AUDIO_ASYNC_READ: if (audio->pcm_feedback) rc = audmp3_aio_buf_add(audio, 0, (void __user *) arg); else rc = -EPERM; break; default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } /* Only useful in tunnel-mode */ int audmp3_async_fsync(struct audio *audio) { int rc = 0; MM_DBG("\n"); /* Macro prints the file name and function */ /* Blocking client sends more data */ mutex_lock(&audio->lock); audio->drv_status |= ADRV_STATUS_FSYNC; mutex_unlock(&audio->lock); mutex_lock(&audio->write_lock); /* pcm dmamiss message is sent continously * when decoder is starved so no race * condition concern */ audio->teos = 0; rc = wait_event_interruptible(audio->write_wait, (audio->teos && audio->out_needed && list_empty(&audio->out_queue)) || audio->wflush || audio->stopped); if (audio->stopped || audio->wflush) rc = -EBUSY; mutex_unlock(&audio->write_lock); 
mutex_lock(&audio->lock); audio->drv_status &= ~ADRV_STATUS_FSYNC; mutex_unlock(&audio->lock); return rc; } int audmp3_sync_fsync(struct audio *audio) { struct buffer *frame; int rc = 0; MM_DBG("\n"); /* Macro prints the file name and function */ mutex_lock(&audio->write_lock); rc = wait_event_interruptible(audio->write_wait, (!audio->out[0].used && !audio->out[1].used && audio->out_needed) || audio->wflush); if (rc < 0) goto done; else if (audio->wflush) { rc = -EBUSY; goto done; } if (audio->reserved) { MM_DBG("send reserved byte\n"); frame = audio->out + audio->out_tail; ((char *) frame->data)[0] = audio->rsv_byte; ((char *) frame->data)[1] = 0; frame->used = 2; audio->drv_ops.send_data(audio, 0); rc = wait_event_interruptible(audio->write_wait, (!audio->out[0].used && !audio->out[1].used && audio->out_needed) || audio->wflush); if (rc < 0) goto done; else if (audio->wflush) { rc = -EBUSY; goto done; } } /* pcm dmamiss message is sent continously * when decoder is starved so no race * condition concern */ audio->teos = 0; rc = wait_event_interruptible(audio->write_wait, audio->teos || audio->wflush); if (audio->wflush) rc = -EBUSY; done: mutex_unlock(&audio->write_lock); return rc; } int audmp3_fsync(struct file *file, loff_t a, loff_t b, int datasync) { struct audio *audio = file->private_data; if (!audio->running || audio->pcm_feedback) return -EINVAL; return audio->drv_ops.fsync(audio); } static ssize_t audio_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio *audio = file->private_data; const char __user *start = buf; int rc = 0; if (audio->drv_status & ADRV_STATUS_AIO_INTF) return -EPERM; else if (!audio->pcm_feedback) return 0; /* PCM feedback disabled. Nothing to read */ mutex_lock(&audio->read_lock); MM_DBG("%d \n", count); while (count > 0) { rc = wait_event_interruptible( audio->read_wait, (audio->in[audio->read_next]. used > 0) || (audio->stopped) || (audio->rflush)); if (rc < 0) break; if (audio->stopped || audio->rflush) { rc = -EBUSY; break; } if (count < audio->in[audio->read_next].used) { /* Read must happen in frame boundary. Since * driver does not know frame size, read count * must be greater or equal * to size of PCM samples */ MM_DBG("no partial frame done reading\n"); break; } else { MM_DBG("read from in[%d]\n", audio->read_next); /* order reads from the output buffer */ rmb(); if (copy_to_user (buf, audio->in[audio->read_next].data, audio->in[audio->read_next].used)) { MM_ERR("invalid addr %x \n", (unsigned int)buf); rc = -EFAULT; break; } count -= audio->in[audio->read_next].used; buf += audio->in[audio->read_next].used; audio->in[audio->read_next].used = 0; if ((++audio->read_next) == audio->pcm_buf_count) audio->read_next = 0; break; /* Force to exit while loop * to prevent output thread * sleep too long if data is * not ready at this moment. 
*/ } } /* don't feed output buffer to HW decoder during flushing * buffer refresh command will be sent once flush completes * send buf refresh command here can confuse HW decoder */ if (audio->buf_refresh && !audio->rflush) { audio->buf_refresh = 0; MM_DBG("kick start pcm feedback again\n"); audio->drv_ops.buffer_refresh(audio); } mutex_unlock(&audio->read_lock); if (buf > start) rc = buf - start; MM_DBG("read %d bytes\n", rc); return rc; } static int audmp3_process_eos(struct audio *audio, const char __user *buf_start, unsigned short mfield_size) { int rc = 0; struct buffer *frame; char *buf_ptr; if (audio->reserved) { MM_DBG("flush reserve byte\n"); frame = audio->out + audio->out_head; buf_ptr = frame->data; rc = wait_event_interruptible(audio->write_wait, (frame->used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) goto done; if (audio->stopped || audio->wflush) { rc = -EBUSY; goto done; } buf_ptr[0] = audio->rsv_byte; buf_ptr[1] = 0; audio->out_head ^= 1; frame->mfield_sz = 0; frame->used = 2; audio->reserved = 0; audio->drv_ops.send_data(audio, 0); } frame = audio->out + audio->out_head; rc = wait_event_interruptible(audio->write_wait, (audio->out_needed && audio->out[0].used == 0 && audio->out[1].used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) goto done; if (audio->stopped || audio->wflush) { rc = -EBUSY; goto done; } if (copy_from_user(frame->data, buf_start, mfield_size)) { rc = -EFAULT; goto done; } frame->mfield_sz = mfield_size; audio->out_head ^= 1; frame->used = mfield_size; audio->drv_ops.send_data(audio, 0); done: return rc; } static ssize_t audio_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct audio *audio = file->private_data; const char __user *start = buf; struct buffer *frame; size_t xfer; char *cpy_ptr; int rc = 0, eos_condition = AUDMP3_EOS_NONE; unsigned dsize; unsigned short mfield_size = 0; if (audio->drv_status & ADRV_STATUS_AIO_INTF) return -EPERM; MM_DBG("cnt=%d\n", count); mutex_lock(&audio->write_lock); while (count > 0) { frame = audio->out + audio->out_head; cpy_ptr = frame->data; dsize = 0; rc = wait_event_interruptible(audio->write_wait, (frame->used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) break; if (audio->stopped || audio->wflush) { rc = -EBUSY; break; } if (audio->mfield) { if (buf == start) { /* Processing beginning of user buffer */ if (__get_user(mfield_size, (unsigned short __user *) buf)) { rc = -EFAULT; break; } else if (mfield_size > count) { rc = -EINVAL; break; } MM_DBG("mf offset_val %x\n", mfield_size); if (copy_from_user(cpy_ptr, buf, mfield_size)) { rc = -EFAULT; break; } /* Check if EOS flag is set and buffer has * contains just meta field */ if (cpy_ptr[AUDMP3_EOS_FLG_OFFSET] & AUDMP3_EOS_FLG_MASK) { MM_DBG("EOS SET\n"); eos_condition = AUDMP3_EOS_SET; if (mfield_size == count) { buf += mfield_size; break; } else cpy_ptr[AUDMP3_EOS_FLG_OFFSET] &= ~AUDMP3_EOS_FLG_MASK; } cpy_ptr += mfield_size; count -= mfield_size; dsize += mfield_size; buf += mfield_size; } else { mfield_size = 0; MM_DBG("continuous buffer\n"); } frame->mfield_sz = mfield_size; } if (audio->reserved) { MM_DBG("append reserved byte %x\n", audio->rsv_byte); *cpy_ptr = audio->rsv_byte; xfer = (count > ((frame->size - mfield_size) - 1)) ? (frame->size - mfield_size) - 1 : count; cpy_ptr++; dsize += 1; audio->reserved = 0; } else xfer = (count > (frame->size - mfield_size)) ? 
(frame->size - mfield_size) : count; if (copy_from_user(cpy_ptr, buf, xfer)) { rc = -EFAULT; break; } dsize += xfer; if (dsize & 1) { audio->rsv_byte = ((char *) frame->data)[dsize - 1]; MM_DBG("odd length buf reserve last byte %x\n", audio->rsv_byte); audio->reserved = 1; dsize--; } count -= xfer; buf += xfer; if (dsize > 0) { audio->out_head ^= 1; frame->used = dsize; audio->drv_ops.send_data(audio, 0); } } if (eos_condition == AUDMP3_EOS_SET) rc = audmp3_process_eos(audio, start, mfield_size); mutex_unlock(&audio->write_lock); if (!rc) { if (buf > start) return buf - start; } return rc; } static void audmp3_reset_ion_region(struct audio *audio) { struct audmp3_ion_region *region; struct list_head *ptr, *next; list_for_each_safe(ptr, next, &audio->ion_region_queue) { region = list_entry(ptr, struct audmp3_ion_region, list); list_del(&region->list); ion_unmap_kernel(audio->client, region->handle); ion_free(audio->client, region->handle); kfree(region); } return; } static int audio_release(struct inode *inode, struct file *file) { struct audio *audio = file->private_data; MM_INFO("audio instance 0x%08x freeing\n", (int)audio); mutex_lock(&audio->lock); audio_disable(audio); if (audio->rmt_resource_released == 0) rmt_put_resource(audio); audio->drv_ops.out_flush(audio); audio->drv_ops.in_flush(audio); audmp3_reset_ion_region(audio); msm_adsp_put(audio->audplay); audpp_adec_free(audio->dec_id); #ifdef CONFIG_HAS_EARLYSUSPEND unregister_early_suspend(&audio->suspend_ctl.node); #endif audio->opened = 0; audio->event_abort = 1; wake_up(&audio->event_wait); audmp3_reset_event_queue(audio); mutex_unlock(&audio->lock); #ifdef CONFIG_DEBUG_FS if (audio->dentry) debugfs_remove(audio->dentry); #endif if (!(audio->drv_status & ADRV_STATUS_AIO_INTF)) { ion_unmap_kernel(audio->client, audio->output_buff_handle); ion_free(audio->client, audio->output_buff_handle); ion_unmap_kernel(audio->client, audio->input_buff_handle); ion_free(audio->client, audio->input_buff_handle); } ion_client_destroy(audio->client); kfree(audio); return 0; } static void audmp3_post_event(struct audio *audio, int type, union msm_audio_event_payload payload) { struct audmp3_event *e_node = NULL; unsigned long flags; spin_lock_irqsave(&audio->event_queue_lock, flags); if (!list_empty(&audio->free_event_queue)) { e_node = list_first_entry(&audio->free_event_queue, struct audmp3_event, list); list_del(&e_node->list); } else { e_node = kmalloc(sizeof(struct audmp3_event), GFP_ATOMIC); if (!e_node) { MM_ERR("No mem to post event %d\n", type); spin_unlock_irqrestore(&audio->event_queue_lock, flags); return; } } e_node->event_type = type; e_node->payload = payload; list_add_tail(&e_node->list, &audio->event_queue); spin_unlock_irqrestore(&audio->event_queue_lock, flags); wake_up(&audio->event_wait); } #ifdef CONFIG_HAS_EARLYSUSPEND static void audmp3_suspend(struct early_suspend *h) { struct audmp3_suspend_ctl *ctl = container_of(h, struct audmp3_suspend_ctl, node); union msm_audio_event_payload payload; MM_DBG("\n"); /* Macro prints the file name and function */ audmp3_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload); } static void audmp3_resume(struct early_suspend *h) { struct audmp3_suspend_ctl *ctl = container_of(h, struct audmp3_suspend_ctl, node); union msm_audio_event_payload payload; MM_DBG("\n"); /* Macro prints the file name and function */ audmp3_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload); } #endif #ifdef CONFIG_DEBUG_FS static ssize_t audmp3_debug_open(struct inode *inode, struct file *file) { file->private_data 
= inode->i_private; return 0; } static ssize_t audmp3_debug_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { const int debug_bufmax = 4096; static char buffer[4096]; int n = 0, i; struct audio *audio = file->private_data; mutex_lock(&audio->lock); n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); n += scnprintf(buffer + n, debug_bufmax - n, "enabled %d\n", audio->enabled); n += scnprintf(buffer + n, debug_bufmax - n, "stopped %d\n", audio->stopped); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_feedback %d\n", audio->pcm_feedback); n += scnprintf(buffer + n, debug_bufmax - n, "out_buf_sz %d\n", audio->out[0].size); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_buf_count %d \n", audio->pcm_buf_count); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_buf_sz %d \n", audio->in[0].size); n += scnprintf(buffer + n, debug_bufmax - n, "volume %x \n", audio->vol_pan.volume); n += scnprintf(buffer + n, debug_bufmax - n, "sample rate %d \n", audio->out_sample_rate); n += scnprintf(buffer + n, debug_bufmax - n, "channel mode %d \n", audio->out_channel_mode); mutex_unlock(&audio->lock); /* Following variables are only useful for debugging when * when playback halts unexpectedly. Thus, no mutual exclusion * enforced */ n += scnprintf(buffer + n, debug_bufmax - n, "wflush %d\n", audio->wflush); n += scnprintf(buffer + n, debug_bufmax - n, "rflush %d\n", audio->rflush); n += scnprintf(buffer + n, debug_bufmax - n, "running %d \n", audio->running); n += scnprintf(buffer + n, debug_bufmax - n, "dec state %d \n", audio->dec_state); n += scnprintf(buffer + n, debug_bufmax - n, "out_needed %d \n", audio->out_needed); n += scnprintf(buffer + n, debug_bufmax - n, "out_head %d \n", audio->out_head); n += scnprintf(buffer + n, debug_bufmax - n, "out_tail %d \n", audio->out_tail); n += scnprintf(buffer + n, debug_bufmax - n, "out[0].used %d \n", audio->out[0].used); n += scnprintf(buffer + n, debug_bufmax - n, "out[1].used %d \n", audio->out[1].used); n += scnprintf(buffer + n, debug_bufmax - n, "buffer_refresh %d \n", audio->buf_refresh); n += scnprintf(buffer + n, debug_bufmax - n, "read_next %d \n", audio->read_next); n += scnprintf(buffer + n, debug_bufmax - n, "fill_next %d \n", audio->fill_next); for (i = 0; i < audio->pcm_buf_count; i++) n += scnprintf(buffer + n, debug_bufmax - n, "in[%d].size %d \n", i, audio->in[i].used); buffer[n] = 0; return simple_read_from_buffer(buf, count, ppos, buffer, n); } static const struct file_operations audmp3_debug_fops = { .read = audmp3_debug_read, .open = audmp3_debug_open, }; #endif static int audio_open(struct inode *inode, struct file *file) { struct audio *audio = NULL; int rc, i, dec_attrb, decid; struct audmp3_event *e_node = NULL; unsigned mem_sz = DMASZ_MAX; unsigned long ionflag = 0; ion_phys_addr_t addr = 0; struct ion_handle *handle = NULL; struct ion_client *client = NULL; int len = 0; #ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_mp3_" + 5]; #endif /* Allocate audio instance, set to zero */ audio = kzalloc(sizeof(struct audio), GFP_KERNEL); if (!audio) { MM_ERR("no memory to allocate audio instance \n"); rc = -ENOMEM; goto done; } MM_INFO("audio instance 0x%08x created\n", (int)audio); /* Allocate the decoder */ dec_attrb = AUDDEC_DEC_MP3; if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { dec_attrb |= MSM_AUD_MODE_NONTUNNEL; audio->pcm_feedback = NON_TUNNEL_MODE_PLAYBACK; } else if ((file->f_mode & FMODE_WRITE) && 
!(file->f_mode & FMODE_READ)) { dec_attrb |= MSM_AUD_MODE_TUNNEL; audio->pcm_feedback = TUNNEL_MODE_PLAYBACK; } else { kfree(audio); rc = -EACCES; goto done; } decid = audpp_adec_alloc(dec_attrb, &audio->module_name, &audio->queue_id); if (decid < 0) { MM_ERR("No free decoder available, freeing instance 0x%08x\n", (int)audio); rc = -ENODEV; kfree(audio); goto done; } audio->dec_id = decid & MSM_AUD_DECODER_MASK; client = msm_ion_client_create(UINT_MAX, "Audio_MP3_Client"); if (IS_ERR_OR_NULL(client)) { pr_err("Unable to create ION client\n"); rc = -ENOMEM; goto client_create_error; } audio->client = client; /* Non AIO interface */ if (!(file->f_flags & O_NONBLOCK)) { MM_DBG("memsz = %d\n", mem_sz); handle = ion_alloc(client, mem_sz, SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID)); if (IS_ERR_OR_NULL(handle)) { MM_ERR("Unable to create allocate O/P buffers\n"); rc = -ENOMEM; goto output_buff_alloc_error; } audio->output_buff_handle = handle; rc = ion_phys(client , handle, &addr, &len); if (rc) { MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); goto output_buff_get_phys_error; } else { MM_INFO("O/P buffers:valid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); } audio->phys = (int32_t)addr; rc = ion_handle_get_flags(client, handle, &ionflag); if (rc) { MM_ERR("could not get flags for the handle\n"); goto output_buff_get_flags_error; } audio->map_v_write = ion_map_kernel(client, handle, ionflag); if (IS_ERR(audio->map_v_write)) { MM_ERR("could not map write buffers\n"); rc = -ENOMEM; goto output_buff_map_error; } audio->data = audio->map_v_write; MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n", audio->phys, (int)audio->data); audio->out_dma_sz = mem_sz; } if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) { rc = audmgr_open(&audio->audmgr); if (rc) { MM_ERR("audmgr open failed, freeing instance \ 0x%08x\n", (int)audio); if (!(file->f_flags & O_NONBLOCK)) goto err; else goto resource_err; } } rc = msm_adsp_get(audio->module_name, &audio->audplay, &audplay_adsp_ops, audio); if (rc) { MM_ERR("failed to get %s module, freeing instance 0x%08x\n", audio->module_name, (int)audio); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_close(&audio->audmgr); if (!(file->f_flags & O_NONBLOCK)) goto err; else goto resource_err; } rc = rmt_get_resource(audio); if (rc) { MM_ERR("ADSP resources are not available for MP3 session \ 0x%08x on decoder: %d\n", (int)audio, audio->dec_id); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_close(&audio->audmgr); msm_adsp_put(audio->audplay); if (!(file->f_flags & O_NONBLOCK)) goto err; else goto resource_err; } if (file->f_flags & O_NONBLOCK) { MM_DBG("set to aio interface\n"); audio->drv_status |= ADRV_STATUS_AIO_INTF; audio->drv_ops.pcm_buf_update = audmp3_async_pcm_buf_update; audio->drv_ops.buffer_refresh = audmp3_async_buffer_refresh; audio->drv_ops.send_data = audmp3_async_send_data; audio->drv_ops.out_flush = audmp3_async_flush; audio->drv_ops.in_flush = audmp3_async_flush_pcm_buf; audio->drv_ops.fsync = audmp3_async_fsync; } else { MM_DBG("set to std io interface\n"); audio->drv_ops.pcm_buf_update = audio_update_pcm_buf_entry; audio->drv_ops.buffer_refresh = audplay_buffer_refresh; audio->drv_ops.send_data = audplay_send_data; audio->drv_ops.out_flush = audio_flush; audio->drv_ops.in_flush = audio_flush_pcm_buf; audio->drv_ops.fsync = audmp3_sync_fsync; audio->out[0].data = audio->data + 0; audio->out[0].addr = audio->phys + 0; audio->out[0].size = (audio->out_dma_sz >> 1); audio->out[1].data = 
audio->data + audio->out[0].size; audio->out[1].addr = audio->phys + audio->out[0].size; audio->out[1].size = audio->out[0].size; } /* Initialize all locks of audio instance */ mutex_init(&audio->lock); mutex_init(&audio->write_lock); mutex_init(&audio->read_lock); mutex_init(&audio->get_event_lock); spin_lock_init(&audio->dsp_lock); init_waitqueue_head(&audio->write_wait); init_waitqueue_head(&audio->read_wait); INIT_LIST_HEAD(&audio->out_queue); INIT_LIST_HEAD(&audio->in_queue); INIT_LIST_HEAD(&audio->ion_region_queue); INIT_LIST_HEAD(&audio->free_event_queue); INIT_LIST_HEAD(&audio->event_queue); init_waitqueue_head(&audio->wait); init_waitqueue_head(&audio->event_wait); spin_lock_init(&audio->event_queue_lock); audio->out_sample_rate = 44100; audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V; audio->vol_pan.volume = 0x2000; audio->drv_ops.out_flush(audio); file->private_data = audio; audio->opened = 1; #ifdef CONFIG_DEBUG_FS snprintf(name, sizeof name, "msm_mp3_%04x", audio->dec_id); audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, NULL, (void *) audio, &audmp3_debug_fops); if (IS_ERR(audio->dentry)) MM_DBG("debugfs_create_file failed\n"); #endif #ifdef CONFIG_HAS_EARLYSUSPEND audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; audio->suspend_ctl.node.resume = audmp3_resume; audio->suspend_ctl.node.suspend = audmp3_suspend; audio->suspend_ctl.audio = audio; register_early_suspend(&audio->suspend_ctl.node); #endif for (i = 0; i < AUDMP3_EVENT_NUM; i++) { e_node = kmalloc(sizeof(struct audmp3_event), GFP_KERNEL); if (e_node) list_add_tail(&e_node->list, &audio->free_event_queue); else { MM_ERR("event pkt alloc failed\n"); break; } } done: return rc; err: ion_unmap_kernel(client, audio->output_buff_handle); output_buff_map_error: output_buff_get_flags_error: output_buff_get_phys_error: ion_free(client, audio->output_buff_handle); output_buff_alloc_error: ion_client_destroy(client); client_create_error: resource_err: audpp_adec_free(audio->dec_id); kfree(audio); return rc; } static const struct file_operations audio_mp3_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_release, .read = audio_read, .write = audio_write, .unlocked_ioctl = audio_ioctl, .fsync = audmp3_fsync, }; struct miscdevice audio_mp3_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_mp3", .fops = &audio_mp3_fops, }; static int __init audio_init(void) { return misc_register(&audio_mp3_misc); } static void __exit audio_exit(void) { misc_deregister(&audio_mp3_misc); } module_init(audio_init); module_exit(audio_exit); MODULE_DESCRIPTION("MSM MP3 driver"); MODULE_LICENSE("GPL v2");
prototype-U/lenovo_a706_kernel
arch/arm/mach-msm/qdsp5/audio_mp3.c
C
gpl-2.0
64,951
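A minimal user-space sketch (not part of the driver source above) of the playback path that audio_open() sets up: opening the "msm_mp3" misc device write-only without O_NONBLOCK selects tunnel-mode playback over the standard read/write interface, and write() then feeds the MP3 bitstream into the driver's two half-buffers. The /dev/msm_mp3 node name and the AUDIO_START ioctl (from linux/msm_audio.h) are assumptions; neither appears in this excerpt.

/*
 * Hypothetical user-space client for the msm_mp3 misc device registered
 * above. Assumes the MSM audio UAPI header and a /dev/msm_mp3 node.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_audio.h>	/* assumed UAPI header, not shown above */

int play_mp3(const char *path)
{
	char buf[4096];
	ssize_t n;
	int in, dev;

	/* write-only + blocking open: tunnel mode, standard I/O interface */
	dev = open("/dev/msm_mp3", O_WRONLY);
	if (dev < 0)
		return -1;
	in = open(path, O_RDONLY);
	if (in < 0) {
		close(dev);
		return -1;
	}
	/* assumed start ioctl from the MSM audio UAPI */
	if (ioctl(dev, AUDIO_START, 0) < 0)
		perror("AUDIO_START");
	/* each write blocks until one of the driver's two half-buffers frees up */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(dev, buf, n) != n)
			break;
	close(in);
	close(dev);
	return 0;
}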
<?php /* Plugin Name: Video Feed Box */ ?> <?php global $SMTheme; $video_defaults = array( 'width' =>'272', 'title' => 'Video', 'videos' => array( array( 'title' => 'The Mountain', 'url' => 'http://vimeo.com/22439234/', 'type' => 'vimeo', 'videoid' => '22439234' ), array( 'title' => 'Amazing nature scenery', 'url' => 'http://www.youtube.com/watch?v=6v2L2UGZJAM', 'type' => 'youtube', 'videoid' => '6v2L2UGZJAM' ) ) ); class VideoFeed extends WP_Widget { function __construct(){ $widget_options = array('description' => 'Video Feed Box widget.' ); $control_options = array(); parent::__construct('VideoFeed', '&raquo; Video Feed', $widget_options, $control_options); } function widget($args, $instance){ extract( $args ); $title = apply_filters('widget_title', $instance['title']); $width = intval($instance['width']); $videos = $instance['videos']; if(is_array($videos)) { ?> <?php echo $args['before_widget']?> <?php if ( $title ) { ?><?php echo $args['before_title']?><?php echo $title?><?php echo $args['after_title']?><ul><?php } foreach ($videos as $video) {?><li> <a href='<?php echo $video['url']; ?>' rel='nofollow' target='_blank'><?php echo $video['title']; ?></a> <?php switch( $video['type'] ) { case 'vimeo': $videoinf = unserialize(file_get_contents("http://vimeo.com/api/v2/video/".$video['videoid'].".php")); echo '<p style="text-align:center;display:block;overflow:hidden;"><a href="http://vimeo.com/'.$video['videoid'].'" target="_blank" alt="'.$video['videoid'].'" class="vimeo"><img alt="" src="'.$videoinf[0]['thumbnail_large'].'" width="'.$width.'"></a></p><script>loadVimeo();</script>'; break; case 'youtube': echo '<p style="text-align:center;display:block;overflow:hidden;clear:left"><a href="http://www.youtube.com/watch?v='.$video['videoid'].'" target="_blank" alt="'.$video['videoid'].'" class="youtube"><img src="http://img.youtube.com/vi/'.$video['videoid'].'/0.jpg" width="'.$width.'" /></a></p><script>loadYouTube();</script>'; }?> </li> <?php } ?> </ul> <?php echo $args['after_widget']?> <?php } } function update($new_instance, $old_instance) { $instance = $old_instance; $instance['title'] = strip_tags($new_instance['title']); $instance['width'] = intval($new_instance['width']); $instance['videos'] = $new_instance['videos']; unset($instance['videos']['the__id__']); return $instance; } function form($instance){ global $video_defaults; $instance = wp_parse_args( (array) $instance, $video_defaults ); $get_videos = $instance['videos']; $get_this_id = preg_replace("/[^0-9]/", '', $this->get_field_id('this_id_videos')); $get_this_id = !$get_this_id ? 'this_id_videos___i__' : 'this_id_videos_' . $get_this_id; ?> <script type="text/javascript"> jQuery(document).ready(function(){ jQuery('.new_video').die(); jQuery('.new_video2').die(); jQuery('.delete_video').die(); jQuery('.preview_video').die(); jQuery('.new_video').live('click',function(){ var get_id=jQuery(this).attr('alt'); var new_video_id = 10000+Math.floor(Math.random()*100000); var new_video=jQuery('.videos_new_'+get_id+' .new_video_container div:first').clone(); var new_name=''; jQuery('input', new_video).each(function() { new_name=jQuery(this).attr('name').replace(/the__id__/g, new_video_id); jQuery(this).attr('name',new_name); }); jQuery(new_video).appendTo('.videos_'+get_id); }); jQuery('.delete_video').live('click',function(){ if (confirm('The selected video will be deleted! 
Do you really want to continue?')) { jQuery(this).parents('.tt-clearfix').remove(); } }); jQuery('.preview_banner').live('click', function() { if (jQuery(this).text()=='Preview') { var txtarea=jQuery(this).parents('.tt-clearfix').find('textarea'); var el=jQuery(txtarea).fadeOut().css('display','none').next('div').fadeIn(); jQuery(el).empty(); var bannersource = jQuery(txtarea).val(); jQuery(el).append(''+bannersource+''); jQuery(this).text('Edit'); } else { var txtarea=jQuery(this).parents('.tt-clearfix').find('textarea'); jQuery(txtarea).next('div').fadeOut().css('display','none'); jQuery(txtarea).fadeIn(); jQuery(this).text('Preview'); } }); }); </script> <div style="margin-bottom: 20px;"> <p><label for="<?php echo $this->get_field_id('title')?>">Title:</label><input class="widefat" id="<?php echo $this->get_field_id('title')?>" name="<?php echo $this->get_field_name('title')?>" type="text" value="<?php echo esc_attr($instance['title'])?>" /></p> <p><label for="<?php echo $this->get_field_id('width')?>">Width(px):</label><input class="widefat" id="<?php echo $this->get_field_id('width')?>" name="<?php echo $this->get_field_name('width')?>" type="text" value="<?php echo esc_attr($instance['width'])?>" /></p> <a class="button new_video" alt='<?php echo $get_this_id?>'>Add New Video</a> </div> <div class="videos_<?php echo $get_this_id?>"> <?php if(is_array($get_videos)) { foreach($get_videos as $video_id=>$video_source) { ?> <div class="tt-clearfix " style="padding: 0 0 20px 0; border-bottom: 1px solid #ddd; margin-bottom: 20px;"> <div> <p><label>Title:</label><input alt='title' class="widefat" name="<?php echo $this->get_field_name('videos')?>[<?php echo $video_id ?>][title]" type="text" value="<?php echo $video_source['title']?>" /></p> <p><label>URL:</label><input alt='url' class="widefat" name="<?php echo $this->get_field_name('videos')?>[<?php echo $video_id ?>][url]" type="text" value="<?php echo $video_source['url']?>" /></p> <p><label>Type:</label> <select alt='type' class="widefat" name="<?php echo $this->get_field_name('videos')?>[<?php echo $video_id ?>][type]"> <?php if ($video_source['type']=='vimeo') { ?> <option value='vimeo' selected='selected'>Vimeo</option> <option value='youtube'>YouTube</option> <?php } else { ?> <option value='vimeo'>Vimeo</option> <option value='youtube' selected='selected'>YouTube</option> <?php } ?> </select> </p> <p><label>Video ID:</label><input alt='videoid' class="widefat" name="<?php echo $this->get_field_name('videos')?>[<?php echo $video_id ?>][videoid]" type="text" value="<?php echo $video_source['videoid']?>" /></p> </div> <div style='margin-top:10px;'> <div><a class="button preview_video" alt="<?php echo $this->get_field_id($video_id)?>">Preview</a> <a class="button tt-button-red delete_video" alt="<?php echo $this->get_field_id($video_id)?>">Delete</a></div> </div> </div> <?php } } ?> </div> <div class="videos_new_<?php echo $get_this_id?>"> <div class="new_video_container" style="display: none;"> <div class="tt-clearfix" style="padding: 0 0 20px 0; border-bottom: 1px solid #ddd; margin-bottom: 20px;"> <div> <p><label>Title:</label><input alt='title' class="widefat" name="<?php echo $this->get_field_name('videos')?>[the__id__][title]" type="text" value="" /></p> <p><label>URL:</label><input alt='url' class="widefat" name="<?php echo $this->get_field_name('videos')?>[the__id__][url]" type="text" value="" /></p> <p><label>Type:</label><select alt='type' class="widefat" name="<?php echo $this->get_field_name('videos')?>[the__id__][type]"> 
<option value='vimeo'>Vimeo</option> <option value='youtube'>YouTube</option> </select></p> <p><label>Video ID:</label><input alt='videoid' class="widefat" name="<?php echo $this->get_field_name('videos')?>[the__id__][videoid]" type="text" value="" /></p> </div> <div style='margin-top:10px;'> <div><a class="button preview_banner">Preview</a> <a class="button tt-button-red delete_banner">Delete</a></div> </div> </div> </div> </div> <?php } } add_action('widgets_init', create_function('', 'return register_widget("VideoFeed");')); ?>
bfay/pharma
wp-content/themes/bullsandbears/inc/widgets/video.php
PHP
gpl-2.0
8,968
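The widget form above collects the video URL and the "Video ID" as two separate fields; a hypothetical helper like the following (not part of the bullsandbears theme, and the function name is illustrative) could derive the type and ID from a pasted YouTube or Vimeo URL so the two fields stay consistent.

<?php
/* Hypothetical helper, not part of the theme: derive the widget's 'type'
 * and 'videoid' fields from a pasted URL for the two supported services.
 * Returns array('type' => ..., 'videoid' => ...) or null if unrecognized. */
function videofeed_extract_id( $url ) {
	if ( preg_match( '#youtube\.com/watch\?v=([A-Za-z0-9_-]{6,})#', $url, $m ) )
		return array( 'type' => 'youtube', 'videoid' => $m[1] );
	if ( preg_match( '#youtu\.be/([A-Za-z0-9_-]{6,})#', $url, $m ) )
		return array( 'type' => 'youtube', 'videoid' => $m[1] );
	if ( preg_match( '#vimeo\.com/(\d+)#', $url, $m ) )
		return array( 'type' => 'vimeo', 'videoid' => $m[1] );
	return null;
}
/* Example: videofeed_extract_id('http://www.youtube.com/watch?v=6v2L2UGZJAM')
 * yields array('type' => 'youtube', 'videoid' => '6v2L2UGZJAM'). */
?>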
/******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Contact Information: * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * ******************************************************************************/ #include <linux/etherdevice.h> #include <linux/of_net.h> #include <linux/pci.h> #ifdef CONFIG_SPARC #include <asm/idprom.h> #include <asm/prom.h> #endif /* Local includes */ #include "i40e.h" #include "i40e_diag.h" #if IS_ENABLED(CONFIG_VXLAN) #include <net/vxlan.h> #endif #if IS_ENABLED(CONFIG_GENEVE) #include <net/geneve.h> #endif const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = "Intel(R) Ethernet Connection XL710 Network Driver"; #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 #define DRV_VERSION_BUILD 8 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN const char i40e_driver_version_str[] = DRV_VERSION; static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; /* a bit of forward declarations */ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); static void i40e_handle_reset_warning(struct i40e_pf *pf); static int i40e_add_vsi(struct i40e_vsi *vsi); static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, u16 rss_table_size, u16 rss_size); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); /* i40e_pci_tbl - PCI Device ID Table * * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, i40e_pci_tbl); #define I40E_MAX_VF_COUNT 128 static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); /** * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, u64 size, u32 alignment) { struct i40e_pf *pf = (struct i40e_pf *)hw->back; mem->size = ALIGN(size, alignment); mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; } /** * i40e_free_dma_mem_d - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) { struct i40e_pf *pf = (struct i40e_pf *)hw->back; dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); mem->va = NULL; mem->pa = 0; mem->size = 0; return 0; } /** * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size) { mem->size = size; mem->va = kzalloc(size, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; } /** * i40e_free_virt_mem_d 
- OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) { /* it's ok to kfree a NULL pointer */ kfree(mem->va); mem->va = NULL; mem->size = 0; return 0; } /** * i40e_get_lump - find a lump of free generic resource * @pf: board private structure * @pile: the pile of resource to search * @needed: the number of items needed * @id: an owner id to stick on the items assigned * * Returns the base item index of the lump, or negative for error * * The search_hint trick and lack of advanced fit-finding only work * because we're highly likely to have all the same size lump requests. * Linear search time and any fragmentation should be minimal. **/ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, u16 needed, u16 id) { int ret = -ENOMEM; int i, j; if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { dev_info(&pf->pdev->dev, "param err: pile=%p needed=%d id=0x%04x\n", pile, needed, id); return -EINVAL; } /* start the linear search with an imperfect hint */ i = pile->search_hint; while (i < pile->num_entries) { /* skip already allocated entries */ if (pile->list[i] & I40E_PILE_VALID_BIT) { i++; continue; } /* do we have enough in this lump? */ for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { if (pile->list[i+j] & I40E_PILE_VALID_BIT) break; } if (j == needed) { /* there was enough, so assign it to the requestor */ for (j = 0; j < needed; j++) pile->list[i+j] = id | I40E_PILE_VALID_BIT; ret = i; pile->search_hint = i + j; break; } /* not enough, so skip over it and continue looking */ i += j; } return ret; } /** * i40e_put_lump - return a lump of generic resource * @pile: the pile of resource to search * @index: the base item index * @id: the owner id of the items assigned * * Returns the count of items in the lump **/ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) { int valid_id = (id | I40E_PILE_VALID_BIT); int count = 0; int i; if (!pile || index >= pile->num_entries) return -EINVAL; for (i = index; i < pile->num_entries && pile->list[i] == valid_id; i++) { pile->list[i] = 0; count++; } if (count && index < pile->search_hint) pile->search_hint = index; return count; } /** * i40e_find_vsi_from_id - searches for the vsi with the given id * @pf - the pf structure to search for the vsi * @id - id of the vsi it is searching for **/ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) { int i; for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && (pf->vsi[i]->id == id)) return pf->vsi[i]; return NULL; } /** * i40e_service_event_schedule - Schedule the service task to wake up * @pf: board private structure * * If not already scheduled, this puts the task into the work queue **/ static void i40e_service_event_schedule(struct i40e_pf *pf) { if (!test_bit(__I40E_DOWN, &pf->state) && !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) schedule_work(&pf->service_task); } /** * i40e_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * * If any port has noticed a Tx timeout, it is likely that the whole * device is munged, not just the one netdev port, so go for the full * reset. 
**/ #ifdef I40E_FCOE void i40e_tx_timeout(struct net_device *netdev) #else static void i40e_tx_timeout(struct net_device *netdev) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *tx_ring = NULL; unsigned int i, hung_queue = 0; u32 head, val; pf->tx_timeout_count++; /* find the stopped queue the same way the stack does */ for (i = 0; i < netdev->num_tx_queues; i++) { struct netdev_queue *q; unsigned long trans_start; q = netdev_get_tx_queue(netdev, i); trans_start = q->trans_start ? : netdev->trans_start; if (netif_xmit_stopped(q) && time_after(jiffies, (trans_start + netdev->watchdog_timeo))) { hung_queue = i; break; } } if (i == netdev->num_tx_queues) { netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); } else { /* now that we have an index, find the tx_ring struct */ for (i = 0; i < vsi->num_queue_pairs; i++) { if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { if (hung_queue == vsi->tx_rings[i]->queue_index) { tx_ring = vsi->tx_rings[i]; break; } } } } if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) pf->tx_timeout_recovery_level = 1; /* reset after some time */ else if (time_before(jiffies, (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) return; /* don't do any new action before the next timeout */ if (tx_ring) { head = i40e_get_head(tx_ring); /* Read interrupt register */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) val = rd32(&pf->hw, I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + tx_ring->vsi->base_vector - 1)); else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", vsi->seid, hung_queue, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail), val); } pf->tx_timeout_last_recovery = jiffies; netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", pf->tx_timeout_recovery_level, hung_queue); switch (pf->tx_timeout_recovery_level) { case 1: set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); break; case 2: set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); break; case 3: set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); break; } i40e_service_event_schedule(pf); pf->tx_timeout_recovery_level++; } /** * i40e_release_rx_desc - Store the new tail and head values * @rx_ring: ring to bump * @val: new head index **/ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); writel(val, rx_ring->tail); } /** * i40e_get_vsi_stats_struct - Get System Network Statistics * @vsi: the VSI we care about * * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. **/ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) { return &vsi->net_stats; } /** * i40e_get_netdev_stats_struct - Get statistics for netdev interface * @netdev: network interface device structure * * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. 
**/ #ifdef I40E_FCOE struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct net_device *netdev, struct rtnl_link_stats64 *stats) #else static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct net_device *netdev, struct rtnl_link_stats64 *stats) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); int i; if (test_bit(__I40E_DOWN, &vsi->state)) return stats; if (!vsi->tx_rings) return stats; rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { u64 bytes, packets; unsigned int start; tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; do { start = u64_stats_fetch_begin_irq(&tx_ring->syncp); packets = tx_ring->stats.packets; bytes = tx_ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; rx_ring = &tx_ring[1]; do { start = u64_stats_fetch_begin_irq(&rx_ring->syncp); packets = rx_ring->stats.packets; bytes = rx_ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } rcu_read_unlock(); /* following stats updated by i40e_watchdog_subtask() */ stats->multicast = vsi_stats->multicast; stats->tx_errors = vsi_stats->tx_errors; stats->tx_dropped = vsi_stats->tx_dropped; stats->rx_errors = vsi_stats->rx_errors; stats->rx_dropped = vsi_stats->rx_dropped; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; return stats; } /** * i40e_vsi_reset_stats - Resets all stats of the given vsi * @vsi: the VSI to have its stats reset **/ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) { struct rtnl_link_stats64 *ns; int i; if (!vsi) return; ns = i40e_get_vsi_stats_struct(vsi); memset(ns, 0, sizeof(*ns)); memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); if (vsi->rx_rings && vsi->rx_rings[0]) { for (i = 0; i < vsi->num_queue_pairs; i++) { memset(&vsi->rx_rings[i]->stats, 0, sizeof(vsi->rx_rings[i]->stats)); memset(&vsi->rx_rings[i]->rx_stats, 0, sizeof(vsi->rx_rings[i]->rx_stats)); memset(&vsi->tx_rings[i]->stats, 0, sizeof(vsi->tx_rings[i]->stats)); memset(&vsi->tx_rings[i]->tx_stats, 0, sizeof(vsi->tx_rings[i]->tx_stats)); } } vsi->stat_offsets_loaded = false; } /** * i40e_pf_reset_stats - Reset all of the stats for the given PF * @pf: the PF to be reset **/ void i40e_pf_reset_stats(struct i40e_pf *pf) { int i; memset(&pf->stats, 0, sizeof(pf->stats)); memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); pf->stat_offsets_loaded = false; for (i = 0; i < I40E_MAX_VEB; i++) { if (pf->veb[i]) { memset(&pf->veb[i]->stats, 0, sizeof(pf->veb[i]->stats)); memset(&pf->veb[i]->stats_offsets, 0, sizeof(pf->veb[i]->stats_offsets)); pf->veb[i]->stat_offsets_loaded = false; } } } /** * i40e_stat_update48 - read and update a 48 bit stat from the chip * @hw: ptr to the hardware info * @hireg: the high 32 bit reg to read * @loreg: the low 32 bit reg to read * @offset_loaded: has the initial offset been loaded yet * @offset: ptr to current offset value * @stat: ptr to the stat * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. 
We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. In the process, we also manage * the potential roll-over. **/ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; if (hw->device_id == I40E_DEV_ID_QEMU) { new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; } else { new_data = rd64(hw, loreg); } if (!offset_loaded) *offset = new_data; if (likely(new_data >= *offset)) *stat = new_data - *offset; else *stat = (new_data + BIT_ULL(48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } /** * i40e_stat_update32 - read and update a 32 bit stat from the chip * @hw: ptr to the hardware info * @reg: the hw reg to read * @offset_loaded: has the initial offset been loaded yet * @offset: ptr to current offset value * @stat: ptr to the stat **/ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; new_data = rd32(hw, reg); if (!offset_loaded) *offset = new_data; if (likely(new_data >= *offset)) *stat = (u32)(new_data - *offset); else *stat = (u32)((new_data + BIT_ULL(32)) - *offset); } /** * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. * @vsi: the VSI to be updated **/ void i40e_update_eth_stats(struct i40e_vsi *vsi) { int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; /* Gather up the stats that the hw collects */ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; } /** * i40e_update_veb_stats - Update Switch component statistics * @veb: the VEB being updated **/ static void i40e_update_veb_stats(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *oes; 
struct i40e_eth_stats *es; /* device's eth stats */ struct i40e_veb_tc_stats *veb_oes; struct i40e_veb_tc_stats *veb_es; int i, idx = 0; idx = veb->stats_idx; es = &veb->stats; oes = &veb->stats_offsets; veb_es = &veb->tc_stats; veb_oes = &veb->tc_stats_offsets; /* Gather up the stats that the hw collects */ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), veb->stat_offsets_loaded, &oes->tx_discards, &es->tx_discards); if (hw->revision_id > 0) i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), veb->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx), veb->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx), veb->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx), veb->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx), veb->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx), veb->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx), veb->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx), veb->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx), veb->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx), I40E_GLVEBTC_RPCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_rx_packets[i], &veb_es->tc_rx_packets[i]); i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx), I40E_GLVEBTC_RBCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_rx_bytes[i], &veb_es->tc_rx_bytes[i]); i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx), I40E_GLVEBTC_TPCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_tx_packets[i], &veb_es->tc_tx_packets[i]); i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx), I40E_GLVEBTC_TBCL(i, idx), veb->stat_offsets_loaded, &veb_oes->tc_tx_bytes[i], &veb_es->tc_tx_bytes[i]); } veb->stat_offsets_loaded = true; } #ifdef I40E_FCOE /** * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters. 
* @vsi: the VSI that is capable of doing FCoE **/ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_fcoe_stats *ofs; struct i40e_fcoe_stats *fs; /* device's eth stats */ int idx; if (vsi->type != I40E_VSI_FCOE) return; idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET; fs = &vsi->fcoe_stats; ofs = &vsi->fcoe_stats_offsets; i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx), vsi->fcoe_stat_offsets_loaded, &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets); i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx), vsi->fcoe_stat_offsets_loaded, &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords); i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx), vsi->fcoe_stat_offsets_loaded, &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped); i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx), vsi->fcoe_stat_offsets_loaded, &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets); i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx), vsi->fcoe_stat_offsets_loaded, &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords); i40e_stat_update32(hw, I40E_GL_FCOECRC(idx), vsi->fcoe_stat_offsets_loaded, &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc); i40e_stat_update32(hw, I40E_GL_FCOELAST(idx), vsi->fcoe_stat_offsets_loaded, &ofs->fcoe_last_error, &fs->fcoe_last_error); i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx), vsi->fcoe_stat_offsets_loaded, &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count); vsi->fcoe_stat_offsets_loaded = true; } #endif /** * i40e_update_vsi_stats - Update the vsi statistics counters. * @vsi: the VSI to be updated * * There are a few instances where we store the same stat in a * couple of different structs. This is partly because we have * the netdev stats that need to be filled out, which is slightly * different from the "eth_stats" defined by the chip and used in * VF communications. We sort it out here. 
**/ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct rtnl_link_stats64 *ons; struct rtnl_link_stats64 *ns; /* netdev stats */ struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; struct i40e_ring *p; u32 rx_page, rx_buf; u64 bytes, packets; unsigned int start; u64 tx_linearize; u64 tx_force_wb; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; if (test_bit(__I40E_DOWN, &vsi->state) || test_bit(__I40E_CONFIG_BUSY, &pf->state)) return; ns = i40e_get_vsi_stats_struct(vsi); ons = &vsi->net_stats_offsets; es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; /* Gather up the netdev and vsi stats that the driver collects * on the fly during packet processing */ rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ p = ACCESS_ONCE(vsi->tx_rings[q]); do { start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; } while (u64_stats_fetch_retry_irq(&p->syncp, start)); tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; do { start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; } while (u64_stats_fetch_retry_irq(&p->syncp, start)); rx_b += bytes; rx_p += packets; rx_buf += p->rx_stats.alloc_buff_failed; rx_page += p->rx_stats.alloc_page_failed; } rcu_read_unlock(); vsi->tx_restart = tx_restart; vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; ns->rx_packets = rx_p; ns->rx_bytes = rx_b; ns->tx_packets = tx_p; ns->tx_bytes = tx_b; /* update netdev stats from eth stats */ i40e_update_eth_stats(vsi); ons->tx_errors = oes->tx_errors; ns->tx_errors = es->tx_errors; ons->multicast = oes->rx_multicast; ns->multicast = es->rx_multicast; ons->rx_dropped = oes->rx_discards; ns->rx_dropped = es->rx_discards; ons->tx_dropped = oes->tx_discards; ns->tx_dropped = es->tx_discards; /* pull in a couple PF stats if this is the main vsi */ if (vsi == pf->vsi[pf->lan_vsi]) { ns->rx_crc_errors = pf->stats.crc_errors; ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; ns->rx_length_errors = pf->stats.rx_length_errors; } } /** * i40e_update_pf_stats - Update the PF statistics counters. 
* @pf: the PF to be updated **/ static void i40e_update_pf_stats(struct i40e_pf *pf) { struct i40e_hw_port_stats *osd = &pf->stats_offsets; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw *hw = &pf->hw; u32 val; int i; i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_bytes, &nsd->eth.rx_bytes); i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, &nsd->tx_dropped_link_down); i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, &osd->mac_remote_faults, &nsd->mac_remote_faults); i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); for (i = 0; i < 8; i++) { i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_rx[i], &nsd->priority_xoff_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_rx[i], &nsd->priority_xon_rx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_tx[i], &nsd->priority_xon_tx[i]); i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_tx[i], &nsd->priority_xoff_tx[i]); i40e_stat_update32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_2_xoff[i], &nsd->priority_xon_2_xoff[i]); } i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 
I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); /* FDIR stats */ i40e_stat_update32(hw, I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_atr_match, &nsd->fd_atr_match); i40e_stat_update32(hw, I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_sb_match, &nsd->fd_sb_match); i40e_stat_update32(hw, I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match); val = rd32(hw, I40E_PRTPM_EEE_STAT); nsd->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; nsd->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; i40e_stat_update32(hw, I40E_PRTPM_TLPIC, pf->stat_offsets_loaded, &osd->tx_lpi_count, &nsd->tx_lpi_count); i40e_stat_update32(hw, I40E_PRTPM_RLPIC, pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); if (pf->flags & I40E_FLAG_FD_SB_ENABLED && !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) nsd->fd_sb_status = true; else nsd->fd_sb_status = 
false; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) nsd->fd_atr_status = true; else nsd->fd_atr_status = false; pf->stat_offsets_loaded = true; } /** * i40e_update_stats - Update the various statistics counters. * @vsi: the VSI to be updated * * Update the various stats for this VSI and its related entities. **/ void i40e_update_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; if (vsi == pf->vsi[pf->lan_vsi]) i40e_update_pf_stats(pf); i40e_update_vsi_stats(vsi); #ifdef I40E_FCOE i40e_update_fcoe_stats(vsi); #endif } /** * i40e_find_filter - Search VSI filter list for specific mac/vlan filter * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL **/ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, bool is_vf, bool is_netdev) { struct i40e_mac_filter *f; if (!vsi || !macaddr) return NULL; list_for_each_entry(f, &vsi->mac_filter_list, list) { if ((ether_addr_equal(macaddr, f->macaddr)) && (vlan == f->vlan) && (!is_vf || f->is_vf) && (!is_netdev || f->is_netdev)) return f; } return NULL; } /** * i40e_find_mac - Find a mac addr in the macvlan filters list * @vsi: the VSI to be searched * @macaddr: the MAC address we are searching for * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns the first filter with the provided MAC address or NULL if * MAC address was not found **/ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev) { struct i40e_mac_filter *f; if (!vsi || !macaddr) return NULL; list_for_each_entry(f, &vsi->mac_filter_list, list) { if ((ether_addr_equal(macaddr, f->macaddr)) && (!is_vf || f->is_vf) && (!is_netdev || f->is_netdev)) return f; } return NULL; } /** * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode * @vsi: the VSI to be searched * * Returns true if VSI is in vlan mode or false otherwise **/ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; /* Only -1 for all the filters denotes not in vlan mode * so we have to go through all the list in order to make sure */ list_for_each_entry(f, &vsi->mac_filter_list, list) { if (f->vlan >= 0 || vsi->info.pvid) return true; } return false; } /** * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered * @is_vf: true if it is a VF * @is_netdev: true if it is a netdev * * Goes through all the macvlan filters and adds a * macvlan filter for each unique vlan that already exists * * Returns first filter found on success, else NULL **/ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev) { struct i40e_mac_filter *f; list_for_each_entry(f, &vsi->mac_filter_list, list) { if (vsi->info.pvid) f->vlan = le16_to_cpu(vsi->info.pvid); if (!i40e_find_filter(vsi, macaddr, f->vlan, is_vf, is_netdev)) { if (!i40e_add_filter(vsi, macaddr, f->vlan, is_vf, is_netdev)) return NULL; } } return list_first_entry_or_null(&vsi->mac_filter_list, struct i40e_mac_filter, list); } /** * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS * @vsi: the VSI to be searched * @macaddr: the mac address to be removed * @is_vf: true if it is a VF * @is_netdev: true if it is a 
netdev * * Removes a given MAC address from a VSI, regardless of VLAN * * Returns 0 for success, or error **/ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev) { struct i40e_mac_filter *f = NULL; int changed = 0; WARN(!spin_is_locked(&vsi->mac_filter_list_lock), "Missing mac_filter_list_lock\n"); list_for_each_entry(f, &vsi->mac_filter_list, list) { if ((ether_addr_equal(macaddr, f->macaddr)) && (is_vf == f->is_vf) && (is_netdev == f->is_netdev)) { f->counter--; f->changed = true; changed = 1; } } if (changed) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; return 0; } return -ENOENT; } /** * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address * * Some older firmware configurations set up a default promiscuous VLAN * filter that needs to be removed. **/ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; i40e_status ret; /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) return -EINVAL; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); if (ret) return -ENOENT; return 0; } /** * i40e_add_filter - Add a mac/vlan filter to the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL when no memory available. * * NOTE: This function is expected to be called with mac_filter_list_lock * being held. **/ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, bool is_vf, bool is_netdev) { struct i40e_mac_filter *f; if (!vsi || !macaddr) return NULL; f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) goto add_filter_out; ether_addr_copy(f->macaddr, macaddr); f->vlan = vlan; f->changed = true; INIT_LIST_HEAD(&f->list); list_add(&f->list, &vsi->mac_filter_list); } /* increment counter and add a new flag if needed */ if (is_vf) { if (!f->is_vf) { f->is_vf = true; f->counter++; } } else if (is_netdev) { if (!f->is_netdev) { f->is_netdev = true; f->counter++; } } else { f->counter++; } /* changed tells sync_filters_subtask to * push the filter down to the firmware */ if (f->changed) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } add_filter_out: return f; } /** * i40e_del_filter - Remove a mac/vlan filter from the VSI * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan * @is_vf: make sure it's a VF filter, else doesn't matter * @is_netdev: make sure it's a netdev filter, else doesn't matter * * NOTE: This function is expected to be called with mac_filter_list_lock * being held. 
**/ void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, bool is_vf, bool is_netdev) { struct i40e_mac_filter *f; if (!vsi || !macaddr) return; f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); if (!f || f->counter == 0) return; if (is_vf) { if (f->is_vf) { f->is_vf = false; f->counter--; } } else if (is_netdev) { if (f->is_netdev) { f->is_netdev = false; f->counter--; } } else { /* make sure we don't remove a filter in use by VF or netdev */ int min_f = 0; min_f += (f->is_vf ? 1 : 0); min_f += (f->is_netdev ? 1 : 0); if (f->counter > min_f) f->counter--; } /* counter == 0 tells sync_filters_subtask to * remove the filter from the firmware's list */ if (f->counter == 0) { f->changed = true; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } } /** * i40e_set_mac - NDO callback to set mac address * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ #ifdef I40E_FCOE int i40e_set_mac(struct net_device *netdev, void *p) #else static int i40e_set_mac(struct net_device *netdev, void *p) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct sockaddr *addr = p; struct i40e_mac_filter *f; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); return 0; } if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return -EADDRNOTAVAIL; if (ether_addr_equal(hw->mac.addr, addr->sa_data)) netdev_info(netdev, "returning to hw mac address %pM\n", hw->mac.addr); else netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; ret = i40e_aq_mac_address_write(&vsi->back->hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); if (ret) { netdev_info(netdev, "Addr change for Main VSI failed: %d\n", ret); return -EADDRNOTAVAIL; } } if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) { struct i40e_aqc_remove_macvlan_element_data element; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, netdev->dev_addr); element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { spin_lock_bh(&vsi->mac_filter_list_lock); i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); } if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { struct i40e_aqc_add_macvlan_element_data element; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, hw->mac.addr); element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } else { spin_lock_bh(&vsi->mac_filter_list_lock); f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false); if (f) f->is_laa = true; spin_unlock_bh(&vsi->mac_filter_list_lock); } ether_addr_copy(netdev->dev_addr, addr->sa_data); return i40e_sync_vsi_filters(vsi); } /** * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc * @vsi: the VSI being setup * @ctxt: VSI context structure * @enabled_tc: Enabled TCs bitmap * @is_add: True if called before Add VSI * * Setup VSI queue mapping for enabled traffic classes. 
**/ #ifdef I40E_FCOE void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc, bool is_add) #else static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc, bool is_add) #endif { struct i40e_pf *pf = vsi->back; u16 sections = 0; u8 netdev_tc = 0; u16 numtc = 0; u16 qcount; u8 offset; u16 qmap; int i; u16 num_tc_qps = 0; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; offset = 0; if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) /* TC is enabled */ numtc++; } if (!numtc) { dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); numtc = 1; } } else { /* At least TC0 is enabled in case of non-DCB case */ numtc = 1; } vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ /* In MFP case we can have a much lower count of MSIx * vectors available and so we need to lower the used * q count. */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); else qcount = vsi->alloc_queue_pairs; num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */ int pow, num_qps; switch (vsi->type) { case I40E_VSI_MAIN: qcount = min_t(int, pf->alloc_rss_size, num_tc_qps); break; #ifdef I40E_FCOE case I40E_VSI_FCOE: qcount = num_tc_qps; break; #endif case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: default: qcount = num_tc_qps; WARN_ON(i != 0); break; } vsi->tc_config.tc_info[i].qoffset = offset; vsi->tc_config.tc_info[i].qcount = qcount; /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; while (num_qps && (BIT_ULL(pow) < qcount)) { pow++; num_qps >>= 1; } vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); offset += qcount; } else { /* TC is not enabled so set the offset to * default queue and allocate one queue * for the given TC. 
*/ vsi->tc_config.tc_info[i].qoffset = 0; vsi->tc_config.tc_info[i].qcount = 1; vsi->tc_config.tc_info[i].netdev_tc = 0; qmap = 0; } ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset; if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; else if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_queue_pairs = pf->num_lan_msix; } /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { sections |= I40E_AQ_VSI_PROP_SCHED_VALID; ctxt->info.up_enable_bits = enabled_tc; } if (vsi->type == I40E_VSI_SRIOV) { ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); for (i = 0; i < vsi->num_queue_pairs; i++) ctxt->info.queue_mapping[i] = cpu_to_le16(vsi->base_queue + i); } else { ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); } ctxt->info.valid_sections |= cpu_to_le16(sections); } /** * i40e_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ #ifdef I40E_FCOE void i40e_set_rx_mode(struct net_device *netdev) #else static void i40e_set_rx_mode(struct net_device *netdev) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_mac_filter *f, *ftmp; struct i40e_vsi *vsi = np->vsi; struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; struct netdev_hw_addr *ha; spin_lock_bh(&vsi->mac_filter_list_lock); /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { if (!i40e_find_mac(vsi, uca->addr, false, true)) { if (i40e_is_vsi_in_vlan(vsi)) i40e_put_mac_in_vlan(vsi, uca->addr, false, true); else i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY, false, true); } } netdev_for_each_mc_addr(mca, netdev) { if (!i40e_find_mac(vsi, mca->addr, false, true)) { if (i40e_is_vsi_in_vlan(vsi)) i40e_put_mac_in_vlan(vsi, mca->addr, false, true); else i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY, false, true); } } /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!f->is_netdev) continue; netdev_for_each_mc_addr(mca, netdev) if (ether_addr_equal(mca->addr, f->macaddr)) goto bottom_of_search_loop; netdev_for_each_uc_addr(uca, netdev) if (ether_addr_equal(uca->addr, f->macaddr)) goto bottom_of_search_loop; for_each_dev_addr(netdev, ha) if (ether_addr_equal(ha->addr, f->macaddr)) goto bottom_of_search_loop; /* f->macaddr wasn't found in uc, mc, or ha list so delete it */ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true); bottom_of_search_loop: continue; } spin_unlock_bh(&vsi->mac_filter_list_lock); /* check for other flag changes */ if (vsi->current_netdev_flags != vsi->netdev->flags) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } } /** * i40e_mac_filter_entry_clone - Clones a MAC filter entry * @src: source MAC filter entry to be clones * * Returns the pointer to newly cloned MAC filter entry or NULL * in case of error **/ static struct i40e_mac_filter *i40e_mac_filter_entry_clone( struct i40e_mac_filter *src) { struct i40e_mac_filter *f; f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) return NULL; *f = *src; INIT_LIST_HEAD(&f->list); return f; } /** * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries * @vsi: pointer to vsi struct * @from: Pointer to list which contains MAC filter entries - changes to * those entries needs to be undone. 
* * MAC filter entries from list were slated to be removed from device. **/ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, struct list_head *from) { struct i40e_mac_filter *f, *ftmp; list_for_each_entry_safe(f, ftmp, from, list) { f->changed = true; /* Move the element back into MAC filter list*/ list_move_tail(&f->list, &vsi->mac_filter_list); } } /** * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries * @vsi: pointer to vsi struct * * MAC filter entries from list were slated to be added from device. **/ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi) { struct i40e_mac_filter *f, *ftmp; list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!f->changed && f->counter) f->changed = true; } } /** * i40e_cleanup_add_list - Deletes the element from add list and release * memory * @add_list: Pointer to list which contains MAC filter entries **/ static void i40e_cleanup_add_list(struct list_head *add_list) { struct i40e_mac_filter *f, *ftmp; list_for_each_entry_safe(f, ftmp, add_list, list) { list_del(&f->list); kfree(f); } } /** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI * * Push any outstanding VSI filter changes through the AdminQ. * * Returns 0 or error value **/ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { struct list_head tmp_del_list, tmp_add_list; struct i40e_mac_filter *f, *ftmp, *fclone; bool promisc_forced_on = false; bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; i40e_status aq_ret = 0; bool err_cond = false; int retval = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; int aq_err = 0; u16 cmd_flags; /* empty array typed pointers, kcalloc later */ struct i40e_aqc_add_macvlan_element_data *add_list; struct i40e_aqc_remove_macvlan_element_data *del_list; while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) usleep_range(1000, 2000); pf = vsi->back; if (vsi->netdev) { changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; vsi->current_netdev_flags = vsi->netdev->flags; } INIT_LIST_HEAD(&tmp_del_list); INIT_LIST_HEAD(&tmp_add_list); if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!f->changed) continue; if (f->counter != 0) continue; f->changed = false; /* Move the element into temporary del_list */ list_move_tail(&f->list, &tmp_del_list); } list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!f->changed) continue; if (f->counter == 0) continue; f->changed = false; /* Clone MAC filter entry and add into temporary list */ fclone = i40e_mac_filter_entry_clone(f); if (!fclone) { err_cond = true; break; } list_add_tail(&fclone->list, &tmp_add_list); } /* if failed to clone MAC filter entry - undo */ if (err_cond) { i40e_undo_del_filter_entries(vsi, &tmp_del_list); i40e_undo_add_filter_entries(vsi); } spin_unlock_bh(&vsi->mac_filter_list_lock); if (err_cond) { i40e_cleanup_add_list(&tmp_add_list); retval = -ENOMEM; goto out; } } /* Now process 'del_list' outside the lock */ if (!list_empty(&tmp_del_list)) { int del_list_size; filter_list_len = pf->hw.aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list = kzalloc(del_list_size, GFP_KERNEL); if (!del_list) { i40e_cleanup_add_list(&tmp_add_list); /* Undo VSI's MAC filter entry element updates */ 
spin_lock_bh(&vsi->mac_filter_list_lock); i40e_undo_del_filter_entries(vsi, &tmp_del_list); i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); retval = -ENOMEM; goto out; } list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { cmd_flags = 0; /* add to delete list */ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); del_list[num_del].vlan_tag = cpu_to_le16((u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan)); cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; del_list[num_del].flags = cmd_flags; num_del++; /* flush a full buffer */ if (num_del == filter_list_len) { aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, del_list, num_del, NULL); aq_err = pf->hw.aq.asq_last_status; num_del = 0; memset(del_list, 0, del_list_size); if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { retval = -EIO; dev_err(&pf->pdev->dev, "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); } } /* Release memory for MAC filter entries which were * synced up with HW. */ list_del(&f->list); kfree(f); } if (num_del) { aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, del_list, num_del, NULL); aq_err = pf->hw.aq.asq_last_status; num_del = 0; if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, "ignoring delete macvlan error, err %s aq_err %s\n", i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); } kfree(del_list); del_list = NULL; } if (!list_empty(&tmp_add_list)) { int add_list_size; /* do all the adds now */ filter_list_len = pf->hw.aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data), add_list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); add_list = kzalloc(add_list_size, GFP_KERNEL); if (!add_list) { /* Purge element from temporary lists */ i40e_cleanup_add_list(&tmp_add_list); /* Undo add filter entries from VSI MAC filter list */ spin_lock_bh(&vsi->mac_filter_list_lock); i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); retval = -ENOMEM; goto out; } list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { add_happened = true; cmd_flags = 0; /* add to add array */ ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); add_list[num_add].vlan_tag = cpu_to_le16( (u16)(f->vlan == I40E_VLAN_ANY ? 
0 : f->vlan)); add_list[num_add].queue_number = 0; cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; add_list[num_add].flags = cpu_to_le16(cmd_flags); num_add++; /* flush a full buffer */ if (num_add == filter_list_len) { aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, add_list, num_add, NULL); aq_err = pf->hw.aq.asq_last_status; num_add = 0; if (aq_ret) break; memset(add_list, 0, add_list_size); } /* Entries from tmp_add_list were cloned from MAC * filter list, hence clean those cloned entries */ list_del(&f->list); kfree(f); } if (num_add) { aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, add_list, num_add, NULL); aq_err = pf->hw.aq.asq_last_status; num_add = 0; } kfree(add_list); add_list = NULL; if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { retval = i40e_aq_rc_to_posix(aq_ret, aq_err); dev_info(&pf->pdev->dev, "add filter failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { promisc_forced_on = true; set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); } } } /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, vsi->seid, cur_multipromisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set multi promisc failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)); if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN * promisc behavior but will not get VF or VMDq traffic * replicated on the Main VSI. 
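 * A PF reset is requested below so the new default-port setting can be
 * applied as part of the rebuild.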
*/ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous( &vsi->back->hw, vsi->seid, cur_promisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set unicast promisc failed, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( &vsi->back->hw, vsi->seid, cur_promisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set multicast promisc failed, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); } } aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, vsi->seid, cur_promisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set brdcast promisc failed, err %s, aq_err %s\n", i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } out: clear_bit(__I40E_CONFIG_BUSY, &vsi->state); return retval; } /** * i40e_sync_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ static void i40e_sync_filters_subtask(struct i40e_pf *pf) { int v; if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) return; pf->flags &= ~I40E_FLAG_FILTER_SYNC; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { int ret = i40e_sync_vsi_filters(pf->vsi[v]); if (ret) { /* come back and try again later */ pf->flags |= I40E_FLAG_FILTER_SYNC; break; } } } } /** * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) { struct i40e_netdev_priv *np = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct i40e_vsi *vsi = np->vsi; /* MTU < 68 is an error and causes problems on some kernels */ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) return -EINVAL; netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); return 0; } /** * i40e_ioctl - Access the hwtstamp interface * @netdev: network interface device structure * @ifr: interface request data * @cmd: ioctl command **/ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; switch (cmd) { case SIOCGHWTSTAMP: return i40e_ptp_get_ts_config(pf, ifr); case SIOCSHWTSTAMP: return i40e_ptp_set_ts_config(pf, ifr); default: return -EOPNOTSUPP; } } /** * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI * @vsi: the vsi being adjusted **/ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) { struct i40e_vsi_context ctxt; i40e_status ret; if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) return; /* already enabled */ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "update vlan stripping failed, err %s 
aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } } /** * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI * @vsi: the vsi being adjusted **/ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) { struct i40e_vsi_context ctxt; i40e_status ret; if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == I40E_AQ_VSI_PVLAN_EMOD_MASK)) return; /* already disabled */ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_NOTHING; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "update vlan stripping failed, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } } /** * i40e_vlan_rx_register - Setup or shutdown vlan offload * @netdev: network interface to be adjusted * @features: netdev features to test if VLAN offload is enabled or not **/ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; if (features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); } /** * i40e_vsi_add_vlan - Add vsi membership for given vlan * @vsi: the vsi being configured * @vid: vlan id to be added (0 = untagged only , -1 = any) **/ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) { struct i40e_mac_filter *f, *add_f; bool is_netdev, is_vf; is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(vsi->netdev); /* Locked once because all functions invoked below iterates list*/ spin_lock_bh(&vsi->mac_filter_list_lock); if (is_netdev) { add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, vsi->netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } list_for_each_entry(f, &vsi->mac_filter_list, list) { add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add vlan filter %d for %pM\n", vid, f->macaddr); spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } /* Now if we add a vlan tag, make sure to check if it is the first * tag (i.e. 
a "tag" -1 does exist) and if so replace the -1 "tag" * with 0, so we now accept untagged and specified tagged traffic * (and not any taged and untagged) */ if (vid > 0) { if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, I40E_VLAN_ANY, is_vf, is_netdev)) { i40e_del_filter(vsi, vsi->netdev->dev_addr, I40E_VLAN_ANY, is_vf, is_netdev); add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0, is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add filter 0 for %pM\n", vsi->netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } } /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ if (vid > 0 && !vsi->info.pvid) { list_for_each_entry(f, &vsi->mac_filter_list, list) { if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev)) continue; i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev); add_f = i40e_add_filter(vsi, f->macaddr, 0, is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add filter 0 for %pM\n", f->macaddr); spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } } spin_unlock_bh(&vsi->mac_filter_list_lock); /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(vsi->back); return 0; } /** * i40e_vsi_kill_vlan - Remove vsi membership for given vlan * @vsi: the vsi being configured * @vid: vlan id to be removed (0 = untagged only , -1 = any) * * Return: 0 on success or negative otherwise **/ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) { struct net_device *netdev = vsi->netdev; struct i40e_mac_filter *f, *add_f; bool is_vf, is_netdev; int filter_count = 0; is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(netdev); /* Locked once because all functions invoked below iterates list */ spin_lock_bh(&vsi->mac_filter_list_lock); if (is_netdev) i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); list_for_each_entry(f, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); /* go through all the filters for this VSI and if there is only * vid == 0 it means there are no other filters, so vid 0 must * be replaced with -1. 
This signifies that we should from now * on accept any traffic (with any tag present, or untagged) */ list_for_each_entry(f, &vsi->mac_filter_list, list) { if (is_netdev) { if (f->vlan && ether_addr_equal(netdev->dev_addr, f->macaddr)) filter_count++; } if (f->vlan) filter_count++; } if (!filter_count && is_netdev) { i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, is_vf, is_netdev); if (!f) { dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, netdev->dev_addr); spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } if (!filter_count) { list_for_each_entry(f, &vsi->mac_filter_list, list) { i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, "Could not add filter %d for %pM\n", I40E_VLAN_ANY, f->macaddr); spin_unlock_bh(&vsi->mac_filter_list_lock); return -ENOMEM; } } } spin_unlock_bh(&vsi->mac_filter_list_lock); /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(vsi->back); return 0; } /** * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload * @netdev: network interface to be adjusted * @vid: vlan id to be added * * net_device_ops implementation for adding vlan ids **/ #ifdef I40E_FCOE int i40e_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) #else static int i40e_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; int ret = 0; if (vid > 4095) return -EINVAL; netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); /* If the network stack called us with vid = 0 then * it is asking to receive priority tagged packets with * vlan id 0. Our HW receives them by default when configured * to receive untagged packets so there is no need to add an * extra filter for vlan 0 tagged packets. 
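 * For vid 0 the filter add is therefore skipped and only the bit in
 * active_vlans is recorded below.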
*/ if (vid) ret = i40e_vsi_add_vlan(vsi, vid); if (!ret && (vid < VLAN_N_VID)) set_bit(vid, vsi->active_vlans); return ret; } /** * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload * @netdev: network interface to be adjusted * @vid: vlan id to be removed * * net_device_ops implementation for removing vlan ids **/ #ifdef I40E_FCOE int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) #else static int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); /* return code is ignored as there is nothing a user * can do about failure to remove and a log message was * already printed from the other function */ i40e_vsi_kill_vlan(vsi, vid); clear_bit(vid, vsi->active_vlans); return 0; } /** * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up * @vsi: the vsi being brought back up **/ static void i40e_restore_vlan(struct i40e_vsi *vsi) { u16 vid; if (!vsi->netdev) return; i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid); } /** * i40e_vsi_add_pvid - Add pvid for the VSI * @vsi: the vsi being adjusted * @vid: the vlan id to set as a PVID **/ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) { struct i40e_vsi_context ctxt; i40e_status ret; vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.pvid = cpu_to_le16(vid); vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | I40E_AQ_VSI_PVLAN_INSERT_PVID | I40E_AQ_VSI_PVLAN_EMOD_STR; ctxt.seid = vsi->seid; ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "add pvid failed, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); return -ENOENT; } return 0; } /** * i40e_vsi_remove_pvid - Remove the pvid from the VSI * @vsi: the vsi being adjusted * * Just use the vlan_rx_register() service to put it back to normal **/ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) { i40e_vlan_stripping_disable(vsi); vsi->info.pvid = 0; } /** * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources * @vsi: ptr to the VSI * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) { int i, err = 0; for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); return err; } /** * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues * @vsi: ptr to the VSI * * Free VSI's transmit software resources **/ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) { int i; if (!vsi->tx_rings) return; for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) i40e_free_tx_resources(vsi->tx_rings[i]); } /** * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources * @vsi: ptr to the VSI * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. 
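 * (typically done by calling i40e_vsi_free_rx_resources())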
* * Return 0 on success, negative on failure **/ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) { int i, err = 0; for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); #ifdef I40E_FCOE i40e_fcoe_setup_ddp_resources(vsi); #endif return err; } /** * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues * @vsi: ptr to the VSI * * Free all receive software resources **/ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) { int i; if (!vsi->rx_rings) return; for (i = 0; i < vsi->num_queue_pairs; i++) if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) i40e_free_rx_resources(vsi->rx_rings[i]); #ifdef I40E_FCOE i40e_fcoe_free_ddp_resources(vsi); #endif } /** * i40e_config_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure * * This enables/disables XPS for a given Tx descriptor ring * based on the TCs enabled for the VSI that ring belongs to. **/ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; cpumask_var_t mask; if (!ring->q_vector || !ring->netdev) return; /* Single TC mode enable XPS */ if (vsi->tc_config.numtc <= 1) { if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, ring->queue_index); } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { /* Disable XPS to allow selection based on TC */ bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); netif_set_xps_queue(ring->netdev, mask, ring->queue_index); free_cpumask_var(mask); } /* schedule our worker thread which will take care of * applying the new filter changes */ i40e_service_event_schedule(vsi->back); } /** * i40e_configure_tx_ring - Configure a transmit ring context and rest * @ring: The Tx ring to configure * * Configure the Tx descriptor ring in the HMC context. **/ static int i40e_configure_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; u16 pf_q = vsi->base_queue + ring->queue_index; struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_txq tx_ctx; i40e_status err = 0; u32 qtx_ctl = 0; /* some ATR related tx ring init */ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { ring->atr_sample_rate = vsi->back->atr_sample_rate; ring->atr_count = 0; } else { ring->atr_sample_rate = 0; } /* configure XPS */ i40e_config_xps_tx_ring(ring); /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(tx_ctx)); tx_ctx.new_context = 1; tx_ctx.base = (ring->dma / 128); tx_ctx.qlen = ring->count; tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)); #ifdef I40E_FCOE tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); #endif tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); /* FDIR VSI tx ring can still use RS bit and writebacks */ if (vsi->type != I40E_VSI_FDIR) tx_ctx.head_wb_ena = 1; tx_ctx.head_wb_addr = ring->dma + (ring->count * sizeof(struct i40e_tx_desc)); /* As part of VSI creation/update, FW allocates certain * Tx arbitration queue sets for each TC enabled for * the VSI. The FW returns the handles to these queue * sets as part of the response buffer to Add VSI, * Update VSI, etc. AQ commands. It is expected that * these queue set handles be associated with the Tx * queues by the driver as part of the TX queue context * initialization. This has to be done regardless of * DCB as by default everything is mapped to TC0. 
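 * The qs_handle for this ring's traffic class is copied into the
 * rdylist field just below.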
*/ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); tx_ctx.rdylist_act = 0; /* clear the context in the HMC */ err = i40e_clear_lan_tx_queue_context(hw, pf_q); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* set the context in the HMC */ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* Now associate this queue with this PCI function */ if (vsi->type == I40E_VSI_VMDQ2) { qtx_ctl = I40E_QTX_CTL_VM_QUEUE; qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & I40E_QTX_CTL_VFVM_INDX_MASK; } else { qtx_ctl = I40E_QTX_CTL_PF_QUEUE; } qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); i40e_flush(hw); /* cache tail off for easier writes later */ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); return 0; } /** * i40e_configure_rx_ring - Configure a receive ring context * @ring: The Rx ring to configure * * Configure the Rx descriptor ring in the HMC context. **/ static int i40e_configure_rx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; u16 pf_q = vsi->base_queue + ring->queue_index; struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_rxq rx_ctx; i40e_status err = 0; ring->state = 0; /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(rx_ctx)); ring->rx_buf_len = vsi->rx_buf_len; ring->rx_hdr_len = vsi->rx_hdr_len; rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; rx_ctx.base = (ring->dma / 128); rx_ctx.qlen = ring->count; if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { set_ring_16byte_desc_enabled(ring); rx_ctx.dsize = 0; } else { rx_ctx.dsize = 1; } rx_ctx.dtype = vsi->dtype; if (vsi->dtype) { set_ring_ps_enabled(ring); rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | I40E_RX_SPLIT_IP | I40E_RX_SPLIT_TCP_UDP | I40E_RX_SPLIT_SCTP; } else { rx_ctx.hsplit_0 = 0; } rx_ctx.rxmax = min_t(u16, vsi->max_frame, (chain_len * ring->rx_buf_len)); if (hw->revision_id == 0) rx_ctx.lrxqthresh = 0; else rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; /* this controls whether VLAN is stripped from inner headers */ rx_ctx.showiv = 0; #ifdef I40E_FCOE rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); #endif /* set the prefena field to 1 because the manual says to */ rx_ctx.prefena = 1; /* clear the context in the HMC */ err = i40e_clear_lan_rx_queue_context(hw, pf_q); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* set the context in the HMC */ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); if (err) { dev_info(&vsi->back->pdev->dev, "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", ring->queue_index, pf_q, err); return -ENOMEM; } /* cache tail for quicker writes, and clear the reg before use */ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); if (ring_is_ps_enabled(ring)) { i40e_alloc_rx_headers(ring); i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); } else { i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); } return 0; } /** * i40e_vsi_configure_tx - Configure the VSI 
for Tx * @vsi: VSI structure describing this set of rings and resources * * Configure the Tx VSI for operation. **/ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) { int err = 0; u16 i; for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) err = i40e_configure_tx_ring(vsi->tx_rings[i]); return err; } /** * i40e_vsi_configure_rx - Configure the VSI for Rx * @vsi: the VSI being configured * * Configure the Rx VSI for operation. **/ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) { int err = 0; u16 i; if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) vsi->max_frame = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; else vsi->max_frame = I40E_RXBUFFER_2048; /* figure out correct receive buffer length */ switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | I40E_FLAG_RX_PS_ENABLED)) { case I40E_FLAG_RX_1BUF_ENABLED: vsi->rx_hdr_len = 0; vsi->rx_buf_len = vsi->max_frame; vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; break; case I40E_FLAG_RX_PS_ENABLED: vsi->rx_hdr_len = I40E_RX_HDR_SIZE; vsi->rx_buf_len = I40E_RXBUFFER_2048; vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; break; default: vsi->rx_hdr_len = I40E_RX_HDR_SIZE; vsi->rx_buf_len = I40E_RXBUFFER_2048; vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; break; } #ifdef I40E_FCOE /* setup rx buffer for FCoE */ if ((vsi->type == I40E_VSI_FCOE) && (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { vsi->rx_hdr_len = 0; vsi->rx_buf_len = I40E_RXBUFFER_3072; vsi->max_frame = I40E_RXBUFFER_3072; vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; } #endif /* I40E_FCOE */ /* round up for the chip's needs */ vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) err = i40e_configure_rx_ring(vsi->rx_rings[i]); return err; } /** * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC * @vsi: ptr to the VSI **/ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) { struct i40e_ring *tx_ring, *rx_ring; u16 qoffset, qcount; int i, n; if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Reset the TC information */ for (i = 0; i < vsi->num_queue_pairs; i++) { rx_ring = vsi->rx_rings[i]; tx_ring = vsi->tx_rings[i]; rx_ring->dcb_tc = 0; tx_ring->dcb_tc = 0; } } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) continue; qoffset = vsi->tc_config.tc_info[n].qoffset; qcount = vsi->tc_config.tc_info[n].qcount; for (i = qoffset; i < (qoffset + qcount); i++) { rx_ring = vsi->rx_rings[i]; tx_ring = vsi->tx_rings[i]; rx_ring->dcb_tc = n; tx_ring->dcb_tc = n; } } } /** * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI * @vsi: ptr to the VSI **/ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) { if (vsi->netdev) i40e_set_rx_mode(vsi->netdev); } /** * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters * @vsi: Pointer to the targeted VSI * * This function replays the hlist on the hw where all the SB Flow Director * filters were saved. 
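 * It is a no-op when the sideband Flow Director feature is disabled.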
**/ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) { struct i40e_fdir_filter *filter; struct i40e_pf *pf = vsi->back; struct hlist_node *node; if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; hlist_for_each_entry_safe(filter, node, &pf->fdir_filter_list, fdir_node) { i40e_add_del_fdir(vsi, filter, true); } } /** * i40e_vsi_configure - Set up the VSI for action * @vsi: the VSI being configured **/ static int i40e_vsi_configure(struct i40e_vsi *vsi) { int err; i40e_set_vsi_rx_mode(vsi); i40e_restore_vlan(vsi); i40e_vsi_config_dcb_rings(vsi); err = i40e_vsi_configure_tx(vsi); if (!err) err = i40e_vsi_configure_rx(vsi); return err; } /** * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured **/ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 vector; int i, q; u32 qp; /* The interrupt indexing is offset by 1 in the PFINT_ITRn * and PFINT_LNKLSTn registers, e.g.: * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) */ qp = vsi->base_queue; vector = vsi->base_vector; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[i]; q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); wr32(hw, I40E_PFINT_RATEN(vector - 1), INTRL_USEC_TO_REG(vsi->int_rate_limit)); /* Linked list for the queuepairs assigned to this vector */ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); for (q = 0; q < q_vector->num_ringpairs; q++) { u32 val; val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(qp), val); val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); /* Terminate the linked list */ if (q == (q_vector->num_ringpairs - 1)) val |= (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(qp), val); qp++; } } i40e_flush(hw); } /** * i40e_enable_misc_int_causes - enable the non-queue interrupts * @hw: ptr to the hardware info **/ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; /* clear things first */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ rd32(hw, I40E_PFINT_ICR0); /* read to clear */ val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_GPIO_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; if (pf->flags & I40E_FLAG_IWARP_ENABLED) val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; if (pf->flags & I40E_FLAG_PTP) val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); /* OTHER_ITR_IDX = 0 */ 
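/* i.e. the non-queue ("other") interrupt causes stay tied to ITR index 0 */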
wr32(hw, I40E_PFINT_STAT_CTL0, 0); } /** * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW * @vsi: the VSI being configured **/ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) { struct i40e_q_vector *q_vector = vsi->q_vectors[0]; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u32 val; /* set the ITR configuration */ q_vector->itr_countdown = ITR_COUNTDOWN_START; q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); i40e_enable_misc_int_causes(pf); /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); /* Associate the queue pair to the vector and enable the queue int */ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(0), val); val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(0), val); i40e_flush(hw); } /** * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 * @pf: board private structure **/ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; wr32(hw, I40E_PFINT_DYN_CTL0, I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); i40e_flush(hw); } /** * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure **/ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; val = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, val); i40e_flush(hw); } /** * i40e_irq_dynamic_disable - Disable default interrupt generation settings * @vsi: pointer to a vsi * @vector: disable a particular Hw Interrupt vector **/ void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u32 val; val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); i40e_flush(hw); } /** * i40e_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number * @data: pointer to a q_vector **/ static irqreturn_t i40e_msix_clean_rings(int irq, void *data) { struct i40e_q_vector *q_vector = data; if (!q_vector->tx.ring && !q_vector->rx.ring) return IRQ_HANDLED; napi_schedule_irqoff(&q_vector->napi); return IRQ_HANDLED; } /** * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts * @vsi: the VSI being configured * @basename: name for the vector * * Allocates MSI-X vectors and requests interrupts from the kernel. 
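 * On failure, any vectors already requested are freed again before returning.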
**/ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct i40e_pf *pf = vsi->back; int base = vsi->base_vector; int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "TxRx", rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "rx", rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, "tx", tx_int_idx++); } else { /* skip this unused q_vector */ continue; } err = request_irq(pf->msix_entries[base + vector].vector, vsi->irq_handler, 0, q_vector->name, q_vector); if (err) { dev_info(&pf->pdev->dev, "MSIX request_irq failed, error: %d\n", err); goto free_queue_irqs; } /* assign the mask for this irq */ irq_set_affinity_hint(pf->msix_entries[base + vector].vector, &q_vector->affinity_mask); } vsi->irqs_ready = true; return 0; free_queue_irqs: while (vector) { vector--; irq_set_affinity_hint(pf->msix_entries[base + vector].vector, NULL); free_irq(pf->msix_entries[base + vector].vector, &(vsi->q_vectors[vector])); } return err; } /** * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI * @vsi: the VSI being un-configured **/ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int base = vsi->base_vector; int i; for (i = 0; i < vsi->num_queue_pairs; i++) { wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); } if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = vsi->base_vector; i < (vsi->num_q_vectors + vsi->base_vector); i++) wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); i40e_flush(hw); for (i = 0; i < vsi->num_q_vectors; i++) synchronize_irq(pf->msix_entries[i + base].vector); } else { /* Legacy and MSI mode - this stops all interrupt handling */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); wr32(hw, I40E_PFINT_DYN_CTL0, 0); i40e_flush(hw); synchronize_irq(pf->pdev->irq); } } /** * i40e_vsi_enable_irq - Enable IRQ for the given VSI * @vsi: the VSI being configured **/ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { i40e_irq_dynamic_enable_icr0(pf); } i40e_flush(&pf->hw); return 0; } /** * i40e_stop_misc_vector - Stop the vector that handles non-queue events * @pf: board private structure **/ static void i40e_stop_misc_vector(struct i40e_pf *pf) { /* Disable ICR 0 */ wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); i40e_flush(&pf->hw); } /** * i40e_intr - MSI/Legacy and non-queue interrupt handler * @irq: interrupt number * @data: pointer to a q_vector * * This is the handler used for all MSI/Legacy interrupts, and deals * with both queue and non-queue interrupts. This is also used in * MSIX mode to handle the non-queue interrupts. 
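 * Returns IRQ_NONE when no enabled interrupt event is pending,
 * IRQ_HANDLED otherwise.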
**/ static irqreturn_t i40e_intr(int irq, void *data) { struct i40e_pf *pf = (struct i40e_pf *)data; struct i40e_hw *hw = &pf->hw; irqreturn_t ret = IRQ_NONE; u32 icr0, icr0_remaining; u32 val, ena_mask; icr0 = rd32(hw, I40E_PFINT_ICR0); ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* if sharing a legacy IRQ, we might get called w/o an intr pending */ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) goto enable_intr; /* if interrupt but no bits showing, must be SWINT */ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n"); } /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* temporarily disable queue cause for NAPI processing */ u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_RQCTL(0), qval); qval = rd32(hw, I40E_QINT_TQCTL(0)); qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_TQCTL(0), qval); if (!test_bit(__I40E_DOWN, &pf->state)) napi_schedule_irqoff(&q_vector->napi); } if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); } if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); } if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); } if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; val = rd32(hw, I40E_GLGEN_RSTAT); val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; if (val == I40E_RESET_CORER) { pf->corer_count++; } else if (val == I40E_RESET_GLOBR) { pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); } } if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; dev_info(&pf->pdev->dev, "HMC error interrupt\n"); dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", rd32(hw, I40E_PFHMC_ERRORINFO), rd32(hw, I40E_PFHMC_ERRORDATA)); } if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; i40e_ptp_tx_hwtstamp(pf); } } /* If a critical error is pending we have no choice but to reset the * device. * Report and mask out any remaining unexpected interrupts. 
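 * (the reset itself is carried out later by the service task)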
*/ icr0_remaining = icr0 & ena_mask; if (icr0_remaining) { dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", icr0_remaining); if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { dev_info(&pf->pdev->dev, "device will be reset\n"); set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_service_event_schedule(pf); } ena_mask &= ~icr0_remaining; } ret = IRQ_HANDLED; enable_intr: /* re-enable interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); if (!test_bit(__I40E_DOWN, &pf->state)) { i40e_service_event_schedule(pf); i40e_irq_dynamic_enable_icr0(pf); } return ret; } /** * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. the clean is finished) **/ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) { struct i40e_vsi *vsi = tx_ring->vsi; u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_desc; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ read_barrier_depends(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) break; /* clear next_to_watch to prevent false hangs */ tx_buf->next_to_watch = NULL; tx_desc->buffer_addr = 0; tx_desc->cmd_type_offset_bsz = 0; /* move past filter desc */ tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) kfree(tx_buf->raw_buf); tx_buf->raw_buf = NULL; tx_buf->tx_flags = 0; tx_buf->next_to_watch = NULL; dma_unmap_len_set(tx_buf, len, 0); tx_desc->buffer_addr = 0; tx_desc->cmd_type_offset_bsz = 0; /* move us past the eop_desc for start of next FD desc */ tx_buf++; tx_desc++; i++; if (unlikely(!i)) { i -= tx_ring->count; tx_buf = tx_ring->tx_bi; tx_desc = I40E_TX_DESC(tx_ring, 0); } /* update budget accounting */ budget--; } while (likely(budget)); i += tx_ring->count; tx_ring->next_to_clean = i; if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); return budget > 0; } /** * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring * @irq: interrupt number * @data: pointer to a q_vector **/ static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) { struct i40e_q_vector *q_vector = data; struct i40e_vsi *vsi; if (!q_vector->tx.ring) return IRQ_HANDLED; vsi = q_vector->tx.ring->vsi; i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); return IRQ_HANDLED; } /** * i40e_map_vector_to_qp - Assigns the queue pair to the vector * @vsi: the VSI being configured * @v_idx: vector index * @qp_idx: queue pair index **/ static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; tx_ring->q_vector = q_vector; tx_ring->next = q_vector->tx.ring; q_vector->tx.ring = 
tx_ring; q_vector->tx.count++; rx_ring->q_vector = q_vector; rx_ring->next = q_vector->rx.ring; q_vector->rx.ring = rx_ring; q_vector->rx.count++; } /** * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors * @vsi: the VSI being configured * * This function maps descriptor rings to the queue-specific vectors * we were allotted through the MSI-X enabling code. Ideally, we'd have * one vector per queue pair, but on a constrained vector budget, we * group the queue pairs as "efficiently" as possible. **/ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) { int qp_remaining = vsi->num_queue_pairs; int q_vectors = vsi->num_q_vectors; int num_ringpairs; int v_start = 0; int qp_idx = 0; /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to * group them so there are multiple queues per vector. * It is also important to go through all the vectors available to be * sure that if we don't use all the vectors, that the remaining vectors * are cleared. This is especially important when decreasing the * number of queues in use. */ for (; v_start < q_vectors; v_start++) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); q_vector->num_ringpairs = num_ringpairs; q_vector->rx.count = 0; q_vector->tx.count = 0; q_vector->rx.ring = NULL; q_vector->tx.ring = NULL; while (num_ringpairs--) { i40e_map_vector_to_qp(vsi, v_start, qp_idx); qp_idx++; qp_remaining--; } } } /** * i40e_vsi_request_irq - Request IRQ from the OS * @vsi: the VSI being configured * @basename: name for the vector **/ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) { struct i40e_pf *pf = vsi->back; int err; if (pf->flags & I40E_FLAG_MSIX_ENABLED) err = i40e_vsi_request_irq_msix(vsi, basename); else if (pf->flags & I40E_FLAG_MSI_ENABLED) err = request_irq(pf->pdev->irq, i40e_intr, 0, pf->int_name, pf); else err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, pf->int_name, pf); if (err) dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); return err; } #ifdef CONFIG_NET_POLL_CONTROLLER /** * i40e_netpoll - A Polling 'interrupt'handler * @netdev: network interface device structure * * This is used by netconsole to send skbs without having to re-enable * interrupts. It's not called while the normal interrupt routine is executing. **/ #ifdef I40E_FCOE void i40e_netpoll(struct net_device *netdev) #else static void i40e_netpoll(struct net_device *netdev) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int i; /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, &vsi->state)) return; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { for (i = 0; i < vsi->num_q_vectors; i++) i40e_msix_clean_rings(0, vsi->q_vectors[i]); } else { i40e_intr(pf->pdev->irq, netdev); } } #endif /** * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled * @pf: the PF being configured * @pf_q: the PF queue * @enable: enable or disable state of the queue * * This routine will wait for the given Tx queue of the PF to reach the * enabled or disabled state. * Returns -ETIMEDOUT in case of failing to reach the requested state after * multiple retries; else will return 0 in case of success. 
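 * Each retry reads the QTX_ENA register and sleeps briefly between polls.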
**/ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) { int i; u32 tx_reg; for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; return 0; } /** * i40e_vsi_control_tx - Start or stop a VSI's rings * @vsi: the VSI being configured * @enable: start or stop the rings **/ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int i, j, pf_q, ret = 0; u32 tx_reg; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { /* warn the TX unit of coming changes */ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); if (!enable) usleep_range(10, 20); for (j = 0; j < 50; j++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) break; usleep_range(1000, 2000); } /* Skip if the queue is already in the requested state */ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) continue; /* turn on/off the queue */ if (enable) { wr32(hw, I40E_QTX_HEAD(pf_q), 0); tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; } else { tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; } wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); /* No waiting for the Tx queue to disable */ if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) continue; /* wait for the change to finish */ ret = i40e_pf_txq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Tx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? "en" : "dis")); break; } } if (hw->revision_id == 0) mdelay(50); return ret; } /** * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled * @pf: the PF being configured * @pf_q: the PF queue * @enable: enable or disable state of the queue * * This routine will wait for the given Rx queue of the PF to reach the * enabled or disabled state. * Returns -ETIMEDOUT in case of failing to reach the requested state after * multiple retries; else will return 0 in case of success. **/ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) { int i; u32 rx_reg; for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; return 0; } /** * i40e_vsi_control_rx - Start or stop a VSI's rings * @vsi: the VSI being configured * @enable: start or stop the rings **/ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int i, j, pf_q, ret = 0; u32 rx_reg; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { for (j = 0; j < 50; j++) { rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) break; usleep_range(1000, 2000); } /* Skip if the queue is already in the requested state */ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) continue; /* turn on/off the queue */ if (enable) rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; else rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); /* wait for the change to finish */ ret = i40e_pf_rxq_wait(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d %sable timeout\n", vsi->seid, pf_q, (enable ? 
"en" : "dis")); break; } } return ret; } /** * i40e_vsi_control_rings - Start or stop a VSI's rings * @vsi: the VSI being configured * @enable: start or stop the rings **/ int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) { int ret = 0; /* do rx first for enable and last for disable */ if (request) { ret = i40e_vsi_control_rx(vsi, request); if (ret) return ret; ret = i40e_vsi_control_tx(vsi, request); } else { /* Ignore return value, we need to shutdown whatever we can */ i40e_vsi_control_tx(vsi, request); i40e_vsi_control_rx(vsi, request); } return ret; } /** * i40e_vsi_free_irq - Free the irq association with the OS * @vsi: the VSI being configured **/ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; int base = vsi->base_vector; u32 val, qp; int i; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { if (!vsi->q_vectors) return; if (!vsi->irqs_ready) return; vsi->irqs_ready = false; for (i = 0; i < vsi->num_q_vectors; i++) { u16 vector = i + base; /* free only the irqs that were actually requested */ if (!vsi->q_vectors[i] || !vsi->q_vectors[i]->num_ringpairs) continue; /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(pf->msix_entries[vector].vector, NULL); free_irq(pf->msix_entries[vector].vector, vsi->q_vectors[i]); /* Tear down the interrupt queue link list * * We know that they come in pairs and always * the Rx first, then the Tx. To clear the * link list, stick the EOL value into the * next_q field of the registers. */ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); while (qp != I40E_QUEUE_END_OF_LIST) { u32 next; val = rd32(hw, I40E_QINT_RQCTL(qp)); val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | I40E_QINT_RQCTL_MSIX0_INDX_MASK | I40E_QINT_RQCTL_CAUSE_ENA_MASK | I40E_QINT_RQCTL_INTEVENT_MASK); val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | I40E_QINT_RQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_RQCTL(qp), val); val = rd32(hw, I40E_QINT_TQCTL(qp)); next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | I40E_QINT_TQCTL_MSIX0_INDX_MASK | I40E_QINT_TQCTL_CAUSE_ENA_MASK | I40E_QINT_TQCTL_INTEVENT_MASK); val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | I40E_QINT_TQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_TQCTL(qp), val); qp = next; } } } else { free_irq(pf->pdev->irq, pf); val = rd32(hw, I40E_PFINT_LNKLST0); qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; val |= I40E_QUEUE_END_OF_LIST << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; wr32(hw, I40E_PFINT_LNKLST0, val); val = rd32(hw, I40E_QINT_RQCTL(qp)); val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | I40E_QINT_RQCTL_MSIX0_INDX_MASK | I40E_QINT_RQCTL_CAUSE_ENA_MASK | I40E_QINT_RQCTL_INTEVENT_MASK); val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | I40E_QINT_RQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_RQCTL(qp), val); val = rd32(hw, I40E_QINT_TQCTL(qp)); val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | I40E_QINT_TQCTL_MSIX0_INDX_MASK | I40E_QINT_TQCTL_CAUSE_ENA_MASK | I40E_QINT_TQCTL_INTEVENT_MASK); val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | I40E_QINT_TQCTL_NEXTQ_INDX_MASK); wr32(hw, I40E_QINT_TQCTL(qp), val); } } /** * i40e_free_q_vector - Free memory allocated for specific interrupt vector * @vsi: the VSI being configured * @v_idx: Index of vector to be freed * * This function frees the 
memory allocated to the q_vector. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. **/ static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) { struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; struct i40e_ring *ring; if (!q_vector) return; /* disassociate q_vector from rings */ i40e_for_each_ring(ring, q_vector->tx) ring->q_vector = NULL; i40e_for_each_ring(ring, q_vector->rx) ring->q_vector = NULL; /* only VSI w/ an associated netdev is set up w/ NAPI */ if (vsi->netdev) netif_napi_del(&q_vector->napi); vsi->q_vectors[v_idx] = NULL; kfree_rcu(q_vector, rcu); } /** * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors * @vsi: the VSI being un-configured * * This frees the memory allocated to the q_vectors and * deletes references to the NAPI struct. **/ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) { int v_idx; for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) i40e_free_q_vector(vsi, v_idx); } /** * i40e_reset_interrupt_capability - Disable interrupt setup in OS * @pf: board private structure **/ static void i40e_reset_interrupt_capability(struct i40e_pf *pf) { /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; kfree(pf->irq_pile); pf->irq_pile = NULL; } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { pci_disable_msi(pf->pdev); } pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); } /** * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings * @pf: board private structure * * We go through and clear interrupt specific resources and reset the structure * to pre-load conditions **/ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { int i; i40e_stop_misc_vector(pf); if (pf->flags & I40E_FLAG_MSIX_ENABLED) { synchronize_irq(pf->msix_entries[0].vector); free_irq(pf->msix_entries[0].vector, pf); } i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i]) i40e_vsi_free_q_vectors(pf->vsi[i]); i40e_reset_interrupt_capability(pf); } /** * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured **/ static void i40e_napi_enable_all(struct i40e_vsi *vsi) { int q_idx; if (!vsi->netdev) return; for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) napi_enable(&vsi->q_vectors[q_idx]->napi); } /** * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI * @vsi: the VSI being configured **/ static void i40e_napi_disable_all(struct i40e_vsi *vsi) { int q_idx; if (!vsi->netdev) return; for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) napi_disable(&vsi->q_vectors[q_idx]->napi); } /** * i40e_vsi_close - Shut down a VSI * @vsi: the vsi to be quelled **/ static void i40e_vsi_close(struct i40e_vsi *vsi) { if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) i40e_down(vsi); i40e_vsi_free_irq(vsi); i40e_vsi_free_tx_resources(vsi); i40e_vsi_free_rx_resources(vsi); vsi->current_netdev_flags = 0; } /** * i40e_quiesce_vsi - Pause a given VSI * @vsi: the VSI being paused **/ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) { if (test_bit(__I40E_DOWN, &vsi->state)) return; /* No need to disable FCoE VSI when Tx suspended */ if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && vsi->type == I40E_VSI_FCOE) { dev_dbg(&vsi->back->pdev->dev, "VSI seid %d skipping FCoE VSI disable\n", vsi->seid); return; } 
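/* Mark the VSI so i40e_unquiesce_vsi() knows it has to be brought back up */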
set_bit(__I40E_NEEDS_RESTART, &vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); else i40e_vsi_close(vsi); } /** * i40e_unquiesce_vsi - Resume a given VSI * @vsi: the VSI being resumed **/ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) { if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) return; clear_bit(__I40E_NEEDS_RESTART, &vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_open(vsi->netdev); else i40e_vsi_open(vsi); /* this clears the DOWN bit */ } /** * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF * @pf: the PF **/ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) { int v; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) i40e_quiesce_vsi(pf->vsi[v]); } } /** * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF * @pf: the PF **/ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) { int v; for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) i40e_unquiesce_vsi(pf->vsi[v]); } } #ifdef CONFIG_I40E_DCB /** * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled * @vsi: the VSI being configured * * This function waits for the given VSI's Tx queues to be disabled. **/ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret; pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { /* Check and wait for the disable status of the queue */ ret = i40e_pf_txq_wait(pf, pf_q, false); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Tx ring %d disable timeout\n", vsi->seid, pf_q); return ret; } } return 0; } /** * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled * @pf: the PF * * This function waits for the Tx queues to be in disabled state for all the * VSIs that are managed by this PF. **/ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) { int v, ret = 0; for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { /* No need to wait for FCoE VSI queues */ if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); if (ret) break; } } return ret; } #endif /** * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue * @q_idx: TX queue number * @vsi: Pointer to VSI struct * * This function checks specified queue for given VSI. Detects hung condition. * Sets hung bit since it is two step process. Before next run of service task * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not, * hung condition remain unchanged and during subsequent run, this function * issues SW interrupt to recover from hung condition. **/ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) { struct i40e_ring *tx_ring = NULL; struct i40e_pf *pf; u32 head, val, tx_pending; int i; pf = vsi->back; /* now that we have an index, find the tx_ring struct */ for (i = 0; i < vsi->num_queue_pairs; i++) { if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { if (q_idx == vsi->tx_rings[i]->queue_index) { tx_ring = vsi->tx_rings[i]; break; } } } if (!tx_ring) return; /* Read interrupt register */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) val = rd32(&pf->hw, I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx + tx_ring->vsi->base_vector - 1)); else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); /* Bail out if interrupts are disabled because napi_poll * execution in-progress or will get scheduled soon. * napi_poll cleans TX and RX queues and updates 'next_to_clean'. 
*/ if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)) return; head = i40e_get_head(tx_ring); tx_pending = i40e_get_tx_pending(tx_ring); /* HW is done executing descriptors, updated HEAD write back, * but SW hasn't processed those descriptors. If interrupt is * not generated from this point ON, it could result into * dev_watchdog detecting timeout on those netdev_queue, * hence proactively trigger SW interrupt. */ if (tx_pending) { /* NAPI Poll didn't run and clear since it was set */ if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &tx_ring->q_vector->hung_detected)) { netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", vsi->seid, q_idx, tx_pending, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail)); netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n", vsi->seid, q_idx, val); i40e_force_wb(vsi, tx_ring->q_vector); } else { /* First Chance - detected possible hung */ set_bit(I40E_Q_VECTOR_HUNG_DETECT, &tx_ring->q_vector->hung_detected); } } } /** * i40e_detect_recover_hung - Function to detect and recover hung_queues * @pf: pointer to PF struct * * LAN VSI has netdev and netdev has TX queues. This function is to check * each of those TX queues if they are hung, trigger recovery by issuing * SW interrupt. **/ static void i40e_detect_recover_hung(struct i40e_pf *pf) { struct net_device *netdev; struct i40e_vsi *vsi; int i; /* Only for LAN VSI */ vsi = pf->vsi[pf->lan_vsi]; if (!vsi) return; /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */ if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return; /* Make sure type is MAIN VSI */ if (vsi->type != I40E_VSI_MAIN) return; netdev = vsi->netdev; if (!netdev) return; /* Bail out if netif_carrier is not OK */ if (!netif_carrier_ok(netdev)) return; /* Go thru' TX queues for netdev */ for (i = 0; i < netdev->num_tx_queues; i++) { struct netdev_queue *q; q = netdev_get_tx_queue(netdev, i); if (q) i40e_detect_recover_hung_queue(i, vsi); } } /** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP * @pf: pointer to PF * * Get TC map for ISCSI PF type that will include iSCSI TC * and LAN TC. 
**/ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) { struct i40e_dcb_app_priority_table app; struct i40e_hw *hw = &pf->hw; u8 enabled_tc = 1; /* TC0 is always enabled */ u8 tc, i; /* Get the iSCSI APP TLV */ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; for (i = 0; i < dcbcfg->numapps; i++) { app = dcbcfg->app[i]; if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; enabled_tc |= BIT(tc); break; } } return enabled_tc; } /** * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config * @dcbcfg: the corresponding DCBx configuration structure * * Return the number of TCs from given DCBx configuration **/ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) { u8 num_tc = 0; int i; /* Scan the ETS Config Priority Table to find * traffic class enabled for a given priority * and use the traffic class index to get the * number of traffic classes enabled */ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { if (dcbcfg->etscfg.prioritytable[i] > num_tc) num_tc = dcbcfg->etscfg.prioritytable[i]; } /* Traffic class index starts from zero so * increment to return the actual count */ return num_tc + 1; } /** * i40e_dcb_get_enabled_tc - Get enabled traffic classes * @dcbcfg: the corresponding DCBx configuration structure * * Query the current DCB configuration and return the number of * traffic classes enabled from the given DCBX config **/ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) { u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); u8 enabled_tc = 1; u8 i; for (i = 0; i < num_tc; i++) enabled_tc |= BIT(i); return enabled_tc; } /** * i40e_pf_get_num_tc - Get enabled traffic classes for PF * @pf: PF being queried * * Return number of traffic classes enabled for the given PF **/ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u8 i, enabled_tc; u8 num_tc = 0; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; /* If DCB is not enabled then always in single TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return 1; /* SFP mode will be enabled for all TCs on port */ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_num_tc(dcbcfg); /* MFP mode return count of enabled TCs for this PF */ if (pf->hw.func_caps.iscsi) enabled_tc = i40e_get_iscsi_tc_map(pf); else return 1; /* Only TC0 */ /* At least have TC0 */ enabled_tc = (enabled_tc ? enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) num_tc++; } return num_tc; } /** * i40e_pf_get_default_tc - Get bitmap for first enabled TC * @pf: PF being queried * * Return a bitmap for first enabled traffic class for this PF. **/ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) { u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; u8 i = 0; if (!enabled_tc) return 0x1; /* TC0 */ /* Find the first enabled TC */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) break; } return BIT(i); } /** * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes * @pf: PF being queried * * Return a bitmap for enabled traffic classes for this PF. 
**/ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) { /* If DCB is not enabled for this PF then just return default TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return i40e_pf_get_default_tc(pf); /* SFP mode we want PF to be enabled for all TCs */ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); /* MFP enabled and iSCSI PF type */ if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else return i40e_pf_get_default_tc(pf); } /** * i40e_vsi_get_bw_info - Query VSI BW Information * @vsi: the VSI being queried * * Returns 0 on success, negative value on failure **/ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) { struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; i40e_status ret; u32 tc_bw_max; int i; /* Get the VSI level BW configuration */ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi bw config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } /* Get the VSI level BW configuration per TC */ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi ets bw config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { dev_info(&pf->pdev->dev, "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", bw_config.tc_valid_bits, bw_ets_config.tc_valid_bits); /* Still continuing */ } vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); vsi->bw_max_quanta = bw_config.max_bw; tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; vsi->bw_ets_limit_credits[i] = le16_to_cpu(bw_ets_config.credits[i]); /* 3 bits out of 4 for each TC */ vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); } return 0; } /** * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC * @vsi: the VSI being configured * @enabled_tc: TC bitmap * @bw_credits: BW shared credits per TC * * Returns 0 on success, negative value on failure **/ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; i40e_status ret; int i; bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "AQ command Config VSI BW allocation per TC failed = %d\n", vsi->back->hw.aq.asq_last_status); return -EINVAL; } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) vsi->info.qs_handle[i] = bw_data.qs_handles[i]; return 0; } /** * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration * @vsi: the VSI being configured * @enabled_tc: TC map to be enabled * **/ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) { struct net_device *netdev = vsi->netdev; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u8 netdev_tc = 0; int i; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; if (!netdev) return; if (!enabled_tc) { 
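		/* No TCs requested: drop any TC configuration previously exposed to the stack */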
netdev_reset_tc(netdev); return; } /* Set up actual enabled TCs on the VSI */ if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) return; /* set per TC queues for the VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* Only set TC queues for enabled tcs * * e.g. For a VSI that has TC0 and TC3 enabled the * enabled_tc bitmap would be 0x00001001; the driver * will set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. */ if (vsi->tc_config.enabled_tc & BIT(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, vsi->tc_config.tc_info[i].qoffset); } /* Assign UP2TC map for the VSI */ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { /* Get the actual TC# for the UP */ u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; /* Get the mapped netdev TC# for the UP */ netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; netdev_set_prio_tc_map(netdev, i, netdev_tc); } } /** * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map * @vsi: the VSI being configured * @ctxt: the ctxt buffer returned from AQ VSI update param command **/ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) { /* copy just the sections touched not the entire info * since not all sections are valid as returned by * update vsi params */ vsi->info.mapping_flags = ctxt->info.mapping_flags; memcpy(&vsi->info.queue_mapping, &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, sizeof(vsi->info.tc_mapping)); } /** * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map * @vsi: VSI to be configured * @enabled_tc: TC bitmap * * This configures a particular VSI for TCs that are mapped to the * given TC bitmap. It uses default bandwidth share for TCs across * VSIs to configure TC for a particular VSI. * * NOTE: * It is expected that the VSI queues have been quisced before calling * this function. 
**/ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) { u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; struct i40e_vsi_context ctxt; int ret = 0; int i; /* Check if enabled_tc is same as existing or new TCs */ if (vsi->tc_config.enabled_tc == enabled_tc) return ret; /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) bw_share[i] = 1; } ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); if (ret) { dev_info(&vsi->back->pdev->dev, "Failed configuring TC map %d for VSI %d\n", enabled_tc, vsi->seid); goto out; } /* Update Queue Pairs Mapping for currently enabled UPs */ ctxt.seid = vsi->seid; ctxt.pf_num = vsi->back->hw.pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.info = vsi->info; i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); /* Update the VSI after updating the VSI queue-mapping information */ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "Update vsi tc config failed, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); goto out; } /* update the local VSI info with updated queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; /* Update current VSI BW information */ ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&vsi->back->pdev->dev, "Failed updating vsi bw info, err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); goto out; } /* Update the netdev TC setup */ i40e_vsi_config_netdev_tc(vsi, enabled_tc); out: return ret; } /** * i40e_veb_config_tc - Configure TCs for given VEB * @veb: given VEB * @enabled_tc: TC bitmap * * Configures given TC bitmap for VEB (switching) element **/ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) { struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; struct i40e_pf *pf = veb->pf; int ret = 0; int i; /* No TCs or already enabled TCs just return */ if (!enabled_tc || veb->enabled_tc == enabled_tc) return ret; bw_data.tc_valid_bits = enabled_tc; /* bw_data.absolute_credits is not set (relative) */ /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) bw_data.tc_bw_share_credits[i] = 1; } ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "VEB bw config failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } /* Update the BW information */ ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, "Failed getting veb bw config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: return ret; } #ifdef CONFIG_I40E_DCB /** * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs * @pf: PF struct * * Reconfigure VEB/VSIs on a given PF; it is assumed that * the caller would've quiesce all the VSIs before calling * this function **/ static void i40e_dcb_reconfigure(struct i40e_pf *pf) { u8 tc_map = 0; int ret; u8 v; /* Enable the TCs available on PF to all VEBs */ tc_map = i40e_pf_get_tc_map(pf); for (v = 0; v < I40E_MAX_VEB; v++) { if (!pf->veb[v]) continue; ret = i40e_veb_config_tc(pf->veb[v], tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VEB seid=%d\n", pf->veb[v]->seid); /* Will try to configure as 
many components */ } } /* Update each VSI */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v]) continue; /* - Enable all TCs for the LAN VSI #ifdef I40E_FCOE * - For FCoE VSI only enable the TC configured * as per the APP TLV #endif * - For all others keep them at TC0 for now */ if (v == pf->lan_vsi) tc_map = i40e_pf_get_tc_map(pf); else tc_map = i40e_pf_get_default_tc(pf); #ifdef I40E_FCOE if (pf->vsi[v]->type == I40E_VSI_FCOE) tc_map = i40e_get_fcoe_tc_map(pf); #endif /* #ifdef I40E_FCOE */ ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VSI seid=%d\n", pf->vsi[v]->seid); /* Will try to configure as many components */ } else { /* Re-configure VSI vectors based on updated TC map */ i40e_vsi_map_rings_to_vectors(pf->vsi[v]); if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } } } /** * i40e_resume_port_tx - Resume port Tx * @pf: PF struct * * Resume a port's Tx and issue a PF reset in case of failure to * resume. **/ static int i40e_resume_port_tx(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int ret; ret = i40e_aq_resume_port_tx(hw, NULL); if (ret) { dev_info(&pf->pdev->dev, "Resume Port Tx failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_service_event_schedule(pf); } return ret; } /** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * * Query the current DCB configuration and cache it * in the hardware structure **/ static int i40e_init_pf_dcb(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int err = 0; /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) goto out; /* Get the initial DCB configuration */ err = i40e_init_dcb(hw); if (!err) { /* Device/Function is not DCBX capable */ if ((!hw->func_caps.dcb) || (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { dev_info(&pf->pdev->dev, "DCBX offload is not supported or is disabled for this PF.\n"); if (pf->flags & I40E_FLAG_MFP_ENABLED) goto out; } else { /* When status is not DISABLED then DCBX in FW */ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; pf->flags |= I40E_FLAG_DCB_CAPABLE; /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } } else { dev_info(&pf->pdev->dev, "Query for DCB configuration failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: return err; } #endif /* CONFIG_I40E_DCB */ #define SPEED_SIZE 14 #define FC_SIZE 8 /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message */ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { char *speed = "Unknown"; char *fc = "Unknown"; if (vsi->current_isup == isup) return; vsi->current_isup = isup; if (!isup) { netdev_info(vsi->netdev, "NIC Link is Down\n"); return; } /* Warn user if link speed on NPAR enabled partition is not at * least 10GB */ if (vsi->back->hw.func_caps.npar_enable && (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) netdev_warn(vsi->netdev, "The partition detected link speed that is less than 10Gbps\n"); switch 
(vsi->back->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: speed = "40 G"; break; case I40E_LINK_SPEED_20GB: speed = "20 G"; break; case I40E_LINK_SPEED_10GB: speed = "10 G"; break; case I40E_LINK_SPEED_1GB: speed = "1000 M"; break; case I40E_LINK_SPEED_100MB: speed = "100 M"; break; default: break; } switch (vsi->back->hw.fc.current_mode) { case I40E_FC_FULL: fc = "RX/TX"; break; case I40E_FC_TX_PAUSE: fc = "TX"; break; case I40E_FC_RX_PAUSE: fc = "RX"; break; default: fc = "None"; break; } netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n", speed, fc); } /** * i40e_up_complete - Finish the last steps of bringing up a connection * @vsi: the VSI being configured **/ static int i40e_up_complete(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int err; if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_vsi_configure_msix(vsi); else i40e_configure_msi_and_legacy(vsi); /* start rings */ err = i40e_vsi_control_rings(vsi, true); if (err) return err; clear_bit(__I40E_DOWN, &vsi->state); i40e_napi_enable_all(vsi); i40e_vsi_enable_irq(vsi); if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && (vsi->netdev)) { i40e_print_link_message(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); } else if (vsi->netdev) { i40e_print_link_message(vsi, false); /* need to check for qualified module here*/ if ((pf->hw.phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(pf->hw.phy.link_info.an_info & I40E_AQ_QUALIFIED_MODULE))) netdev_err(vsi->netdev, "the driver failed to link because an unqualified module was detected."); } /* replay FDIR SB filters */ if (vsi->type == I40E_VSI_FDIR) { /* reset fd counters */ pf->fd_add_err = pf->fd_atr_cnt = 0; if (pf->fd_tcp_rule > 0) { pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); pf->fd_tcp_rule = 0; } i40e_fdir_filter_restore(vsi); } i40e_service_event_schedule(pf); return 0; } /** * i40e_vsi_reinit_locked - Reset the VSI * @vsi: the VSI being configured * * Rebuild the ring structs after some configuration * has changed, e.g. MTU size. **/ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; WARN_ON(in_interrupt()); while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) usleep_range(1000, 2000); i40e_down(vsi); /* Give a VF some time to respond to the reset. The * two second wait is based upon the watchdog cycle in * the VF driver. */ if (vsi->type == I40E_VSI_SRIOV) msleep(2000); i40e_up(vsi); clear_bit(__I40E_CONFIG_BUSY, &pf->state); } /** * i40e_up - Bring the connection back up after being down * @vsi: the VSI being configured **/ int i40e_up(struct i40e_vsi *vsi) { int err; err = i40e_vsi_configure(vsi); if (!err) err = i40e_up_complete(vsi); return err; } /** * i40e_down - Shutdown the connection processing * @vsi: the VSI being stopped **/ void i40e_down(struct i40e_vsi *vsi) { int i; /* It is assumed that the caller of this function * sets the vsi->state __I40E_DOWN bit. 
*/ if (vsi->netdev) { netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); } i40e_vsi_disable_irq(vsi); i40e_vsi_control_rings(vsi, false); i40e_napi_disable_all(vsi); for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_clean_tx_ring(vsi->tx_rings[i]); i40e_clean_rx_ring(vsi->rx_rings[i]); } } /** * i40e_setup_tc - configure multiple traffic classes * @netdev: net device to configure * @tc: number of traffic classes to enable **/ #ifdef I40E_FCOE int i40e_setup_tc(struct net_device *netdev, u8 tc) #else static int i40e_setup_tc(struct net_device *netdev, u8 tc) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 enabled_tc = 0; int ret = -EINVAL; int i; /* Check if DCB enabled to continue */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { netdev_info(netdev, "DCB is not enabled for adapter\n"); goto exit; } /* Check if MFP enabled */ if (pf->flags & I40E_FLAG_MFP_ENABLED) { netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); goto exit; } /* Check whether tc count is within enabled limit */ if (tc > i40e_pf_get_num_tc(pf)) { netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); goto exit; } /* Generate TC map for number of tc requested */ for (i = 0; i < tc; i++) enabled_tc |= BIT(i); /* Requesting same TC configuration as already enabled */ if (enabled_tc == vsi->tc_config.enabled_tc) return 0; /* Quiesce VSI queues */ i40e_quiesce_vsi(vsi); /* Configure VSI for enabled TCs */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", vsi->seid); goto exit; } /* Unquiesce VSI */ i40e_unquiesce_vsi(vsi); exit: return ret; } /** * i40e_open - Called when a network interface is made active * @netdev: network interface device structure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the netdev watchdog subtask is * enabled, and the stack is notified that the interface is ready. * * Returns 0 on success, negative value on failure **/ int i40e_open(struct net_device *netdev) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; int err; /* disallow open during test or if eeprom is broken */ if (test_bit(__I40E_TESTING, &pf->state) || test_bit(__I40E_BAD_EEPROM, &pf->state)) return -EBUSY; netif_carrier_off(netdev); err = i40e_vsi_open(vsi); if (err) return err; /* configure global TSO hardware offload settings */ wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | TCP_FLAG_FIN | TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); #ifdef CONFIG_I40E_VXLAN vxlan_get_rx_port(netdev); #endif #ifdef CONFIG_I40E_GENEVE geneve_get_rx_port(netdev); #endif return 0; } /** * i40e_vsi_open - * @vsi: the VSI to open * * Finish initialization of the VSI. 
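 * Allocates the Tx/Rx descriptor resources, requests the IRQ vector(s),
 * publishes the real queue counts to the stack and completes bring-up
 * via i40e_up_complete().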
* * Returns 0 on success, negative value on failure **/ int i40e_vsi_open(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; char int_name[I40E_INT_NAME_STR_LEN]; int err; /* allocate descriptors */ err = i40e_vsi_setup_tx_resources(vsi); if (err) goto err_setup_tx; err = i40e_vsi_setup_rx_resources(vsi); if (err) goto err_setup_rx; err = i40e_vsi_configure(vsi); if (err) goto err_setup_rx; if (vsi->netdev) { snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), vsi->netdev->name); err = i40e_vsi_request_irq(vsi, int_name); if (err) goto err_setup_rx; /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) goto err_set_queues; err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); if (err) goto err_set_queues; } else if (vsi->type == I40E_VSI_FDIR) { snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", dev_driver_string(&pf->pdev->dev), dev_name(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); } else { err = -EINVAL; goto err_setup_rx; } err = i40e_up_complete(vsi); if (err) goto err_up_complete; return 0; err_up_complete: i40e_down(vsi); err_set_queues: i40e_vsi_free_irq(vsi); err_setup_rx: i40e_vsi_free_rx_resources(vsi); err_setup_tx: i40e_vsi_free_tx_resources(vsi); if (vsi == pf->vsi[pf->lan_vsi]) i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); return err; } /** * i40e_fdir_filter_exit - Cleans up the Flow Director accounting * @pf: Pointer to PF * * This function destroys the hlist where all the Flow Director * filters were saved. **/ static void i40e_fdir_filter_exit(struct i40e_pf *pf) { struct i40e_fdir_filter *filter; struct hlist_node *node2; hlist_for_each_entry_safe(filter, node2, &pf->fdir_filter_list, fdir_node) { hlist_del(&filter->fdir_node); kfree(filter); } pf->fdir_pf_active_filters = 0; } /** * i40e_close - Disables a network interface * @netdev: network interface device structure * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the driver's control, but * this netdev interface is disabled. * * Returns 0, this is not allowed to fail **/ #ifdef I40E_FCOE int i40e_close(struct net_device *netdev) #else static int i40e_close(struct net_device *netdev) #endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; i40e_vsi_close(vsi); return 0; } /** * i40e_do_reset - Start a PF or Core Reset sequence * @pf: board private structure * @reset_flags: which reset is requested * * The essential difference in resets is that the PF Reset * doesn't clear the packet buffers, doesn't reset the PE * firmware, and doesn't bother the other PFs on the chip. **/ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) { u32 val; WARN_ON(in_interrupt()); if (i40e_check_asq_alive(&pf->hw)) i40e_vc_notify_reset(pf); /* do the biggest reset indicated */ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { /* Request a Global Reset * * This will start the chip's countdown to the actual full * chip reset event, and a warning interrupt to be sent * to all PFs, including the requestor. Our handler * for the warning interrupt will deal with the shutdown * and recovery of the switch setup. 
*/ dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_GLOBR_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { /* Request a Core Reset * * Same as Global Reset, except does *not* include the MAC/PHY */ dev_dbg(&pf->pdev->dev, "CoreR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_CORER_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset * * Resets only the PF-specific registers * * This goes directly to the tear-down and rebuild of * the switch, since we need to do all the recovery as * for the Core Reset. */ dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf); } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; /* Find the VSI(s) that requested a re-init */ dev_info(&pf->pdev->dev, "VSI reinit requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { i40e_vsi_reinit_locked(pf->vsi[v]); clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); } } } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; /* Find the VSI(s) that needs to be brought down */ dev_info(&pf->pdev->dev, "VSI down requested\n"); for (v = 0; v < pf->num_alloc_vsi; v++) { struct i40e_vsi *vsi = pf->vsi[v]; if (vsi != NULL && test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { set_bit(__I40E_DOWN, &vsi->state); i40e_down(vsi); clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); } } } else { dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); } } #ifdef CONFIG_I40E_DCB /** * i40e_dcb_need_reconfig - Check if DCB needs reconfig * @pf: board private structure * @old_cfg: current DCB config * @new_cfg: new DCB config **/ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg) { bool need_reconfig = false; /* Check if ETS configuration has changed */ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, sizeof(new_cfg->etscfg))) { /* If Priority Table has changed reconfig is needed */ if (memcmp(&new_cfg->etscfg.prioritytable, &old_cfg->etscfg.prioritytable, sizeof(new_cfg->etscfg.prioritytable))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); } if (memcmp(&new_cfg->etscfg.tcbwtable, &old_cfg->etscfg.tcbwtable, sizeof(new_cfg->etscfg.tcbwtable))) dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); if (memcmp(&new_cfg->etscfg.tsatable, &old_cfg->etscfg.tsatable, sizeof(new_cfg->etscfg.tsatable))) dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); } /* Check if PFC configuration has changed */ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); } /* Check if APP Table has changed */ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { need_reconfig = true; dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); return need_reconfig; } /** * i40e_handle_lldp_event - Handle LLDP Change MIB event * @pf: board private structure * @e: event info posted on ARQ **/ static int i40e_handle_lldp_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { struct i40e_aqc_lldp_get_mib *mib = (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; struct i40e_hw *hw = &pf->hw; struct 
i40e_dcbx_config tmp_dcbx_cfg; bool need_reconfig = false; int ret = 0; u8 type; /* Not DCB capable or capability disabled */ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return ret; /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type); if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) return ret; /* Check MIB Type and return if event for Remote MIB update */ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; dev_dbg(&pf->pdev->dev, "LLDP event mib type %s\n", type ? "remote" : "local"); if (type == I40E_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); goto exit; } /* Store the old configuration */ tmp_dcbx_cfg = hw->local_dcbx_config; /* Reset the old DCBx configuration data */ memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto exit; } /* No change detected in DCBX configs */ if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg))) { dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); goto exit; } need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); if (!need_reconfig) goto exit; /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); /* Changes in configuration update VEB/VSI */ i40e_dcb_reconfigure(pf); ret = i40e_resume_port_tx(pf); clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); /* In case of error no point in resuming VSIs */ if (ret) goto exit; /* Wait for the PF's Tx queues to be disabled */ ret = i40e_pf_wait_txq_disabled(pf); if (ret) { /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); } exit: return ret; } #endif /* CONFIG_I40E_DCB */ /** * i40e_do_reset_safe - Protected reset path for userland calls. 
* @pf: board private structure * @reset_flags: which reset is requested * **/ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) { rtnl_lock(); i40e_do_reset(pf, reset_flags); rtnl_unlock(); } /** * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event * @pf: board private structure * @e: event info posted on ARQ * * Handler for LAN Queue Overflow Event generated by the firmware for PF * and VF queues **/ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { struct i40e_aqc_lan_overflow *data = (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; u32 queue = le32_to_cpu(data->prtdcb_rupto); u32 qtx_ctl = le32_to_cpu(data->otx_ctl); struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; u16 vf_id; dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", queue, qtx_ctl); /* Queue belongs to VF, find the VF and issue VF reset */ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) >> I40E_QTX_CTL_VFVM_INDX_SHIFT); vf_id -= hw->func_caps.vf_base_id; vf = &pf->vf[vf_id]; i40e_vc_notify_vf_reset(vf); /* Allow VF to process pending reset notification */ msleep(20); i40e_reset_vf(vf, false); } } /** * i40e_service_event_complete - Finish up the service event * @pf: board private structure **/ static void i40e_service_event_complete(struct i40e_pf *pf) { WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); /* flush memory to make sure state is correct before next watchog */ smp_mb__before_atomic(); clear_bit(__I40E_SERVICE_SCHED, &pf->state); } /** * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters * @pf: board private structure **/ u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); return fcnt_prog; } /** * i40e_get_current_fd_count - Get total FD filters programmed for this PF * @pf: board private structure **/ u32 i40e_get_current_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); return fcnt_prog; } /** * i40e_get_global_fd_count - Get total FD filters programmed on device * @pf: board private structure **/ u32 i40e_get_global_fd_count(struct i40e_pf *pf) { u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); return fcnt_prog; } /** * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled * @pf: board private structure **/ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) { struct i40e_fdir_filter *filter; u32 fcnt_prog, fcnt_avail; struct hlist_node *node; if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) return; /* Check if, FD SB or ATR was auto disabled and if there is enough room * to re-enable */ fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || (pf->fd_add_err == 0) || (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD 
Sideband/ntuple is being enabled since we have space in the table now\n"); } } /* Wait for some more space to be available to turn on ATR */ if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); } } /* if hw had a problem adding a filter, delete it */ if (pf->fd_inv > 0) { hlist_for_each_entry_safe(filter, node, &pf->fdir_filter_list, fdir_node) { if (filter->fd_id == pf->fd_inv) { hlist_del(&filter->fdir_node); kfree(filter); pf->fdir_pf_active_filters--; } } } } #define I40E_MIN_FD_FLUSH_INTERVAL 10 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30 /** * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB * @pf: board private structure **/ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) { unsigned long min_flush_time; int flush_wait_retry = 50; bool disable_atr = false; int fd_room; int reg; if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) return; if (!time_after(jiffies, pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) return; /* If the flush is happening too quick and we have mostly SB rules we * should not re-enable ATR for some time. */ min_flush_time = pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; if (!(time_after(jiffies, min_flush_time)) && (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); disable_atr = true; } pf->fd_flush_timestamp = jiffies; pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); i40e_flush(&pf->hw); pf->fd_flush_cnt++; pf->fd_add_err = 0; do { /* Check FD flush status every 5-6msec */ usleep_range(5000, 6000); reg = rd32(&pf->hw, I40E_PFQF_CTL_1); if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) break; } while (flush_wait_retry--); if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); } else { /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); if (!disable_atr) pf->flags |= I40E_FLAG_FD_ATR_ENABLED; clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } } /** * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed * @pf: board private structure **/ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) { return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } /* We can see up to 256 filter programming desc in transit if the filters are * being applied really fast; before we see the first * filter miss error on Rx queue 0. Accumulating enough error messages before * reacting will make sure we don't cause flush too often. 
*/ #define I40E_MAX_FD_PROGRAM_ERROR 256 /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure **/ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) { /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, &pf->state)) return; if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) return; if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) i40e_fdir_flush_and_replay(pf); i40e_fdir_check_and_reenable(pf); } /** * i40e_vsi_link_event - notify VSI of a link event * @vsi: vsi to be notified * @link_up: link up or down **/ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) { if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) return; switch (vsi->type) { case I40E_VSI_MAIN: #ifdef I40E_FCOE case I40E_VSI_FCOE: #endif if (!vsi->netdev || !vsi->netdev_registered) break; if (link_up) { netif_carrier_on(vsi->netdev); netif_tx_wake_all_queues(vsi->netdev); } else { netif_carrier_off(vsi->netdev); netif_tx_stop_all_queues(vsi->netdev); } break; case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: case I40E_VSI_CTRL: case I40E_VSI_MIRROR: default: /* there is no notification for other VSIs */ break; } } /** * i40e_veb_link_event - notify elements on the veb of a link event * @veb: veb to be notified * @link_up: link up or down **/ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) { struct i40e_pf *pf; int i; if (!veb || !veb->pf) return; pf = veb->pf; /* depth first... */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) i40e_veb_link_event(pf->veb[i], link_up); /* ... now the local VSIs */ for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) i40e_vsi_link_event(pf->vsi[i], link_up); } /** * i40e_link_event - Update netif_carrier status * @pf: board private structure **/ static void i40e_link_event(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 new_link_speed, old_link_speed; i40e_status status; bool new_link, old_link; /* save off old link status information */ pf->hw.phy.link_info_old = pf->hw.phy.link_info; /* set this to force the get_link_status call to refresh state */ pf->hw.phy.get_link_info = true; old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); status = i40e_get_link_status(&pf->hw, &new_link); if (status) { dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", status); return; } old_link_speed = pf->hw.phy.link_info_old.link_speed; new_link_speed = pf->hw.phy.link_info.link_speed; if (new_link == old_link && new_link_speed == old_link_speed && (test_bit(__I40E_DOWN, &vsi->state) || new_link == netif_carrier_ok(vsi->netdev))) return; if (!test_bit(__I40E_DOWN, &vsi->state)) i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. 
*/ if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); else i40e_vsi_link_event(vsi, new_link); if (pf->vf) i40e_vc_notify_link_state(pf); if (pf->flags & I40E_FLAG_PTP) i40e_ptp_set_increment(pf); } /** * i40e_watchdog_subtask - periodic checks not using event driven response * @pf: board private structure **/ static void i40e_watchdog_subtask(struct i40e_pf *pf) { int i; /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, &pf->state) || test_bit(__I40E_CONFIG_BUSY, &pf->state)) return; /* make sure we don't do these things too often */ if (time_before(jiffies, (pf->service_timer_previous + pf->service_timer_period))) return; pf->service_timer_previous = jiffies; if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) i40e_link_event(pf); /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i] && pf->vsi[i]->netdev) i40e_update_stats(pf->vsi[i]); if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) { /* Update the stats for the active switching components */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i]) i40e_update_veb_stats(pf->veb[i]); } i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); } /** * i40e_reset_subtask - Set up for resetting the device and driver * @pf: board private structure **/ static void i40e_reset_subtask(struct i40e_pf *pf) { u32 reset_flags = 0; rtnl_lock(); if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { reset_flags |= BIT(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, &pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { reset_flags |= BIT(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { reset_flags |= BIT(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, &pf->state); } /* If there's a recovery already waiting, it takes * precedence before starting a new reset sequence. */ if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { i40e_handle_reset_warning(pf); goto unlock; } /* If we're already down or resetting, just bail */ if (reset_flags && !test_bit(__I40E_DOWN, &pf->state) && !test_bit(__I40E_CONFIG_BUSY, &pf->state)) i40e_do_reset(pf, reset_flags); unlock: rtnl_unlock(); } /** * i40e_handle_link_event - Handle link event * @pf: board private structure * @e: event info posted on ARQ **/ static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_arq_event_info *e) { struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct * This completely ignores any state information * in the ARQ event info, instead choosing to always * issue the AQ update link status command. 
*/ i40e_link_event(pf); /* check for unqualified module, if link is down */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP))) dev_err(&pf->pdev->dev, "The driver failed to link because an unqualified module was detected.\n"); } /** * i40e_clean_adminq_subtask - Clean the AdminQ rings * @pf: board private structure **/ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) { struct i40e_arq_event_info event; struct i40e_hw *hw = &pf->hw; u16 pending, i = 0; i40e_status ret; u16 opcode; u32 oldval; u32 val; /* Do not run clean AQ when PF reset fails */ if (test_bit(__I40E_RESET_FAILED, &pf->state)) return; /* check for error indications */ val = rd32(&pf->hw, pf->hw.aq.arq.len); oldval = val; if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; } if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; } if (oldval != val) wr32(&pf->hw, pf->hw.aq.arq.len, val); val = rd32(&pf->hw, pf->hw.aq.asq.len); oldval = val; if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; } if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; } if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { if (pf->hw.debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; } if (oldval != val) wr32(&pf->hw, pf->hw.aq.asq.len, val); event.buf_len = I40E_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; do { ret = i40e_clean_arq_element(hw, &event, &pending); if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) break; else if (ret) { dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); break; } opcode = le16_to_cpu(event.desc.opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: i40e_handle_link_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_pf: ret = i40e_vc_process_vf_msg(pf, le16_to_cpu(event.desc.retval), le32_to_cpu(event.desc.cookie_high), le32_to_cpu(event.desc.cookie_low), event.msg_buf, event.msg_len); break; case i40e_aqc_opc_lldp_update_mib: dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); #ifdef CONFIG_I40E_DCB rtnl_lock(); ret = i40e_handle_lldp_event(pf, &event); rtnl_unlock(); #endif /* CONFIG_I40E_DCB */ break; case i40e_aqc_opc_event_lan_overflow: dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); i40e_handle_lan_overflow_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_peer: dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); break; case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: case i40e_aqc_opc_oem_post_update: i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); break; default: dev_info(&pf->pdev->dev, "ARQ Error: Unknown event 0x%04x received\n", opcode); break; } } while (pending && (i++ < pf->adminq_work_limit)); clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); /* re-enable Admin queue interrupt 
cause */ val = rd32(hw, I40E_PFINT_ICR0_ENA); val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, val); i40e_flush(hw); kfree(event.msg_buf); } /** * i40e_verify_eeprom - make sure eeprom is good to use * @pf: board private structure **/ static void i40e_verify_eeprom(struct i40e_pf *pf) { int err; err = i40e_diag_eeprom_test(&pf->hw); if (err) { /* retry in case of garbage read */ err = i40e_diag_eeprom_test(&pf->hw); if (err) { dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", err); set_bit(__I40E_BAD_EEPROM, &pf->state); } } if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); clear_bit(__I40E_BAD_EEPROM, &pf->state); } } /** * i40e_enable_pf_switch_lb * @pf: pointer to the PF structure * * enable switch loop back or die - no point in a return value **/ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi switch failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } /** * i40e_disable_pf_switch_lb * @pf: pointer to the PF structure * * disable switch loop back or die - no point in a return value **/ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi switch failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } /** * i40e_config_bridge_mode - Configure the HW bridge mode * @veb: pointer to the bridge instance * * Configure the loop back mode for the LAN VSI that is downlink to the * specified HW bridge instance. It is expected this function is called * when a new HW bridge is instantiated. **/ static void i40e_config_bridge_mode(struct i40e_veb *veb) { struct i40e_pf *pf = veb->pf; if (pf->hw.debug_mask & I40E_DEBUG_LAN) dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); if (veb->bridge_mode & BRIDGE_MODE_VEPA) i40e_disable_pf_switch_lb(pf); else i40e_enable_pf_switch_lb(pf); } /** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * * This is a recursive function that first builds the attached VSIs then * recurses in to build the next layer of VEB. We track the connections * through our own index numbers because the seid's from the HW could * change across the reset. **/ static int i40e_reconstitute_veb(struct i40e_veb *veb) { struct i40e_vsi *ctl_vsi = NULL; struct i40e_pf *pf = veb->pf; int v, veb_idx; int ret; /* build VSI that owns this VEB, temporarily attached to base VEB */ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { if (pf->vsi[v] && pf->vsi[v]->veb_idx == veb->idx && pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { ctl_vsi = pf->vsi[v]; break; } } if (!ctl_vsi) { dev_info(&pf->pdev->dev, "missing owner VSI for veb_idx %d\n", veb->idx); ret = -ENOENT; goto end_reconstitute; } if (ctl_vsi != pf->vsi[pf->lan_vsi]) ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; ret = i40e_add_vsi(ctl_vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of veb_idx %d owner VSI failed: %d\n", veb->idx, ret); goto end_reconstitute; } i40e_vsi_reset_stats(ctl_vsi); /* create the VEB in the switch and move the VSI onto the VEB */ ret = i40e_add_veb(veb, ctl_vsi); if (ret) goto end_reconstitute; if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) veb->bridge_mode = BRIDGE_MODE_VEB; else veb->bridge_mode = BRIDGE_MODE_VEPA; i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) continue; if (pf->vsi[v]->veb_idx == veb->idx) { struct i40e_vsi *vsi = pf->vsi[v]; vsi->uplink_seid = veb->seid; ret = i40e_add_vsi(vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of vsi_idx %d failed: %d\n", v, ret); goto end_reconstitute; } i40e_vsi_reset_stats(vsi); } } /* create any VEBs attached to this VEB - RECURSION */ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { pf->veb[veb_idx]->uplink_seid = veb->seid; ret = i40e_reconstitute_veb(pf->veb[veb_idx]); if (ret) break; } } end_reconstitute: return ret; } /** * i40e_get_capabilities - get info about the HW * @pf: the PF struct **/ static int i40e_get_capabilities(struct i40e_pf *pf) { struct i40e_aqc_list_capabilities_element_resp *cap_buf; u16 data_size; int buf_len; int err; buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); do { cap_buf = kzalloc(buf_len, GFP_KERNEL); if (!cap_buf) return -ENOMEM; /* this loads the data into the hw struct for us */ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, &data_size, i40e_aqc_opc_list_func_capabilities, NULL); /* data loaded, buffer no longer needed */ kfree(cap_buf); if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { /* retry with a larger buffer */ buf_len = data_size; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { dev_info(&pf->pdev->dev, "capability discovery failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENODEV; } } while (err); if (pf->hw.debug_mask & I40E_DEBUG_USER) dev_info(&pf->pdev->dev, "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", pf->hw.pf_id, pf->hw.func_caps.num_vfs, pf->hw.func_caps.num_msix_vectors, pf->hw.func_caps.num_msix_vectors_vf, 
pf->hw.func_caps.fd_filters_guaranteed, pf->hw.func_caps.fd_filters_best_effort, pf->hw.func_caps.num_tx_qp, pf->hw.func_caps.num_vsis); #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ + pf->hw.func_caps.num_vfs) if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { dev_info(&pf->pdev->dev, "got num_vsis %d, setting num_vsis to %d\n", pf->hw.func_caps.num_vsis, DEF_NUM_VSI); pf->hw.func_caps.num_vsis = DEF_NUM_VSI; } return 0; } static int i40e_vsi_clear(struct i40e_vsi *vsi); /** * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband * @pf: board private structure **/ static void i40e_fdir_sb_setup(struct i40e_pf *pf) { struct i40e_vsi *vsi; int i; /* quick workaround for an NVM issue that leaves a critical register * uninitialized */ if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { static const u32 hkey[] = { 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 0x95b3a76d}; for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); } if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; /* find existing VSI and see if it needs configuring */ vsi = NULL; for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { vsi = pf->vsi[i]; break; } } /* create a new VSI if none exists */ if (!vsi) { vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->vsi[pf->lan_vsi]->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; return; } } i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); } /** * i40e_fdir_teardown - release the Flow Director resources * @pf: board private structure **/ static void i40e_fdir_teardown(struct i40e_pf *pf) { int i; i40e_fdir_filter_exit(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_vsi_release(pf->vsi[i]); break; } } } /** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure * * Close up the VFs and other things in prep for PF Reset. **/ static void i40e_prep_for_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; i40e_status ret = 0; u32 v; clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) return; dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); /* quiesce the VSIs and their queues that are not already DOWN */ i40e_pf_quiesce_all_vsi(pf); for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v]) pf->vsi[v]->seid = 0; } i40e_shutdown_adminq(&pf->hw); /* call shutdown HMC */ if (hw->hmc.hmc_obj) { ret = i40e_shutdown_lan_hmc(hw); if (ret) dev_warn(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); } } /** * i40e_send_version - update firmware with driver version * @pf: PF struct */ static void i40e_send_version(struct i40e_pf *pf) { struct i40e_driver_version dv; dv.major_version = DRV_VERSION_MAJOR; dv.minor_version = DRV_VERSION_MINOR; dv.build_version = DRV_VERSION_BUILD; dv.subbuild_version = 0; strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL); } /** * i40e_reset_and_rebuild - reset and rebuild using a saved config * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. 
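 *
 * The rebuild re-initializes the AdminQ and HMC, redoes the basic switch
 * setup and then reconstitutes the VEBs and VSIs that existed before the
 * reset from the driver's cached switch configuration.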
**/ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) { struct i40e_hw *hw = &pf->hw; u8 set_fc_aq_fail = 0; i40e_status ret; u32 val; u32 v; /* Now we wait for GRST to settle out. * We don't have to delete the VEBs or VSIs from the hw switch * because the reset will make them disappear. */ ret = i40e_pf_reset(hw); if (ret) { dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); set_bit(__I40E_RESET_FAILED, &pf->state); goto clear_recovery; } pf->pfr_count++; if (test_bit(__I40E_DOWN, &pf->state)) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto clear_recovery; } /* re-verify the eeprom if we just had an EMP reset */ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) i40e_verify_eeprom(pf); i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf); if (ret) goto end_core_reset; ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); if (ret) { dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); goto end_core_reset; } ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (ret) { dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); goto end_core_reset; } #ifdef CONFIG_I40E_DCB ret = i40e_init_pf_dcb(pf); if (ret) { dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); pf->flags &= ~I40E_FLAG_DCB_CAPABLE; /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ #ifdef I40E_FCOE i40e_init_pf_fcoe(pf); #endif /* do basic switch setup */ ret = i40e_setup_pf_switch(pf, reinit); if (ret) goto end_core_reset; /* driver is only interested in link up/down and module qualification * reports from firmware */ ret = i40e_aq_set_phy_int_mask(&pf->hw, I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); if (ret) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* make sure our flow control settings are restored */ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); if (ret) dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only * need to rebuild the switch model in the HW. * * If there were VEBs but the reconstitution failed, we'll try * try to recover minimal use by getting the basic PF VSI working. */ if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); /* find the one VEB connected to the MAC, and find orphans */ for (v = 0; v < I40E_MAX_VEB; v++) { if (!pf->veb[v]) continue; if (pf->veb[v]->uplink_seid == pf->mac_seid || pf->veb[v]->uplink_seid == 0) { ret = i40e_reconstitute_veb(pf->veb[v]); if (!ret) continue; /* If Main VEB failed, we're in deep doodoo, * so give up rebuilding the switch and set up * for minimal rebuild of PF VSI. * If orphan failed, we'll report the error * but try to keep going. 
*/ if (pf->veb[v]->uplink_seid == pf->mac_seid) { dev_info(&pf->pdev->dev, "rebuild of switch failed: %d, will try to set up simple PF connection\n", ret); pf->vsi[pf->lan_vsi]->uplink_seid = pf->mac_seid; break; } else if (pf->veb[v]->uplink_seid == 0) { dev_info(&pf->pdev->dev, "rebuild of orphan VEB failed: %d\n", ret); } } } } if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); /* no VEB, so rebuild only the Main VSI */ ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); if (ret) { dev_info(&pf->pdev->dev, "rebuild of Main VSI failed: %d\n", ret); goto end_core_reset; } } /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. */ #define I40E_REG_MSS 0x000E64DC #define I40E_REG_MSS_MIN_MASK 0x3FF0000 #define I40E_64BYTE_MSS 0x400000 val = rd32(hw, I40E_REG_MSS); if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { val &= ~I40E_REG_MSS_MIN_MASK; val |= I40E_64BYTE_MSS; wr32(hw, I40E_REG_MSS, val); } if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out * PAUSE or PFC frames and potentially controlling traffic for other * PF/VF VSIs. * The FW can still send Flow control frames if enabled. */ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, pf->main_vsi_seid); /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); if (pf->num_alloc_vfs) { for (v = 0; v < pf->num_alloc_vfs; v++) i40e_reset_vf(&pf->vf[v], true); } /* tell the firmware that we're starting */ i40e_send_version(pf); end_core_reset: clear_bit(__I40E_RESET_FAILED, &pf->state); clear_recovery: clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); } /** * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild * @pf: board private structure * * Close up the VFs and other things in prep for a Core Reset, * then get ready to rebuild the world. 
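 *
 * This simply chains i40e_prep_for_reset() and i40e_reset_and_rebuild().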
**/ static void i40e_handle_reset_warning(struct i40e_pf *pf) { i40e_prep_for_reset(pf); i40e_reset_and_rebuild(pf, false); } /** * i40e_handle_mdd_event * @pf: pointer to the PF structure * * Called from the MDD irq handler to identify possibly malicious vfs **/ static void i40e_handle_mdd_event(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; bool mdd_detected = false; bool pf_mdd_detected = false; struct i40e_vf *vf; u32 reg; int i; if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) return; /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", event, queue, pf_num, vf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_rx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", event, queue, func); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } if (mdd_detected) { reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); pf_mdd_detected = true; } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); pf_mdd_detected = true; } /* Queue belongs to the PF, initiate a reset */ if (pf_mdd_detected) { set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_service_event_schedule(pf); } } /* see if one of the VFs needs its hand slapped */ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { vf = &(pf->vf[i]); reg = rd32(hw, I40E_VP_MDET_TX(i)); if (reg & I40E_VP_MDET_TX_VALID_MASK) { wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); vf->num_mdd_events++; dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i); } reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); vf->num_mdd_events++; dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", i); } if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { dev_info(&pf->pdev->dev, "Too many MDD events on VF %d, disabled\n", i); dev_info(&pf->pdev->dev, "Use PF Control I/F to re-enable the VF\n"); set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); } } /* re-enable mdd interrupt cause */ clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); i40e_flush(hw); } /** * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { #if 
IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; int i; if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) return; pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->pending_udp_bitmap & BIT_ULL(i)) { pf->pending_udp_bitmap &= ~BIT_ULL(i); port = pf->udp_ports[i].index; if (port) ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), pf->udp_ports[i].type, NULL, NULL); else ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { dev_info(&pf->pdev->dev, "%s vxlan port %d, index %d failed, err %s aq_err %s\n", port ? "add" : "delete", ntohs(port), i, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->udp_ports[i].index = 0; } } } #endif } /** * i40e_service_task - Run the driver's async subtasks * @work: pointer to work_struct containing our data **/ static void i40e_service_task(struct work_struct *work) { struct i40e_pf *pf = container_of(work, struct i40e_pf, service_task); unsigned long start_time = jiffies; /* don't bother with service tasks if a reset is in progress */ if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { i40e_service_event_complete(pf); return; } i40e_detect_recover_hung(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); i40e_sync_filters_subtask(pf); #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) i40e_sync_udp_filters_subtask(pf); #endif i40e_clean_adminq_subtask(pf); i40e_service_event_complete(pf); /* If the tasks have taken longer than one timer cycle or there * is more work to be done, reschedule the service task now * rather than wait for the timer to tick again. */ if (time_after(jiffies, (start_time + pf->service_timer_period)) || test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) i40e_service_event_schedule(pf); } /** * i40e_service_timer - timer callback * @data: pointer to PF struct **/ static void i40e_service_timer(unsigned long data) { struct i40e_pf *pf = (struct i40e_pf *)data; mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); i40e_service_event_schedule(pf); } /** * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI * @vsi: the VSI being configured **/ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; switch (vsi->type) { case I40E_VSI_MAIN: vsi->alloc_queue_pairs = pf->num_lan_qps; vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_q_vectors = pf->num_lan_msix; else vsi->num_q_vectors = 1; break; case I40E_VSI_FDIR: vsi->alloc_queue_pairs = 1; vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = 1; break; case I40E_VSI_VMDQ2: vsi->alloc_queue_pairs = pf->num_vmdq_qps; vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = pf->num_vmdq_msix; break; case I40E_VSI_SRIOV: vsi->alloc_queue_pairs = pf->num_vf_qps; vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); break; #ifdef I40E_FCOE case I40E_VSI_FCOE: vsi->alloc_queue_pairs = pf->num_fcoe_qps; vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE); vsi->num_q_vectors = pf->num_fcoe_msix; break; #endif /* I40E_FCOE */ default: WARN_ON(1); return 
-ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	int size;
	int ret = 0;

	/* allocate memory for both Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_list_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
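 *
 * Frees the shared Tx/Rx ring pointer array and, when requested, the
 * q_vector pointer array; the freed pointers are reset to NULL.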
* * On error: returns error code (negative) * On success: returns 0 **/ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) { /* free the ring and vector containers */ if (free_qvectors) { kfree(vsi->q_vectors); vsi->q_vectors = NULL; } kfree(vsi->tx_rings); vsi->tx_rings = NULL; vsi->rx_rings = NULL; } /** * i40e_clear_rss_config_user - clear the user configured RSS hash keys * and lookup table * @vsi: Pointer to VSI structure */ static void i40e_clear_rss_config_user(struct i40e_vsi *vsi) { if (!vsi) return; kfree(vsi->rss_hkey_user); vsi->rss_hkey_user = NULL; kfree(vsi->rss_lut_user); vsi->rss_lut_user = NULL; } /** * i40e_vsi_clear - Deallocate the VSI provided * @vsi: the VSI being un-configured **/ static int i40e_vsi_clear(struct i40e_vsi *vsi) { struct i40e_pf *pf; if (!vsi) return 0; if (!vsi->back) goto free_vsi; pf = vsi->back; mutex_lock(&pf->switch_mutex); if (!pf->vsi[vsi->idx]) { dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", vsi->idx, vsi->idx, vsi, vsi->type); goto unlock_vsi; } if (pf->vsi[vsi->idx] != vsi) { dev_err(&pf->pdev->dev, "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", pf->vsi[vsi->idx]->idx, pf->vsi[vsi->idx], pf->vsi[vsi->idx]->type, vsi->idx, vsi, vsi->type); goto unlock_vsi; } /* updates the PF for this cleared vsi */ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); i40e_vsi_free_arrays(vsi, true); i40e_clear_rss_config_user(vsi); pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi) pf->next_vsi = vsi->idx; unlock_vsi: mutex_unlock(&pf->switch_mutex); free_vsi: kfree(vsi); return 0; } /** * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI * @vsi: the VSI being cleaned **/ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) { int i; if (vsi->tx_rings && vsi->tx_rings[0]) { for (i = 0; i < vsi->alloc_queue_pairs; i++) { kfree_rcu(vsi->tx_rings[i], rcu); vsi->tx_rings[i] = NULL; vsi->rx_rings[i] = NULL; } } } /** * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI * @vsi: the VSI being configured **/ static int i40e_alloc_rings(struct i40e_vsi *vsi) { struct i40e_ring *tx_ring, *rx_ring; struct i40e_pf *pf = vsi->back; int i; /* Set basic values in the rings to be used later during open() */ for (i = 0; i < vsi->alloc_queue_pairs; i++) { /* allocate space for both Tx and Rx in one shot */ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); if (!tx_ring) goto err_out; tx_ring->queue_index = i; tx_ring->reg_idx = vsi->base_queue + i; tx_ring->ring_active = false; tx_ring->vsi = vsi; tx_ring->netdev = vsi->netdev; tx_ring->dev = &pf->pdev->dev; tx_ring->count = vsi->num_desc; tx_ring->size = 0; tx_ring->dcb_tc = 0; if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; vsi->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; rx_ring->queue_index = i; rx_ring->reg_idx = vsi->base_queue + i; rx_ring->ring_active = false; rx_ring->vsi = vsi; rx_ring->netdev = vsi->netdev; rx_ring->dev = &pf->pdev->dev; rx_ring->count = vsi->num_desc; rx_ring->size = 0; rx_ring->dcb_tc = 0; if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) set_ring_16byte_desc_enabled(rx_ring); else clear_ring_16byte_desc_enabled(rx_ring); vsi->rx_rings[i] = rx_ring; } return 0; err_out: i40e_vsi_clear_rings(vsi); return -ENOMEM; } /** * i40e_reserve_msix_vectors - 
Reserve MSI-X vectors in the kernel * @pf: board private structure * @vectors: the number of MSI-X vectors to request * * Returns the number of vectors reserved, or error **/ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) { vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, I40E_MIN_MSIX, vectors); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI-X vector reservation failed: %d\n", vectors); vectors = 0; } return vectors; } /** * i40e_init_msix - Setup the MSIX capability * @pf: board private structure * * Work with the OS to set up the MSIX vectors needed. * * Returns the number of vectors reserved or negative on failure **/ static int i40e_init_msix(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int vectors_left; int v_budget, i; int v_actual; if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return -ENODEV; /* The number of vectors we'll request will be comprised of: * - Add 1 for "other" cause for Admin Queue events, etc. * - The number of LAN queue pairs * - Queues being used for RSS. * We don't need as many as max_rss_size vectors. * use rss_size instead in the calculation since that * is governed by number of cpus in the system. * - assumes symmetric Tx/Rx pairing * - The number of VMDq pairs #ifdef I40E_FCOE * - The number of FCOE qps. #endif * Once we count this up, try the request. * * If we can't get what we want, we'll simplify to nearly nothing * and try again. If that still fails, we punt. */ vectors_left = hw->func_caps.num_msix_vectors; v_budget = 0; /* reserve one vector for miscellaneous handler */ if (vectors_left) { v_budget++; vectors_left--; } /* reserve vectors for the main PF traffic queues */ pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); vectors_left -= pf->num_lan_msix; v_budget += pf->num_lan_msix; /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (vectors_left) { v_budget++; vectors_left--; } else { pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; } } #ifdef I40E_FCOE /* can we reserve enough for FCoE? */ if (pf->flags & I40E_FLAG_FCOE_ENABLED) { if (!vectors_left) pf->num_fcoe_msix = 0; else if (vectors_left >= pf->num_fcoe_qps) pf->num_fcoe_msix = pf->num_fcoe_qps; else pf->num_fcoe_msix = 1; v_budget += pf->num_fcoe_msix; vectors_left -= pf->num_fcoe_msix; } #endif /* any vectors left over go for VMDq support */ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); /* if we're short on vectors for what's desired, we limit * the queues per vmdq. If this is still more than are * available, the user will need to change the number of * queues/vectors used by the PF later with the ethtool * channels command */ if (vmdq_vecs < vmdq_vecs_wanted) pf->num_vmdq_qps = 1; pf->num_vmdq_msix = pf->num_vmdq_qps; v_budget += vmdq_vecs; vectors_left -= vmdq_vecs; } pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!pf->msix_entries) return -ENOMEM; for (i = 0; i < v_budget; i++) pf->msix_entries[i].entry = i; v_actual = i40e_reserve_msix_vectors(pf, v_budget); if (v_actual != v_budget) { /* If we have limited resources, we will start with no vectors * for the special features and then allocate vectors to some * of these features based on the policy and at the end disable * the features that did not get any vectors. 
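	 * FCoE and VMDq are the first features to have their vectors trimmed
	 * when the reservation comes back short.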
*/ #ifdef I40E_FCOE pf->num_fcoe_qps = 0; pf->num_fcoe_msix = 0; #endif pf->num_vmdq_msix = 0; } if (v_actual < I40E_MIN_MSIX) { pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; return -ENODEV; } else if (v_actual == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; pf->num_lan_qps = 1; pf->num_lan_msix = 1; } else if (v_actual != v_budget) { int vec; /* reserve the misc vector */ vec = v_actual - 1; /* Scale vector usage down */ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_vsis = 1; pf->num_vmdq_qps = 1; pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; /* partition out the remaining vectors */ switch (vec) { case 2: pf->num_lan_msix = 1; break; case 3: #ifdef I40E_FCOE /* give one vector to FCoE */ if (pf->flags & I40E_FLAG_FCOE_ENABLED) { pf->num_lan_msix = 1; pf->num_fcoe_msix = 1; } #else pf->num_lan_msix = 2; #endif break; default: #ifdef I40E_FCOE /* give one vector to FCoE */ if (pf->flags & I40E_FLAG_FCOE_ENABLED) { pf->num_fcoe_msix = 1; vec--; } #endif /* give the rest to the PF */ pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); break; } } if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && (pf->num_vmdq_msix == 0)) { dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; } #ifdef I40E_FCOE if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_FCOE_ENABLED; } #endif return v_actual; } /** * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector * @vsi: the VSI being configured * @v_idx: index of the vector in the vsi struct * * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) { struct i40e_q_vector *q_vector; /* allocate q_vector */ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); if (!q_vector) return -ENOMEM; q_vector->vsi = vsi; q_vector->v_idx = v_idx; cpumask_set_cpu(v_idx, &q_vector->affinity_mask); if (vsi->netdev) netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll, NAPI_POLL_WEIGHT); q_vector->rx.latency_range = I40E_LOW_LATENCY; q_vector->tx.latency_range = I40E_LOW_LATENCY; /* tie q_vector and vsi together */ vsi->q_vectors[v_idx] = q_vector; return 0; } /** * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors * @vsi: the VSI being configured * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. 
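 * Any q_vectors that were already allocated are freed again on the
 * error path.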
**/ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int v_idx, num_q_vectors; int err; /* if not MSIX, give the one vector only to the LAN VSI */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) num_q_vectors = vsi->num_q_vectors; else if (vsi == pf->vsi[pf->lan_vsi]) num_q_vectors = 1; else return -EINVAL; for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { err = i40e_vsi_alloc_q_vector(vsi, v_idx); if (err) goto err_out; } return 0; err_out: while (v_idx--) i40e_free_q_vector(vsi, v_idx); return err; } /** * i40e_init_interrupt_scheme - Determine proper interrupt scheme * @pf: board private structure to initialize **/ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) { int vectors = 0; ssize_t size; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { vectors = i40e_init_msix(pf); if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | #endif I40E_FLAG_RSS_ENABLED | I40E_FLAG_DCB_CAPABLE | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_VMDQ_ENABLED); /* rework the queue expectations without MSIX */ i40e_determine_queue_usage(pf); } } if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); vectors = pci_enable_msi(pf->pdev); if (vectors < 0) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", vectors); pf->flags &= ~I40E_FLAG_MSI_ENABLED; } vectors = 1; /* one MSI or Legacy vector */ } if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); /* set up vector assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); pf->irq_pile = kzalloc(size, GFP_KERNEL); if (!pf->irq_pile) { dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); return -ENOMEM; } pf->irq_pile->num_entries = vectors; pf->irq_pile->search_hint = 0; /* track first vector for misc interrupts, ignore return */ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); return 0; } /** * i40e_setup_misc_vector - Setup the misc vector to handle non queue events * @pf: board private structure * * This sets up the handler for MSIX 0, which is used to manage the * non-queue interrupts, e.g. AdminQ and errors. This is not used * when in MSI or Legacy interrupt mode. 
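 *
 * Returns 0 on success or -EFAULT if the misc interrupt cannot be
 * requested.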
**/ static int i40e_setup_misc_vector(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; int err = 0; /* Only request the irq if this is the first time through, and * not when we're rebuilding after a Reset */ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { err = request_irq(pf->msix_entries[0].vector, i40e_intr, 0, pf->int_name, pf); if (err) { dev_info(&pf->pdev->dev, "request_irq for %s failed: %d\n", pf->int_name, err); return -EFAULT; } } i40e_enable_misc_int_causes(pf); /* associate no queues to the misc vector */ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); i40e_flush(hw); i40e_irq_dynamic_enable_icr0(pf); return err; } /** * i40e_config_rss_aq - Prepare for RSS using AQ commands * @vsi: vsi structure * @seed: RSS hash seed **/ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) { struct i40e_aqc_get_set_rss_key_data rss_key; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; bool pf_lut = false; u8 *rss_lut; int ret, i; memset(&rss_key, 0, sizeof(rss_key)); memcpy(&rss_key, seed, sizeof(rss_key)); rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); if (!rss_lut) return -ENOMEM; /* Populate the LUT with max no. of queues in round robin fashion */ for (i = 0; i < vsi->rss_table_size; i++) rss_lut[i] = i % vsi->rss_size; ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key); if (ret) { dev_info(&pf->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto config_rss_aq_out; } if (vsi->type == I40E_VSI_MAIN) pf_lut = true; ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut, vsi->rss_table_size); if (ret) dev_info(&pf->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); config_rss_aq_out: kfree(rss_lut); return ret; } /** * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used * @vsi: VSI structure **/ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) { u8 seed[I40E_HKEY_ARRAY_SIZE]; struct i40e_pf *pf = vsi->back; u8 *lut; int ret; if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) return 0; lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); kfree(lut); return ret; } /** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure * @seed: RSS hash seed * @lut: Lookup table * @lut_size: Lookup table size * * Returns 0 on success, negative on failure **/ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, const u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u8 i; /* Fill out hash function seed */ if (seed) { u32 *seed_dw = (u32 *)seed; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); } if (lut) { u32 *lut_dw = (u32 *)lut; if (lut_size != I40E_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); } i40e_flush(hw); return 0; } /** * i40e_get_rss_reg - Get the RSS keys and lut by reading registers * @vsi: Pointer to VSI structure * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries * @lut_size: Size of 
buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; u16 i; if (seed) { u32 *seed_dw = (u32 *)seed; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i)); } if (lut) { u32 *lut_dw = (u32 *)lut; if (lut_size != I40E_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i)); } return 0; } /** * i40e_config_rss - Configure RSS keys and lut * @vsi: Pointer to VSI structure * @seed: RSS hash seed * @lut: Lookup table * @lut_size: Lookup table size * * Returns 0 on success, negative on failure */ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) return i40e_config_rss_aq(vsi, seed, lut, lut_size); else return i40e_config_rss_reg(vsi, seed, lut, lut_size); } /** * i40e_get_rss - Get RSS keys and lut * @vsi: Pointer to VSI structure * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries * lut_size: Size of buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { return i40e_get_rss_reg(vsi, seed, lut, lut_size); } /** * i40e_fill_rss_lut - Fill the RSS lookup table with default values * @pf: Pointer to board private structure * @lut: Lookup table * @rss_table_size: Lookup table size * @rss_size: Range of queue number for hashing */ static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, u16 rss_table_size, u16 rss_size) { u16 i; for (i = 0; i < rss_table_size; i++) lut[i] = i % rss_size; } /** * i40e_pf_config_rss - Prepare for RSS if used * @pf: board private structure **/ static int i40e_pf_config_rss(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; u8 seed[I40E_HKEY_ARRAY_SIZE]; u8 *lut; struct i40e_hw *hw = &pf->hw; u32 reg_val; u64 hena; int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); hena |= i40e_pf_get_default_rss_hena(pf); wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); /* Determine the RSS table size based on the hardware capabilities */ reg_val = rd32(hw, I40E_PFQF_CTL_0); reg_val = (pf->rss_table_size == 512) ? (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); wr32(hw, I40E_PFQF_CTL_0, reg_val); /* Determine the RSS size of the VSI */ if (!vsi->rss_size) vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; /* Use user configured lut if there is one, otherwise use default */ if (vsi->rss_lut_user) memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); else i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); /* Use user configured hash key if there is one, otherwise * use default. */ if (vsi->rss_hkey_user) memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); else netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); kfree(lut); return ret; } /** * i40e_reconfig_rss_queues - change number of queues for rss and rebuild * @pf: board private structure * @queue_count: the requested queue count for rss. 
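 *
 * A change in the queue count triggers a PF reset and rebuild so that
 * queues and interrupt vectors can be redistributed.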
* * returns 0 if rss is not enabled, if enabled returns the final rss queue * count which may be different from the requested queue count. **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; int new_rss_size; if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; new_rss_size = min_t(int, queue_count, pf->rss_size_max); if (queue_count != vsi->num_queue_pairs) { vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); pf->alloc_rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true); /* Discard the user configured hash keys and lut, if less * queues are enabled. */ if (queue_count < vsi->rss_size) { i40e_clear_rss_config_user(vsi); dev_dbg(&pf->pdev->dev, "discard user configured hash keys and lut\n"); } /* Reset vsi->rss_size, as number of enabled queues changed */ vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); i40e_pf_config_rss(pf); } dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n", pf->alloc_rss_size, pf->rss_size_max); return pf->alloc_rss_size; } /** * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition * @pf: board private structure **/ i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) { i40e_status status; bool min_valid, max_valid; u32 max_bw, min_bw; status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, &min_valid, &max_valid); if (!status) { if (min_valid) pf->npar_min_bw = min_bw; if (max_valid) pf->npar_max_bw = max_bw; } return status; } /** * i40e_set_npar_bw_setting - Set BW settings for this PF partition * @pf: board private structure **/ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) { struct i40e_aqc_configure_partition_bw_data bw_data; i40e_status status; /* Set the valid bit for this PF */ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; /* Set the new bandwidths */ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); return status; } /** * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition * @pf: board private structure **/ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) { /* Commit temporary BW setting to permanent NVM image */ enum i40e_admin_queue_err last_aq_status; i40e_status ret; u16 nvm_word; if (pf->hw.partition_id != 1) { dev_info(&pf->pdev->dev, "Commit BW only works on partition 1! 
This is partition %d", pf->hw.partition_id); ret = I40E_NOT_SUPPORTED; goto bw_commit_out; } /* Acquire NVM for read access */ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, "Cannot acquire NVM for read access, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Read word 0x10 of NVM - SW compatibility word 1 */ ret = i40e_aq_read_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD, 0x10, sizeof(nvm_word), &nvm_word, false, NULL); /* Save off last admin queue command status before releasing * the NVM */ last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Wait a bit for NVM release to complete */ msleep(50); /* Acquire NVM for write access */ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, "Cannot acquire NVM for write access, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Write it back out unchanged to initiate update NVM, * which will force a write of the shadow (alt) RAM to * the NVM - thus storing the bandwidth values permanently. */ ret = i40e_aq_update_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD, 0x10, sizeof(nvm_word), &nvm_word, true, NULL); /* Save off last admin queue command status before releasing * the NVM */ last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) dev_info(&pf->pdev->dev, "BW settings NOT SAVED, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, last_aq_status)); bw_commit_out: return ret; } /** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize * * i40e_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). 
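 *
 * Returns 0 on success or -ENOMEM if the queue resource tracking pile
 * cannot be allocated.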
**/ static int i40e_sw_init(struct i40e_pf *pf) { int err = 0; int size; pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { if (I40E_DEBUG_USER & debug) pf->hw.debug_mask = debug; pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), I40E_DEFAULT_MSG_ENABLE); } /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | I40E_FLAG_LINK_POLLING_ENABLED | I40E_FLAG_MSIX_ENABLED; if (iommu_present(&pci_bus_type)) pf->flags |= I40E_FLAG_RX_PS_ENABLED; else pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; /* Set default ITR */ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues */ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); pf->alloc_rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; pf->alloc_rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); } /* MFP mode enabled */ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_npar_bw_setting(pf)) dev_warn(&pf->pdev->dev, "Could not get NPAR bw settings\n"); else dev_info(&pf->pdev->dev, "Min BW = %8.8x, Max BW = %8.8x\n", pf->npar_min_bw, pf->npar_max_bw); } /* FW/NVM is not yet fixed in this regard */ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; if (pf->flags & I40E_FLAG_MFP_ENABLED && pf->hw.num_partitions > 1) dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); else pf->flags |= I40E_FLAG_FD_SB_ENABLED; pf->fdir_pf_filter_count = pf->hw.func_caps.fd_filters_guaranteed; pf->hw.fdir_shared_filter_count = pf->hw.func_caps.fd_filters_best_effort; } if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); } #ifdef I40E_FCOE i40e_init_pf_fcoe(pf); #endif /* I40E_FCOE */ #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; pf->flags |= I40E_FLAG_SRIOV_ENABLED; pf->num_req_vfs = min_t(int, pf->hw.func_caps.num_vfs, I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ if (pf->hw.mac.type == I40E_MAC_X722) { pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | I40E_FLAG_128_QP_RSS_CAPABLE | I40E_FLAG_HW_ATR_EVICT_CAPABLE | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; } pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; /* By default FW has this off for performance reasons */ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; /* set up queue assignment tracking */ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); pf->qp_pile = kzalloc(size, GFP_KERNEL); if (!pf->qp_pile) { err = -ENOMEM; goto sw_init_done; } pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; pf->qp_pile->search_hint = 0; 
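	/* start Tx timeout recovery at the least disruptive level */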
pf->tx_timeout_recovery_level = 1; mutex_init(&pf->switch_mutex); /* If NPAR is enabled nudge the Tx scheduler */ if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) i40e_set_npar_bw_setting(pf); sw_init_done: return err; } /** * i40e_set_ntuple - set the ntuple feature flag and take action * @pf: board private structure to initialize * @features: the feature set that the stack is suggesting * * returns a bool to indicate if reset needs to happen **/ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) { bool need_reset = false; /* Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. */ if (features & NETIF_F_NTUPLE) { /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; pf->flags |= I40E_FLAG_FD_SB_ENABLED; } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { need_reset = true; i40e_fdir_filter_exit(pf); } pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; /* reset fd counters */ pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; pf->fdir_pf_active_filters = 0; pf->flags |= I40E_FLAG_FD_ATR_ENABLED; if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); /* if ATR was auto disabled it can be re-enabled. */ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; } return need_reset; } /** * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting **/ static int i40e_set_features(struct net_device *netdev, netdev_features_t features) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; bool need_reset; if (features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else i40e_vlan_stripping_disable(vsi); need_reset = i40e_set_ntuple(pf, features); if (need_reset) i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); return 0; } #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure * @port: The UDP port to look up * * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found **/ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) { u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->udp_ports[i].index == port) return i; } return i; } #endif /** * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up * @netdev: This physical port's netdev * @sa_family: Socket Family that VXLAN is notifying us about * @port: New UDP port number that VXLAN started listening to **/ static void i40e_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { #if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 next_idx; u8 idx; if (sa_family == AF_INET6) return; idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "vxlan port %d already offloaded\n", ntohs(port)); return; } /* Now check if there is space to add the new port */ next_idx = i40e_get_udp_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 
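		/* all UDP offload slots are already in use */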
netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", ntohs(port)); return; } /* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].index = port; pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; #endif } /** * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away * @netdev: This physical port's netdev * @sa_family: Socket Family that VXLAN is notifying us about * @port: UDP port number that VXLAN stopped listening to **/ static void i40e_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { #if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 idx; if (sa_family == AF_INET6) return; idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { /* if port exists, set it to 0 (mark for deletion) * and make it pending */ pf->udp_ports[idx].index = 0; pf->pending_udp_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; } else { netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } #endif } /** * i40e_add_geneve_port - Get notifications about GENEVE ports that come up * @netdev: This physical port's netdev * @sa_family: Socket Family that GENEVE is notifying us about * @port: New UDP port number that GENEVE started listening to **/ static void i40e_add_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { #if IS_ENABLED(CONFIG_GENEVE) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 next_idx; u8 idx; if (sa_family == AF_INET6) return; idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "udp port %d already offloaded\n", ntohs(port)); return; } /* Now check if there is space to add the new port */ next_idx = i40e_get_udp_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", ntohs(port)); return; } /* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].index = port; pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); #endif } /** * i40e_del_geneve_port - Get notifications about GENEVE ports that go away * @netdev: This physical port's netdev * @sa_family: Socket Family that GENEVE is notifying us about * @port: UDP port number that GENEVE stopped listening to **/ static void i40e_del_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { #if IS_ENABLED(CONFIG_GENEVE) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; u8 idx; if (sa_family == AF_INET6) return; idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { /* if port exists, set it to 0 (mark for deletion) * and make it pending */ pf->udp_ports[idx].index = 0; pf->pending_udp_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; dev_info(&pf->pdev->dev, "deleting geneve port %d\n", ntohs(port)); } else { netdev_warn(netdev, "geneve port %d was not found, not 
deleting\n", ntohs(port)); } #endif } static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) return -EOPNOTSUPP; ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); return 0; } /** * i40e_ndo_fdb_add - add an entry to the hardware database * @ndm: the input from the stack * @tb: pointer to array of nladdr (unused) * @dev: the net device pointer * @addr: the MAC address entry being added * @flags: instructions from stack about fdb operation */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_pf *pf = np->vsi->back; int err = 0; if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) return -EOPNOTSUPP; if (vid) { pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); return -EINVAL; } /* Hardware does not support aging addresses so if a * ndm_state is given only allow permanent addresses */ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { netdev_info(dev, "FDB only supports static addresses\n"); return -EINVAL; } if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) err = dev_uc_add_excl(dev, addr); else if (is_multicast_ether_addr(addr)) err = dev_mc_add_excl(dev, addr); else err = -EINVAL; /* Only return duplicate errors if NLM_F_EXCL is set */ if (err == -EEXIST && !(flags & NLM_F_EXCL)) err = 0; return err; } /** * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured * @nlh: RTNL message * * Inserts a new hardware bridge if not already created and * enables the bridging mode requested (VEB or VEPA). If the * hardware bridge has already been inserted and the request * is to change the mode then that requires a PF reset to * allow rebuild of the components with required hardware * bridge mode enabled. 
**/ static int i40e_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = NULL; struct nlattr *attr, *br_spec; int i, rem; /* Only for PF VSI for now */ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) return -EOPNOTSUPP; /* Find the HW bridge for PF VSI */ for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; } br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); nla_for_each_nested(attr, br_spec, rem) { __u16 mode; if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; mode = nla_get_u16(attr); if ((mode != BRIDGE_MODE_VEPA) && (mode != BRIDGE_MODE_VEB)) return -EINVAL; /* Insert a new HW bridge */ if (!veb) { veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { veb->bridge_mode = mode; i40e_config_bridge_mode(veb); } else { /* No Bridge HW offload available */ return -ENOENT; } break; } else if (mode != veb->bridge_mode) { /* Existing HW bridge but different mode needs reset */ veb->bridge_mode = mode; /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ if (mode == BRIDGE_MODE_VEB) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; else pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); break; } } return 0; } /** * i40e_ndo_bridge_getlink - Get the hardware bridge mode * @skb: skb buff * @pid: process id * @seq: RTNL message seq # * @dev: the netdev being configured * @filter_mask: unused * @nlflags: netlink flags passed in * * Return the mode in which the hardware bridge is operating in * i.e VEB or VEPA. **/ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 __always_unused filter_mask, int nlflags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = NULL; int i; /* Only for PF VSI for now */ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) return -EOPNOTSUPP; /* Find the HW bridge for the PF VSI */ for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; } if (!veb) return 0; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, nlflags, 0, 0, filter_mask, NULL); } /* Hardware supports L4 tunnel length of 128B (=2^7) which includes * inner mac plus all inner ethertypes. 
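 * Packets with longer tunnel headers are handed back to the stack for
 * software checksum and segmentation (see i40e_features_check()).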
*/ #define I40E_MAX_TUNNEL_HDR_LEN 128 /** * i40e_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ static netdev_features_t i40e_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { if (skb->encapsulation && ((skb_inner_network_header(skb) - skb_transport_header(skb)) > I40E_MAX_TUNNEL_HDR_LEN)) return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; } static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, .ndo_start_xmit = i40e_lan_xmit_frame, .ndo_get_stats64 = i40e_get_netdev_stats_struct, .ndo_set_rx_mode = i40e_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40e_set_mac, .ndo_change_mtu = i40e_change_mtu, .ndo_do_ioctl = i40e_ioctl, .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, #endif .ndo_setup_tc = i40e_setup_tc, #ifdef I40E_FCOE .ndo_fcoe_enable = i40e_fcoe_enable, .ndo_fcoe_disable = i40e_fcoe_disable, #endif .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, .ndo_set_vf_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, #if IS_ENABLED(CONFIG_VXLAN) .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, #endif #if IS_ENABLED(CONFIG_GENEVE) .ndo_add_geneve_port = i40e_add_geneve_port, .ndo_del_geneve_port = i40e_del_geneve_port, #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, }; /** * i40e_config_netdev - Setup the netdev flags * @vsi: the VSI being configured * * Returns 0 on success, negative value on failure **/ static int i40e_config_netdev(struct i40e_vsi *vsi) { u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_netdev_priv *np; struct net_device *netdev; u8 mac_addr[ETH_ALEN]; int etherdev_size; etherdev_size = sizeof(struct i40e_netdev_priv); netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); if (!netdev) return -ENOMEM; vsi->netdev = netdev; np = netdev_priv(netdev); np->vsi = vsi; netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_RXHASH | 0; if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) netdev->features |= NETIF_F_NTUPLE; /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, hw->mac.perm_addr); /* The following steps are necessary to prevent reception * of tagged packets - some older NVM configurations load a * default a MAC-VLAN filter that accepts any 
tagged packet * which must be replaced by a normal filter. */ if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); spin_unlock_bh(&vsi->mac_filter_list_lock); } } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", pf->vsi[pf->lan_vsi]->netdev->name); random_ether_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); } spin_lock_bh(&vsi->mac_filter_list_lock); i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); spin_unlock_bh(&vsi->mac_filter_list_lock); ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); /* vlan gets same features (except vlan offload) * after any tweaks for specific VSI types */ netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER); netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; /* Setup netdev TC information */ i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); netdev->netdev_ops = &i40e_netdev_ops; netdev->watchdog_timeo = 5 * HZ; i40e_set_ethtool_ops(netdev); #ifdef I40E_FCOE i40e_fcoe_config_netdev(netdev, vsi); #endif return 0; } /** * i40e_vsi_delete - Delete a VSI from the switch * @vsi: the VSI being removed * * Returns 0 on success, negative value on failure **/ static void i40e_vsi_delete(struct i40e_vsi *vsi) { /* remove default VSI is not allowed */ if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) return; i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); } /** * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB * @vsi: the VSI being queried * * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode **/ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) { struct i40e_veb *veb; struct i40e_pf *pf = vsi->back; /* Uplink is not a bridge so default to VEB */ if (vsi->veb_idx == I40E_NO_VEB) return 1; veb = pf->veb[vsi->veb_idx]; if (!veb) { dev_info(&pf->pdev->dev, "There is no veb associated with the bridge\n"); return -ENOENT; } /* Uplink is a bridge in VEPA mode */ if (veb->bridge_mode & BRIDGE_MODE_VEPA) { return 0; } else { /* Uplink is a bridge in VEB mode */ return 1; } /* VEPA is now default bridge, so return 0 */ return 0; } /** * i40e_add_vsi - Add a VSI to the switch * @vsi: the VSI being configured * * This initializes a VSI context depending on the VSI type to be added and * passes it down to the add_vsi aq command. **/ static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; u8 laa_macaddr[ETH_ALEN]; bool found_laa_mac_filter = false; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; struct i40e_mac_filter *f, *ftmp; u8 enabled_tc = 0x1; /* TC0 enabled */ int f_count = 0; memset(&ctxt, 0, sizeof(ctxt)); switch (vsi->type) { case I40E_VSI_MAIN: /* The PF's main VSI is already setup as part of the * device initialization, so we'll not bother with * the add_vsi call, but we will retrieve the current * VSI context. 
*/ ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, "couldn't get PF vsi config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -ENOENT; } vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; vsi->id = ctxt.vsi_number; enabled_tc = i40e_pf_get_tc_map(pf); /* MFP mode setup queue map and update VSI */ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, "update vsi failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } /* update the local VSI info queue map */ i40e_vsi_update_queue_map(vsi, &ctxt); vsi->info.valid_sections = 0; } else { /* Default/Main VSI is only enabled for TC0 * reconfigure it to enable all TCs that are * available on the port in SFP mode. * For MFP case the iSCSI PF would use this * flow to enable LAN+iSCSI TC. */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { dev_info(&pf->pdev->dev, "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", enabled_tc, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; } } break; case I40E_VSI_FDIR: ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; case I40E_VSI_VMDQ2: ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; /* This VSI is connected to VEB so the switch_id * should be set to zero by default. */ if (i40e_is_vsi_uplink_mode_veb(vsi)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; case I40E_VSI_SRIOV: ctxt.pf_num = hw->pf_id; ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VF; /* This VSI is connected to VEB so the switch_id * should be set to zero by default. 
*/ if (i40e_is_vsi_uplink_mode_veb(vsi)) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; if (pf->vf[vsi->vf_id].spoofchk) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; #ifdef I40E_FCOE case I40E_VSI_FCOE: ret = i40e_fcoe_vsi_init(vsi, &ctxt); if (ret) { dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); return ret; } break; #endif /* I40E_FCOE */ default: return -ENODEV; } if (vsi->type != I40E_VSI_MAIN) { ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "add vsi failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; vsi->id = ctxt.vsi_number; } spin_lock_bh(&vsi->mac_filter_list_lock); /* If macvlan filters already exist, force them to get loaded */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { f->changed = true; f_count++; /* Expected to have only one MAC filter entry for LAA in list */ if (f->is_laa && vsi->type == I40E_VSI_MAIN) { ether_addr_copy(laa_macaddr, f->macaddr); found_laa_mac_filter = true; } } spin_unlock_bh(&vsi->mac_filter_list_lock); if (found_laa_mac_filter) { struct i40e_aqc_remove_macvlan_element_data element; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, laa_macaddr); element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; ret = i40e_aq_remove_macvlan(hw, vsi->seid, &element, 1, NULL); if (ret) { /* some older FW has a different default */ element.flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; i40e_aq_remove_macvlan(hw, vsi->seid, &element, 1, NULL); } i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, laa_macaddr, NULL); } if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; pf->flags |= I40E_FLAG_FILTER_SYNC; } /* Update VSI BW information */ ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, "couldn't get vsi bw info, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* VSI is already added so not tearing that up */ ret = 0; } err: return ret; } /** * i40e_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * * Returns 0 on success or < 0 on error **/ int i40e_vsi_release(struct i40e_vsi *vsi) { struct i40e_mac_filter *f, *ftmp; struct i40e_veb *veb = NULL; struct i40e_pf *pf; u16 uplink_seid; int i, n; pf = vsi->back; /* release of a VEB-owner or last VSI is not allowed */ if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", vsi->seid, vsi->uplink_seid); return -ENODEV; } if (vsi == pf->vsi[pf->lan_vsi] && !test_bit(__I40E_DOWN, &pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } uplink_seid = vsi->uplink_seid; if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { vsi->netdev_registered = false; if (vsi->netdev) { /* results in a call to i40e_close() */ unregister_netdev(vsi->netdev); } } else { i40e_vsi_close(vsi); } i40e_vsi_disable_irq(vsi); } 
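	/* With the netdev unregistered/closed and the VSI's interrupts disabled
	 * above, drop every MAC/VLAN filter, push the now-empty filter list to
	 * the hardware, then delete the VSI from the switch and free its
	 * q_vectors, netdev and rings. A VEB left holding only its owner VSI
	 * is released further below.
	 */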
spin_lock_bh(&vsi->mac_filter_list_lock); list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, f->vlan, f->is_vf, f->is_netdev); spin_unlock_bh(&vsi->mac_filter_list_lock); i40e_sync_vsi_filters(vsi); i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); if (vsi->netdev) { free_netdev(vsi->netdev); vsi->netdev = NULL; } i40e_vsi_clear_rings(vsi); i40e_vsi_clear(vsi); /* If this was the last thing on the VEB, except for the * controlling VSI, remove the VEB, which puts the controlling * VSI onto the next level down in the switch. * * Well, okay, there's one more exception here: don't remove * the orphan VEBs yet. We'll wait for an explicit remove request * from up the network stack. */ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->uplink_seid == uplink_seid && (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { n++; /* count the VSIs */ } } for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == uplink_seid) n++; /* count the VEBs */ if (pf->veb[i]->seid == uplink_seid) veb = pf->veb[i]; } if (n == 0 && veb && veb->uplink_seid != 0) i40e_veb_release(veb); return 0; } /** * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI * @vsi: ptr to the VSI * * This should only be called after i40e_vsi_mem_alloc() which allocates the * corresponding SW VSI structure and initializes num_queue_pairs for the * newly allocated VSI. * * Returns 0 on success or negative on failure **/ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) { int ret = -ENOENT; struct i40e_pf *pf = vsi->back; if (vsi->q_vectors[0]) { dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", vsi->seid); return -EEXIST; } if (vsi->base_vector) { dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", vsi->seid, vsi->base_vector); return -EEXIST; } ret = i40e_vsi_alloc_q_vectors(vsi); if (ret) { dev_info(&pf->pdev->dev, "failed to allocate %d q_vector for VSI %d, ret=%d\n", vsi->num_q_vectors, vsi->seid, ret); vsi->num_q_vectors = 0; goto vector_setup_out; } /* In Legacy mode, we do not have to get any other vector since we * piggyback on the misc/ICR0 for queue interrupts. */ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return ret; if (vsi->num_q_vectors) vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d vectors for VSI %d, err=%d\n", vsi->num_q_vectors, vsi->seid, vsi->base_vector); i40e_vsi_free_q_vectors(vsi); ret = -ENOENT; goto vector_setup_out; } vector_setup_out: return ret; } /** * i40e_vsi_reinit_setup - return and reallocate resources for a VSI * @vsi: pointer to the vsi. * * This re-allocates a vsi's queue resources. * * Returns pointer to the successfully allocated and configured VSI sw struct * on success, otherwise returns NULL on failure. **/ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; u8 enabled_tc; int ret; i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_vsi_clear_rings(vsi); i40e_vsi_free_arrays(vsi, false); i40e_set_num_rings_in_vsi(vsi); ret = i40e_vsi_alloc_arrays(vsi, false); if (ret) goto err_vsi; ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d queues for VSI %d err %d\n", vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; /* Update the FW view of the VSI. 
Force a reset of TC and queue * layout configurations. */ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); /* assign it some queues */ ret = i40e_alloc_rings(vsi); if (ret) goto err_rings; /* map all of the rings to the q_vectors */ i40e_vsi_map_rings_to_vectors(vsi); return vsi; err_rings: i40e_vsi_free_q_vectors(vsi); if (vsi->netdev_registered) { vsi->netdev_registered = false; unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; } i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); return NULL; } /** * i40e_macaddr_init - explicitly write the mac address filters. * * @vsi: pointer to the vsi. * @macaddr: the MAC address * * This is needed when the macaddr has been obtained by other * means than the default, e.g., from Open Firmware or IDPROM. * Returns 0 on success, negative on failure **/ static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) { int ret; struct i40e_aqc_add_macvlan_element_data element; ret = i40e_aq_mac_address_write(&vsi->back->hw, I40E_AQC_WRITE_TYPE_LAA_WOL, macaddr, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "Addr change for VSI failed: %d\n", ret); return -EADDRNOTAVAIL; } memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, "add filter failed err %s aq_err %s\n", i40e_stat_str(&vsi->back->hw, ret), i40e_aq_str(&vsi->back->hw, vsi->back->hw.aq.asq_last_status)); } return ret; } /** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @type: VSI type * @uplink_seid: the switch element to link to * @param1: usage depends upon VSI type. For VF types, indicates VF id * * This allocates the sw VSI structure and its queue resources, then add a VSI * to the identified VEB. * * Returns pointer to the successfully allocated and configure VSI sw struct on * success, otherwise returns NULL on failure. 
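 *
 * Illustrative call (the arguments shown are an assumption for the sake of
 * example, not copied from an actual caller):
 *   vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->vsi[pf->lan_vsi]->seid, 0);
 * would create a VMDq VSI hanging off the switch element the main LAN VSI
 * points at, inserting a VEB first if one does not exist yet.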
**/ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, u16 uplink_seid, u32 param1) { struct i40e_vsi *vsi = NULL; struct i40e_veb *veb = NULL; int ret, i; int v_idx; /* The requested uplink_seid must be either * - the PF's port seid * no VEB is needed because this is the PF * or this is a Flow Director special case VSI * - seid of an existing VEB * - seid of a VSI that owns an existing VEB * - seid of a VSI that doesn't own a VEB * a new VEB is created and the VSI becomes the owner * - seid of the PF VSI, which is what creates the first VEB * this is a special case of the previous * * Find which uplink_seid we were given and create a new VEB if needed */ for (i = 0; i < I40E_MAX_VEB; i++) { if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { veb = pf->veb[i]; break; } } if (!veb && uplink_seid != pf->mac_seid) { for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { vsi = pf->vsi[i]; break; } } if (!vsi) { dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", uplink_seid); return NULL; } if (vsi->uplink_seid == pf->mac_seid) veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, vsi->tc_config.enabled_tc); else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { dev_info(&vsi->back->pdev->dev, "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; } /* We come up by default in VEPA mode if SRIOV is not * already enabled, in which case we can't force VEPA * mode. */ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { veb->bridge_mode = BRIDGE_MODE_VEPA; pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; } i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; } if (!veb) { dev_info(&pf->pdev->dev, "couldn't add VEB\n"); return NULL; } vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; uplink_seid = veb->seid; } /* get vsi sw struct */ v_idx = i40e_vsi_mem_alloc(pf, type); if (v_idx < 0) goto err_alloc; vsi = pf->vsi[v_idx]; if (!vsi) goto err_alloc; vsi->type = type; vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); if (type == I40E_VSI_MAIN) pf->lan_vsi = v_idx; else if (type == I40E_VSI_SRIOV) vsi->vf_id = param1; /* assign it some queues */ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, "failed to get tracking for %d queues for VSI %d err=%d\n", vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; /* get a VSI from the hardware */ vsi->uplink_seid = uplink_seid; ret = i40e_add_vsi(vsi); if (ret) goto err_vsi; switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: /* Apply relevant filters if a platform-specific mac * address was selected. 
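 * (I40E_FLAG_PF_MAC is set by i40e_get_platform_mac_addr() when a MAC is
 * found in Open Firmware or the SPARC IDPROM instead of the adapter NVM.)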
*/ if (!!(pf->flags & I40E_FLAG_PF_MAC)) { ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); if (ret) { dev_warn(&pf->pdev->dev, "could not set up macaddr; err %d\n", ret); } } case I40E_VSI_VMDQ2: case I40E_VSI_FCOE: ret = i40e_config_netdev(vsi); if (ret) goto err_netdev; ret = register_netdev(vsi->netdev); if (ret) goto err_netdev; vsi->netdev_registered = true; netif_carrier_off(vsi->netdev); #ifdef CONFIG_I40E_DCB /* Setup DCB netlink interface */ i40e_dcbnl_setup(vsi); #endif /* CONFIG_I40E_DCB */ /* fall through */ case I40E_VSI_FDIR: /* set up vectors and rings if needed */ ret = i40e_vsi_setup_vectors(vsi); if (ret) goto err_msix; ret = i40e_alloc_rings(vsi); if (ret) goto err_rings; /* map all of the rings to the q_vectors */ i40e_vsi_map_rings_to_vectors(vsi); i40e_vsi_reset_stats(vsi); break; default: /* no netdev or rings for the other VSI types */ break; } if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && (vsi->type == I40E_VSI_VMDQ2)) { ret = i40e_vsi_config_rss(vsi); } return vsi; err_rings: i40e_vsi_free_q_vectors(vsi); err_msix: if (vsi->netdev_registered) { vsi->netdev_registered = false; unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; } err_netdev: i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); err_alloc: return NULL; } /** * i40e_veb_get_bw_info - Query VEB BW information * @veb: the veb to query * * Query the Tx scheduler BW configuration data for given VEB **/ static int i40e_veb_get_bw_info(struct i40e_veb *veb) { struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; struct i40e_pf *pf = veb->pf; struct i40e_hw *hw = &pf->hw; u32 tc_bw_max; int ret = 0; int i; ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "query veb bw config failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, &ets_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "query veb bw ets config failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); veb->bw_max_quanta = ets_data.tc_bw_max; veb->is_abs_credits = bw_data.absolute_credits_enable; veb->enabled_tc = ets_data.tc_valid_bits; tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; veb->bw_tc_limit_credits[i] = le16_to_cpu(bw_data.tc_bw_limits[i]); veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); } out: return ret; } /** * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF * @pf: board private structure * * On error: returns error code (negative) * On success: returns vsi index in PF (positive) **/ static int i40e_veb_mem_alloc(struct i40e_pf *pf) { int ret = -ENOENT; struct i40e_veb *veb; int i; /* Need to protect the allocation of switch elements at the PF level */ mutex_lock(&pf->switch_mutex); /* VEB list may be fragmented if VEB creation/destruction has * been happening. We can afford to do a quick scan to look * for any free slots in the list. * * find next empty veb slot, looping back around if necessary */ i = 0; while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) i++; if (i >= I40E_MAX_VEB) { ret = -ENOMEM; goto err_alloc_veb; /* out of VEB slots! 
*/ } veb = kzalloc(sizeof(*veb), GFP_KERNEL); if (!veb) { ret = -ENOMEM; goto err_alloc_veb; } veb->pf = pf; veb->idx = i; veb->enabled_tc = 1; pf->veb[i] = veb; ret = i; err_alloc_veb: mutex_unlock(&pf->switch_mutex); return ret; } /** * i40e_switch_branch_release - Delete a branch of the switch tree * @branch: where to start deleting * * This uses recursion to find the tips of the branch to be * removed, deleting until we get back to and can delete this VEB. **/ static void i40e_switch_branch_release(struct i40e_veb *branch) { struct i40e_pf *pf = branch->pf; u16 branch_seid = branch->seid; u16 veb_idx = branch->idx; int i; /* release any VEBs on this VEB - RECURSION */ for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == branch->seid) i40e_switch_branch_release(pf->veb[i]); } /* Release the VSIs on this VEB, but not the owner VSI. * * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing * the VEB itself, so don't use (*branch) after this loop. */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (!pf->vsi[i]) continue; if (pf->vsi[i]->uplink_seid == branch_seid && (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { i40e_vsi_release(pf->vsi[i]); } } /* There's one corner case where the VEB might not have been * removed, so double check it here and remove it if needed. * This case happens if the veb was created from the debugfs * commands and no VSIs were added to it. */ if (pf->veb[veb_idx]) i40e_veb_release(pf->veb[veb_idx]); } /** * i40e_veb_clear - remove veb struct * @veb: the veb to remove **/ static void i40e_veb_clear(struct i40e_veb *veb) { if (!veb) return; if (veb->pf) { struct i40e_pf *pf = veb->pf; mutex_lock(&pf->switch_mutex); if (pf->veb[veb->idx] == veb) pf->veb[veb->idx] = NULL; mutex_unlock(&pf->switch_mutex); } kfree(veb); } /** * i40e_veb_release - Delete a VEB and free its resources * @veb: the VEB being removed **/ void i40e_veb_release(struct i40e_veb *veb) { struct i40e_vsi *vsi = NULL; struct i40e_pf *pf; int i, n = 0; pf = veb->pf; /* find the remaining VSI and check for extras */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { n++; vsi = pf->vsi[i]; } } if (n != 1) { dev_info(&pf->pdev->dev, "can't remove VEB %d with %d VSIs left\n", veb->seid, n); return; } /* move the remaining VSI to uplink veb */ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; if (veb->uplink_seid) { vsi->uplink_seid = veb->uplink_seid; if (veb->uplink_seid == pf->mac_seid) vsi->veb_idx = I40E_NO_VEB; else vsi->veb_idx = veb->veb_idx; } else { /* floating VEB */ vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; } i40e_aq_delete_element(&pf->hw, veb->seid, NULL); i40e_veb_clear(veb); } /** * i40e_add_veb - create the VEB in the switch * @veb: the VEB to be instantiated * @vsi: the controlling VSI **/ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; bool is_default = veb->pf->cur_promisc; bool is_cloud = false; int ret; /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, is_default, is_cloud, &veb->seid, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } /* get statistics counter */ ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, &veb->stats_idx, NULL, NULL, NULL); if (ret) { 
dev_info(&pf->pdev->dev, "couldn't get VEB statistics idx, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, "couldn't get VEB bw info, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); i40e_aq_delete_element(&pf->hw, veb->seid, NULL); return -ENOENT; } vsi->uplink_seid = veb->seid; vsi->veb_idx = veb->idx; vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; return 0; } /** * i40e_veb_setup - Set up a VEB * @pf: board private structure * @flags: VEB setup flags * @uplink_seid: the switch element to link to * @vsi_seid: the initial VSI seid * @enabled_tc: Enabled TC bit-map * * This allocates the sw VEB structure and links it into the switch * It is possible and legal for this to be a duplicate of an already * existing VEB. It is also possible for both uplink and vsi seids * to be zero, in order to create a floating VEB. * * Returns pointer to the successfully allocated VEB sw struct on * success, otherwise returns NULL on failure. **/ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, u16 vsi_seid, u8 enabled_tc) { struct i40e_veb *veb, *uplink_veb = NULL; int vsi_idx, veb_idx; int ret; /* if one seid is 0, the other must be 0 to create a floating relay */ if ((uplink_seid == 0 || vsi_seid == 0) && (uplink_seid + vsi_seid != 0)) { dev_info(&pf->pdev->dev, "one, not both seid's are 0: uplink=%d vsi=%d\n", uplink_seid, vsi_seid); return NULL; } /* make sure there is such a vsi and uplink */ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) break; if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { dev_info(&pf->pdev->dev, "vsi seid %d not found\n", vsi_seid); return NULL; } if (uplink_seid && uplink_seid != pf->mac_seid) { for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { if (pf->veb[veb_idx] && pf->veb[veb_idx]->seid == uplink_seid) { uplink_veb = pf->veb[veb_idx]; break; } } if (!uplink_veb) { dev_info(&pf->pdev->dev, "uplink seid %d not found\n", uplink_seid); return NULL; } } /* get veb sw struct */ veb_idx = i40e_veb_mem_alloc(pf); if (veb_idx < 0) goto err_alloc; veb = pf->veb[veb_idx]; veb->flags = flags; veb->uplink_seid = uplink_seid; veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); /* create the VEB in the switch */ ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); if (ret) goto err_veb; if (vsi_idx == pf->lan_vsi) pf->lan_veb = veb->idx; return veb; err_veb: i40e_veb_clear(veb); err_alloc: return NULL; } /** * i40e_setup_pf_switch_element - set PF vars based on switch type * @pf: board private structure * @ele: element we are building info from * @num_reported: total number of elements * @printconfig: should we print the contents * * helper function to assist in extracting a few useful SEID values. 
**/ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, struct i40e_aqc_switch_config_element_resp *ele, u16 num_reported, bool printconfig) { u16 downlink_seid = le16_to_cpu(ele->downlink_seid); u16 uplink_seid = le16_to_cpu(ele->uplink_seid); u8 element_type = ele->element_type; u16 seid = le16_to_cpu(ele->seid); if (printconfig) dev_info(&pf->pdev->dev, "type=%d seid=%d uplink=%d downlink=%d\n", element_type, seid, uplink_seid, downlink_seid); switch (element_type) { case I40E_SWITCH_ELEMENT_TYPE_MAC: pf->mac_seid = seid; break; case I40E_SWITCH_ELEMENT_TYPE_VEB: /* Main VEB? */ if (uplink_seid != pf->mac_seid) break; if (pf->lan_veb == I40E_NO_VEB) { int v; /* find existing or else empty VEB */ for (v = 0; v < I40E_MAX_VEB; v++) { if (pf->veb[v] && (pf->veb[v]->seid == seid)) { pf->lan_veb = v; break; } } if (pf->lan_veb == I40E_NO_VEB) { v = i40e_veb_mem_alloc(pf); if (v < 0) break; pf->lan_veb = v; } } pf->veb[pf->lan_veb]->seid = seid; pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; pf->veb[pf->lan_veb]->pf = pf; pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; break; case I40E_SWITCH_ELEMENT_TYPE_VSI: if (num_reported != 1) break; /* This is immediately after a reset so we can assume this is * the PF's VSI */ pf->mac_seid = uplink_seid; pf->pf_seid = downlink_seid; pf->main_vsi_seid = seid; if (printconfig) dev_info(&pf->pdev->dev, "pf_seid=%d main_vsi_seid=%d\n", pf->pf_seid, pf->main_vsi_seid); break; case I40E_SWITCH_ELEMENT_TYPE_PF: case I40E_SWITCH_ELEMENT_TYPE_VF: case I40E_SWITCH_ELEMENT_TYPE_EMP: case I40E_SWITCH_ELEMENT_TYPE_BMC: case I40E_SWITCH_ELEMENT_TYPE_PE: case I40E_SWITCH_ELEMENT_TYPE_PA: /* ignore these for now */ break; default: dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", element_type, seid); break; } } /** * i40e_fetch_switch_configuration - Get switch config from firmware * @pf: board private structure * @printconfig: should we print the contents * * Get the current switch configuration from the device and * extract a few useful SEID values. **/ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) { struct i40e_aqc_get_switch_config_resp *sw_config; u16 next_seid = 0; int ret = 0; u8 *aq_buf; int i; aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); if (!aq_buf) return -ENOMEM; sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; do { u16 num_reported, num_total; ret = i40e_aq_get_switch_config(&pf->hw, sw_config, I40E_AQ_LARGE_BUF, &next_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, "get switch config failed err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); kfree(aq_buf); return -ENOENT; } num_reported = le16_to_cpu(sw_config->header.num_reported); num_total = le16_to_cpu(sw_config->header.num_total); if (printconfig) dev_info(&pf->pdev->dev, "header: %d reported %d total\n", num_reported, num_total); for (i = 0; i < num_reported; i++) { struct i40e_aqc_switch_config_element_resp *ele = &sw_config->element[i]; i40e_setup_pf_switch_element(pf, ele, num_reported, printconfig); } } while (next_seid != 0); kfree(aq_buf); return ret; } /** * i40e_setup_pf_switch - Setup the HW switch on startup or after reset * @pf: board private structure * @reinit: if the Main VSI needs to re-initialized. 
* * Returns 0 on success, negative value on failure **/ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) { int ret; /* find out what's out there already */ ret = i40e_fetch_switch_configuration(pf, false); if (ret) { dev_info(&pf->pdev->dev, "couldn't fetch switch config, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } i40e_pf_reset_stats(pf); /* first time setup */ if (pf->lan_vsi == I40E_NO_VSI || reinit) { struct i40e_vsi *vsi = NULL; u16 uplink_seid; /* Set up the PF VSI associated with the PF's main VSI * that is already in the HW switch */ if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) uplink_seid = pf->veb[pf->lan_veb]->seid; else uplink_seid = pf->mac_seid; if (pf->lan_vsi == I40E_NO_VSI) vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); else if (reinit) vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); if (!vsi) { dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); i40e_fdir_teardown(pf); return -EAGAIN; } } else { /* force a reset of TC and queue layout configurations */ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); } i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); i40e_fdir_sb_setup(pf); /* Setup static PF queue filter control settings */ ret = i40e_setup_pf_filter_control(pf); if (ret) { dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", ret); /* Failure here should not stop continuing other steps */ } /* enable RSS in the HW, even for only one queue, as the stack can use * the hash */ if ((pf->flags & I40E_FLAG_RSS_ENABLED)) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ i40e_update_link_info(&pf->hw); i40e_link_event(pf); /* Initialize user-specific link properties */ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? true : false); i40e_ptp_init(pf); return ret; } /** * i40e_determine_queue_usage - Work out queue distribution * @pf: board private structure **/ static void i40e_determine_queue_usage(struct i40e_pf *pf) { int queues_left; pf->num_lan_qps = 0; #ifdef I40E_FCOE pf->num_fcoe_qps = 0; #endif /* Find the max queues to be put into basic use. We'll always be * using TC0, whether or not DCB is running, and TC0 will get the * big RSS set. 
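 *
 * Rough sketch of the split below (figures are illustrative only): with
 * MSI-X available, the LAN VSI gets max(rss_size_max, num_online_cpus())
 * queue pairs capped at num_tx_qp, one queue is held back for the Flow
 * Director sideband VSI when FD_SB is enabled, and whatever is left is
 * shared between the requested VF and VMDq queue pools.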
*/ queues_left = pf->hw.func_caps.num_tx_qp; if ((queues_left == 1) || !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { /* one qp for PF, no queues for anything else */ queues_left = 0; pf->alloc_rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | #endif I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_VMDQ_ENABLED); } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE))) { /* one qp for PF */ pf->alloc_rss_size = pf->num_lan_qps = 1; queues_left -= pf->num_lan_qps; pf->flags &= ~(I40E_FLAG_RSS_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | #endif I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_ENABLED | I40E_FLAG_VMDQ_ENABLED); } else { /* Not enough queues for all TCs */ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && (queues_left < I40E_MAX_TRAFFIC_CLASS)) { pf->flags &= ~I40E_FLAG_DCB_CAPABLE; dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } pf->num_lan_qps = max_t(int, pf->rss_size_max, num_online_cpus()); pf->num_lan_qps = min_t(int, pf->num_lan_qps, pf->hw.func_caps.num_tx_qp); queues_left -= pf->num_lan_qps; } #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) { if (I40E_DEFAULT_FCOE <= queues_left) { pf->num_fcoe_qps = I40E_DEFAULT_FCOE; } else if (I40E_MINIMUM_FCOE <= queues_left) { pf->num_fcoe_qps = I40E_MINIMUM_FCOE; } else { pf->num_fcoe_qps = 0; pf->flags &= ~I40E_FLAG_FCOE_ENABLED; dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); } queues_left -= pf->num_fcoe_qps; } #endif if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (queues_left > 1) { queues_left -= 1; /* save 1 queue for FD */ } else { pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); } } if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && pf->num_vf_qps && pf->num_req_vfs && queues_left) { pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / pf->num_vf_qps)); queues_left -= (pf->num_req_vfs * pf->num_vf_qps); } if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, (queues_left / pf->num_vmdq_qps)); queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); } pf->queues_left = queues_left; dev_dbg(&pf->pdev->dev, "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n", pf->hw.func_caps.num_tx_qp, !!(pf->flags & I40E_FLAG_FD_SB_ENABLED), pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); #ifdef I40E_FCOE dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); #endif } /** * i40e_setup_pf_filter_control - Setup PF static filter control * @pf: PF to be setup * * i40e_setup_pf_filter_control sets up a PF's initial filter control * settings. If PE/FCoE are enabled then it will also set the per PF * based filter sizes required for them. It also enables Flow director, * ethertype and macvlan type filter settings for the pf. 
* * Returns 0 on success, negative on failure **/ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) { struct i40e_filter_control_settings *settings = &pf->filter_settings; settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; /* Flow Director is enabled */ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) settings->enable_fdir = true; /* Ethtype and MACVLAN filters enabled for PF */ settings->enable_ethtype = true; settings->enable_macvlan = true; if (i40e_set_filter_control(&pf->hw, settings)) return -ENOENT; return 0; } #define INFO_STRING_LEN 255 #define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; char *buf; int i; buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL); if (!buf) return; i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); #ifdef CONFIG_PCI_IOV i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s", pf->hw.func_caps.num_vsis, pf->vsi[pf->lan_vsi]->num_queue_pairs, pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) i += snprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { i += snprintf(&buf[i], REMAIN(i), " FD_SB"); i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); } if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += snprintf(&buf[i], REMAIN(i), " DCB"); #if IS_ENABLED(CONFIG_VXLAN) i += snprintf(&buf[i], REMAIN(i), " VxLAN"); #endif #if IS_ENABLED(CONFIG_GENEVE) i += snprintf(&buf[i], REMAIN(i), " Geneve"); #endif if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) i += snprintf(&buf[i], REMAIN(i), " FCOE"); #endif if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) i += snprintf(&buf[i], REMAIN(i), " VEB"); else i += snprintf(&buf[i], REMAIN(i), " VEPA"); dev_info(&pf->pdev->dev, "%s\n", buf); kfree(buf); WARN_ON(i > INFO_STRING_LEN); } /** * i40e_get_platform_mac_addr - get platform-specific MAC address * * @pdev: PCI device information struct * @pf: board private structure * * Look up the MAC address in Open Firmware on systems that support it, * and use IDPROM on SPARC if no OF address is found. On return, the * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value * has been selected. **/ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) { struct device_node *dp = pci_device_to_OF_node(pdev); const unsigned char *addr; u8 *mac_addr = pf->hw.mac.addr; pf->flags &= ~I40E_FLAG_PF_MAC; addr = of_get_mac_address(dp); if (addr) { ether_addr_copy(mac_addr, addr); pf->flags |= I40E_FLAG_PF_MAC; #ifdef CONFIG_SPARC } else { ether_addr_copy(mac_addr, idprom->id_ethaddr); pf->flags |= I40E_FLAG_PF_MAC; #endif /* CONFIG_SPARC */ } } /** * i40e_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl * * i40e_probe initializes a PF identified by a pci_dev structure. * The OS initialization, configuring of the PF private structure, * and a hardware reset occur. 
* * Returns 0 on success, negative on failure **/ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; u16 wol_nvm_bits; u16 link_status; int err; u32 len; u32 val; u32 i; u8 set_fc_aq_fail; err = pci_enable_device_mem(pdev); if (err) return err; /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); goto err_dma; } } /* set up pci connections */ err = pci_request_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM), i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); goto err_pci_reg; } pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); /* Now that we have a PCI connection, we need to do the * low level device setup. This is primarily setting up * the Admin Queue structures and then querying for the * device's current profile information. */ pf = kzalloc(sizeof(*pf), GFP_KERNEL); if (!pf) { err = -ENOMEM; goto err_pf_alloc; } pf->next_vsi = 0; pf->pdev = pdev; set_bit(__I40E_DOWN, &pf->state); hw = &pf->hw; hw->back = pf; pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), I40E_MAX_CSR_SPACE); hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); if (!hw->hw_addr) { err = -EIO; dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", (unsigned int)pci_resource_start(pdev, 0), pf->ioremap_len, err); goto err_ioremap; } hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); pf->instance = pfs_found; if (debug != -1) { pf->msg_enable = pf->hw.debug_mask; pf->msg_enable = debug; } /* do a special CORER for clearing PXE mode once at init */ if (hw->revision_id == 0 && (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); i40e_flush(hw); msleep(200); pf->corer_count++; i40e_clear_pxe_mode(hw); } /* Reset here to make sure all is clean and to define PF 'n' */ i40e_clear_hw(hw); err = i40e_pf_reset(hw); if (err) { dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); goto err_pf_reset; } pf->pfr_count++; hw->aq.num_arq_entries = I40E_AQ_LEN; hw->aq.num_asq_entries = I40E_AQ_LEN; hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); err = i40e_init_shared_code(hw); if (err) { dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", err); goto err_pf_reset; } /* set up a default setting for link flow control */ pf->hw.fc.requested_mode = I40E_FC_NONE; /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); err = i40e_init_adminq(hw); if (err) { if (err == I40E_ERR_FIRMWARE_API_VERSION) dev_info(&pdev->dev, "The driver for the device stopped because the NVM image is newer than expected. 
You must install the most recent version of the network driver.\n"); else dev_info(&pdev->dev, "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n"); goto err_pf_reset; } /* provide nvm, fw, api versions */ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, i40e_nvm_version_str(hw)); if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) dev_info(&pdev->dev, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); i40e_verify_eeprom(pf); /* Rev 0 hardware was never productized */ if (hw->revision_id < 1) dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); i40e_clear_pxe_mode(hw); err = i40e_get_capabilities(pf); if (err) goto err_adminq_setup; err = i40e_sw_init(pf); if (err) { dev_info(&pdev->dev, "sw_init failed: %d\n", err); goto err_sw_init; } err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); if (err) { dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); goto err_init_lan_hmc; } err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (err) { dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); err = -ENOENT; goto err_configure_lan_hmc; } /* Disable LLDP for NICs that have firmware versions lower than v4.3. 
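 * (i.e. any 4.x firmware older than 4.3, and everything before major
 * version 4, per the version check just below.)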
* Ignore error return codes because if it was already disabled via * hardware settings this will fail */ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4)) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, NULL); } i40e_get_mac_addr(hw, hw->mac.addr); /* allow a platform config to override the HW addr */ i40e_get_platform_mac_addr(pdev, pf); if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; goto err_mac_addr; } dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) pf->flags |= I40E_FLAG_PORT_ID_VALID; #ifdef I40E_FCOE err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); if (err) dev_info(&pdev->dev, "(non-fatal) SAN MAC retrieval failed: %d\n", err); if (!is_valid_ether_addr(hw->mac.san_addr)) { dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", hw->mac.san_addr); ether_addr_copy(hw->mac.san_addr, hw->mac.addr); } dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); #endif /* I40E_FCOE */ pci_set_drvdata(pdev, pf); pci_save_state(pdev); #ifdef CONFIG_I40E_DCB err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); pf->flags &= ~I40E_FLAG_DCB_CAPABLE; /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ /* set up periodic task facility */ setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); clear_bit(__I40E_SERVICE_SCHED, &pf->state); pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1) pf->wol_en = false; else pf->wol_en = true; device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); /* set up the main switch operations */ i40e_determine_queue_usage(pf); err = i40e_init_interrupt_scheme(pf); if (err) goto err_switch_setup; /* The number of VSIs reported by the FW is the minimum guaranteed * to us; HW supports far more and we share the remaining pool with * the other PFs. We allocate space for more than the guarantee with * the understanding that we might not get them all later. */ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; else pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. 
*/ len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; pf->vsi = kzalloc(len, GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_switch_setup; } #ifdef CONFIG_PCI_IOV /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, &pf->state)) { if (pci_num_vf(pdev)) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; } #endif err = i40e_setup_pf_switch(pf, false); if (err) { dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } /* Make sure flow control is set according to current settings */ err = i40e_set_fc(hw, &set_fc_aq_fail, true); if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET) dev_dbg(&pf->pdev->dev, "Set fc with err %s aq_err %s on get_phy_cap\n", i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET) dev_dbg(&pf->pdev->dev, "Set fc with err %s aq_err %s on set_phy_config\n", i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE) dev_dbg(&pf->pdev->dev, "Set fc with err %s aq_err %s on get_link_info\n", i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); /* if FDIR VSI was set up, start it now */ for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_vsi_open(pf->vsi[i]); break; } } /* driver is only interested in link up/down and module qualification * reports from firmware */ err = i40e_aq_set_phy_int_mask(&pf->hw, I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); if (err) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. */ val = rd32(hw, I40E_REG_MSS); if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) { val &= ~I40E_REG_MSS_MIN_MASK; val |= I40E_64BYTE_MSS; wr32(hw, I40E_REG_MSS, val); } if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. */ clear_bit(__I40E_DOWN, &pf->state); /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. In case of legacy and MSI * the misc functionality and queue processing is combined in * the same vector and that gets setup at open. 
*/ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { err = i40e_setup_misc_vector(pf); if (err) { dev_info(&pdev->dev, "setup of misc vector failed: %d\n", err); goto err_vsis; } } #ifdef CONFIG_PCI_IOV /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, &pf->state)) { u32 val; /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); i40e_flush(hw); if (pci_num_vf(pdev)) { dev_info(&pdev->dev, "Active VFs found, allocating resources.\n"); err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); if (err) dev_info(&pdev->dev, "Error %d allocating resources for existing VFs\n", err); } } #endif /* CONFIG_PCI_IOV */ pfs_found++; i40e_dbg_pf_init(pf); /* tell the firmware that we're starting */ i40e_send_version(pf); /* since everything's happy, start the service_task timer */ mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); #ifdef I40E_FCOE /* create FCoE interface */ i40e_fcoe_vsi_setup(pf); #endif #define PCI_SPEED_SIZE 8 #define PCI_WIDTH_SIZE 8 /* Devices on the IOSF bus do not have this information * and will report PCI Gen 1 x 1 by default so don't bother * checking them. */ if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) { char speed[PCI_SPEED_SIZE] = "Unknown"; char width[PCI_WIDTH_SIZE] = "Unknown"; /* Get the negotiated link width and speed from PCI config * space */ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); i40e_set_pci_config_data(hw, link_status); switch (hw->bus.speed) { case i40e_bus_speed_8000: strncpy(speed, "8.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_5000: strncpy(speed, "5.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_2500: strncpy(speed, "2.5", PCI_SPEED_SIZE); break; default: break; } switch (hw->bus.width) { case i40e_bus_width_pcie_x8: strncpy(width, "8", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x4: strncpy(width, "4", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x2: strncpy(width, "2", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x1: strncpy(width, "1", PCI_WIDTH_SIZE); break; default: break; } dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", speed, width); if (hw->bus.width < i40e_bus_width_pcie_x8 || hw->bus.speed < i40e_bus_speed_8000) { dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } } /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; /* get the supported phy types from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); if (err) dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type); /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out * PAUSE or PFC frames and potentially controlling traffic for other * PF/VF VSIs. 
* The FW can still send Flow control frames if enabled. */ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, pf->main_vsi_seid); /* print a string summarizing features */ i40e_print_features(pf); return 0; /* Unwind what we've done if something failed in the setup */ err_vsis: set_bit(__I40E_DOWN, &pf->state); i40e_clear_interrupt_scheme(pf); kfree(pf->vsi); err_switch_setup: i40e_reset_interrupt_capability(pf); del_timer_sync(&pf->service_timer); err_mac_addr: err_configure_lan_hmc: (void)i40e_shutdown_lan_hmc(hw); err_init_lan_hmc: kfree(pf->qp_pile); err_sw_init: err_adminq_setup: (void)i40e_shutdown_adminq(hw); err_pf_reset: iounmap(hw->hw_addr); err_ioremap: kfree(pf); err_pf_alloc: pci_disable_pcie_error_reporting(pdev); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * i40e_remove - Device removal routine * @pdev: PCI device information struct * * i40e_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; i40e_status ret_code; int i; i40e_dbg_pf_exit(pf); i40e_ptp_stop(pf); /* Disable RSS in hw */ wr32(hw, I40E_PFQF_HENA(0), 0); wr32(hw, I40E_PFQF_HENA(1), 0); /* no more scheduling of any task */ set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; } i40e_fdir_teardown(pf); /* If there is a switch structure or any orphans, remove them. * This will leave only the PF's VSI remaining. */ for (i = 0; i < I40E_MAX_VEB; i++) { if (!pf->veb[i]) continue; if (pf->veb[i]->uplink_seid == pf->mac_seid || pf->veb[i]->uplink_seid == 0) i40e_switch_branch_release(pf->veb[i]); } /* Now we can shutdown the PF's VSI, just before we kill * adminq and hmc. */ if (pf->vsi[pf->lan_vsi]) i40e_vsi_release(pf->vsi[pf->lan_vsi]); /* shutdown and destroy the HMC */ if (pf->hw.hmc.hmc_obj) { ret_code = i40e_shutdown_lan_hmc(&pf->hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the HMC resources: %d\n", ret_code); } /* shutdown the adminq */ ret_code = i40e_shutdown_adminq(&pf->hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the Admin Queue resources: %d\n", ret_code); /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { i40e_vsi_clear_rings(pf->vsi[i]); i40e_vsi_clear(pf->vsi[i]); pf->vsi[i] = NULL; } } for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); pf->veb[i] = NULL; } kfree(pf->qp_pile); kfree(pf->vsi); iounmap(pf->hw.hw_addr); kfree(pf); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } /** * i40e_pci_error_detected - warning that something funky happened in PCI land * @pdev: PCI device information struct * * Called to warn that something happened and the error handling steps * are in progress. Allows the driver to quiesce things, be ready for * remediation.
**/ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, enum pci_channel_state error) { struct i40e_pf *pf = pci_get_drvdata(pdev); dev_info(&pdev->dev, "%s: error %d\n", __func__, error); /* shutdown all operations */ if (!test_bit(__I40E_SUSPENDED, &pf->state)) { rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); } /* Request a slot reset */ return PCI_ERS_RESULT_NEED_RESET; } /** * i40e_pci_error_slot_reset - a PCI slot reset just happened * @pdev: PCI device information struct * * Called to find if the driver can work with the device now that * the pci slot has been reset. If a basic connection seems good * (registers are readable and have sane content) then return a * happy little PCI_ERS_RESULT_xxx. **/ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); pci_ers_result_t result; int err; u32 reg; dev_dbg(&pdev->dev, "%s\n", __func__); if (pci_enable_device_mem(pdev)) { dev_info(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); pci_wake_from_d3(pdev, false); reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); if (reg == 0) result = PCI_ERS_RESULT_RECOVERED; else result = PCI_ERS_RESULT_DISCONNECT; } err = pci_cleanup_aer_uncorrect_error_status(pdev); if (err) { dev_info(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err); /* non-fatal, continue */ } return result; } /** * i40e_pci_error_resume - restart operations after PCI error recovery * @pdev: PCI device information struct * * Called to allow the driver to bring things back up after PCI error * and/or reset recovery has finished. **/ static void i40e_pci_error_resume(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s\n", __func__); if (test_bit(__I40E_SUSPENDED, &pf->state)) return; rtnl_lock(); i40e_handle_reset_warning(pf); rtnl_unlock(); } /** * i40e_shutdown - PCI callback for shutting down * @pdev: PCI device information struct **/ static void i40e_shutdown(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); i40e_fdir_teardown(pf); rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); i40e_clear_interrupt_scheme(pf); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); } } #ifdef CONFIG_PM /** * i40e_suspend - PCI callback for moving to D3 * @pdev: PCI device information struct **/ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); return 0; } /** * i40e_resume - PCI callback for waking up from D3 * @pdev: PCI device information struct **/ static int i40e_resume(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* pci_restore_state() clears dev->state_saved, so * call pci_save_state() again to restore it. */ pci_save_state(pdev); err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); /* no wakeup events while running */ pci_wake_from_d3(pdev, false); /* handling the reset will rebuild the device state */ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { clear_bit(__I40E_DOWN, &pf->state); rtnl_lock(); i40e_reset_and_rebuild(pf, false); rtnl_unlock(); } return 0; } #endif static const struct pci_error_handlers i40e_err_handler = { .error_detected = i40e_pci_error_detected, .slot_reset = i40e_pci_error_slot_reset, .resume = i40e_pci_error_resume, }; static struct pci_driver i40e_driver = { .name = i40e_driver_name, .id_table = i40e_pci_tbl, .probe = i40e_probe, .remove = i40e_remove, #ifdef CONFIG_PM .suspend = i40e_suspend, .resume = i40e_resume, #endif .shutdown = i40e_shutdown, .err_handler = &i40e_err_handler, .sriov_configure = i40e_pci_sriov_configure, }; /** * i40e_init_module - Driver registration routine * * i40e_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init i40e_init_module(void) { pr_info("%s: %s - version %s\n", i40e_driver_name, i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); i40e_dbg_init(); return pci_register_driver(&i40e_driver); } module_init(i40e_init_module); /** * i40e_exit_module - Driver exit cleanup routine * * i40e_exit_module is called just before the driver is removed * from memory. **/ static void __exit i40e_exit_module(void) { pci_unregister_driver(&i40e_driver); i40e_dbg_exit(); } module_exit(i40e_exit_module);
omor1/linux-430
drivers/net/ethernet/intel/i40e/i40e_main.c
C
gpl-2.0
318,334
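The i40e_probe() path in the drivers/net/ethernet/intel/i40e/i40e_main.c record above unwinds failures through a ladder of goto labels (err_vsis, err_switch_setup, ..., err_dma): a failure at any stage jumps to its label and then falls through the cleanups of every earlier stage in reverse order. The sketch below is a minimal, self-contained illustration of that unwind idiom only; every demo_* name is hypothetical and is not an i40e symbol.

/* Sketch of the goto-based unwind used by i40e_probe(): each stage that can
 * fail jumps to a label that releases only the stages already completed,
 * in reverse order.  All demo_* names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_dev {
	int *ring;		/* stage 1 resource */
	int *vsi;		/* stage 2 resource */
};

static int demo_setup_switch(struct demo_dev *dev)
{
	(void)dev;
	return -1;		/* simulate a late failure so the unwind runs */
}

static int demo_probe(struct demo_dev *dev)
{
	int err;

	dev->ring = malloc(64 * sizeof(*dev->ring));	/* stage 1 */
	if (!dev->ring) {
		err = -1;
		goto err_ring;		/* nothing acquired yet */
	}

	dev->vsi = malloc(16 * sizeof(*dev->vsi));	/* stage 2 */
	if (!dev->vsi) {
		err = -1;
		goto err_vsi;		/* undo stage 1 only */
	}

	err = demo_setup_switch(dev);			/* stage 3 */
	if (err)
		goto err_switch;	/* undo stage 2, then fall into stage 1 cleanup */

	return 0;

err_switch:
	free(dev->vsi);
	dev->vsi = NULL;
err_vsi:
	free(dev->ring);
	dev->ring = NULL;
err_ring:
	return err;
}

int main(void)
{
	struct demo_dev dev = { 0 };

	printf("demo_probe returned %d\n", demo_probe(&dev));
	return 0;
}

Ordering the labels so that later-stage labels sit above earlier-stage ones is what lets a single forward jump release exactly the resources acquired so far and nothing more, which is the same property the err_* ladder in i40e_probe() relies on.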
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js" type="text/javascript" charset="utf-8"></script> <script src="specimen_files/easytabs.js" type="text/javascript" charset="utf-8"></script> <link rel="stylesheet" href="specimen_files/specimen_stylesheet.css" type="text/css" charset="utf-8" /> <link rel="stylesheet" href="stylesheet.css" type="text/css" charset="utf-8" /> <style type="text/css"> body{ font-family: 'helveticabold'; } </style> <title>Helvetica Bold Specimen</title> <script type="text/javascript" charset="utf-8"> $(document).ready(function() { $('#container').easyTabs({defaultContent:1}); }); </script> </head> <body> <div id="container"> <div id="header"> Helvetica Bold </div> <ul class="tabs"> <li><a href="#specimen">Specimen</a></li> <li><a href="#layout">Sample Layout</a></li> <li><a href="#glyphs">Glyphs &amp; Languages</a></li> <li><a href="#installing">Installing Webfonts</a></li> </ul> <div id="main_content"> <div id="specimen"> <div class="section"> <div class="grid12 firstcol"> <div class="huge">AaBb</div> </div> </div> <div class="section"> <div class="glyph_range">A&#x200B;B&#x200b;C&#x200b;D&#x200b;E&#x200b;F&#x200b;G&#x200b;H&#x200b;I&#x200b;J&#x200b;K&#x200b;L&#x200b;M&#x200b;N&#x200b;O&#x200b;P&#x200b;Q&#x200b;R&#x200b;S&#x200b;T&#x200b;U&#x200b;V&#x200b;W&#x200b;X&#x200b;Y&#x200b;Z&#x200b;a&#x200b;b&#x200b;c&#x200b;d&#x200b;e&#x200b;f&#x200b;g&#x200b;h&#x200b;i&#x200b;j&#x200b;k&#x200b;l&#x200b;m&#x200b;n&#x200b;o&#x200b;p&#x200b;q&#x200b;r&#x200b;s&#x200b;t&#x200b;u&#x200b;v&#x200b;w&#x200b;x&#x200b;y&#x200b;z&#x200b;1&#x200b;2&#x200b;3&#x200b;4&#x200b;5&#x200b;6&#x200b;7&#x200b;8&#x200b;9&#x200b;0&#x200b;&amp;&#x200b;.&#x200b;,&#x200b;?&#x200b;!&#x200b;&#64;&#x200b;(&#x200b;)&#x200b;#&#x200b;$&#x200b;%&#x200b;*&#x200b;+&#x200b;-&#x200b;=&#x200b;:&#x200b;;</div> </div> <div class="section"> <div class="grid12 firstcol"> <table class="sample_table"> <tr><td>10</td><td class="size10">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>11</td><td class="size11">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>12</td><td class="size12">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>13</td><td class="size13">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>14</td><td class="size14">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>16</td><td class="size16">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>18</td><td 
class="size18">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>20</td><td class="size20">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>24</td><td class="size24">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>30</td><td class="size30">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>36</td><td class="size36">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>48</td><td class="size48">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>60</td><td class="size60">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>72</td><td class="size72">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> <tr><td>90</td><td class="size90">abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ</td></tr> </table> </div> </div> <div class="section" id="bodycomparison"> <div id="xheight"> <div class="fontbody">&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;&#x25FC;body</div><div class="arialbody">body</div><div class="verdanabody">body</div><div class="georgiabody">body</div></div> <div class="fontbody" style="z-index:1"> body<span>Helvetica Bold</span> </div> <div class="arialbody" style="z-index:1"> body<span>Arial</span> </div> <div class="verdanabody" style="z-index:1"> body<span>Verdana</span> </div> <div class="georgiabody" style="z-index:1"> body<span>Georgia</span> </div> </div> <div class="section psample psample_row1" id=""> <div class="grid2 firstcol"> <p class="size10"><span>10.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid3"> <p class="size11"><span>11.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. 
Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid3"> <p class="size12"><span>12.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid4"> <p class="size13"><span>13.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="white_blend"></div> </div> <div class="section psample psample_row2" id=""> <div class="grid3 firstcol"> <p class="size14"><span>14.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid4"> <p class="size16"><span>16.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid5"> <p class="size18"><span>18.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="white_blend"></div> </div> <div class="section psample psample_row3" id=""> <div class="grid5 firstcol"> <p class="size20"><span>20.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid7"> <p class="size24"><span>24.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="white_blend"></div> </div> <div class="section psample psample_row4" id=""> <div class="grid12 firstcol"> <p class="size30"><span>30.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. 
Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="white_blend"></div> </div> <div class="section psample psample_row1 fullreverse"> <div class="grid2 firstcol"> <p class="size10"><span>10.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid3"> <p class="size11"><span>11.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid3"> <p class="size12"><span>12.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid4"> <p class="size13"><span>13.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="black_blend"></div> </div> <div class="section psample psample_row2 fullreverse"> <div class="grid3 firstcol"> <p class="size14"><span>14.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid4"> <p class="size16"><span>16.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid5"> <p class="size18"><span>18.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="black_blend"></div> </div> <div class="section psample fullreverse psample_row3" id=""> <div class="grid5 firstcol"> <p class="size20"><span>20.</span>Aenean lacinia bibendum nulla sed consectetur. 
Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="grid7"> <p class="size24"><span>24.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="black_blend"></div> </div> <div class="section psample fullreverse psample_row4" id="" style="border-bottom: 20px #000 solid;"> <div class="grid12 firstcol"> <p class="size30"><span>30.</span>Aenean lacinia bibendum nulla sed consectetur. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Nullam id dolor id nibh ultricies vehicula ut id elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nulla vitae elit libero, a pharetra augue.</p> </div> <div class="black_blend"></div> </div> </div> <div id="layout"> <div class="section"> <div class="grid12 firstcol"> <h1>Lorem Ipsum Dolor</h1> <h2>Etiam porta sem malesuada magna mollis euismod</h2> <p class="byline">By <a href="#link">Aenean Lacinia</a></p> </div> </div> <div class="section"> <div class="grid8 firstcol"> <p class="large">Donec sed odio dui. Morbi leo risus, porta ac consectetur ac, vestibulum at eros. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. </p> <h3>Pellentesque ornare sem</h3> <p>Maecenas sed diam eget risus varius blandit sit amet non magna. Maecenas faucibus mollis interdum. Donec ullamcorper nulla non metus auctor fringilla. Nullam id dolor id nibh ultricies vehicula ut id elit. Nullam id dolor id nibh ultricies vehicula ut id elit. </p> <p>Aenean eu leo quam. Pellentesque ornare sem lacinia quam venenatis vestibulum. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. </p> <p>Nulla vitae elit libero, a pharetra augue. Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Aenean lacinia bibendum nulla sed consectetur. </p> <p>Nullam quis risus eget urna mollis ornare vel eu leo. Nullam quis risus eget urna mollis ornare vel eu leo. Maecenas sed diam eget risus varius blandit sit amet non magna. Donec ullamcorper nulla non metus auctor fringilla. </p> <h3>Cras mattis consectetur</h3> <p>Aenean eu leo quam. Pellentesque ornare sem lacinia quam venenatis vestibulum. Aenean lacinia bibendum nulla sed consectetur. Integer posuere erat a ante venenatis dapibus posuere velit aliquet. Cras mattis consectetur purus sit amet fermentum. </p> <p>Nullam id dolor id nibh ultricies vehicula ut id elit. Nullam quis risus eget urna mollis ornare vel eu leo. Cras mattis consectetur purus sit amet fermentum.</p> </div> <div class="grid4 sidebar"> <div class="box reverse"> <p class="last">Nullam quis risus eget urna mollis ornare vel eu leo. Donec ullamcorper nulla non metus auctor fringilla. Cras mattis consectetur purus sit amet fermentum. Sed posuere consectetur est at lobortis. Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
</p> </div> <p class="caption">Maecenas sed diam eget risus varius.</p> <p>Vestibulum id ligula porta felis euismod semper. Integer posuere erat a ante venenatis dapibus posuere velit aliquet. Vestibulum id ligula porta felis euismod semper. Sed posuere consectetur est at lobortis. Maecenas sed diam eget risus varius blandit sit amet non magna. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. </p> <p>Duis mollis, est non commodo luctus, nisi erat porttitor ligula, eget lacinia odio sem nec elit. Aenean lacinia bibendum nulla sed consectetur. Vivamus sagittis lacus vel augue laoreet rutrum faucibus dolor auctor. Aenean lacinia bibendum nulla sed consectetur. Nullam quis risus eget urna mollis ornare vel eu leo. </p> <p>Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Donec ullamcorper nulla non metus auctor fringilla. Maecenas faucibus mollis interdum. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. </p> </div> </div> </div> <div id="glyphs"> <div class="section"> <div class="grid12 firstcol"> <h1>Language Support</h1> <p>The subset of Helvetica Bold in this kit supports the following languages:<br /> Albanian, Basque, Breton, Chamorro, English, Finnish, French, Frisian, Galician, German, Italian, Malagasy, Norwegian, Portuguese, Spanish, Swedish </p> <h1>Glyph Chart</h1> <p>The subset of Helvetica Bold in this kit includes all the glyphs listed below. Unicode entities are included above each glyph to help you insert individual characters into your layout.</p> <div id="glyph_chart"> <div><p>&amp;#32;</p>&#32;</div> <div><p>&amp;#33;</p>&#33;</div> <div><p>&amp;#34;</p>&#34;</div> <div><p>&amp;#35;</p>&#35;</div> <div><p>&amp;#36;</p>&#36;</div> <div><p>&amp;#37;</p>&#37;</div> <div><p>&amp;#38;</p>&#38;</div> <div><p>&amp;#39;</p>&#39;</div> <div><p>&amp;#40;</p>&#40;</div> <div><p>&amp;#41;</p>&#41;</div> <div><p>&amp;#42;</p>&#42;</div> <div><p>&amp;#43;</p>&#43;</div> <div><p>&amp;#44;</p>&#44;</div> <div><p>&amp;#45;</p>&#45;</div> <div><p>&amp;#46;</p>&#46;</div> <div><p>&amp;#47;</p>&#47;</div> <div><p>&amp;#48;</p>&#48;</div> <div><p>&amp;#49;</p>&#49;</div> <div><p>&amp;#50;</p>&#50;</div> <div><p>&amp;#51;</p>&#51;</div> <div><p>&amp;#52;</p>&#52;</div> <div><p>&amp;#53;</p>&#53;</div> <div><p>&amp;#54;</p>&#54;</div> <div><p>&amp;#55;</p>&#55;</div> <div><p>&amp;#56;</p>&#56;</div> <div><p>&amp;#57;</p>&#57;</div> <div><p>&amp;#58;</p>&#58;</div> <div><p>&amp;#59;</p>&#59;</div> <div><p>&amp;#60;</p>&#60;</div> <div><p>&amp;#61;</p>&#61;</div> <div><p>&amp;#62;</p>&#62;</div> <div><p>&amp;#63;</p>&#63;</div> <div><p>&amp;#64;</p>&#64;</div> <div><p>&amp;#65;</p>&#65;</div> <div><p>&amp;#66;</p>&#66;</div> <div><p>&amp;#67;</p>&#67;</div> <div><p>&amp;#68;</p>&#68;</div> <div><p>&amp;#69;</p>&#69;</div> <div><p>&amp;#70;</p>&#70;</div> <div><p>&amp;#71;</p>&#71;</div> <div><p>&amp;#72;</p>&#72;</div> <div><p>&amp;#73;</p>&#73;</div> <div><p>&amp;#74;</p>&#74;</div> <div><p>&amp;#75;</p>&#75;</div> <div><p>&amp;#76;</p>&#76;</div> <div><p>&amp;#77;</p>&#77;</div> <div><p>&amp;#78;</p>&#78;</div> <div><p>&amp;#79;</p>&#79;</div> <div><p>&amp;#80;</p>&#80;</div> <div><p>&amp;#81;</p>&#81;</div> <div><p>&amp;#82;</p>&#82;</div> <div><p>&amp;#83;</p>&#83;</div> <div><p>&amp;#84;</p>&#84;</div> <div><p>&amp;#85;</p>&#85;</div> <div><p>&amp;#86;</p>&#86;</div> <div><p>&amp;#87;</p>&#87;</div> <div><p>&amp;#88;</p>&#88;</div> 
<div><p>&amp;#89;</p>&#89;</div> <div><p>&amp;#90;</p>&#90;</div> <div><p>&amp;#91;</p>&#91;</div> <div><p>&amp;#92;</p>&#92;</div> <div><p>&amp;#93;</p>&#93;</div> <div><p>&amp;#94;</p>&#94;</div> <div><p>&amp;#95;</p>&#95;</div> <div><p>&amp;#96;</p>&#96;</div> <div><p>&amp;#97;</p>&#97;</div> <div><p>&amp;#98;</p>&#98;</div> <div><p>&amp;#99;</p>&#99;</div> <div><p>&amp;#100;</p>&#100;</div> <div><p>&amp;#101;</p>&#101;</div> <div><p>&amp;#102;</p>&#102;</div> <div><p>&amp;#103;</p>&#103;</div> <div><p>&amp;#104;</p>&#104;</div> <div><p>&amp;#105;</p>&#105;</div> <div><p>&amp;#106;</p>&#106;</div> <div><p>&amp;#107;</p>&#107;</div> <div><p>&amp;#108;</p>&#108;</div> <div><p>&amp;#109;</p>&#109;</div> <div><p>&amp;#110;</p>&#110;</div> <div><p>&amp;#111;</p>&#111;</div> <div><p>&amp;#112;</p>&#112;</div> <div><p>&amp;#113;</p>&#113;</div> <div><p>&amp;#114;</p>&#114;</div> <div><p>&amp;#115;</p>&#115;</div> <div><p>&amp;#116;</p>&#116;</div> <div><p>&amp;#117;</p>&#117;</div> <div><p>&amp;#118;</p>&#118;</div> <div><p>&amp;#119;</p>&#119;</div> <div><p>&amp;#120;</p>&#120;</div> <div><p>&amp;#121;</p>&#121;</div> <div><p>&amp;#122;</p>&#122;</div> <div><p>&amp;#123;</p>&#123;</div> <div><p>&amp;#124;</p>&#124;</div> <div><p>&amp;#125;</p>&#125;</div> <div><p>&amp;#126;</p>&#126;</div> <div><p>&amp;#160;</p>&#160;</div> <div><p>&amp;#161;</p>&#161;</div> <div><p>&amp;#162;</p>&#162;</div> <div><p>&amp;#163;</p>&#163;</div> <div><p>&amp;#164;</p>&#164;</div> <div><p>&amp;#165;</p>&#165;</div> <div><p>&amp;#167;</p>&#167;</div> <div><p>&amp;#168;</p>&#168;</div> <div><p>&amp;#169;</p>&#169;</div> <div><p>&amp;#170;</p>&#170;</div> <div><p>&amp;#171;</p>&#171;</div> <div><p>&amp;#172;</p>&#172;</div> <div><p>&amp;#173;</p>&#173;</div> <div><p>&amp;#174;</p>&#174;</div> <div><p>&amp;#176;</p>&#176;</div> <div><p>&amp;#177;</p>&#177;</div> <div><p>&amp;#180;</p>&#180;</div> <div><p>&amp;#181;</p>&#181;</div> <div><p>&amp;#182;</p>&#182;</div> <div><p>&amp;#183;</p>&#183;</div> <div><p>&amp;#184;</p>&#184;</div> <div><p>&amp;#186;</p>&#186;</div> <div><p>&amp;#187;</p>&#187;</div> <div><p>&amp;#191;</p>&#191;</div> <div><p>&amp;#192;</p>&#192;</div> <div><p>&amp;#193;</p>&#193;</div> <div><p>&amp;#194;</p>&#194;</div> <div><p>&amp;#195;</p>&#195;</div> <div><p>&amp;#196;</p>&#196;</div> <div><p>&amp;#197;</p>&#197;</div> <div><p>&amp;#198;</p>&#198;</div> <div><p>&amp;#199;</p>&#199;</div> <div><p>&amp;#200;</p>&#200;</div> <div><p>&amp;#201;</p>&#201;</div> <div><p>&amp;#202;</p>&#202;</div> <div><p>&amp;#203;</p>&#203;</div> <div><p>&amp;#204;</p>&#204;</div> <div><p>&amp;#205;</p>&#205;</div> <div><p>&amp;#206;</p>&#206;</div> <div><p>&amp;#207;</p>&#207;</div> <div><p>&amp;#209;</p>&#209;</div> <div><p>&amp;#210;</p>&#210;</div> <div><p>&amp;#211;</p>&#211;</div> <div><p>&amp;#212;</p>&#212;</div> <div><p>&amp;#213;</p>&#213;</div> <div><p>&amp;#214;</p>&#214;</div> <div><p>&amp;#216;</p>&#216;</div> <div><p>&amp;#217;</p>&#217;</div> <div><p>&amp;#218;</p>&#218;</div> <div><p>&amp;#219;</p>&#219;</div> <div><p>&amp;#220;</p>&#220;</div> <div><p>&amp;#223;</p>&#223;</div> <div><p>&amp;#224;</p>&#224;</div> <div><p>&amp;#225;</p>&#225;</div> <div><p>&amp;#226;</p>&#226;</div> <div><p>&amp;#227;</p>&#227;</div> <div><p>&amp;#228;</p>&#228;</div> <div><p>&amp;#229;</p>&#229;</div> <div><p>&amp;#230;</p>&#230;</div> <div><p>&amp;#231;</p>&#231;</div> <div><p>&amp;#232;</p>&#232;</div> <div><p>&amp;#233;</p>&#233;</div> <div><p>&amp;#234;</p>&#234;</div> <div><p>&amp;#235;</p>&#235;</div> 
<div><p>&amp;#236;</p>&#236;</div> <div><p>&amp;#237;</p>&#237;</div> <div><p>&amp;#238;</p>&#238;</div> <div><p>&amp;#239;</p>&#239;</div> <div><p>&amp;#241;</p>&#241;</div> <div><p>&amp;#242;</p>&#242;</div> <div><p>&amp;#243;</p>&#243;</div> <div><p>&amp;#244;</p>&#244;</div> <div><p>&amp;#245;</p>&#245;</div> <div><p>&amp;#246;</p>&#246;</div> <div><p>&amp;#247;</p>&#247;</div> <div><p>&amp;#248;</p>&#248;</div> <div><p>&amp;#249;</p>&#249;</div> <div><p>&amp;#250;</p>&#250;</div> <div><p>&amp;#251;</p>&#251;</div> <div><p>&amp;#252;</p>&#252;</div> <div><p>&amp;#255;</p>&#255;</div> <div><p>&amp;#338;</p>&#338;</div> <div><p>&amp;#339;</p>&#339;</div> <div><p>&amp;#376;</p>&#376;</div> <div><p>&amp;#710;</p>&#710;</div> <div><p>&amp;#732;</p>&#732;</div> <div><p>&amp;#8192;</p>&#8192;</div> <div><p>&amp;#8193;</p>&#8193;</div> <div><p>&amp;#8194;</p>&#8194;</div> <div><p>&amp;#8195;</p>&#8195;</div> <div><p>&amp;#8196;</p>&#8196;</div> <div><p>&amp;#8197;</p>&#8197;</div> <div><p>&amp;#8198;</p>&#8198;</div> <div><p>&amp;#8199;</p>&#8199;</div> <div><p>&amp;#8200;</p>&#8200;</div> <div><p>&amp;#8201;</p>&#8201;</div> <div><p>&amp;#8202;</p>&#8202;</div> <div><p>&amp;#8208;</p>&#8208;</div> <div><p>&amp;#8209;</p>&#8209;</div> <div><p>&amp;#8210;</p>&#8210;</div> <div><p>&amp;#8211;</p>&#8211;</div> <div><p>&amp;#8212;</p>&#8212;</div> <div><p>&amp;#8216;</p>&#8216;</div> <div><p>&amp;#8217;</p>&#8217;</div> <div><p>&amp;#8218;</p>&#8218;</div> <div><p>&amp;#8220;</p>&#8220;</div> <div><p>&amp;#8221;</p>&#8221;</div> <div><p>&amp;#8222;</p>&#8222;</div> <div><p>&amp;#8226;</p>&#8226;</div> <div><p>&amp;#8230;</p>&#8230;</div> <div><p>&amp;#8239;</p>&#8239;</div> <div><p>&amp;#8249;</p>&#8249;</div> <div><p>&amp;#8250;</p>&#8250;</div> <div><p>&amp;#8287;</p>&#8287;</div> <div><p>&amp;#8482;</p>&#8482;</div> <div><p>&amp;#9724;</p>&#9724;</div> </div> </div> </div> </div> <div id="specs"> </div> <div id="installing"> <div class="section"> <div class="grid7 firstcol"> <h1>Installing Webfonts</h1> <p>Webfonts are supported by all major browser platforms but not all in the same way. There are currently four different font formats that must be included in order to target all browsers. This includes TTF, WOFF, EOT and SVG.</p> <h2>1. Upload your webfonts</h2> <p>You must upload your webfont kit to your website. They should be in or near the same directory as your CSS files.</p> <h2>2. Include the webfont stylesheet</h2> <p>A special CSS @font-face declaration helps the various browsers select the appropriate font it needs without causing you a bunch of headaches. Learn more about this syntax by reading the <a href="http://www.fontspring.com/blog/further-hardening-of-the-bulletproof-syntax">Fontspring blog post</a> about it. The code for it is as follows:</p> <code> @font-face{ font-family: 'MyWebFont'; src: url('WebFont.eot'); src: url('WebFont.eot?#iefix') format('embedded-opentype'), url('WebFont.woff') format('woff'), url('WebFont.ttf') format('truetype'), url('WebFont.svg#webfont') format('svg'); } </code> <p>We've already gone ahead and generated the code for you. All you have to do is link to the stylesheet in your HTML, like this:</p> <code>&lt;link rel=&quot;stylesheet&quot; href=&quot;stylesheet.css&quot; type=&quot;text/css&quot; charset=&quot;utf-8&quot; /&gt;</code> <h2>3. Modify your own stylesheet</h2> <p>To take advantage of your new fonts, you must tell your stylesheet to use them. Look at the original @font-face declaration above and find the property called "font-family." 
The name linked there will be what you use to reference the font. Prepend that webfont name to the font stack in the "font-family" property, inside the selector you want to change. For example:</p> <code>p { font-family: 'WebFont', Arial, sans-serif; }</code> <h2>4. Test</h2> <p>Getting webfonts to work cross-browser <em>can</em> be tricky. Use the information in the sidebar to help you if you find that fonts aren't loading in a particular browser.</p> </div> <div class="grid5 sidebar"> <div class="box"> <h2>Troubleshooting<br />Font-Face Problems</h2> <p>Having trouble getting your webfonts to load in your new website? Here are some tips to sort out what might be the problem.</p> <h3>Fonts not showing in any browser</h3> <p>This sounds like you need to work on the plumbing. You either did not upload the fonts to the correct directory, or you did not link the fonts properly in the CSS. If you've confirmed that all this is correct and you still have a problem, take a look at your .htaccess file and see if requests are getting intercepted.</p> <h3>Fonts not loading in iPhone or iPad</h3> <p>The most common problem here is that you are serving the fonts from an IIS server. IIS refuses to serve files that have unknown MIME types. If that is the case, you must set the MIME type for SVG to "image/svg+xml" in the server settings. Follow these instructions from Microsoft if you need help.</p> <h3>Fonts not loading in Firefox</h3> <p>The primary reason for this failure? You are still using a version Firefox older than 3.5. So upgrade already! If that isn't it, then you are very likely serving fonts from a different domain. Firefox requires that all font assets be served from the same domain. Lastly it is possible that you need to add WOFF to your list of MIME types (if you are serving via IIS.)</p> <h3>Fonts not loading in IE</h3> <p>Are you looking at Internet Explorer on an actual Windows machine or are you cheating by using a service like Adobe BrowserLab? Many of these screenshot services do not render @font-face for IE. Best to test it on a real machine.</p> <h3>Fonts not loading in IE9</h3> <p>IE9, like Firefox, requires that fonts be served from the same domain as the website. Make sure that is the case.</p> </div> </div> </div> </div> </div> <div id="footer"> <p>&copy;2010-2011 Font Squirrel. All rights reserved.</p> </div> </div> </body> </html>
brittbec13/citizenmodern
wp-content/plugins/ultimate-social-media-icons/css/fonts/helvetica_bold_0-demo.html
HTML
gpl-2.0
36,588
(function($){Drupal.color={logoChanged:false,callback:function(context,settings,form,farb,height,width){if(!this.logoChanged){$("#preview #preview-logo img").attr("src",Drupal.settings.color.logo);this.logoChanged=true}if(Drupal.settings.color.logo==null){$("div").remove("#preview-logo")}$("#preview",form).css("backgroundColor",$('#palette input[name="palette[bg]"]',form).val());$("#preview #preview-main h2, #preview .preview-content",form).css("color",$('#palette input[name="palette[text]"]',form).val());$("#preview #preview-content a",form).css("color",$('#palette input[name="palette[link]"]',form).val());$("#preview #preview-sidebar #preview-block",form).css("background-color",$('#palette input[name="palette[sidebar]"]',form).val());$("#preview #preview-sidebar #preview-block",form).css("border-color",$('#palette input[name="palette[sidebarborders]"]',form).val());$("#preview #preview-footer-wrapper",form).css("background-color",$('#palette input[name="palette[footer]"]',form).val());var gradient_start=$('#palette input[name="palette[top]"]',form).val();var gradient_end=$('#palette input[name="palette[bottom]"]',form).val();$("#preview #preview-header",form).attr("style","background-color: "+gradient_start+"; background-image: -webkit-gradient(linear, 0% 0%, 0% 100%, from("+gradient_start+"), to("+gradient_end+")); background-image: -moz-linear-gradient(-90deg, "+gradient_start+", "+gradient_end+");");$("#preview #preview-site-name",form).css("color",$('#palette input[name="palette[titleslogan]"]',form).val())}}})(jQuery);
vhinrich/vhinandrich2
sites/all/modules/contrib/speedy/js/7.43/themes/bartik/color/preview.js
JavaScript
gpl-2.0
1,552
/* * AArch64 loadable module support. * * Copyright (C) 2012 ARM Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Author: Will Deacon <will.deacon@arm.com> */ #include <linux/bitops.h> #include <linux/elf.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleloader.h> #include <linux/vmalloc.h> #include <asm/insn.h> #define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX #define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16 void *module_alloc(unsigned long size) { return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL, PAGE_KERNEL_EXEC, -1, __builtin_return_address(0)); } enum aarch64_reloc_op { RELOC_OP_NONE, RELOC_OP_ABS, RELOC_OP_PREL, RELOC_OP_PAGE, }; static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val) { switch (reloc_op) { case RELOC_OP_ABS: return val; case RELOC_OP_PREL: return val - (u64)place; case RELOC_OP_PAGE: return (val & ~0xfff) - ((u64)place & ~0xfff); case RELOC_OP_NONE: return 0; } pr_err("do_reloc: unknown relocation operation %d\n", reloc_op); return 0; } static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) { u64 imm_mask = (1 << len) - 1; s64 sval = do_reloc(op, place, val); switch (len) { case 16: *(s16 *)place = sval; break; case 32: *(s32 *)place = sval; break; case 64: *(s64 *)place = sval; break; default: pr_err("Invalid length (%d) for data relocation\n", len); return 0; } /* * Extract the upper value bits (including the sign bit) and * shift them to bit 0. */ sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); /* * Overflow has occurred if the value is not representable in * len bits (i.e the bottom len bits are not sign-extended and * the top bits are not all zero). */ if ((u64)(sval + 1) > 2) return -ERANGE; return 0; } static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, int lsb, enum aarch64_insn_imm_type imm_type) { u64 imm, limit = 0; s64 sval; u32 insn = le32_to_cpu(*(u32 *)place); sval = do_reloc(op, place, val); sval >>= lsb; imm = sval & 0xffff; if (imm_type == AARCH64_INSN_IMM_MOVNZ) { /* * For signed MOVW relocations, we have to manipulate the * instruction encoding depending on whether or not the * immediate is less than zero. */ insn &= ~(3 << 29); if ((s64)imm >= 0) { /* >=0: Set the instruction to MOVZ (opcode 10b). */ insn |= 2 << 29; } else { /* * <0: Set the instruction to MOVN (opcode 00b). * Since we've masked the opcode already, we * don't need to do anything other than * inverting the new immediate field. */ imm = ~imm; } imm_type = AARCH64_INSN_IMM_MOVK; } /* Update the instruction with the new encoding. */ insn = aarch64_insn_encode_immediate(imm_type, insn, imm); *(u32 *)place = cpu_to_le32(insn); /* Shift out the immediate field. */ sval >>= 16; /* * For unsigned immediates, the overflow check is straightforward. * For signed immediates, the sign bit is actually the bit past the * most significant bit of the field. * The AARCH64_INSN_IMM_16 immediate type is unsigned. 
*/ if (imm_type != AARCH64_INSN_IMM_16) { sval++; limit++; } /* Check the upper bits depending on the sign of the immediate. */ if ((u64)sval > limit) return -ERANGE; return 0; } static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val, int lsb, int len, enum aarch64_insn_imm_type imm_type) { u64 imm, imm_mask; s64 sval; u32 insn = le32_to_cpu(*(u32 *)place); /* Calculate the relocation value. */ sval = do_reloc(op, place, val); sval >>= lsb; /* Extract the value bits and shift them to bit 0. */ imm_mask = (BIT(lsb + len) - 1) >> lsb; imm = sval & imm_mask; /* Update the instruction's immediate field. */ insn = aarch64_insn_encode_immediate(imm_type, insn, imm); *(u32 *)place = cpu_to_le32(insn); /* * Extract the upper value bits (including the sign bit) and * shift them to bit 0. */ sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); /* * Overflow has occurred if the upper bits are not all equal to * the sign bit of the value. */ if ((u64)(sval + 1) >= 2) return -ERANGE; return 0; } int apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { unsigned int i; int ovf; bool overflow_check; Elf64_Sym *sym; void *loc; u64 val; Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* loc corresponds to P in the AArch64 ELF document. */ loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; /* sym is the ELF symbol we're referring to. */ sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + ELF64_R_SYM(rel[i].r_info); /* val corresponds to (S + A) in the AArch64 ELF document. */ val = sym->st_value + rel[i].r_addend; /* Check for overflow by default. */ overflow_check = true; /* Perform the static relocation. */ switch (ELF64_R_TYPE(rel[i].r_info)) { /* Null relocations. */ case R_ARM_NONE: case R_AARCH64_NONE: ovf = 0; break; /* Data relocations. */ case R_AARCH64_ABS64: overflow_check = false; ovf = reloc_data(RELOC_OP_ABS, loc, val, 64); break; case R_AARCH64_ABS32: ovf = reloc_data(RELOC_OP_ABS, loc, val, 32); break; case R_AARCH64_ABS16: ovf = reloc_data(RELOC_OP_ABS, loc, val, 16); break; case R_AARCH64_PREL64: overflow_check = false; ovf = reloc_data(RELOC_OP_PREL, loc, val, 64); break; case R_AARCH64_PREL32: ovf = reloc_data(RELOC_OP_PREL, loc, val, 32); break; case R_AARCH64_PREL16: ovf = reloc_data(RELOC_OP_PREL, loc, val, 16); break; /* MOVW instruction relocations. */ case R_AARCH64_MOVW_UABS_G0_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G1_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G2_NC: overflow_check = false; case R_AARCH64_MOVW_UABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_UABS_G3: /* We're using the top bits so we can't overflow. 
*/ overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, AARCH64_INSN_IMM_16); break; case R_AARCH64_MOVW_SABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_SABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_SABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G0_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G0: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G1_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G1: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G2_NC: overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, AARCH64_INSN_IMM_MOVK); break; case R_AARCH64_MOVW_PREL_G2: ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, AARCH64_INSN_IMM_MOVNZ); break; case R_AARCH64_MOVW_PREL_G3: /* We're using the top bits so we can't overflow. */ overflow_check = false; ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48, AARCH64_INSN_IMM_MOVNZ); break; /* Immediate instruction relocations. */ case R_AARCH64_LD_PREL_LO19: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, AARCH64_INSN_IMM_19); break; case R_AARCH64_ADR_PREL_LO21: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, AARCH64_INSN_IMM_ADR); break; case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; case R_AARCH64_ADR_PREL_PG_HI21: ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, AARCH64_INSN_IMM_ADR); break; case R_AARCH64_ADD_ABS_LO12_NC: case R_AARCH64_LDST8_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12, AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST16_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11, AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST32_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10, AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST64_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9, AARCH64_INSN_IMM_12); break; case R_AARCH64_LDST128_ABS_LO12_NC: overflow_check = false; ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8, AARCH64_INSN_IMM_12); break; case R_AARCH64_TSTBR14: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14, AARCH64_INSN_IMM_14); break; case R_AARCH64_CONDBR19: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, AARCH64_INSN_IMM_19); break; case R_AARCH64_JUMP26: case R_AARCH64_CALL26: ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, AARCH64_INSN_IMM_26); break; default: pr_err("module %s: unsupported RELA relocation: %llu\n", me->name, ELF64_R_TYPE(rel[i].r_info)); return -ENOEXEC; } if (overflow_check && ovf == -ERANGE) goto overflow; } return 0; overflow: pr_err("module %s: overflow in relocation type %d val %Lx\n", me->name, (int)ELF64_R_TYPE(rel[i].r_info), val); return -ENOEXEC; }
djvoleur/V_S6
arch/arm64/kernel/module.c
C
gpl-2.0
10,786
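apply_relocate_add() in the arch/arm64/kernel/module.c record above patches the MOVW-class relocations by slicing the 64-bit symbol value into 16-bit immediates: the R_AARCH64_MOVW_UABS_G0..G3 cases call reloc_insn_movw() with lsb = 0, 16, 32 and 48, one slice per MOVZ/MOVK instruction, and the checked variants flag overflow when any bits remain above the consumed field. The standalone sketch below only illustrates that slicing and the shift-then-test overflow check; it is an illustrative demo with an arbitrary example address and hypothetical helper names, not kernel code.

/* Illustration only (not kernel code): slice a 64-bit value into the four
 * 16-bit immediates that a MOVZ + 3x MOVK sequence carries -- the same
 * slices the R_AARCH64_MOVW_UABS_G0..G3 relocations patch at lsb 0/16/32/48
 * -- and apply the "shift the field out, anything left is overflow" test
 * in the spirit of reloc_insn_movw() for the checked (non-_NC) variants.
 */
#include <stdint.h>
#include <stdio.h>

/* 16-bit group that relocation group Gn (n = lsb/16) would place. */
static uint16_t movw_group(uint64_t val, unsigned int lsb)
{
	return (uint16_t)(val >> lsb);
}

/* Unsigned overflow test: after consuming 16 bits at lsb, no bits may remain. */
static int movw_uabs_overflows(uint64_t val, unsigned int lsb)
{
	return ((val >> lsb) >> 16) != 0;
}

int main(void)
{
	uint64_t addr = 0xffff000008a1b2c4ULL;	/* arbitrary example value */
	unsigned int lsb;

	for (lsb = 0; lsb <= 48; lsb += 16)
		printf("G%u (lsb %2u): imm16 = 0x%04x, overflow if used alone: %d\n",
		       lsb / 16, lsb, (unsigned int)movw_group(addr, lsb),
		       movw_uabs_overflows(addr, lsb));
	return 0;
}

For the _NC ("no check") relocation variants the kernel deliberately skips this test, because a full 64-bit constant is expected to span all four groups, with only the final group (G3, or the highest checked group in use) subject to the overflow check.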
/* * drivers/gpu/ion/ion.c * * Copyright (C) 2011 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__ #include <linux/device.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/ion.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/mm_types.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/debugfs.h> #include "ion_priv.h" #define DEBUG /* this function should only be called while dev->lock is held */ static void ion_buffer_add(struct ion_device *dev, struct ion_buffer *buffer) { struct rb_node **p = &dev->buffers.rb_node; struct rb_node *parent = NULL; struct ion_buffer *entry; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_buffer, node); if (buffer < entry) { p = &(*p)->rb_left; } else if (buffer > entry) { p = &(*p)->rb_right; } else { pr_err("buffer already found."); BUG(); } } rb_link_node(&buffer->node, parent, p); rb_insert_color(&buffer->node, &dev->buffers); } /* this function should only be called while dev->lock is held */ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, struct ion_device *dev, unsigned long len, unsigned long align, unsigned long flags) { struct ion_buffer *buffer; int ret; buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); buffer->heap = heap; kref_init(&buffer->ref); ret = heap->ops->allocate(heap, buffer, len, align, flags); if (ret) { kfree(buffer); return ERR_PTR(ret); } buffer->dev = dev; buffer->size = len; mutex_init(&buffer->lock); ion_buffer_add(dev, buffer); return buffer; } static void ion_buffer_destroy(struct kref *kref) { struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); struct ion_device *dev = buffer->dev; buffer->heap->ops->free(buffer); mutex_lock(&dev->lock); rb_erase(&buffer->node, &dev->buffers); mutex_unlock(&dev->lock); kfree(buffer); } void ion_buffer_get(struct ion_buffer *buffer) { kref_get(&buffer->ref); } static int ion_buffer_put(struct ion_buffer *buffer) { return kref_put(&buffer->ref, ion_buffer_destroy); } struct ion_handle *ion_handle_create(struct ion_client *client, struct ion_buffer *buffer) { struct ion_handle *handle; handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); if (!handle) return ERR_PTR(-ENOMEM); kref_init(&handle->ref); rb_init_node(&handle->node); handle->client = client; ion_buffer_get(buffer); handle->buffer = buffer; return handle; } static void ion_handle_destroy(struct kref *kref) { struct ion_handle *handle = container_of(kref, struct ion_handle, ref); /* XXX Can a handle be destroyed while it's map count is non-zero?: if (handle->map_cnt) unmap */ ion_buffer_put(handle->buffer); mutex_lock(&handle->client->lock); if (!RB_EMPTY_NODE(&handle->node)) rb_erase(&handle->node, &handle->client->handles); mutex_unlock(&handle->client->lock); kfree(handle); } struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) { return handle->buffer; } void 
ion_handle_get(struct ion_handle *handle) { kref_get(&handle->ref); } int ion_handle_put(struct ion_handle *handle) { return kref_put(&handle->ref, ion_handle_destroy); } static struct ion_handle *ion_handle_lookup(struct ion_client *client, struct ion_buffer *buffer) { struct rb_node *n; for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); if (handle->buffer == buffer) return handle; } return NULL; } bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) { struct rb_node *n = client->handles.rb_node; while (n) { struct ion_handle *handle_node = rb_entry(n, struct ion_handle, node); if (handle < handle_node) n = n->rb_left; else if (handle > handle_node) n = n->rb_right; else return true; } WARN(1, "invalid handle passed h=0x%x,comm=%d\n", handle, current->group_leader->comm); return false; } void ion_handle_add(struct ion_client *client, struct ion_handle *handle) { struct rb_node **p = &client->handles.rb_node; struct rb_node *parent = NULL; struct ion_handle *entry; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_handle, node); if (handle < entry) p = &(*p)->rb_left; else if (handle > entry) p = &(*p)->rb_right; else WARN(1, "%s: buffer already found.", __func__); } rb_link_node(&handle->node, parent, p); rb_insert_color(&handle->node, &client->handles); } struct ion_handle *ion_alloc(struct ion_client *client, size_t len, size_t align, unsigned int flags) { struct rb_node *n; struct ion_handle *handle; struct ion_device *dev = client->dev; struct ion_buffer *buffer = NULL; /* * traverse the list of heaps available in this system in priority * order. If the heap type is supported by the client, and matches the * request of the caller allocate from it. 
Repeat until allocate has * succeeded or all heaps have been tried */ mutex_lock(&dev->lock); for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { struct ion_heap *heap = rb_entry(n, struct ion_heap, node); /* if the client doesn't support this heap type */ if (!((1 << heap->type) & client->heap_mask)) continue; /* if the caller didn't specify this heap type */ if (!((1 << heap->id) & flags)) continue; buffer = ion_buffer_create(heap, dev, len, align, flags); if (!IS_ERR_OR_NULL(buffer)) break; } mutex_unlock(&dev->lock); if (IS_ERR_OR_NULL(buffer)) return ERR_PTR(PTR_ERR(buffer)); handle = ion_handle_create(client, buffer); if (IS_ERR_OR_NULL(handle)) goto end; /* * ion_buffer_create will create a buffer with a ref_cnt of 1, * and ion_handle_create will take a second reference, drop one here */ ion_buffer_put(buffer); mutex_lock(&client->lock); ion_handle_add(client, handle); mutex_unlock(&client->lock); return handle; end: ion_buffer_put(buffer); return handle; } void ion_free(struct ion_client *client, struct ion_handle *handle) { bool valid_handle; BUG_ON(client != handle->client); mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); mutex_unlock(&client->lock); if (!valid_handle) { WARN(1, "%s: invalid handle passed to free.\n", __func__); return; } ion_handle_put(handle); } static bool _ion_map(int *buffer_cnt, int *handle_cnt) { bool map; BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0); if (*buffer_cnt) map = false; else map = true; if (*handle_cnt == 0) (*buffer_cnt)++; (*handle_cnt)++; return map; } static bool _ion_unmap(int *buffer_cnt, int *handle_cnt) { BUG_ON(*handle_cnt == 0); (*handle_cnt)--; if (*handle_cnt != 0) return false; BUG_ON(*buffer_cnt == 0); (*buffer_cnt)--; if (*buffer_cnt == 0) return true; return false; } int ion_phys(struct ion_client *client, struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len) { struct ion_buffer *buffer; int ret; mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { mutex_unlock(&client->lock); return -EINVAL; } buffer = handle->buffer; if (!buffer->heap->ops->phys) { pr_err("ion_phys is not implemented by this heap.\n"); mutex_unlock(&client->lock); return -ENODEV; } mutex_unlock(&client->lock); ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); return ret; } void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; void *vaddr; mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { WARN(1, "invalid handle passed to map_kernel.\n"); mutex_unlock(&client->lock); return ERR_PTR(-EINVAL); } buffer = handle->buffer; mutex_lock(&buffer->lock); if (!handle->buffer->heap->ops->map_kernel) { pr_err("map_kernel is not implemented by this heap.\n"); mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); return ERR_PTR(-ENODEV); } if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) { vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); if (IS_ERR_OR_NULL(vaddr)) _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt); buffer->vaddr = vaddr; } else { vaddr = buffer->vaddr; } mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); return vaddr; } struct scatterlist *ion_map_dma(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; struct scatterlist *sglist; mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { WARN(1, "invalid handle passed to map_dma.\n"); mutex_unlock(&client->lock); return ERR_PTR(-EINVAL); } buffer = handle->buffer; mutex_lock(&buffer->lock); if 
(!handle->buffer->heap->ops->map_dma) { pr_err("map_kernel is not implemented by this heap.\n"); mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); return ERR_PTR(-ENODEV); } if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) { sglist = buffer->heap->ops->map_dma(buffer->heap, buffer); if (IS_ERR_OR_NULL(sglist)) _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt); buffer->sglist = sglist; } else { sglist = buffer->sglist; } mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); return sglist; } struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap, struct ion_buffer *buf, unsigned long addr); int ion_remap_dma(struct ion_client *client, struct ion_handle *handle, unsigned long addr) { struct ion_buffer *buffer; int ret; mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { pr_err("invalid handle passed to map_dma.\n"); mutex_unlock(&client->lock); return -EINVAL; } buffer = handle->buffer; mutex_lock(&buffer->lock); ret = iommu_heap_remap_dma(buffer->heap, buffer, addr); mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); return ret; } void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; mutex_lock(&client->lock); buffer = handle->buffer; mutex_lock(&buffer->lock); if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) { buffer->heap->ops->unmap_kernel(buffer->heap, buffer); buffer->vaddr = NULL; } mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); } void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; mutex_lock(&client->lock); buffer = handle->buffer; mutex_lock(&buffer->lock); if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) { buffer->heap->ops->unmap_dma(buffer->heap, buffer); buffer->sglist = NULL; } mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); } struct ion_buffer *ion_share(struct ion_client *client, struct ion_handle *handle) { bool valid_handle; mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); mutex_unlock(&client->lock); if (!valid_handle) { WARN(1, "%s: invalid handle passed to share.\n", __func__); return ERR_PTR(-EINVAL); } /* do not take an extra reference here, the burden is on the caller * to make sure the buffer doesn't go away while it's passing it * to another client -- ion_free should not be called on this handle * until the buffer has been imported into the other client */ return handle->buffer; } struct ion_handle *ion_import(struct ion_client *client, struct ion_buffer *buffer) { struct ion_handle *handle = NULL; mutex_lock(&client->lock); /* if a handle exists for this buffer just take a reference to it */ handle = ion_handle_lookup(client, buffer); if (!IS_ERR_OR_NULL(handle)) { ion_handle_get(handle); goto end; } handle = ion_handle_create(client, buffer); if (IS_ERR_OR_NULL(handle)) { pr_err("error during handle create\n"); goto end; } ion_handle_add(client, handle); end: mutex_unlock(&client->lock); return handle; } static const struct file_operations ion_share_fops; struct ion_handle *ion_import_fd(struct ion_client *client, int fd) { struct file *file = fget(fd); struct ion_handle *handle; if (!file) { pr_err("imported fd not found in file table.\n"); return ERR_PTR(-EINVAL); } if (file->f_op != &ion_share_fops) { pr_err("imported file is not a shared ion file.\n"); handle = ERR_PTR(-EINVAL); goto end; } handle = ion_import(client, file->private_data); end: fput(file); return handle; } static int ion_debug_client_show(struct seq_file *s, void *unused) { struct 
ion_client *client = s->private; struct rb_node *n; size_t sizes[ION_NUM_HEAPS] = {0}; const char *names[ION_NUM_HEAPS] = {0}; int i; mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); enum ion_heap_type type = handle->buffer->heap->type; if (!names[type]) names[type] = handle->buffer->heap->name; sizes[type] += handle->buffer->size; } mutex_unlock(&client->lock); seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); for (i = 0; i < ION_NUM_HEAPS; i++) { if (!names[i]) continue; seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i], atomic_read(&client->ref.refcount)); } return 0; } static int ion_debug_client_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_client_show, inode->i_private); } static const struct file_operations debug_client_fops = { .open = ion_debug_client_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct ion_client *ion_client_lookup(struct ion_device *dev, struct task_struct *task) { struct rb_node *n = dev->user_clients.rb_node; struct ion_client *client; mutex_lock(&dev->lock); while (n) { client = rb_entry(n, struct ion_client, node); if (task == client->task) { ion_client_get(client); mutex_unlock(&dev->lock); return client; } else if (task < client->task) { n = n->rb_left; } else if (task > client->task) { n = n->rb_right; } } mutex_unlock(&dev->lock); return NULL; } struct ion_client *ion_client_create(struct ion_device *dev, unsigned int heap_mask, const char *name) { struct ion_client *client; struct task_struct *task; struct rb_node **p; struct rb_node *parent = NULL; struct ion_client *entry; char debug_name[64]; pid_t pid; get_task_struct(current->group_leader); task_lock(current->group_leader); pid = task_pid_nr(current->group_leader); /* don't bother to store task struct for kernel threads, they can't be killed anyway */ if (current->group_leader->flags & PF_KTHREAD) { put_task_struct(current->group_leader); task = NULL; } else { task = current->group_leader; } task_unlock(current->group_leader); /* if this isn't a kernel thread, see if a client already exists */ if (task) { client = ion_client_lookup(dev, task); if (!IS_ERR_OR_NULL(client)) { put_task_struct(current->group_leader); return client; } } client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); if (!client) { put_task_struct(current->group_leader); return ERR_PTR(-ENOMEM); } client->dev = dev; client->handles = RB_ROOT; mutex_init(&client->lock); client->name = name; client->heap_mask = heap_mask; client->task = task; client->pid = pid; kref_init(&client->ref); mutex_lock(&dev->lock); if (task) { p = &dev->user_clients.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_client, node); if (task < entry->task) p = &(*p)->rb_left; else if (task > entry->task) p = &(*p)->rb_right; } rb_link_node(&client->node, parent, p); rb_insert_color(&client->node, &dev->user_clients); } else { p = &dev->kernel_clients.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_client, node); if (client < entry) p = &(*p)->rb_left; else if (client > entry) p = &(*p)->rb_right; } rb_link_node(&client->node, parent, p); rb_insert_color(&client->node, &dev->kernel_clients); } snprintf(debug_name, 64, "%u", client->pid); client->debug_root = debugfs_create_file(debug_name, 0664, dev->debug_root, client, &debug_client_fops); mutex_unlock(&dev->lock); return client; } static void _ion_client_destroy(struct 
kref *kref) { struct ion_client *client = container_of(kref, struct ion_client, ref); struct ion_device *dev = client->dev; struct rb_node *n; pr_debug("\n"); while ((n = rb_first(&client->handles))) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); ion_handle_destroy(&handle->ref); } mutex_lock(&dev->lock); if (client->task) { rb_erase(&client->node, &dev->user_clients); put_task_struct(client->task); } else { rb_erase(&client->node, &dev->kernel_clients); } debugfs_remove_recursive(client->debug_root); mutex_unlock(&dev->lock); kfree(client); } void ion_client_get(struct ion_client *client) { kref_get(&client->ref); } int ion_client_put(struct ion_client *client) { return kref_put(&client->ref, _ion_client_destroy); } void ion_client_destroy(struct ion_client *client) { ion_client_put(client); } static int ion_share_release(struct inode *inode, struct file* file) { struct ion_buffer *buffer = file->private_data; pr_debug("\n"); /* drop the reference to the buffer -- this prevents the buffer from going away because the client holding it exited while it was being passed */ ion_buffer_put(buffer); return 0; } static void ion_vma_open(struct vm_area_struct *vma) { struct ion_buffer *buffer = vma->vm_file->private_data; struct ion_handle *handle = vma->vm_private_data; struct ion_client *client; pr_debug("\n"); /* check that the client still exists and take a reference so it can't go away until this vma is closed */ client = ion_client_lookup(buffer->dev, current->group_leader); if (IS_ERR_OR_NULL(client)) { vma->vm_private_data = NULL; return; } ion_buffer_get(buffer); ion_handle_get(handle); pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", atomic_read(&client->ref.refcount), atomic_read(&handle->ref.refcount), atomic_read(&buffer->ref.refcount)); } static void ion_vma_close(struct vm_area_struct *vma) { struct ion_handle *handle = vma->vm_private_data; struct ion_buffer *buffer = vma->vm_file->private_data; struct ion_client *client; pr_debug("\n"); /* this indicates the client is gone, nothing to do here */ if (!handle) return; client = handle->client; pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", atomic_read(&client->ref.refcount), atomic_read(&handle->ref.refcount), atomic_read(&buffer->ref.refcount)); ion_handle_put(handle); ion_client_put(client); ion_buffer_put(buffer); pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", atomic_read(&client->ref.refcount), atomic_read(&handle->ref.refcount), atomic_read(&buffer->ref.refcount)); } static struct vm_operations_struct ion_vm_ops = { .open = ion_vma_open, .close = ion_vma_close, }; static int ion_share_mmap(struct file *file, struct vm_area_struct *vma) { struct ion_buffer *buffer = file->private_data; unsigned long size = vma->vm_end - vma->vm_start; struct ion_client *client; struct ion_handle *handle; int ret; pr_debug("\n"); /* make sure the client still exists, it's possible for the client to have gone away but the map/share fd still to be around, take a reference to it so it can't go away while this mapping exists */ client = ion_client_lookup(buffer->dev, current->group_leader); if (IS_ERR_OR_NULL(client)) { WARN(1, "trying to mmap an ion handle in a process with no " "ion client\n"); return -EINVAL; } if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) > buffer->size)) { WARN(1, "trying to map larger area than handle has available" "\n"); ret = -EINVAL; goto err; } /* find the handle and take a reference to it */ handle = ion_import(client, buffer); if (IS_ERR_OR_NULL(handle)) { 
ret = -EINVAL; goto err; } ion_buffer_get(buffer); if (!handle->buffer->heap->ops->map_user) { pr_err("this heap does not define a method for mapping " "to userspace\n"); ret = -EINVAL; goto err1; } mutex_lock(&buffer->lock); /* now map it to userspace */ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); mutex_unlock(&buffer->lock); if (ret) { pr_err("failure mapping buffer to userspace\n"); goto err1; } vma->vm_ops = &ion_vm_ops; /* move the handle into the vm_private_data so we can access it from vma_open/close */ vma->vm_private_data = handle; pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n", atomic_read(&client->ref.refcount), atomic_read(&handle->ref.refcount), atomic_read(&buffer->ref.refcount)); return 0; err1: /* drop the reference to the handle */ ion_handle_put(handle); err: /* drop the reference to the client */ ion_client_put(client); return ret; } static const struct file_operations ion_share_fops = { .owner = THIS_MODULE, .release = ion_share_release, .mmap = ion_share_mmap, }; static int ion_ioctl_share(struct file *parent, struct ion_client *client, struct ion_handle *handle) { int fd = get_unused_fd(); struct file *file; if (fd < 0) return -ENFILE; file = anon_inode_getfile("ion_share_fd", &ion_share_fops, handle->buffer, O_RDWR); if (IS_ERR_OR_NULL(file)) goto err; ion_buffer_get(handle->buffer); fd_install(fd, file); return fd; err: put_unused_fd(fd); return -ENFILE; } static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_client *client = filp->private_data; switch (cmd) { case ION_IOC_ALLOC: { struct ion_allocation_data data; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; data.handle = ion_alloc(client, data.len, data.align, data.flags); if (copy_to_user((void __user *)arg, &data, sizeof(data))) return -EFAULT; break; } case ION_IOC_FREE: { struct ion_handle_data data; bool valid; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_handle_data))) return -EFAULT; mutex_lock(&client->lock); valid = ion_handle_validate(client, data.handle); mutex_unlock(&client->lock); if (!valid) return -EINVAL; ion_free(client, data.handle); break; } case ION_IOC_MAP: case ION_IOC_SHARE: { struct ion_fd_data data; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; mutex_lock(&client->lock); if (!ion_handle_validate(client, data.handle)) { WARN(1, "invalid handle passed to share ioctl.\n"); mutex_unlock(&client->lock); return -EINVAL; } data.fd = ion_ioctl_share(filp, client, data.handle); mutex_unlock(&client->lock); if (copy_to_user((void __user *)arg, &data, sizeof(data))) return -EFAULT; break; } case ION_IOC_IMPORT: { struct ion_fd_data data; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_fd_data))) return -EFAULT; data.handle = ion_import_fd(client, data.fd); if (IS_ERR(data.handle)) data.handle = NULL; if (copy_to_user((void __user *)arg, &data, sizeof(struct ion_fd_data))) return -EFAULT; break; } case ION_IOC_CUSTOM: { struct ion_device *dev = client->dev; struct ion_custom_data data; if (!dev->custom_ioctl) return -ENOTTY; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_custom_data))) return -EFAULT; return dev->custom_ioctl(client, data.cmd, data.arg); } default: return -ENOTTY; } return 0; } static int ion_release(struct inode *inode, struct file *file) { struct ion_client *client = file->private_data; pr_debug("\n"); ion_client_put(client); return 0; } static int ion_open(struct inode *inode, struct file *file) { struct 
miscdevice *miscdev = file->private_data; struct ion_device *dev = container_of(miscdev, struct ion_device, dev); struct ion_client *client; pr_debug("\n"); client = ion_client_create(dev, -1, "user"); if (IS_ERR_OR_NULL(client)) return PTR_ERR(client); file->private_data = client; return 0; } static const struct file_operations ion_fops = { .owner = THIS_MODULE, .open = ion_open, .release = ion_release, .unlocked_ioctl = ion_ioctl, }; static size_t ion_debug_heap_total(struct ion_client *client, enum ion_heap_type type) { size_t size = 0; struct rb_node *n; mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); if (handle->buffer->heap->type == type) size += handle->buffer->size; } mutex_unlock(&client->lock); return size; } static int ion_debug_heap_show(struct seq_file *s, void *unused) { struct ion_heap *heap = s->private; struct ion_device *dev = heap->dev; struct rb_node *n; seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { struct ion_client *client = rb_entry(n, struct ion_client, node); char task_comm[TASK_COMM_LEN]; size_t size = ion_debug_heap_total(client, heap->type); if (!size) continue; get_task_comm(task_comm, client->task); seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid, size); } for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { struct ion_client *client = rb_entry(n, struct ion_client, node); size_t size = ion_debug_heap_total(client, heap->type); if (!size) continue; seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid, size); } return 0; } static int ion_debug_heap_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_heap_show, inode->i_private); } static const struct file_operations debug_heap_fops = { .open = ion_debug_heap_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) { struct rb_node **p = &dev->heaps.rb_node; struct rb_node *parent = NULL; struct ion_heap *entry; heap->dev = dev; mutex_lock(&dev->lock); while (*p) { parent = *p; entry = rb_entry(parent, struct ion_heap, node); if (heap->id < entry->id) { p = &(*p)->rb_left; } else if (heap->id > entry->id ) { p = &(*p)->rb_right; } else { pr_err("can not insert multiple heaps with " "id %d\n", heap->id); goto end; } } rb_link_node(&heap->node, parent, p); rb_insert_color(&heap->node, &dev->heaps); debugfs_create_file(heap->name, 0664, dev->debug_root, heap, &debug_heap_fops); end: mutex_unlock(&dev->lock); } struct ion_device *ion_device_create(long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, unsigned long arg)) { struct ion_device *idev; int ret; idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); if (!idev) return ERR_PTR(-ENOMEM); idev->dev.minor = MISC_DYNAMIC_MINOR; idev->dev.name = "ion"; idev->dev.fops = &ion_fops; idev->dev.parent = NULL; ret = misc_register(&idev->dev); if (ret) { pr_err("ion: failed to register misc device.\n"); return ERR_PTR(ret); } idev->debug_root = debugfs_create_dir("ion", NULL); if (IS_ERR_OR_NULL(idev->debug_root)) pr_err("ion: failed to create debug files.\n"); idev->custom_ioctl = custom_ioctl; idev->buffers = RB_ROOT; mutex_init(&idev->lock); idev->heaps = RB_ROOT; idev->user_clients = RB_ROOT; idev->kernel_clients = RB_ROOT; return idev; } void ion_device_destroy(struct ion_device *dev) { misc_deregister(&dev->dev); /* XXX need to 
free the heaps and clients ? */ kfree(dev); } struct ion_client *ion_client_get_file(int fd) { struct ion_client *client = ERR_PTR(-EFAULT); struct file *f = fget(fd); if (!f) return ERR_PTR(-EINVAL); if (f->f_op == &ion_fops) { client = f->private_data; ion_client_get(client); } fput(f); return client; }
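/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * in-kernel ION client exercising the API defined above.  It assumes the
 * platform has already created an ion_device (passed in as "idev" here)
 * and registered at least one heap; the heap id used in the flags mask is
 * a placeholder.  Error handling is trimmed to show only the call order:
 * create client -> alloc -> map -> unmap -> free -> destroy client.
 */
static int ion_usage_example(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	/* heap_mask of -1 lets this client allocate from any heap type */
	client = ion_client_create(idev, -1, "ion-example");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	/* flags is a heap-id mask: ion_alloc() walks the heaps in priority
	 * order and uses the first one whose id bit is set */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0 /* placeholder heap id */);
	if (IS_ERR_OR_NULL(handle)) {
		ion_client_destroy(client);
		return -ENOMEM;
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR_OR_NULL(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);	/* use the buffer */
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);		/* drops the handle reference */
	ion_client_destroy(client);
	return 0;
}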
zombi-x/grimlock_kernel_asus_tegra3_unified
drivers/gpu/ion/ion.c
C
gpl-2.0
28,669
/* Expectation handling for nf_conntrack. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/netfilter.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/percpu.h> #include <linux/kernel.h> #include <linux/jhash.h> #include <net/net_namespace.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_zones.h> unsigned int nf_ct_expect_hsize __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_expect_hsize); unsigned int nf_ct_expect_max __read_mostly; static struct kmem_cache *nf_ct_expect_cachep __read_mostly; static HLIST_HEAD(nf_ct_userspace_expect_list); /* nf_conntrack_expect helper functions */ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, u32 pid, int report) { struct nf_conn_help *master_help = nfct_help(exp->master); struct net *net = nf_ct_exp_net(exp); NF_CT_ASSERT(!timer_pending(&exp->timeout)); hlist_del_rcu(&exp->hnode); net->ct.expect_count--; hlist_del(&exp->lnode); if (!(exp->flags & NF_CT_EXPECT_USERSPACE)) master_help->expecting[exp->class]--; nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report); nf_ct_expect_put(exp); NF_CT_STAT_INC(net, expect_delete); } EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report); static void nf_ct_expectation_timed_out(unsigned long ul_expect) { struct nf_conntrack_expect *exp = (void *)ul_expect; spin_lock_bh(&nf_conntrack_lock); nf_ct_unlink_expect(exp); spin_unlock_bh(&nf_conntrack_lock); nf_ct_expect_put(exp); } static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple) { unsigned int hash; if (unlikely(!nf_conntrack_hash_rnd)) { init_nf_conntrack_hash_rnd(); } hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all), (((tuple->dst.protonum ^ tuple->src.l3num) << 16) | (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd); return ((u64)hash * nf_ct_expect_hsize) >> 32; } struct nf_conntrack_expect * __nf_ct_expect_find(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_expect *i; struct hlist_node *n; unsigned int h; if (!net->ct.expect_count) return NULL; h = nf_ct_expect_dst_hash(tuple); hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && nf_ct_zone(i->master) == zone) return i; } return NULL; } EXPORT_SYMBOL_GPL(__nf_ct_expect_find); /* Just find a expectation corresponding to a tuple. */ struct nf_conntrack_expect * nf_ct_expect_find_get(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_expect *i; rcu_read_lock(); i = __nf_ct_expect_find(net, zone, tuple); if (i && !atomic_inc_not_zero(&i->use)) i = NULL; rcu_read_unlock(); return i; } EXPORT_SYMBOL_GPL(nf_ct_expect_find_get); /* If an expectation for this connection is found, it gets delete from * global list then returned. 
*/ struct nf_conntrack_expect * nf_ct_find_expectation(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_expect *i, *exp = NULL; struct hlist_node *n; unsigned int h; if (!net->ct.expect_count) return NULL; h = nf_ct_expect_dst_hash(tuple); hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { if (!(i->flags & NF_CT_EXPECT_INACTIVE) && nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && nf_ct_zone(i->master) == zone) { exp = i; break; } } if (!exp) return NULL; /* If master is not in hash table yet (ie. packet hasn't left this machine yet), how can other end know about expected? Hence these are not the droids you are looking for (if master ct never got confirmed, we'd hold a reference to it and weird things would happen to future packets). */ if (!nf_ct_is_confirmed(exp->master)) return NULL; if (exp->flags & NF_CT_EXPECT_PERMANENT) { atomic_inc(&exp->use); return exp; } else if (del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); return exp; } return NULL; } /* delete all expectations for this conntrack */ void nf_ct_remove_expectations(struct nf_conn *ct) { struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_expect *exp; struct hlist_node *n, *next; /* Optimization: most connection never expect any others. */ if (!help) return; hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { if (del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); nf_ct_expect_put(exp); } } } EXPORT_SYMBOL_GPL(nf_ct_remove_expectations); /* Would two expected things clash? */ static inline int expect_clash(const struct nf_conntrack_expect *a, const struct nf_conntrack_expect *b) { /* Part covered by intersection of masks must be unequal, otherwise they clash */ struct nf_conntrack_tuple_mask intersect_mask; int count; intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all; for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ intersect_mask.src.u3.all[count] = a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; } return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); } static inline int expect_matches(const struct nf_conntrack_expect *a, const struct nf_conntrack_expect *b) { return a->master == b->master && a->class == b->class && nf_ct_tuple_equal(&a->tuple, &b->tuple) && nf_ct_tuple_mask_equal(&a->mask, &b->mask) && nf_ct_zone(a->master) == nf_ct_zone(b->master); } /* Generally a bad idea to call this: could have matched already. */ void nf_ct_unexpect_related(struct nf_conntrack_expect *exp) { spin_lock_bh(&nf_conntrack_lock); if (del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); nf_ct_expect_put(exp); } spin_unlock_bh(&nf_conntrack_lock); } EXPORT_SYMBOL_GPL(nf_ct_unexpect_related); /* We don't increase the master conntrack refcount for non-fulfilled * conntracks. 
During the conntrack destruction, the expectations are * always killed before the conntrack itself */ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me) { struct nf_conntrack_expect *new; new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC); if (!new) return NULL; new->master = me; atomic_set(&new->use, 1); return new; } EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class, u_int8_t family, const union nf_inet_addr *saddr, const union nf_inet_addr *daddr, u_int8_t proto, const __be16 *src, const __be16 *dst) { int len; if (family == AF_INET) len = 4; else len = 16; exp->flags = 0; exp->class = class; exp->expectfn = NULL; exp->helper = NULL; exp->tuple.src.l3num = family; exp->tuple.dst.protonum = proto; if (saddr) { memcpy(&exp->tuple.src.u3, saddr, len); if (sizeof(exp->tuple.src.u3) > len) /* address needs to be cleared for nf_ct_tuple_equal */ memset((void *)&exp->tuple.src.u3 + len, 0x00, sizeof(exp->tuple.src.u3) - len); memset(&exp->mask.src.u3, 0xFF, len); if (sizeof(exp->mask.src.u3) > len) memset((void *)&exp->mask.src.u3 + len, 0x00, sizeof(exp->mask.src.u3) - len); } else { memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3)); memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3)); } if (src) { exp->tuple.src.u.all = *src; exp->mask.src.u.all = htons(0xFFFF); } else { exp->tuple.src.u.all = 0; exp->mask.src.u.all = 0; } memcpy(&exp->tuple.dst.u3, daddr, len); if (sizeof(exp->tuple.dst.u3) > len) /* address needs to be cleared for nf_ct_tuple_equal */ memset((void *)&exp->tuple.dst.u3 + len, 0x00, sizeof(exp->tuple.dst.u3) - len); exp->tuple.dst.u.all = *dst; } EXPORT_SYMBOL_GPL(nf_ct_expect_init); static void nf_ct_expect_free_rcu(struct rcu_head *head) { struct nf_conntrack_expect *exp; exp = container_of(head, struct nf_conntrack_expect, rcu); kmem_cache_free(nf_ct_expect_cachep, exp); } void nf_ct_expect_put(struct nf_conntrack_expect *exp) { if (atomic_dec_and_test(&exp->use)) call_rcu(&exp->rcu, nf_ct_expect_free_rcu); } EXPORT_SYMBOL_GPL(nf_ct_expect_put); static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) { struct nf_conn_help *master_help = nfct_help(exp->master); struct net *net = nf_ct_exp_net(exp); const struct nf_conntrack_expect_policy *p; unsigned int h = nf_ct_expect_dst_hash(&exp->tuple); /* two references : one for hash insert, one for the timer */ atomic_add(2, &exp->use); if (master_help) { hlist_add_head(&exp->lnode, &master_help->expectations); master_help->expecting[exp->class]++; } else if (exp->flags & NF_CT_EXPECT_USERSPACE) hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list); hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]); net->ct.expect_count++; setup_timer(&exp->timeout, nf_ct_expectation_timed_out, (unsigned long)exp); if (master_help) { p = &rcu_dereference_protected( master_help->helper, lockdep_is_held(&nf_conntrack_lock) )->expect_policy[exp->class]; exp->timeout.expires = jiffies + p->timeout * HZ; } add_timer(&exp->timeout); NF_CT_STAT_INC(net, expect_create); } /* Race with expectations being used means we could have none to find; OK. 
*/ static void evict_oldest_expect(struct nf_conn *master, struct nf_conntrack_expect *new) { struct nf_conn_help *master_help = nfct_help(master); struct nf_conntrack_expect *exp, *last = NULL; struct hlist_node *n; hlist_for_each_entry(exp, n, &master_help->expectations, lnode) { if (exp->class == new->class) last = exp; } if (last && del_timer(&last->timeout)) { nf_ct_unlink_expect(last); nf_ct_expect_put(last); } } static inline int refresh_timer(struct nf_conntrack_expect *i) { struct nf_conn_help *master_help = nfct_help(i->master); const struct nf_conntrack_expect_policy *p; if (!del_timer(&i->timeout)) return 0; p = &rcu_dereference_protected( master_help->helper, lockdep_is_held(&nf_conntrack_lock) )->expect_policy[i->class]; i->timeout.expires = jiffies + p->timeout * HZ; add_timer(&i->timeout); return 1; } static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) { const struct nf_conntrack_expect_policy *p; struct nf_conntrack_expect *i; struct nf_conn *master = expect->master; struct nf_conn_help *master_help = nfct_help(master); struct net *net = nf_ct_exp_net(expect); struct hlist_node *n; unsigned int h; int ret = 1; /* Don't allow expectations created from kernel-space with no helper */ if (!(expect->flags & NF_CT_EXPECT_USERSPACE) && (!master_help || (master_help && !master_help->helper))) { ret = -ESHUTDOWN; goto out; } h = nf_ct_expect_dst_hash(&expect->tuple); hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { if (expect_matches(i, expect)) { /* Refresh timer: if it's dying, ignore.. */ if (refresh_timer(i)) { ret = 0; goto out; } } else if (expect_clash(i, expect)) { ret = -EBUSY; goto out; } } /* Will be over limit? */ if (master_help) { p = &rcu_dereference_protected( master_help->helper, lockdep_is_held(&nf_conntrack_lock) )->expect_policy[expect->class]; if (p->max_expected && master_help->expecting[expect->class] >= p->max_expected) { evict_oldest_expect(master, expect); if (master_help->expecting[expect->class] >= p->max_expected) { ret = -EMFILE; goto out; } } } if (net->ct.expect_count >= nf_ct_expect_max) { if (net_ratelimit()) printk(KERN_WARNING "nf_conntrack: expectation table full\n"); ret = -EMFILE; } out: return ret; } int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, u32 pid, int report) { int ret; spin_lock_bh(&nf_conntrack_lock); ret = __nf_ct_expect_check(expect); if (ret <= 0) goto out; ret = 0; nf_ct_expect_insert(expect); spin_unlock_bh(&nf_conntrack_lock); nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report); return ret; out: spin_unlock_bh(&nf_conntrack_lock); return ret; } EXPORT_SYMBOL_GPL(nf_ct_expect_related_report); void nf_ct_remove_userspace_expectations(void) { struct nf_conntrack_expect *exp; struct hlist_node *n, *next; hlist_for_each_entry_safe(exp, n, next, &nf_ct_userspace_expect_list, lnode) { if (del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); nf_ct_expect_put(exp); } } } EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations); #ifdef CONFIG_PROC_FS struct ct_expect_iter_state { struct seq_net_private p; unsigned int bucket; }; static struct hlist_node *ct_expect_get_first(struct seq_file *seq) { struct net *net = seq_file_net(seq); struct ct_expect_iter_state *st = seq->private; struct hlist_node *n; for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); if (n) return n; } return NULL; } static struct hlist_node *ct_expect_get_next(struct seq_file *seq, struct hlist_node *head) { struct 
net *net = seq_file_net(seq); struct ct_expect_iter_state *st = seq->private; head = rcu_dereference(hlist_next_rcu(head)); while (head == NULL) { if (++st->bucket >= nf_ct_expect_hsize) return NULL; head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); } return head; } static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos) { struct hlist_node *head = ct_expect_get_first(seq); if (head) while (pos && (head = ct_expect_get_next(seq, head))) pos--; return pos ? NULL : head; } static void *exp_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return ct_expect_get_idx(seq, *pos); } static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { (*pos)++; return ct_expect_get_next(seq, v); } static void exp_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int exp_seq_show(struct seq_file *s, void *v) { struct nf_conntrack_expect *expect; struct nf_conntrack_helper *helper; struct hlist_node *n = v; char *delim = ""; expect = hlist_entry(n, struct nf_conntrack_expect, hnode); if (expect->timeout.function) seq_printf(s, "%ld ", timer_pending(&expect->timeout) ? (long)(expect->timeout.expires - jiffies)/HZ : 0); else seq_printf(s, "- "); seq_printf(s, "l3proto = %u proto=%u ", expect->tuple.src.l3num, expect->tuple.dst.protonum); print_tuple(s, &expect->tuple, __nf_ct_l3proto_find(expect->tuple.src.l3num), __nf_ct_l4proto_find(expect->tuple.src.l3num, expect->tuple.dst.protonum)); if (expect->flags & NF_CT_EXPECT_PERMANENT) { seq_printf(s, "PERMANENT"); delim = ","; } if (expect->flags & NF_CT_EXPECT_INACTIVE) { seq_printf(s, "%sINACTIVE", delim); delim = ","; } if (expect->flags & NF_CT_EXPECT_USERSPACE) seq_printf(s, "%sUSERSPACE", delim); helper = rcu_dereference(nfct_help(expect->master)->helper); if (helper) { seq_printf(s, "%s%s", expect->flags ? 
" " : "", helper->name); if (helper->expect_policy[expect->class].name) seq_printf(s, "/%s", helper->expect_policy[expect->class].name); } return seq_putc(s, '\n'); } static const struct seq_operations exp_seq_ops = { .start = exp_seq_start, .next = exp_seq_next, .stop = exp_seq_stop, .show = exp_seq_show }; static int exp_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &exp_seq_ops, sizeof(struct ct_expect_iter_state)); } static const struct file_operations exp_file_ops = { .owner = THIS_MODULE, .open = exp_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif /* CONFIG_PROC_FS */ static int exp_proc_init(struct net *net) { #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc; proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops); if (!proc) return -ENOMEM; #endif /* CONFIG_PROC_FS */ return 0; } static void exp_proc_remove(struct net *net) { #ifdef CONFIG_PROC_FS proc_net_remove(net, "nf_conntrack_expect"); #endif /* CONFIG_PROC_FS */ } module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); int nf_conntrack_expect_init(struct net *net) { int err = -ENOMEM; if (net_eq(net, &init_net)) { if (!nf_ct_expect_hsize) { nf_ct_expect_hsize = net->ct.htable_size / 256; if (!nf_ct_expect_hsize) nf_ct_expect_hsize = 1; } nf_ct_expect_max = nf_ct_expect_hsize * 4; } net->ct.expect_count = 0; net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0); if (net->ct.expect_hash == NULL) goto err1; if (net_eq(net, &init_net)) { nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect", sizeof(struct nf_conntrack_expect), 0, 0, NULL); if (!nf_ct_expect_cachep) goto err2; } err = exp_proc_init(net); if (err < 0) goto err3; return 0; err3: if (net_eq(net, &init_net)) kmem_cache_destroy(nf_ct_expect_cachep); err2: nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); err1: return err; } void nf_conntrack_expect_fini(struct net *net) { exp_proc_remove(net); if (net_eq(net, &init_net)) { rcu_barrier(); /* Wait for call_rcu() before destroy */ kmem_cache_destroy(nf_ct_expect_cachep); } nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); }
MoKee/android_kernel_motorola_olympus
net/netfilter/nf_conntrack_expect.c
C
gpl-2.0
17,951
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <types.h> #include "phytbl_lcn.h" static const u32 dot11lcn_gain_tbl_rev0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x00000004, 0x00000008, 0x00000001, 0x00000005, 0x00000009, 0x0000000d, 0x0000004d, 0x0000008d, 0x0000000d, 0x0000004d, 0x0000008d, 0x000000cd, 0x0000004f, 0x0000008f, 0x000000cf, 0x000000d3, 0x00000113, 0x00000513, 0x00000913, 0x00000953, 0x00000d53, 0x00001153, 0x00001193, 0x00005193, 0x00009193, 0x0000d193, 0x00011193, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x00000004, 0x00000008, 0x00000001, 0x00000005, 0x00000009, 0x0000000d, 0x0000004d, 0x0000008d, 0x0000000d, 0x0000004d, 0x0000008d, 0x000000cd, 0x0000004f, 0x0000008f, 0x000000cf, 0x000000d3, 0x00000113, 0x00000513, 0x00000913, 0x00000953, 0x00000d53, 0x00001153, 0x00005153, 0x00009153, 0x0000d153, 0x00011153, 0x00015153, 0x00019153, 0x0001d153, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u32 dot11lcn_gain_tbl_rev1[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008, 0x00000004, 0x00000008, 0x00000001, 0x00000005, 0x00000009, 0x0000000D, 0x00000011, 0x00000051, 0x00000091, 0x00000011, 0x00000051, 0x00000091, 0x000000d1, 0x00000053, 0x00000093, 0x000000d3, 0x000000d7, 0x00000117, 0x00000517, 0x00000917, 0x00000957, 0x00000d57, 0x00001157, 0x00001197, 0x00005197, 0x00009197, 0x0000d197, 0x00011197, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008, 0x00000004, 0x00000008, 0x00000001, 0x00000005, 0x00000009, 0x0000000D, 0x00000011, 0x00000051, 0x00000091, 0x00000011, 0x00000051, 0x00000091, 0x000000d1, 0x00000053, 0x00000093, 0x000000d3, 0x000000d7, 0x00000117, 0x00000517, 0x00000917, 0x00000957, 0x00000d57, 0x00001157, 0x00005157, 0x00009157, 0x0000d157, 0x00011157, 0x00015157, 0x00019157, 0x0001d157, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u16 dot11lcn_aux_gain_idx_tbl_rev0[] = { 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a, 0x058b, 0x058c, 0x058d, 0x058e, 0x058f, 0x0090, 0x0091, 0x0092, 0x0193, 0x0194, 0x0195, 0x0196, 0x0197, 0x0198, 0x0199, 0x019a, 0x019b, 0x019c, 0x019d, 0x019e, 0x019f, 0x01a0, 0x01a1, 0x01a2, 0x01a3, 0x01a4, 
0x01a5, 0x0000, }; static const u32 dot11lcn_gain_idx_tbl_rev0[] = { 0x00000000, 0x00000000, 0x10000000, 0x00000000, 0x20000000, 0x00000000, 0x30000000, 0x00000000, 0x40000000, 0x00000000, 0x50000000, 0x00000000, 0x60000000, 0x00000000, 0x70000000, 0x00000000, 0x80000000, 0x00000000, 0x90000000, 0x00000008, 0xa0000000, 0x00000008, 0xb0000000, 0x00000008, 0xc0000000, 0x00000008, 0xd0000000, 0x00000008, 0xe0000000, 0x00000008, 0xf0000000, 0x00000008, 0x00000000, 0x00000009, 0x10000000, 0x00000009, 0x20000000, 0x00000019, 0x30000000, 0x00000019, 0x40000000, 0x00000019, 0x50000000, 0x00000019, 0x60000000, 0x00000019, 0x70000000, 0x00000019, 0x80000000, 0x00000019, 0x90000000, 0x00000019, 0xa0000000, 0x00000019, 0xb0000000, 0x00000019, 0xc0000000, 0x00000019, 0xd0000000, 0x00000019, 0xe0000000, 0x00000019, 0xf0000000, 0x00000019, 0x00000000, 0x0000001a, 0x10000000, 0x0000001a, 0x20000000, 0x0000001a, 0x30000000, 0x0000001a, 0x40000000, 0x0000001a, 0x50000000, 0x00000002, 0x60000000, 0x00000002, 0x70000000, 0x00000002, 0x80000000, 0x00000002, 0x90000000, 0x00000002, 0xa0000000, 0x00000002, 0xb0000000, 0x00000002, 0xc0000000, 0x0000000a, 0xd0000000, 0x0000000a, 0xe0000000, 0x0000000a, 0xf0000000, 0x0000000a, 0x00000000, 0x0000000b, 0x10000000, 0x0000000b, 0x20000000, 0x0000000b, 0x30000000, 0x0000000b, 0x40000000, 0x0000000b, 0x50000000, 0x0000001b, 0x60000000, 0x0000001b, 0x70000000, 0x0000001b, 0x80000000, 0x0000001b, 0x90000000, 0x0000001b, 0xa0000000, 0x0000001b, 0xb0000000, 0x0000001b, 0xc0000000, 0x0000001b, 0xd0000000, 0x0000001b, 0xe0000000, 0x0000001b, 0xf0000000, 0x0000001b, 0x00000000, 0x0000001c, 0x10000000, 0x0000001c, 0x20000000, 0x0000001c, 0x30000000, 0x0000001c, 0x40000000, 0x0000001c, 0x50000000, 0x0000001c, 0x60000000, 0x0000001c, 0x70000000, 0x0000001c, 0x80000000, 0x0000001c, 0x90000000, 0x0000001c, }; static const u16 dot11lcn_aux_gain_idx_tbl_2G[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0080, 0x0081, 0x0100, 0x0101, 0x0180, 0x0181, 0x0182, 0x0183, 0x0184, 0x0185, 0x0186, 0x0187, 0x0188, 0x0285, 0x0289, 0x028a, 0x028b, 0x028c, 0x028d, 0x028e, 0x028f, 0x0290, 0x0291, 0x0292, 0x0293, 0x0294, 0x0295, 0x0296, 0x0297, 0x0298, 0x0299, 0x029a, 0x0000 }; static const u8 dot11lcn_gain_val_tbl_2G[] = { 0xfc, 0x02, 0x08, 0x0e, 0x13, 0x1b, 0xfc, 0x02, 0x08, 0x0e, 0x13, 0x1b, 0xfc, 0x00, 0x0c, 0x03, 0xeb, 0xfe, 0x07, 0x0b, 0x0f, 0xfb, 0xfe, 0x01, 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15, 0x18, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u32 dot11lcn_gain_idx_tbl_2G[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000000, 0x00000000, 0x00000000, 0x00000008, 0x10000000, 0x00000008, 0x00000000, 0x00000010, 0x10000000, 0x00000010, 0x00000000, 0x00000018, 0x10000000, 0x00000018, 0x20000000, 0x00000018, 0x30000000, 0x00000018, 0x40000000, 0x00000018, 0x50000000, 0x00000018, 0x60000000, 0x00000018, 0x70000000, 0x00000018, 0x80000000, 0x00000018, 0x50000000, 0x00000028, 0x90000000, 0x00000028, 0xa0000000, 0x00000028, 0xb0000000, 0x00000028, 0xc0000000, 0x00000028, 0xd0000000, 0x00000028, 0xe0000000, 0x00000028, 0xf0000000, 0x00000028, 0x00000000, 0x00000029, 0x10000000, 0x00000029, 0x20000000, 0x00000029, 0x30000000, 0x00000029, 0x40000000, 0x00000029, 0x50000000, 0x00000029, 0x60000000, 0x00000029, 0x70000000, 0x00000029, 0x80000000, 0x00000029, 
0x90000000, 0x00000029, 0xa0000000, 0x00000029, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000000, 0x00000000, 0x00000000, 0x00000008, 0x10000000, 0x00000008, 0x00000000, 0x00000010, 0x10000000, 0x00000010, 0x00000000, 0x00000018, 0x10000000, 0x00000018, 0x20000000, 0x00000018, 0x30000000, 0x00000018, 0x40000000, 0x00000018, 0x50000000, 0x00000018, 0x60000000, 0x00000018, 0x70000000, 0x00000018, 0x80000000, 0x00000018, 0x50000000, 0x00000028, 0x90000000, 0x00000028, 0xa0000000, 0x00000028, 0xb0000000, 0x00000028, 0xc0000000, 0x00000028, 0xd0000000, 0x00000028, 0xe0000000, 0x00000028, 0xf0000000, 0x00000028, 0x00000000, 0x00000029, 0x10000000, 0x00000029, 0x20000000, 0x00000029, 0x30000000, 0x00000029, 0x40000000, 0x00000029, 0x50000000, 0x00000029, 0x60000000, 0x00000029, 0x70000000, 0x00000029, 0x80000000, 0x00000029, 0x90000000, 0x00000029, 0xa0000000, 0x00000029, 0xb0000000, 0x00000029, 0xc0000000, 0x00000029, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; static const u32 dot11lcn_gain_tbl_2G[] = { 0x00000000, 0x00000004, 0x00000008, 0x00000001, 0x00000005, 0x00000009, 0x0000000d, 0x0000004d, 0x0000008d, 0x00000049, 0x00000089, 0x000000c9, 0x0000004b, 0x0000008b, 0x000000cb, 0x000000cf, 0x0000010f, 0x0000050f, 0x0000090f, 0x0000094f, 0x00000d4f, 0x0000114f, 0x0000118f, 0x0000518f, 0x0000918f, 0x0000d18f, 0x0001118f, 0x0001518f, 0x0001918f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; static const u32 dot11lcn_gain_tbl_extlna_2G[] = { 0x00000000, 0x00000004, 0x00000008, 0x00000001, 0x00000005, 0x00000009, 0x0000000d, 0x00000003, 0x00000007, 0x0000000b, 0x0000000f, 0x0000004f, 0x0000008f, 0x000000cf, 0x0000010f, 0x0000014f, 0x0000018f, 0x0000058f, 0x0000098f, 0x00000d8f, 0x00008000, 0x00008004, 0x00008008, 0x00008001, 0x00008005, 0x00008009, 0x0000800d, 0x00008003, 0x00008007, 0x0000800b, 0x0000800f, 0x0000804f, 0x0000808f, 0x000080cf, 0x0000810f, 0x0000814f, 0x0000818f, 0x0000858f, 0x0000898f, 0x00008d8f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; static const u16 dot11lcn_aux_gain_idx_tbl_extlna_2G[] = { 0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 
0x0483, 0x0484, 0x0485, 0x0486, 0x0583, 0x0584, 0x0585, 0x0587, 0x0588, 0x0589, 0x058a, 0x0687, 0x0688, 0x0689, 0x068a, 0x068b, 0x068c, 0x068d, 0x068e, 0x068f, 0x0690, 0x0691, 0x0692, 0x0693, 0x0000 }; static const u8 dot11lcn_gain_val_tbl_extlna_2G[] = { 0xfc, 0x02, 0x08, 0x0e, 0x13, 0x1b, 0xfc, 0x02, 0x08, 0x0e, 0x13, 0x1b, 0xfc, 0x00, 0x0f, 0x03, 0xeb, 0xfe, 0x07, 0x0b, 0x0f, 0xfb, 0xfe, 0x01, 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15, 0x18, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u32 dot11lcn_gain_idx_tbl_extlna_2G[] = { 0x00000000, 0x00000040, 0x00000000, 0x00000040, 0x00000000, 0x00000040, 0x00000000, 0x00000040, 0x00000000, 0x00000040, 0x00000000, 0x00000040, 0x00000000, 0x00000040, 0x00000000, 0x00000040, 0x00000000, 0x00000040, 0x10000000, 0x00000040, 0x20000000, 0x00000040, 0x30000000, 0x00000040, 0x40000000, 0x00000040, 0x30000000, 0x00000048, 0x40000000, 0x00000048, 0x50000000, 0x00000048, 0x60000000, 0x00000048, 0x30000000, 0x00000058, 0x40000000, 0x00000058, 0x50000000, 0x00000058, 0x70000000, 0x00000058, 0x80000000, 0x00000058, 0x90000000, 0x00000058, 0xa0000000, 0x00000058, 0x70000000, 0x00000068, 0x80000000, 0x00000068, 0x90000000, 0x00000068, 0xa0000000, 0x00000068, 0xb0000000, 0x00000068, 0xc0000000, 0x00000068, 0xd0000000, 0x00000068, 0xe0000000, 0x00000068, 0xf0000000, 0x00000068, 0x00000000, 0x00000069, 0x10000000, 0x00000069, 0x20000000, 0x00000069, 0x30000000, 0x00000069, 0x40000000, 0x00000041, 0x40000000, 0x00000041, 0x40000000, 0x00000041, 0x40000000, 0x00000041, 0x40000000, 0x00000041, 0x40000000, 0x00000041, 0x40000000, 0x00000041, 0x40000000, 0x00000041, 0x40000000, 0x00000041, 0x50000000, 0x00000041, 0x60000000, 0x00000041, 0x70000000, 0x00000041, 0x80000000, 0x00000041, 0x70000000, 0x00000049, 0x80000000, 0x00000049, 0x90000000, 0x00000049, 0xa0000000, 0x00000049, 0x70000000, 0x00000059, 0x80000000, 0x00000059, 0x90000000, 0x00000059, 0xb0000000, 0x00000059, 0xc0000000, 0x00000059, 0xd0000000, 0x00000059, 0xe0000000, 0x00000059, 0xb0000000, 0x00000069, 0xc0000000, 0x00000069, 0xd0000000, 0x00000069, 0xe0000000, 0x00000069, 0xf0000000, 0x00000069, 0x00000000, 0x0000006a, 0x10000000, 0x0000006a, 0x20000000, 0x0000006a, 0x30000000, 0x0000006a, 0x40000000, 0x0000006a, 0x50000000, 0x0000006a, 0x60000000, 0x0000006a, 0x70000000, 0x0000006a, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; static const u32 dot11lcn_aux_gain_idx_tbl_5G[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0186, 0x0187, 0x0188, 0x0189, 0x018a, 0x018b, 0x018c, 0x018d, 0x018e, 0x018f, 0x0190, 0x0191, 0x0192, 0x0193, 0x0194, 0x0195, 0x0196, 0x0197, 0x0198, 0x0199, 0x019a, 0x019b, 0x019c, 0x019d, 0x0000 }; static const u32 dot11lcn_gain_val_tbl_5G[] = { 0xf7, 0xfd, 0x00, 0x04, 0x04, 0x04, 0xf7, 0xfd, 0x00, 0x04, 0x04, 0x04, 0xf6, 0x00, 0x0c, 0x03, 0xeb, 0xfe, 0x06, 0x0a, 0x10, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15, 0x18, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15, 0x18, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u32 dot11lcn_gain_idx_tbl_5G[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000000, 0x00000000, 0x20000000, 0x00000000, 
0x30000000, 0x00000000, 0x40000000, 0x00000000, 0x30000000, 0x00000008, 0x40000000, 0x00000008, 0x50000000, 0x00000008, 0x60000000, 0x00000008, 0x70000000, 0x00000008, 0x60000000, 0x00000018, 0x70000000, 0x00000018, 0x80000000, 0x00000018, 0x90000000, 0x00000018, 0xa0000000, 0x00000018, 0xb0000000, 0x00000018, 0xc0000000, 0x00000018, 0xd0000000, 0x00000018, 0xe0000000, 0x00000018, 0xf0000000, 0x00000018, 0x00000000, 0x00000019, 0x10000000, 0x00000019, 0x20000000, 0x00000019, 0x30000000, 0x00000019, 0x40000000, 0x00000019, 0x50000000, 0x00000019, 0x60000000, 0x00000019, 0x70000000, 0x00000019, 0x80000000, 0x00000019, 0x90000000, 0x00000019, 0xa0000000, 0x00000019, 0xb0000000, 0x00000019, 0xc0000000, 0x00000019, 0xd0000000, 0x00000019, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; static const u32 dot11lcn_gain_tbl_5G[] = { 0x00000000, 0x00000040, 0x00000080, 0x00000001, 0x00000005, 0x00000009, 0x0000000d, 0x00000011, 0x00000015, 0x00000055, 0x00000095, 0x00000017, 0x0000001b, 0x0000005b, 0x0000009b, 0x000000db, 0x0000011b, 0x0000015b, 0x0000019b, 0x0000059b, 0x0000099b, 0x00000d9b, 0x0000119b, 0x0000519b, 0x0000919b, 0x0000d19b, 0x0001119b, 0x0001519b, 0x0001919b, 0x0001d19b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = { {&dot11lcn_gain_tbl_rev0, ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_rev0, ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_rev0, ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32} , }; static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = { {&dot11lcn_gain_tbl_rev1, ARRAY_SIZE(dot11lcn_gain_tbl_rev1), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_rev0, ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_rev0, ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32} , }; const struct phytbl_info 
dot11lcnphytbl_rx_gain_info_2G_rev2[] = { {&dot11lcn_gain_tbl_2G, ARRAY_SIZE(dot11lcn_gain_tbl_2G), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_2G, ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_2G), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_2G, ARRAY_SIZE(dot11lcn_gain_idx_tbl_2G), 13, 0, 32} , {&dot11lcn_gain_val_tbl_2G, ARRAY_SIZE(dot11lcn_gain_val_tbl_2G), 17, 0, 8} }; const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = { {&dot11lcn_gain_tbl_5G, ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_5G, ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_5G, ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G), 13, 0, 32} , {&dot11lcn_gain_val_tbl_5G, ARRAY_SIZE(dot11lcn_gain_val_tbl_5G), 17, 0, 8} }; const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = { {&dot11lcn_gain_tbl_extlna_2G, ARRAY_SIZE(dot11lcn_gain_tbl_extlna_2G), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_extlna_2G, ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_extlna_2G), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_extlna_2G, ARRAY_SIZE(dot11lcn_gain_idx_tbl_extlna_2G), 13, 0, 32} , {&dot11lcn_gain_val_tbl_extlna_2G, ARRAY_SIZE(dot11lcn_gain_val_tbl_extlna_2G), 17, 0, 8} }; const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = { {&dot11lcn_gain_tbl_5G, ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_5G, ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16} , {&dot11lcn_gain_idx_tbl_5G, ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G), 13, 0, 32} , {&dot11lcn_gain_val_tbl_5G, ARRAY_SIZE(dot11lcn_gain_val_tbl_5G), 17, 0, 8} }; const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 = ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_rev0); const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz = ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_2G_rev2); const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz = ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_5G_rev2); static const u16 dot11lcn_min_sig_sq_tbl_rev0[] = { 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, }; static const u16 dot11lcn_noise_scale_tbl_rev0[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u32 dot11lcn_fltr_ctrl_tbl_rev0[] = { 0x000141f8, 0x000021f8, 0x000021fb, 0x000041fb, 0x0001fe4b, 0x0000217b, 0x00002133, 0x000040eb, 0x0001fea3, 0x0000024b, }; static const u32 dot11lcn_ps_ctrl_tbl_rev0[] = { 0x00100001, 0x00200010, 0x00300001, 0x00400010, 0x00500022, 0x00600122, 0x00700222, 0x00800322, 0x00900422, 0x00a00522, 0x00b00622, 0x00c00722, 0x00d00822, 0x00f00922, 0x00100a22, 0x00200b22, 0x00300c22, 0x00400d22, 0x00500e22, 0x00600f22, }; static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[] = { 0x0007, 0x0005, 
0x0006, 0x0004, 0x0007, 0x0005, 0x0006, 0x0004, 0x0007, 0x0005, 0x0006, 0x0004, 0x0007, 0x0005, 0x0006, 0x0004, 0x000b, 0x000b, 0x000a, 0x000a, 0x000b, 0x000b, 0x000a, 0x000a, 0x000b, 0x000b, 0x000a, 0x000a, 0x000b, 0x000b, 0x000a, 0x000a, 0x0007, 0x0005, 0x0006, 0x0004, 0x0007, 0x0005, 0x0006, 0x0004, 0x0007, 0x0005, 0x0006, 0x0004, 0x0007, 0x0005, 0x0006, 0x0004, 0x000b, 0x000b, 0x000a, 0x000a, 0x000b, 0x000b, 0x000a, 0x000a, 0x000b, 0x000b, 0x000a, 0x000a, 0x000b, 0x000b, 0x000a, 0x000a, }; static const u16 dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[] = { 0x0007, 0x0005, 0x0002, 0x0000, 0x0007, 0x0005, 0x0002, 0x0000, 0x0007, 0x0005, 0x0002, 0x0000, 0x0007, 0x0005, 0x0002, 0x0000, 0x0007, 0x0007, 0x0002, 0x0002, 0x0007, 0x0007, 0x0002, 0x0002, 0x0007, 0x0007, 0x0002, 0x0002, 0x0007, 0x0007, 0x0002, 0x0002, 0x0007, 0x0005, 0x0002, 0x0000, 0x0007, 0x0005, 0x0002, 0x0000, 0x0007, 0x0005, 0x0002, 0x0000, 0x0007, 0x0005, 0x0002, 0x0000, 0x0007, 0x0007, 0x0002, 0x0002, 0x0007, 0x0007, 0x0002, 0x0002, 0x0007, 0x0007, 0x0002, 0x0002, 0x0007, 0x0007, 0x0002, 0x0002, }; static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = { 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, }; static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, }; static const u16 dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo[] = { 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, 0x0005, 0x0006, 0x0009, 0x000a, }; static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, 0x0004, 0x0004, 0x0002, 0x0002, }; static const u8 dot11lcn_nf_table_rev0[] = { 0x5f, 0x36, 0x29, 0x1f, 0x5f, 0x36, 0x29, 0x1f, 0x5f, 0x36, 0x29, 0x1f, 0x5f, 0x36, 0x29, 0x1f, }; static const u8 dot11lcn_gain_val_tbl_rev0[] 
= { 0x09, 0x0f, 0x14, 0x18, 0xfe, 0x07, 0x0b, 0x0f, 0xfb, 0xfe, 0x01, 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0c, 0x0f, 0x12, 0x15, 0x18, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xeb, 0x00, 0x00, }; static const u8 dot11lcn_spur_tbl_rev0[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x03, 0x01, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x03, 0x01, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, }; static const u16 dot11lcn_unsup_mcs_tbl_rev0[] = { 0x001a, 0x0034, 0x004e, 0x0068, 0x009c, 0x00d0, 0x00ea, 0x0104, 0x0034, 0x0068, 0x009c, 0x00d0, 0x0138, 0x01a0, 0x01d4, 0x0208, 0x004e, 0x009c, 0x00ea, 0x0138, 0x01d4, 0x0270, 0x02be, 0x030c, 0x0068, 0x00d0, 0x0138, 0x01a0, 0x0270, 0x0340, 0x03a8, 0x0410, 0x0018, 0x009c, 0x00d0, 0x0104, 0x00ea, 0x0138, 0x0186, 0x00d0, 0x0104, 0x0104, 0x0138, 0x016c, 0x016c, 0x01a0, 0x0138, 0x0186, 0x0186, 0x01d4, 0x0222, 0x0222, 0x0270, 0x0104, 0x0138, 0x016c, 0x0138, 0x016c, 0x01a0, 0x01d4, 0x01a0, 0x01d4, 0x0208, 0x0208, 0x023c, 0x0186, 0x01d4, 0x0222, 0x01d4, 0x0222, 0x0270, 0x02be, 0x0270, 0x02be, 0x030c, 0x030c, 0x035a, 0x0036, 0x006c, 0x00a2, 0x00d8, 0x0144, 0x01b0, 0x01e6, 0x021c, 0x006c, 0x00d8, 0x0144, 0x01b0, 0x0288, 0x0360, 0x03cc, 0x0438, 0x00a2, 0x0144, 0x01e6, 0x0288, 0x03cc, 0x0510, 0x05b2, 0x0654, 0x00d8, 0x01b0, 0x0288, 0x0360, 0x0510, 0x06c0, 0x0798, 0x0870, 0x0018, 0x0144, 0x01b0, 0x021c, 0x01e6, 0x0288, 0x032a, 0x01b0, 0x021c, 0x021c, 0x0288, 0x02f4, 0x02f4, 0x0360, 0x0288, 0x032a, 0x032a, 0x03cc, 0x046e, 0x046e, 0x0510, 0x021c, 0x0288, 0x02f4, 0x0288, 0x02f4, 0x0360, 0x03cc, 0x0360, 0x03cc, 0x0438, 0x0438, 0x04a4, 0x032a, 0x03cc, 0x046e, 0x03cc, 0x046e, 0x0510, 0x05b2, 0x0510, 0x05b2, 0x0654, 0x0654, 0x06f6, }; static const u16 dot11lcn_iq_local_tbl_rev0[] = { 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u32 dot11lcn_papd_compdelta_tbl_rev0[] = { 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 
0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, }; const struct phytbl_info dot11lcnphytbl_info_rev0[] = { {&dot11lcn_min_sig_sq_tbl_rev0, ARRAY_SIZE(dot11lcn_min_sig_sq_tbl_rev0), 2, 0, 16} , {&dot11lcn_noise_scale_tbl_rev0, ARRAY_SIZE(dot11lcn_noise_scale_tbl_rev0), 1, 0, 16} , {&dot11lcn_fltr_ctrl_tbl_rev0, ARRAY_SIZE(dot11lcn_fltr_ctrl_tbl_rev0), 11, 0, 32} , {&dot11lcn_ps_ctrl_tbl_rev0, ARRAY_SIZE(dot11lcn_ps_ctrl_tbl_rev0), 12, 0, 32} , {&dot11lcn_gain_idx_tbl_rev0, ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32} , {&dot11lcn_aux_gain_idx_tbl_rev0, ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16} , {&dot11lcn_sw_ctrl_tbl_rev0, ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_rev0), 15, 0, 16} , {&dot11lcn_nf_table_rev0, ARRAY_SIZE(dot11lcn_nf_table_rev0), 16, 0, 8} , {&dot11lcn_gain_val_tbl_rev0, ARRAY_SIZE(dot11lcn_gain_val_tbl_rev0), 17, 0, 8} , {&dot11lcn_gain_tbl_rev0, ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18, 0, 32} , {&dot11lcn_spur_tbl_rev0, ARRAY_SIZE(dot11lcn_spur_tbl_rev0), 20, 0, 8} , {&dot11lcn_unsup_mcs_tbl_rev0, ARRAY_SIZE(dot11lcn_unsup_mcs_tbl_rev0), 23, 0, 16} , {&dot11lcn_iq_local_tbl_rev0, ARRAY_SIZE(dot11lcn_iq_local_tbl_rev0), 0, 0, 16} , {&dot11lcn_papd_compdelta_tbl_rev0, ARRAY_SIZE(dot11lcn_papd_compdelta_tbl_rev0), 24, 0, 32} , }; const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = { &dot11lcn_sw_ctrl_tbl_4313_rev0, ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_rev0), 15, 0, 16 }; const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa = { &dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo, ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo), 15, 0, 16 }; const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = { &dot11lcn_sw_ctrl_tbl_4313_epa_rev0, ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0), 15, 0, 16 }; const struct phytbl_info 
dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = { &dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo, ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo), 15, 0, 16 }; const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = { &dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0, ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0), 15, 0, 16 }; const u32 dot11lcnphytbl_info_sz_rev0 = ARRAY_SIZE(dot11lcnphytbl_info_rev0); const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = { {3, 0, 31, 0, 72}, {3, 0, 31, 0, 70}, {3, 0, 31, 0, 68}, {3, 0, 30, 0, 67}, {3, 0, 29, 0, 68}, {3, 0, 28, 0, 68}, {3, 0, 27, 0, 69}, {3, 0, 26, 0, 70}, {3, 0, 25, 0, 70}, {3, 0, 24, 0, 71}, {3, 0, 23, 0, 72}, {3, 0, 23, 0, 70}, {3, 0, 22, 0, 71}, {3, 0, 21, 0, 72}, {3, 0, 21, 0, 70}, {3, 0, 21, 0, 68}, {3, 0, 21, 0, 66}, {3, 0, 21, 0, 64}, {3, 0, 21, 0, 63}, {3, 0, 20, 0, 64}, {3, 0, 19, 0, 65}, {3, 0, 19, 0, 64}, {3, 0, 18, 0, 65}, {3, 0, 18, 0, 64}, {3, 0, 17, 0, 65}, {3, 0, 17, 0, 64}, {3, 0, 16, 0, 65}, {3, 0, 16, 0, 64}, {3, 0, 16, 0, 62}, {3, 0, 16, 0, 60}, {3, 0, 16, 0, 58}, {3, 0, 15, 0, 61}, {3, 0, 15, 0, 59}, {3, 0, 14, 0, 61}, {3, 0, 14, 0, 60}, {3, 0, 14, 0, 58}, {3, 0, 13, 0, 60}, {3, 0, 13, 0, 59}, {3, 0, 12, 0, 62}, {3, 0, 12, 0, 60}, {3, 0, 12, 0, 58}, {3, 0, 11, 0, 62}, {3, 0, 11, 0, 60}, {3, 0, 11, 0, 59}, {3, 0, 11, 0, 57}, {3, 0, 10, 0, 61}, {3, 0, 10, 0, 59}, {3, 0, 10, 0, 57}, {3, 0, 9, 0, 62}, {3, 0, 9, 0, 60}, {3, 0, 9, 0, 58}, {3, 0, 9, 0, 57}, {3, 0, 8, 0, 62}, {3, 0, 8, 0, 60}, {3, 0, 8, 0, 58}, {3, 0, 8, 0, 57}, {3, 0, 8, 0, 55}, {3, 0, 7, 0, 61}, {3, 0, 7, 0, 60}, {3, 0, 7, 0, 58}, {3, 0, 7, 0, 56}, {3, 0, 7, 0, 55}, {3, 0, 6, 0, 62}, {3, 0, 6, 0, 60}, {3, 0, 6, 0, 58}, {3, 0, 6, 0, 57}, {3, 0, 6, 0, 55}, {3, 0, 6, 0, 54}, {3, 0, 6, 0, 52}, {3, 0, 5, 0, 61}, {3, 0, 5, 0, 59}, {3, 0, 5, 0, 57}, {3, 0, 5, 0, 56}, {3, 0, 5, 0, 54}, {3, 0, 5, 0, 53}, {3, 0, 5, 0, 51}, {3, 0, 4, 0, 62}, {3, 0, 4, 0, 60}, {3, 0, 4, 0, 58}, {3, 0, 4, 0, 57}, {3, 0, 4, 0, 55}, {3, 0, 4, 0, 54}, {3, 0, 4, 0, 52}, {3, 0, 4, 0, 51}, {3, 0, 4, 0, 49}, {3, 0, 4, 0, 48}, {3, 0, 4, 0, 46}, {3, 0, 3, 0, 60}, {3, 0, 3, 0, 58}, {3, 0, 3, 0, 57}, {3, 0, 3, 0, 55}, {3, 0, 3, 0, 54}, {3, 0, 3, 0, 52}, {3, 0, 3, 0, 51}, {3, 0, 3, 0, 49}, {3, 0, 3, 0, 48}, {3, 0, 3, 0, 46}, {3, 0, 3, 0, 45}, {3, 0, 3, 0, 44}, {3, 0, 3, 0, 43}, {3, 0, 3, 0, 41}, {3, 0, 2, 0, 61}, {3, 0, 2, 0, 59}, {3, 0, 2, 0, 57}, {3, 0, 2, 0, 56}, {3, 0, 2, 0, 54}, {3, 0, 2, 0, 53}, {3, 0, 2, 0, 51}, {3, 0, 2, 0, 50}, {3, 0, 2, 0, 48}, {3, 0, 2, 0, 47}, {3, 0, 2, 0, 46}, {3, 0, 2, 0, 44}, {3, 0, 2, 0, 43}, {3, 0, 2, 0, 42}, {3, 0, 2, 0, 41}, {3, 0, 2, 0, 39}, {3, 0, 2, 0, 38}, {3, 0, 2, 0, 37}, {3, 0, 2, 0, 36}, {3, 0, 2, 0, 35}, {3, 0, 2, 0, 34}, {3, 0, 2, 0, 33}, {3, 0, 2, 0, 32}, {3, 0, 1, 0, 63}, {3, 0, 1, 0, 61}, {3, 0, 1, 0, 59}, {3, 0, 1, 0, 57}, }; const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = { {15, 0, 31, 0, 72}, {15, 0, 31, 0, 70}, {15, 0, 31, 0, 68}, {15, 0, 30, 0, 68}, {15, 0, 29, 0, 69}, {15, 0, 28, 0, 69}, {15, 0, 27, 0, 70}, {15, 0, 26, 0, 70}, {15, 0, 25, 0, 71}, {15, 0, 24, 0, 72}, {15, 0, 23, 0, 73}, {15, 0, 23, 0, 71}, {15, 0, 22, 0, 72}, {15, 0, 21, 0, 73}, {15, 0, 21, 0, 71}, {15, 0, 21, 0, 69}, {15, 0, 21, 0, 67}, {15, 0, 21, 0, 65}, {15, 0, 21, 0, 63}, {15, 0, 20, 0, 65}, {15, 0, 19, 0, 66}, {15, 0, 19, 0, 64}, {15, 0, 18, 0, 66}, {15, 0, 18, 0, 64}, {15, 0, 17, 0, 66}, {15, 0, 17, 0, 64}, {15, 0, 16, 0, 66}, {15, 0, 16, 0, 64}, {15, 0, 16, 0, 62}, {15, 0, 16, 0, 61}, {15, 0, 16, 0, 59}, 
{15, 0, 15, 0, 61}, {15, 0, 15, 0, 59}, {15, 0, 14, 0, 62}, {15, 0, 14, 0, 60}, {15, 0, 14, 0, 58}, {15, 0, 13, 0, 61}, {15, 0, 13, 0, 59}, {15, 0, 12, 0, 62}, {15, 0, 12, 0, 61}, {15, 0, 12, 0, 59}, {15, 0, 11, 0, 62}, {15, 0, 11, 0, 61}, {15, 0, 11, 0, 59}, {15, 0, 11, 0, 57}, {15, 0, 10, 0, 61}, {15, 0, 10, 0, 59}, {15, 0, 10, 0, 58}, {15, 0, 9, 0, 62}, {15, 0, 9, 0, 61}, {15, 0, 9, 0, 59}, {15, 0, 9, 0, 57}, {15, 0, 8, 0, 62}, {15, 0, 8, 0, 61}, {15, 0, 8, 0, 59}, {15, 0, 8, 0, 57}, {15, 0, 8, 0, 56}, {15, 0, 8, 0, 54}, {15, 0, 8, 0, 53}, {15, 0, 8, 0, 51}, {15, 0, 8, 0, 50}, {7, 0, 7, 0, 69}, {7, 0, 7, 0, 67}, {7, 0, 7, 0, 65}, {7, 0, 7, 0, 64}, {7, 0, 7, 0, 62}, {7, 0, 7, 0, 60}, {7, 0, 7, 0, 58}, {7, 0, 7, 0, 57}, {7, 0, 7, 0, 55}, {7, 0, 6, 0, 62}, {7, 0, 6, 0, 61}, {7, 0, 6, 0, 59}, {7, 0, 6, 0, 57}, {7, 0, 6, 0, 56}, {7, 0, 6, 0, 54}, {7, 0, 6, 0, 53}, {7, 0, 5, 0, 61}, {7, 0, 5, 0, 60}, {7, 0, 5, 0, 58}, {7, 0, 5, 0, 56}, {7, 0, 5, 0, 55}, {7, 0, 5, 0, 53}, {7, 0, 5, 0, 52}, {7, 0, 5, 0, 50}, {7, 0, 5, 0, 49}, {7, 0, 5, 0, 47}, {7, 0, 4, 0, 57}, {7, 0, 4, 0, 56}, {7, 0, 4, 0, 54}, {7, 0, 4, 0, 53}, {7, 0, 4, 0, 51}, {7, 0, 4, 0, 50}, {7, 0, 4, 0, 48}, {7, 0, 4, 0, 47}, {7, 0, 4, 0, 46}, {7, 0, 4, 0, 44}, {7, 0, 4, 0, 43}, {7, 0, 4, 0, 42}, {7, 0, 4, 0, 41}, {7, 0, 4, 0, 40}, {7, 0, 3, 0, 51}, {7, 0, 3, 0, 50}, {7, 0, 3, 0, 48}, {7, 0, 3, 0, 47}, {7, 0, 3, 0, 46}, {7, 0, 3, 0, 44}, {7, 0, 3, 0, 43}, {7, 0, 3, 0, 42}, {7, 0, 3, 0, 41}, {3, 0, 3, 0, 56}, {3, 0, 3, 0, 54}, {3, 0, 3, 0, 53}, {3, 0, 3, 0, 51}, {3, 0, 3, 0, 50}, {3, 0, 3, 0, 48}, {3, 0, 3, 0, 47}, {3, 0, 3, 0, 46}, {3, 0, 3, 0, 44}, {3, 0, 3, 0, 43}, {3, 0, 3, 0, 42}, {3, 0, 3, 0, 41}, {3, 0, 3, 0, 39}, {3, 0, 3, 0, 38}, {3, 0, 3, 0, 37}, {3, 0, 3, 0, 36}, {3, 0, 3, 0, 35}, {3, 0, 3, 0, 34}, }; const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = { {255, 255, 0xf0, 0, 152}, {255, 255, 0xf0, 0, 147}, {255, 255, 0xf0, 0, 143}, {255, 255, 0xf0, 0, 139}, {255, 255, 0xf0, 0, 135}, {255, 255, 0xf0, 0, 131}, {255, 255, 0xf0, 0, 128}, {255, 255, 0xf0, 0, 124}, {255, 255, 0xf0, 0, 121}, {255, 255, 0xf0, 0, 117}, {255, 255, 0xf0, 0, 114}, {255, 255, 0xf0, 0, 111}, {255, 255, 0xf0, 0, 107}, {255, 255, 0xf0, 0, 104}, {255, 255, 0xf0, 0, 101}, {255, 255, 0xf0, 0, 99}, {255, 255, 0xf0, 0, 96}, {255, 255, 0xf0, 0, 93}, {255, 255, 0xf0, 0, 90}, {255, 255, 0xf0, 0, 88}, {255, 255, 0xf0, 0, 85}, {255, 255, 0xf0, 0, 83}, {255, 255, 0xf0, 0, 81}, {255, 255, 0xf0, 0, 78}, {255, 255, 0xf0, 0, 76}, {255, 255, 0xf0, 0, 74}, {255, 255, 0xf0, 0, 72}, {255, 255, 0xf0, 0, 70}, {255, 255, 0xf0, 0, 68}, {255, 255, 0xf0, 0, 66}, {255, 255, 0xf0, 0, 64}, {255, 248, 0xf0, 0, 64}, {255, 241, 0xf0, 0, 64}, {255, 251, 0xe0, 0, 64}, {255, 244, 0xe0, 0, 64}, {255, 254, 0xd0, 0, 64}, {255, 246, 0xd0, 0, 64}, {255, 239, 0xd0, 0, 64}, {255, 249, 0xc0, 0, 64}, {255, 242, 0xc0, 0, 64}, {255, 255, 0xb0, 0, 64}, {255, 248, 0xb0, 0, 64}, {255, 241, 0xb0, 0, 64}, {255, 254, 0xa0, 0, 64}, {255, 246, 0xa0, 0, 64}, {255, 239, 0xa0, 0, 64}, {255, 255, 0x90, 0, 64}, {255, 248, 0x90, 0, 64}, {255, 241, 0x90, 0, 64}, {255, 234, 0x90, 0, 64}, {255, 255, 0x80, 0, 64}, {255, 248, 0x80, 0, 64}, {255, 241, 0x80, 0, 64}, {255, 234, 0x80, 0, 64}, {255, 255, 0x70, 0, 64}, {255, 248, 0x70, 0, 64}, {255, 241, 0x70, 0, 64}, {255, 234, 0x70, 0, 64}, {255, 227, 0x70, 0, 64}, {255, 221, 0x70, 0, 64}, {255, 215, 0x70, 0, 64}, {255, 208, 0x70, 0, 64}, {255, 203, 0x70, 0, 64}, {255, 197, 0x70, 0, 64}, {255, 255, 0x60, 0, 64}, {255, 248, 0x60, 0, 64}, {255, 241, 
0x60, 0, 64}, {255, 234, 0x60, 0, 64}, {255, 227, 0x60, 0, 64}, {255, 221, 0x60, 0, 64}, {255, 255, 0x50, 0, 64}, {255, 248, 0x50, 0, 64}, {255, 241, 0x50, 0, 64}, {255, 234, 0x50, 0, 64}, {255, 227, 0x50, 0, 64}, {255, 221, 0x50, 0, 64}, {255, 215, 0x50, 0, 64}, {255, 208, 0x50, 0, 64}, {255, 255, 0x40, 0, 64}, {255, 248, 0x40, 0, 64}, {255, 241, 0x40, 0, 64}, {255, 234, 0x40, 0, 64}, {255, 227, 0x40, 0, 64}, {255, 221, 0x40, 0, 64}, {255, 215, 0x40, 0, 64}, {255, 208, 0x40, 0, 64}, {255, 203, 0x40, 0, 64}, {255, 197, 0x40, 0, 64}, {255, 255, 0x30, 0, 64}, {255, 248, 0x30, 0, 64}, {255, 241, 0x30, 0, 64}, {255, 234, 0x30, 0, 64}, {255, 227, 0x30, 0, 64}, {255, 221, 0x30, 0, 64}, {255, 215, 0x30, 0, 64}, {255, 208, 0x30, 0, 64}, {255, 203, 0x30, 0, 64}, {255, 197, 0x30, 0, 64}, {255, 191, 0x30, 0, 64}, {255, 186, 0x30, 0, 64}, {255, 181, 0x30, 0, 64}, {255, 175, 0x30, 0, 64}, {255, 255, 0x20, 0, 64}, {255, 248, 0x20, 0, 64}, {255, 241, 0x20, 0, 64}, {255, 234, 0x20, 0, 64}, {255, 227, 0x20, 0, 64}, {255, 221, 0x20, 0, 64}, {255, 215, 0x20, 0, 64}, {255, 208, 0x20, 0, 64}, {255, 203, 0x20, 0, 64}, {255, 197, 0x20, 0, 64}, {255, 191, 0x20, 0, 64}, {255, 186, 0x20, 0, 64}, {255, 181, 0x20, 0, 64}, {255, 175, 0x20, 0, 64}, {255, 170, 0x20, 0, 64}, {255, 166, 0x20, 0, 64}, {255, 161, 0x20, 0, 64}, {255, 156, 0x20, 0, 64}, {255, 152, 0x20, 0, 64}, {255, 148, 0x20, 0, 64}, {255, 143, 0x20, 0, 64}, {255, 139, 0x20, 0, 64}, {255, 135, 0x20, 0, 64}, {255, 132, 0x20, 0, 64}, {255, 255, 0x10, 0, 64}, {255, 248, 0x10, 0, 64}, };
allan888/Linux_kernel_asynchronous
drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
C
gpl-2.0
43,852
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * identify.c: identify machine by looking up system identifier
 *
 * Copyright (C) 1998 Thomas Bogendoerfer
 *
 * This code is based on arch/mips/sgi/kernel/system.c, which is
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>

#include <asm/sgialib.h>
#include <asm/bootinfo.h>

struct smatch {
	char *arcname;
	char *liname;
	int flags;
};

static struct smatch mach_table[] = {
	{
		.arcname = "SGI-IP22",
		.liname = "SGI Indy",
		.flags = PROM_FLAG_ARCS,
	},
	{
		.arcname = "SGI-IP27",
		.liname = "SGI Origin",
		.flags = PROM_FLAG_ARCS,
	},
	{
		.arcname = "SGI-IP28",
		.liname = "SGI IP28",
		.flags = PROM_FLAG_ARCS,
	},
	{
		.arcname = "SGI-IP30",
		.liname = "SGI Octane",
		.flags = PROM_FLAG_ARCS,
	},
	{
		.arcname = "SGI-IP32",
		.liname = "SGI O2",
		.flags = PROM_FLAG_ARCS,
	},
	{
		.arcname = "Microsoft-Jazz",
		.liname = "Jazz MIPS_Magnum_4000",
		.flags = 0,
	},
	{
		.arcname = "PICA-61",
		.liname = "Jazz Acer_PICA_61",
		.flags = 0,
	},
	{
		.arcname = "RM200PCI",
		.liname = "SNI RM200_PCI",
		.flags = PROM_FLAG_DONT_FREE_TEMP,
	},
	{
		.arcname = "RM200PCI-R5K",
		.liname = "SNI RM200_PCI-R5K",
		.flags = PROM_FLAG_DONT_FREE_TEMP,
	}
};

int prom_flags;

static struct smatch * __init string_to_mach(const char *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mach_table); i++) {
		if (!strcmp(s, mach_table[i].arcname))
			return &mach_table[i];
	}

	panic("Yeee, could not determine architecture type <%s>", s);
}

char *system_type;

const char *get_system_type(void)
{
	return system_type;
}

void __init prom_identify_arch(void)
{
	pcomponent *p;
	struct smatch *mach;
	const char *iname;

	/*
	 * The root component tells us what machine architecture we have here.
	 */
	p = ArcGetChild(PROM_NULL_COMPONENT);
	if (p == NULL) {
#ifdef CONFIG_SGI_IP27
		/* IP27 PROM misbehaves, seems to not implement ARC
		   GetChild(). So we just assume it's an IP27. */
		iname = "SGI-IP27";
#else
		iname = "Unknown";
#endif
	} else
		iname = (char *) (long) p->iname;

	printk("ARCH: %s\n", iname);

	mach = string_to_mach(iname);
	system_type = mach->liname;

	prom_flags = mach->flags;
}
linuxandroid/kernel
arch/mips/fw/arc/identify.c
C
gpl-2.0
2,420
/*
 * arch/sh/kernel/cpu/sh2a/clock-sh7201.c
 *
 * SH7201 support for the clock framework
 *
 * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk>
 *
 * Based on clock-sh4.c
 * Copyright (C) 2005 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>

static const int pll1rate[]={1,2,3,4,6,8};
static const int pfc_divisors[]={1,2,3,4,6,8,12};
#define ifc_divisors pfc_divisors

static unsigned int pll2_mult;

static void master_clk_init(struct clk *clk)
{
	clk->rate = 10000000 * pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}

static struct clk_ops sh7201_master_clk_ops = {
	.init = master_clk_init,
};

static unsigned long module_clk_recalc(struct clk *clk)
{
	int idx = (__raw_readw(FREQCR) & 0x0007);
	return clk->parent->rate / pfc_divisors[idx];
}

static struct clk_ops sh7201_module_clk_ops = {
	.recalc = module_clk_recalc,
};

static unsigned long bus_clk_recalc(struct clk *clk)
{
	int idx = (__raw_readw(FREQCR) & 0x0007);
	return clk->parent->rate / pfc_divisors[idx];
}

static struct clk_ops sh7201_bus_clk_ops = {
	.recalc = bus_clk_recalc,
};

static unsigned long cpu_clk_recalc(struct clk *clk)
{
	int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
	return clk->parent->rate / ifc_divisors[idx];
}

static struct clk_ops sh7201_cpu_clk_ops = {
	.recalc = cpu_clk_recalc,
};

static struct clk_ops *sh7201_clk_ops[] = {
	&sh7201_master_clk_ops,
	&sh7201_module_clk_ops,
	&sh7201_bus_clk_ops,
	&sh7201_cpu_clk_ops,
};

void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
{
	if (test_mode_pin(MODE_PIN1 | MODE_PIN0))
		pll2_mult = 1;
	else if (test_mode_pin(MODE_PIN1))
		pll2_mult = 2;
	else
		pll2_mult = 4;

	if (idx < ARRAY_SIZE(sh7201_clk_ops))
		*ops = sh7201_clk_ops[idx];
}
Racing1/msm8660-common
arch/sh/kernel/cpu/sh2a/clock-sh7201.c
C
gpl-2.0
1,999
/* * Watchdog driver for Cirrus Logic EP93xx family of devices. * * Copyright (c) 2004 Ray Lehtiniemi * Copyright (c) 2006 Tower Technologies * Based on ep93xx driver, bits from alim7101_wdt.c * * Authors: Ray Lehtiniemi <rayl@mail.com>, * Alessandro Zummo <a.zummo@towertech.it> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * This watchdog fires after 250msec, which is a too short interval * for us to rely on the user space daemon alone. So we ping the * wdt each ~200msec and eventually stop doing it if the user space * daemon dies. * * TODO: * * - Test last reset from watchdog status * - Add a few missing ioctls */ #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <linux/io.h> #include <mach/hardware.h> #define WDT_VERSION "0.3" #define PFX "ep93xx_wdt: " /* default timeout (secs) */ #define WDT_TIMEOUT 30 static int nowayout = WATCHDOG_NOWAYOUT; static int timeout = WDT_TIMEOUT; static struct timer_list timer; static unsigned long next_heartbeat; static unsigned long wdt_status; static unsigned long boot_status; #define WDT_IN_USE 0 #define WDT_OK_TO_CLOSE 1 #define EP93XX_WDT_REG(x) (EP93XX_WATCHDOG_BASE + (x)) #define EP93XX_WDT_WATCHDOG EP93XX_WDT_REG(0x00) #define EP93XX_WDT_WDSTATUS EP93XX_WDT_REG(0x04) /* reset the wdt every ~200ms */ #define WDT_INTERVAL (HZ/5) static void wdt_enable(void) { __raw_writew(0xaaaa, EP93XX_WDT_WATCHDOG); } static void wdt_disable(void) { __raw_writew(0xaa55, EP93XX_WDT_WATCHDOG); } static inline void wdt_ping(void) { __raw_writew(0x5555, EP93XX_WDT_WATCHDOG); } static void wdt_startup(void) { next_heartbeat = jiffies + (timeout * HZ); wdt_enable(); mod_timer(&timer, jiffies + WDT_INTERVAL); } static void wdt_shutdown(void) { del_timer_sync(&timer); wdt_disable(); } static void wdt_keepalive(void) { /* user land ping */ next_heartbeat = jiffies + (timeout * HZ); } static int ep93xx_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(WDT_IN_USE, &wdt_status)) return -EBUSY; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); wdt_startup(); return nonseekable_open(inode, file); } static ssize_t ep93xx_wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') set_bit(WDT_OK_TO_CLOSE, &wdt_status); else clear_bit(WDT_OK_TO_CLOSE, &wdt_status); } } wdt_keepalive(); } return len; } static const struct watchdog_info ident = { .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE, .identity = "EP93xx Watchdog", }; static long ep93xx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; switch (cmd) { case WDIOC_GETSUPPORT: ret = copy_to_user((struct watchdog_info __user *)arg, &ident, sizeof(ident)) ? -EFAULT : 0; break; case WDIOC_GETSTATUS: ret = put_user(0, (int __user *)arg); break; case WDIOC_GETBOOTSTATUS: ret = put_user(boot_status, (int __user *)arg); break; case WDIOC_KEEPALIVE: wdt_keepalive(); ret = 0; break; case WDIOC_GETTIMEOUT: /* actually, it is 0.250 seconds.... 
*/ ret = put_user(1, (int __user *)arg); break; } return ret; } static int ep93xx_wdt_release(struct inode *inode, struct file *file) { if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) wdt_shutdown(); else printk(KERN_CRIT PFX "Device closed unexpectedly - timer will not stop\n"); clear_bit(WDT_IN_USE, &wdt_status); clear_bit(WDT_OK_TO_CLOSE, &wdt_status); return 0; } static const struct file_operations ep93xx_wdt_fops = { .owner = THIS_MODULE, .write = ep93xx_wdt_write, .unlocked_ioctl = ep93xx_wdt_ioctl, .open = ep93xx_wdt_open, .release = ep93xx_wdt_release, .llseek = no_llseek, }; static struct miscdevice ep93xx_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &ep93xx_wdt_fops, }; static void ep93xx_timer_ping(unsigned long data) { if (time_before(jiffies, next_heartbeat)) wdt_ping(); /* Re-set the timer interval */ mod_timer(&timer, jiffies + WDT_INTERVAL); } static int __init ep93xx_wdt_init(void) { int err; err = misc_register(&ep93xx_wdt_miscdev); boot_status = __raw_readl(EP93XX_WDT_WATCHDOG) & 0x01 ? 1 : 0; printk(KERN_INFO PFX "EP93XX watchdog, driver version " WDT_VERSION "%s\n", (__raw_readl(EP93XX_WDT_WATCHDOG) & 0x08) ? " (nCS1 disable detected)" : ""); if (timeout < 1 || timeout > 3600) { timeout = WDT_TIMEOUT; printk(KERN_INFO PFX "timeout value must be 1<=x<=3600, using %d\n", timeout); } setup_timer(&timer, ep93xx_timer_ping, 1); return err; } static void __exit ep93xx_wdt_exit(void) { wdt_shutdown(); misc_deregister(&ep93xx_wdt_miscdev); } module_init(ep93xx_wdt_init); module_exit(ep93xx_wdt_exit); module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<=3600, default=" __MODULE_STRING(WDT_TIMEOUT) ")"); MODULE_AUTHOR("Ray Lehtiniemi <rayl@mail.com>," "Alessandro Zummo <a.zummo@towertech.it>"); MODULE_DESCRIPTION("EP93xx Watchdog"); MODULE_LICENSE("GPL"); MODULE_VERSION(WDT_VERSION); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
CyanogenMod/android_kernel_motorola_omap4-kexec-common
drivers/watchdog/ep93xx_wdt.c
C
gpl-2.0
5,641
/* * * general timer device for using in ISDN stacks * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/poll.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mISDNif.h> #include <linux/mutex.h> #include "core.h" static DEFINE_MUTEX(mISDN_mutex); static u_int *debug; struct mISDNtimerdev { int next_id; struct list_head pending; struct list_head expired; wait_queue_head_t wait; u_int work; spinlock_t lock; /* protect lists */ }; struct mISDNtimer { struct list_head list; struct mISDNtimerdev *dev; struct timer_list tl; int id; }; static int mISDN_open(struct inode *ino, struct file *filep) { struct mISDNtimerdev *dev; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep); dev = kmalloc(sizeof(struct mISDNtimerdev) , GFP_KERNEL); if (!dev) return -ENOMEM; dev->next_id = 1; INIT_LIST_HEAD(&dev->pending); INIT_LIST_HEAD(&dev->expired); spin_lock_init(&dev->lock); dev->work = 0; init_waitqueue_head(&dev->wait); filep->private_data = dev; __module_get(THIS_MODULE); return nonseekable_open(ino, filep); } static int mISDN_close(struct inode *ino, struct file *filep) { struct mISDNtimerdev *dev = filep->private_data; struct mISDNtimer *timer, *next; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep); list_for_each_entry_safe(timer, next, &dev->pending, list) { del_timer(&timer->tl); kfree(timer); } list_for_each_entry_safe(timer, next, &dev->expired, list) { kfree(timer); } kfree(dev); module_put(THIS_MODULE); return 0; } static ssize_t mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off) { struct mISDNtimerdev *dev = filep->private_data; struct mISDNtimer *timer; u_long flags; int ret = 0; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__, filep, buf, (int)count, off); if (list_empty(&dev->expired) && (dev->work == 0)) { if (filep->f_flags & O_NONBLOCK) return -EAGAIN; wait_event_interruptible(dev->wait, (dev->work || !list_empty(&dev->expired))); if (signal_pending(current)) return -ERESTARTSYS; } if (count < sizeof(int)) return -ENOSPC; if (dev->work) dev->work = 0; if (!list_empty(&dev->expired)) { spin_lock_irqsave(&dev->lock, flags); timer = (struct mISDNtimer *)dev->expired.next; list_del(&timer->list); spin_unlock_irqrestore(&dev->lock, flags); if (put_user(timer->id, (int __user *)buf)) ret = -EFAULT; else ret = sizeof(int); kfree(timer); } return ret; } static unsigned int mISDN_poll(struct file *filep, poll_table *wait) { struct mISDNtimerdev *dev = filep->private_data; unsigned int mask = POLLERR; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait); if (dev) { poll_wait(filep, &dev->wait, wait); mask = 0; if (dev->work || !list_empty(&dev->expired)) mask |= (POLLIN | POLLRDNORM); if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__, dev->work, list_empty(&dev->expired)); } return mask; } static void dev_expire_timer(unsigned long data) { 
struct mISDNtimer *timer = (void *)data; u_long flags; spin_lock_irqsave(&timer->dev->lock, flags); list_move_tail(&timer->list, &timer->dev->expired); spin_unlock_irqrestore(&timer->dev->lock, flags); wake_up_interruptible(&timer->dev->wait); } static int misdn_add_timer(struct mISDNtimerdev *dev, int timeout) { int id; u_long flags; struct mISDNtimer *timer; if (!timeout) { dev->work = 1; wake_up_interruptible(&dev->wait); id = 0; } else { timer = kzalloc(sizeof(struct mISDNtimer), GFP_KERNEL); if (!timer) return -ENOMEM; spin_lock_irqsave(&dev->lock, flags); timer->id = dev->next_id++; if (dev->next_id < 0) dev->next_id = 1; list_add_tail(&timer->list, &dev->pending); spin_unlock_irqrestore(&dev->lock, flags); timer->dev = dev; timer->tl.data = (long)timer; timer->tl.function = dev_expire_timer; init_timer(&timer->tl); timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000); add_timer(&timer->tl); id = timer->id; } return id; } static int misdn_del_timer(struct mISDNtimerdev *dev, int id) { u_long flags; struct mISDNtimer *timer; int ret = 0; spin_lock_irqsave(&dev->lock, flags); list_for_each_entry(timer, &dev->pending, list) { if (timer->id == id) { list_del_init(&timer->list); /* RED-PEN AK: race -- timer can be still running on * other CPU. Needs reference count I think */ del_timer(&timer->tl); ret = timer->id; kfree(timer); goto unlock; } } unlock: spin_unlock_irqrestore(&dev->lock, flags); return ret; } static long mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct mISDNtimerdev *dev = filep->private_data; int id, tout, ret = 0; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__, filep, cmd, arg); mutex_lock(&mISDN_mutex); switch (cmd) { case IMADDTIMER: if (get_user(tout, (int __user *)arg)) { ret = -EFAULT; break; } id = misdn_add_timer(dev, tout); if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s add %d id %d\n", __func__, tout, id); if (id < 0) { ret = id; break; } if (put_user(id, (int __user *)arg)) ret = -EFAULT; break; case IMDELTIMER: if (get_user(id, (int __user *)arg)) { ret = -EFAULT; break; } if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s del id %d\n", __func__, id); id = misdn_del_timer(dev, id); if (put_user(id, (int __user *)arg)) ret = -EFAULT; break; default: ret = -EINVAL; } mutex_unlock(&mISDN_mutex); return ret; } static const struct file_operations mISDN_fops = { .read = mISDN_read, .poll = mISDN_poll, .unlocked_ioctl = mISDN_ioctl, .open = mISDN_open, .release = mISDN_close, .llseek = no_llseek, }; static struct miscdevice mISDNtimer = { .minor = MISC_DYNAMIC_MINOR, .name = "mISDNtimer", .fops = &mISDN_fops, }; int mISDN_inittimer(u_int *deb) { int err; debug = deb; err = misc_register(&mISDNtimer); if (err) printk(KERN_WARNING "mISDN: Could not register timer device\n"); return err; } void mISDN_timer_cleanup(void) { misc_deregister(&mISDNtimer); }
dzo/kernel_ville
drivers/isdn/mISDN/timerdev.c
C
gpl-2.0
6,838
/* * pnpacpi -- PnP ACPI driver * * Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr> * Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/export.h> #include <linux/acpi.h> #include <linux/pnp.h> #include <linux/slab.h> #include <linux/mod_devicetable.h> #include <acpi/acpi_bus.h> #include "../base.h" #include "pnpacpi.h" static int num; /* We need only to blacklist devices that have already an acpi driver that * can't use pnp layer. We don't need to blacklist device that are directly * used by the kernel (PCI root, ...), as it is harmless and there were * already present in pnpbios. But there is an exception for devices that * have irqs (PIC, Timer) because we call acpi_register_gsi. * Finally, only devices that have a CRS method need to be in this list. */ static struct acpi_device_id excluded_id_list[] __initdata = { {"PNP0C09", 0}, /* EC */ {"PNP0C0F", 0}, /* Link device */ {"PNP0000", 0}, /* PIC */ {"PNP0100", 0}, /* Timer */ {"", 0}, }; static inline int __init is_exclusive_device(struct acpi_device *dev) { return (!acpi_match_device_ids(dev, excluded_id_list)); } /* * Compatible Device IDs */ #define TEST_HEX(c) \ if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ return 0 #define TEST_ALPHA(c) \ if (!('@' <= (c) || (c) <= 'Z')) \ return 0 static int __init ispnpidacpi(const char *id) { TEST_ALPHA(id[0]); TEST_ALPHA(id[1]); TEST_ALPHA(id[2]); TEST_HEX(id[3]); TEST_HEX(id[4]); TEST_HEX(id[5]); TEST_HEX(id[6]); if (id[7] != '\0') return 0; return 1; } static int pnpacpi_get_resources(struct pnp_dev *dev) { pnp_dbg(&dev->dev, "get resources\n"); return pnpacpi_parse_allocated_resource(dev); } static int pnpacpi_set_resources(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle; struct acpi_buffer buffer; int ret; pnp_dbg(&dev->dev, "set resources\n"); handle = DEVICE_ACPI_HANDLE(&dev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return -ENODEV; } ret = pnpacpi_build_resource_template(dev, &buffer); if (ret) return ret; ret = pnpacpi_encode_resources(dev, &buffer); if (ret) { kfree(buffer.pointer); return ret; } if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) ret = -EINVAL; else if (acpi_bus_power_manageable(handle)) ret = acpi_bus_set_power(handle, ACPI_STATE_D0); kfree(buffer.pointer); return ret; } static int pnpacpi_disable_resources(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle; int ret; dev_dbg(&dev->dev, "disable resources\n"); handle = DEVICE_ACPI_HANDLE(&dev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; } /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ ret = 0; if (acpi_bus_power_manageable(handle)) 
acpi_bus_set_power(handle, ACPI_STATE_D3); /* continue even if acpi_bus_set_power() fails */ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) ret = -ENODEV; return ret; } #ifdef CONFIG_ACPI_SLEEP static bool pnpacpi_can_wakeup(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle; handle = DEVICE_ACPI_HANDLE(&dev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return false; } return acpi_bus_can_wakeup(handle); } static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) { struct acpi_device *acpi_dev; acpi_handle handle; int error = 0; handle = DEVICE_ACPI_HANDLE(&dev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return 0; } if (device_can_wakeup(&dev->dev)) { error = acpi_pm_device_sleep_wake(&dev->dev, device_may_wakeup(&dev->dev)); if (error) return error; } if (acpi_bus_power_manageable(handle)) { int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); if (power_state < 0) power_state = (state.event == PM_EVENT_ON) ? ACPI_STATE_D0 : ACPI_STATE_D3; /* * acpi_bus_set_power() often fails (keyboard port can't be * powered-down?), and in any case, our return value is ignored * by pnp_bus_suspend(). Hence we don't revert the wakeup * setting if the set_power fails. */ error = acpi_bus_set_power(handle, power_state); } return error; } static int pnpacpi_resume(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); int error = 0; if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); return -ENODEV; } if (device_may_wakeup(&dev->dev)) acpi_pm_device_sleep_wake(&dev->dev, false); if (acpi_bus_power_manageable(handle)) error = acpi_bus_set_power(handle, ACPI_STATE_D0); return error; } #endif struct pnp_protocol pnpacpi_protocol = { .name = "Plug and Play ACPI", .get = pnpacpi_get_resources, .set = pnpacpi_set_resources, .disable = pnpacpi_disable_resources, #ifdef CONFIG_ACPI_SLEEP .can_wakeup = pnpacpi_can_wakeup, .suspend = pnpacpi_suspend, .resume = pnpacpi_resume, #endif }; EXPORT_SYMBOL(pnpacpi_protocol); static char *__init pnpacpi_get_id(struct acpi_device *device) { struct acpi_hardware_id *id; list_for_each_entry(id, &device->pnp.ids, list) { if (ispnpidacpi(id->id)) return id->id; } return NULL; } static int __init pnpacpi_add_device(struct acpi_device *device) { acpi_handle temp = NULL; acpi_status status; struct pnp_dev *dev; char *pnpid; struct acpi_hardware_id *id; /* * If a PnPacpi device is not present , the device * driver should not be loaded. 
*/ status = acpi_get_handle(device->handle, "_CRS", &temp); if (ACPI_FAILURE(status)) return 0; pnpid = pnpacpi_get_id(device); if (!pnpid) return 0; if (is_exclusive_device(device) || !device->status.present) return 0; dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid); if (!dev) return -ENOMEM; dev->data = device; /* .enabled means the device can decode the resources */ dev->active = device->status.enabled; status = acpi_get_handle(device->handle, "_SRS", &temp); if (ACPI_SUCCESS(status)) dev->capabilities |= PNP_CONFIGURABLE; dev->capabilities |= PNP_READ; if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE)) dev->capabilities |= PNP_WRITE; if (device->flags.removable) dev->capabilities |= PNP_REMOVABLE; status = acpi_get_handle(device->handle, "_DIS", &temp); if (ACPI_SUCCESS(status)) dev->capabilities |= PNP_DISABLE; if (strlen(acpi_device_name(device))) strncpy(dev->name, acpi_device_name(device), sizeof(dev->name)); else strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name)); if (dev->active) pnpacpi_parse_allocated_resource(dev); if (dev->capabilities & PNP_CONFIGURABLE) pnpacpi_parse_resource_option_data(dev); list_for_each_entry(id, &device->pnp.ids, list) { if (!strcmp(id->id, pnpid)) continue; if (!ispnpidacpi(id->id)) continue; pnp_add_id(dev, id->id); } /* clear out the damaged flags */ if (!dev->active) pnp_init_resources(dev); pnp_add_device(dev); num++; return AE_OK; } static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle, u32 lvl, void *context, void **rv) { struct acpi_device *device; if (!acpi_bus_get_device(handle, &device)) pnpacpi_add_device(device); else return AE_CTRL_DEPTH; return AE_OK; } static int __init acpi_pnp_match(struct device *dev, void *_pnp) { struct acpi_device *acpi = to_acpi_device(dev); struct pnp_dev *pnp = _pnp; struct device *physical_device; physical_device = acpi_get_physical_device(acpi->handle); if (physical_device) put_device(physical_device); /* true means it matched */ return !physical_device && compare_pnp_id(pnp->id, acpi_device_hid(acpi)); } static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle) { struct device *adev; struct acpi_device *acpi; adev = bus_find_device(&acpi_bus_type, NULL, to_pnp_dev(dev), acpi_pnp_match); if (!adev) return -ENODEV; acpi = to_acpi_device(adev); *handle = acpi->handle; put_device(adev); return 0; } /* complete initialization of a PNPACPI device includes having * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling. */ static struct acpi_bus_type __initdata acpi_pnp_bus = { .bus = &pnp_bus_type, .find_device = acpi_pnp_find_device, }; int pnpacpi_disabled __initdata; static int __init pnpacpi_init(void) { if (acpi_disabled || pnpacpi_disabled) { printk(KERN_INFO "pnp: PnP ACPI: disabled\n"); return 0; } printk(KERN_INFO "pnp: PnP ACPI init\n"); pnp_register_protocol(&pnpacpi_protocol); register_acpi_bus_type(&acpi_pnp_bus); acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL); printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num); unregister_acpi_bus_type(&acpi_pnp_bus); pnp_platform_devices = 1; return 0; } fs_initcall(pnpacpi_init); static int __init pnpacpi_setup(char *str) { if (str == NULL) return 1; if (!strncmp(str, "off", 3)) pnpacpi_disabled = 1; return 1; } __setup("pnpacpi=", pnpacpi_setup);
CyanogenMod/android_kernel_sony_msm8974pro
drivers/pnp/pnpacpi/core.c
C
gpl-2.0
10,105
<?php
/**
 * The template for displaying posts in the Gallery post format
 *
 * @package WordPress
 * @subpackage Twenty_Thirteen
 * @since Twenty Thirteen 1.0
 */
?>

<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>>
	<header class="entry-header">
		<?php if ( is_single() ) : ?>
		<h1 class="entry-title"><?php the_title(); ?></h1>
		<?php else : ?>
		<h1 class="entry-title">
			<a href="<?php the_permalink(); ?>" rel="bookmark"><?php the_title(); ?></a>
		</h1>
		<?php endif; // is_single() ?>
	</header><!-- .entry-header -->

	<div class="entry-content">
		<?php if ( is_single() || ! get_post_gallery() ) : ?>
			<?php the_content( __( 'Continue reading <span class="meta-nav">&rarr;</span>', 'twentythirteen' ) ); ?>
			<?php wp_link_pages( array( 'before' => '<div class="page-links"><span class="page-links-title">' . __( 'Pages:', 'twentythirteen' ) . '</span>', 'after' => '</div>', 'link_before' => '<span>', 'link_after' => '</span>' ) ); ?>
		<?php else : ?>
			<?php echo get_post_gallery(); ?>
		<?php endif; // is_single() ?>
	</div><!-- .entry-content -->

	<footer class="entry-meta">
		<?php twentythirteen_entry_meta(); ?>

		<?php if ( comments_open() && ! is_single() ) : ?>
			<span class="comments-link">
				<?php comments_popup_link( '<span class="leave-reply">' . __( 'Leave a comment', 'twentythirteen' ) . '</span>', __( 'One comment so far', 'twentythirteen' ), __( 'View all % comments', 'twentythirteen' ) ); ?>
			</span><!-- .comments-link -->
		<?php endif; // comments_open() ?>

		<?php edit_post_link( __( 'Edit', 'twentythirteen' ), '<span class="edit-link">', '</span>' ); ?>

		<?php if ( is_single() && get_the_author_meta( 'description' ) && is_multi_author() ) : ?>
			<?php get_template_part( 'author-bio' ); ?>
		<?php endif; ?>
	</footer><!-- .entry-meta -->
</article><!-- #post -->
DesignBP/Guymark
wp-content/themes/twentythirteen/content-gallery.php
PHP
gpl-2.0
1,843
/* * Copyright 2011, Siemens AG * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ /* * Based on patches from Jon Smirl <jonsmirl@gmail.com> * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* Jon's code is based on 6lowpan implementation for Contiki which is: * Copyright (c) 2008, Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include <linux/bitops.h> #include <linux/if_arp.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <net/af_ieee802154.h> #include <net/ieee802154.h> #include <net/ieee802154_netdev.h> #include <net/ipv6.h> #include "6lowpan.h" /* TTL uncompression values */ static const u8 lowpan_ttl_values[] = {0, 1, 64, 255}; static LIST_HEAD(lowpan_devices); /* * Uncompression of linklocal: * 0 -> 16 bytes from packet * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet * 2 -> 2 bytes from prefix - zeroes + 2 from packet * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr * * NOTE: => the uncompress function does change 0xf to 0x10 * NOTE: 0x00 => no-autoconfig => unspecified */ static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20}; /* * Uncompression of ctx-based: * 0 -> 0 bits from packet [unspecified / reserved] * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet * 2 -> 8 bytes from prefix - zeroes + 2 from packet * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr */ static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80}; /* * Uncompression of ctx-base * 0 -> 0 bits from packet * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet * 2 -> 2 bytes from prefix - zeroes + 3 from packet * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr */ static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21}; /* Link local prefix */ static const u8 lowpan_llprefix[] = {0xfe, 0x80}; /* private device info */ struct lowpan_dev_info { struct net_device *real_dev; /* real WPAN device ptr */ struct mutex dev_list_mtx; /* mutex for list ops */ }; struct lowpan_dev_record { struct net_device *ldev; struct list_head list; }; struct lowpan_fragment { struct sk_buff *skb; /* skb to be assembled */ spinlock_t lock; /* concurency lock */ u16 length; /* length to be assemled */ u32 bytes_rcv; /* bytes received */ u16 tag; /* current fragment tag */ struct timer_list timer; /* assembling timer */ struct list_head list; /* fragments list */ }; static unsigned short fragment_tag; static LIST_HEAD(lowpan_fragments); spinlock_t flist_lock; static inline struct lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) { return netdev_priv(dev); } static inline void lowpan_address_flip(u8 *src, u8 *dest) { int i; for (i = 0; i < IEEE802154_ADDR_LEN; i++) (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i]; } /* list of all 6lowpan devices, uses for package delivering */ /* print data in line */ static inline void lowpan_raw_dump_inline(const char *caller, char *msg, unsigned char *buf, int len) { #ifdef DEBUG if (msg) pr_debug("(%s) %s: ", caller, msg); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1, buf, len, false); #endif /* DEBUG */ } /* * print data in a table format: * * addr: xx xx xx xx xx xx * addr: xx xx xx xx xx xx * ... 
*/ static inline void lowpan_raw_dump_table(const char *caller, char *msg, unsigned char *buf, int len) { #ifdef DEBUG if (msg) pr_debug("(%s) %s:\n", caller, msg); print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); #endif /* DEBUG */ } static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr, const unsigned char *lladdr) { u8 val = 0; if (is_addr_mac_addr_based(ipaddr, lladdr)) val = 3; /* 0-bits */ else if (lowpan_is_iid_16_bit_compressable(ipaddr)) { /* compress IID to 16 bits xxxx::XXXX */ memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2); *hc06_ptr += 2; val = 2; /* 16-bits */ } else { /* do not compress IID => xxxx::IID */ memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8); *hc06_ptr += 8; val = 1; /* 64-bits */ } return rol8(val, shift); } static void lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr) { memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN); /* second bit-flip (Universe/Local) is done according RFC2464 */ ipaddr->s6_addr[8] ^= 0x02; } /* * Uncompress addresses based on a prefix and a postfix with zeroes in * between. If the postfix is zero in length it will use the link address * to configure the IP address (autoconf style). * pref_post_count takes a byte where the first nibble specify prefix count * and the second postfix count (NOTE: 15/0xf => 16 bytes copy). */ static int lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, u8 const *prefix, u8 pref_post_count, unsigned char *lladdr) { u8 prefcount = pref_post_count >> 4; u8 postcount = pref_post_count & 0x0f; /* full nibble 15 => 16 */ prefcount = (prefcount == 15 ? 16 : prefcount); postcount = (postcount == 15 ? 16 : postcount); if (lladdr) lowpan_raw_dump_inline(__func__, "linklocal address", lladdr, IEEE802154_ALEN); if (prefcount > 0) memcpy(ipaddr, prefix, prefcount); if (prefcount + postcount < 16) memset(&ipaddr->s6_addr[prefcount], 0, 16 - (prefcount + postcount)); if (postcount > 0) { memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount); skb_pull(skb, postcount); } else if (prefcount > 0) { if (lladdr == NULL) return -EINVAL; /* no IID based configuration if no prefix and no data */ lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr); } pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount, postcount); lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16); return 0; } static void lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb) { struct udphdr *uh = udp_hdr(skb); pr_debug("(%s): UDP header compression\n", __func__); if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT) && ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT)) { pr_debug("(%s): both ports compression to 4 bits\n", __func__); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11; **(hc06_ptr + 1) = /* subtraction is faster */ (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) + ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4)); *hc06_ptr += 2; } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) == LOWPAN_NHC_UDP_8BIT_PORT) { pr_debug("(%s): remove 8 bits of dest\n", __func__); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01; memcpy(*hc06_ptr + 1, &uh->source, 2); **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT); *hc06_ptr += 4; } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) == LOWPAN_NHC_UDP_8BIT_PORT) { pr_debug("(%s): remove 8 bits of source\n", __func__); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10; memcpy(*hc06_ptr + 1, &uh->dest, 2); **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT); *hc06_ptr += 4; } else { 
pr_debug("(%s): can't compress header\n", __func__); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00; memcpy(*hc06_ptr + 1, &uh->source, 2); memcpy(*hc06_ptr + 3, &uh->dest, 2); *hc06_ptr += 5; } /* checksum is always inline */ memcpy(*hc06_ptr, &uh->check, 2); *hc06_ptr += 2; } static u8 lowpan_fetch_skb_u8(struct sk_buff *skb) { u8 ret; ret = skb->data[0]; skb_pull(skb, 1); return ret; } static u16 lowpan_fetch_skb_u16(struct sk_buff *skb) { u16 ret; BUG_ON(!pskb_may_pull(skb, 2)); ret = skb->data[0] | (skb->data[1] << 8); skb_pull(skb, 2); return ret; } static int lowpan_uncompress_udp_header(struct sk_buff *skb) { struct udphdr *uh = udp_hdr(skb); u8 tmp; tmp = lowpan_fetch_skb_u8(skb); if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { pr_debug("(%s): UDP header uncompression\n", __func__); switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { case LOWPAN_NHC_UDP_CS_P_00: memcpy(&uh->source, &skb->data[0], 2); memcpy(&uh->dest, &skb->data[2], 2); skb_pull(skb, 4); break; case LOWPAN_NHC_UDP_CS_P_01: memcpy(&uh->source, &skb->data[0], 2); uh->dest = skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT; skb_pull(skb, 3); break; case LOWPAN_NHC_UDP_CS_P_10: uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT; memcpy(&uh->dest, &skb->data[1], 2); skb_pull(skb, 3); break; case LOWPAN_NHC_UDP_CS_P_11: uh->source = LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4); uh->dest = LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f); skb_pull(skb, 1); break; default: pr_debug("(%s) ERROR: unknown UDP format\n", __func__); goto err; break; } pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n", __func__, uh->source, uh->dest); /* copy checksum */ memcpy(&uh->check, &skb->data[0], 2); skb_pull(skb, 2); } else { pr_debug("(%s): ERROR: unsupported NH format\n", __func__); goto err; } return 0; err: return -EINVAL; } static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, const void *_saddr, unsigned len) { u8 tmp, iphc0, iphc1, *hc06_ptr; struct ipv6hdr *hdr; const u8 *saddr = _saddr; const u8 *daddr = _daddr; u8 *head; struct ieee802154_addr sa, da; if (type != ETH_P_IPV6) return 0; /* TODO: * if this package isn't ipv6 one, where should it be routed? */ head = kzalloc(100, GFP_KERNEL); if (head == NULL) return -ENOMEM; hdr = ipv6_hdr(skb); hc06_ptr = head + 2; pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n" "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr->version, ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit); lowpan_raw_dump_table(__func__, "raw skb network header dump", skb_network_header(skb), sizeof(struct ipv6hdr)); if (!saddr) saddr = dev->dev_addr; lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8); /* * As we copy some bit-length fields, in the IPHC encoding bytes, * we sometimes use |= * If the field is 0, and the current bit value in memory is 1, * this does not work. We therefore reset the IPHC encoding here */ iphc0 = LOWPAN_DISPATCH_IPHC; iphc1 = 0; /* TODO: context lookup */ lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8); /* * Traffic class, flow label * If flow label is 0, compress it. 
If traffic class is 0, compress it * We have to process both in the same time as the offset of traffic * class depends on the presence of version and flow label */ /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */ tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4); tmp = ((tmp & 0x03) << 6) | (tmp >> 2); if (((hdr->flow_lbl[0] & 0x0F) == 0) && (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) { /* flow label can be compressed */ iphc0 |= LOWPAN_IPHC_FL_C; if ((hdr->priority == 0) && ((hdr->flow_lbl[0] & 0xF0) == 0)) { /* compress (elide) all */ iphc0 |= LOWPAN_IPHC_TC_C; } else { /* compress only the flow label */ *hc06_ptr = tmp; hc06_ptr += 1; } } else { /* Flow label cannot be compressed */ if ((hdr->priority == 0) && ((hdr->flow_lbl[0] & 0xF0) == 0)) { /* compress only traffic class */ iphc0 |= LOWPAN_IPHC_TC_C; *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F); memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2); hc06_ptr += 3; } else { /* compress nothing */ memcpy(hc06_ptr, &hdr, 4); /* replace the top byte with new ECN | DSCP format */ *hc06_ptr = tmp; hc06_ptr += 4; } } /* NOTE: payload length is always compressed */ /* Next Header is compress if UDP */ if (hdr->nexthdr == UIP_PROTO_UDP) iphc0 |= LOWPAN_IPHC_NH_C; if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { *hc06_ptr = hdr->nexthdr; hc06_ptr += 1; } /* * Hop limit * if 1: compress, encoding is 01 * if 64: compress, encoding is 10 * if 255: compress, encoding is 11 * else do not compress */ switch (hdr->hop_limit) { case 1: iphc0 |= LOWPAN_IPHC_TTL_1; break; case 64: iphc0 |= LOWPAN_IPHC_TTL_64; break; case 255: iphc0 |= LOWPAN_IPHC_TTL_255; break; default: *hc06_ptr = hdr->hop_limit; break; } /* source address compression */ if (is_addr_unspecified(&hdr->saddr)) { pr_debug("(%s): source address is unspecified, setting SAC\n", __func__); iphc1 |= LOWPAN_IPHC_SAC; /* TODO: context lookup */ } else if (is_addr_link_local(&hdr->saddr)) { pr_debug("(%s): source address is link-local\n", __func__); iphc1 |= lowpan_compress_addr_64(&hc06_ptr, LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr); } else { pr_debug("(%s): send the full source address\n", __func__); memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16); hc06_ptr += 16; } /* destination address compression */ if (is_addr_mcast(&hdr->daddr)) { pr_debug("(%s): destination address is multicast", __func__); iphc1 |= LOWPAN_IPHC_M; if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) { pr_debug("compressed to 1 octet\n"); iphc1 |= LOWPAN_IPHC_DAM_11; /* use last byte */ *hc06_ptr = hdr->daddr.s6_addr[15]; hc06_ptr += 1; } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) { pr_debug("compressed to 4 octets\n"); iphc1 |= LOWPAN_IPHC_DAM_10; /* second byte + the last three */ *hc06_ptr = hdr->daddr.s6_addr[1]; memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3); hc06_ptr += 4; } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) { pr_debug("compressed to 6 octets\n"); iphc1 |= LOWPAN_IPHC_DAM_01; /* second byte + the last five */ *hc06_ptr = hdr->daddr.s6_addr[1]; memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5); hc06_ptr += 6; } else { pr_debug("using full address\n"); iphc1 |= LOWPAN_IPHC_DAM_00; memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16); hc06_ptr += 16; } } else { pr_debug("(%s): destination address is unicast: ", __func__); /* TODO: context lookup */ if (is_addr_link_local(&hdr->daddr)) { pr_debug("destination address is link-local\n"); iphc1 |= lowpan_compress_addr_64(&hc06_ptr, LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr); } else { pr_debug("using full address\n"); 
memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16); hc06_ptr += 16; } } /* UDP header compression */ if (hdr->nexthdr == UIP_PROTO_UDP) lowpan_compress_udp_header(&hc06_ptr, skb); head[0] = iphc0; head[1] = iphc1; skb_pull(skb, sizeof(struct ipv6hdr)); memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head); kfree(head); lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); /* * NOTE1: I'm still unsure about the fact that compression and WPAN * header are created here and not later in the xmit. So wait for * an opinion of net maintainers. */ /* * NOTE2: to be absolutely correct, we must derive PANid information * from MAC subif of the 'dev' and 'real_dev' network devices, but * this isn't implemented in mainline yet, so currently we assign 0xff */ { /* prepare wpan address data */ sa.addr_type = IEEE802154_ADDR_LONG; sa.pan_id = 0xff; da.addr_type = IEEE802154_ADDR_LONG; da.pan_id = 0xff; memcpy(&(da.hwaddr), daddr, 8); memcpy(&(sa.hwaddr), saddr, 8); mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, type, (void *)&da, (void *)&sa, skb->len); } } static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr) { struct sk_buff *new; struct lowpan_dev_record *entry; int stat = NET_RX_SUCCESS; new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb), GFP_ATOMIC); kfree_skb(skb); if (!new) return -ENOMEM; skb_push(new, sizeof(struct ipv6hdr)); skb_reset_network_header(new); skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr)); new->protocol = htons(ETH_P_IPV6); new->pkt_type = PACKET_HOST; rcu_read_lock(); list_for_each_entry_rcu(entry, &lowpan_devices, list) if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) { skb = skb_copy(new, GFP_ATOMIC); if (!skb) { stat = -ENOMEM; break; } skb->dev = entry->ldev; stat = netif_rx(skb); } rcu_read_unlock(); kfree_skb(new); return stat; } static void lowpan_fragment_timer_expired(unsigned long entry_addr) { struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr; pr_debug("%s: timer expired for frame with tag %d\n", __func__, entry->tag); spin_lock(&flist_lock); list_del(&entry->list); spin_unlock(&flist_lock); dev_kfree_skb(entry->skb); kfree(entry); } static int lowpan_process_data(struct sk_buff *skb) { struct ipv6hdr hdr; u8 tmp, iphc0, iphc1, num_context = 0; u8 *_saddr, *_daddr; int err; lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); /* at least two bytes will be used for the encoding */ if (skb->len < 2) goto drop; iphc0 = lowpan_fetch_skb_u8(skb); /* fragments assembling */ switch (iphc0 & LOWPAN_DISPATCH_MASK) { case LOWPAN_DISPATCH_FRAG1: case LOWPAN_DISPATCH_FRAGN: { struct lowpan_fragment *frame; u8 len, offset; u16 tag; bool found = false; len = lowpan_fetch_skb_u8(skb); /* frame length */ tag = lowpan_fetch_skb_u16(skb); /* * check if frame assembling with the same tag is * already in progress */ spin_lock(&flist_lock); list_for_each_entry(frame, &lowpan_fragments, list) if (frame->tag == tag) { found = true; break; } /* alloc new frame structure */ if (!found) { frame = kzalloc(sizeof(struct lowpan_fragment), GFP_ATOMIC); if (!frame) goto unlock_and_drop; INIT_LIST_HEAD(&frame->list); frame->length = (iphc0 & 7) | (len << 3); frame->tag = tag; /* allocate buffer for frame assembling */ frame->skb = alloc_skb(frame->length + sizeof(struct ipv6hdr), GFP_ATOMIC); if (!frame->skb) { kfree(frame); goto unlock_and_drop; } frame->skb->priority = skb->priority; frame->skb->dev = skb->dev; /* reserve 
headroom for uncompressed ipv6 header */ skb_reserve(frame->skb, sizeof(struct ipv6hdr)); skb_put(frame->skb, frame->length); init_timer(&frame->timer); /* time out is the same as for ipv6 - 60 sec */ frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; frame->timer.data = (unsigned long)frame; frame->timer.function = lowpan_fragment_timer_expired; add_timer(&frame->timer); list_add_tail(&frame->list, &lowpan_fragments); } if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) goto unlock_and_drop; offset = lowpan_fetch_skb_u8(skb); /* fetch offset */ /* if payload fits buffer, copy it */ if (likely((offset * 8 + skb->len) <= frame->length)) skb_copy_to_linear_data_offset(frame->skb, offset * 8, skb->data, skb->len); else goto unlock_and_drop; frame->bytes_rcv += skb->len; /* frame assembling complete */ if ((frame->bytes_rcv == frame->length) && frame->timer.expires > jiffies) { /* if timer haven't expired - first of all delete it */ del_timer(&frame->timer); list_del(&frame->list); spin_unlock(&flist_lock); dev_kfree_skb(skb); skb = frame->skb; kfree(frame); iphc0 = lowpan_fetch_skb_u8(skb); break; } spin_unlock(&flist_lock); return kfree_skb(skb), 0; } default: break; } iphc1 = lowpan_fetch_skb_u8(skb); _saddr = mac_cb(skb)->sa.hwaddr; _daddr = mac_cb(skb)->da.hwaddr; pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1); /* another if the CID flag is set */ if (iphc1 & LOWPAN_IPHC_CID) { pr_debug("(%s): CID flag is set, increase header with one\n", __func__); if (!skb->len) goto drop; num_context = lowpan_fetch_skb_u8(skb); } hdr.version = 6; /* Traffic Class and Flow Label */ switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) { /* * Traffic Class and FLow Label carried in-line * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) */ case 0: /* 00b */ if (!skb->len) goto drop; tmp = lowpan_fetch_skb_u8(skb); memcpy(&hdr.flow_lbl, &skb->data[0], 3); skb_pull(skb, 3); hdr.priority = ((tmp >> 2) & 0x0f); hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) | (hdr.flow_lbl[0] & 0x0f); break; /* * Traffic class carried in-line * ECN + DSCP (1 byte), Flow Label is elided */ case 1: /* 10b */ if (!skb->len) goto drop; tmp = lowpan_fetch_skb_u8(skb); hdr.priority = ((tmp >> 2) & 0x0f); hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30); hdr.flow_lbl[1] = 0; hdr.flow_lbl[2] = 0; break; /* * Flow Label carried in-line * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided */ case 2: /* 01b */ if (!skb->len) goto drop; tmp = lowpan_fetch_skb_u8(skb); hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); memcpy(&hdr.flow_lbl[1], &skb->data[0], 2); skb_pull(skb, 2); break; /* Traffic Class and Flow Label are elided */ case 3: /* 11b */ hdr.priority = 0; hdr.flow_lbl[0] = 0; hdr.flow_lbl[1] = 0; hdr.flow_lbl[2] = 0; break; default: break; } /* Next Header */ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { /* Next header is carried inline */ if (!skb->len) goto drop; hdr.nexthdr = lowpan_fetch_skb_u8(skb); pr_debug("(%s): NH flag is set, next header is carried " "inline: %02x\n", __func__, hdr.nexthdr); } /* Hop Limit */ if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03]; else { if (!skb->len) goto drop; hdr.hop_limit = lowpan_fetch_skb_u8(skb); } /* Extract SAM to the tmp variable */ tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03; /* Source address uncompression */ pr_debug("(%s): source address stateless compression\n", __func__); err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix, lowpan_unc_llconf[tmp], skb->data); 
if (err) goto drop; /* Extract DAM to the tmp variable */ tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03; /* check for Multicast Compression */ if (iphc1 & LOWPAN_IPHC_M) { if (iphc1 & LOWPAN_IPHC_DAC) { pr_debug("(%s): destination address context-based " "multicast compression\n", __func__); /* TODO: implement this */ } else { u8 prefix[] = {0xff, 0x02}; pr_debug("(%s): destination address non-context-based" " multicast compression\n", __func__); if (0 < tmp && tmp < 3) { if (!skb->len) goto drop; else prefix[1] = lowpan_fetch_skb_u8(skb); } err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix, lowpan_unc_mxconf[tmp], NULL); if (err) goto drop; } } else { pr_debug("(%s): destination address stateless compression\n", __func__); err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix, lowpan_unc_llconf[tmp], skb->data); if (err) goto drop; } /* UDP data uncompression */ if (iphc0 & LOWPAN_IPHC_NH_C) if (lowpan_uncompress_udp_header(skb)) goto drop; /* Not fragmented package */ hdr.payload_len = htons(skb->len); pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__, skb_headroom(skb), skb->len); pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t" "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version, ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit); lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr)); return lowpan_skb_deliver(skb, &hdr); unlock_and_drop: spin_unlock(&flist_lock); drop: kfree_skb(skb); return -EINVAL; } static int lowpan_get_mac_header_length(struct sk_buff *skb) { /* * Currently long addressing mode is supported only, so the overall * header size is 21: * FC SeqNum DPAN DA SA Sec * 2 + 1 + 2 + 8 + 8 + 0 = 21 */ return 21; } static int lowpan_fragment_xmit(struct sk_buff *skb, u8 *head, int mlen, int plen, int offset) { struct sk_buff *frag; int hlen, ret; /* if payload length is zero, therefore it's a first fragment */ hlen = (plen == 0 ? 
LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE); lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE); if (!frag) return -ENOMEM; frag->priority = skb->priority; frag->dev = skb->dev; /* copy header, MFR and payload */ memcpy(skb_put(frag, mlen), skb->data, mlen); memcpy(skb_put(frag, hlen), head, hlen); if (plen) skb_copy_from_linear_data_offset(skb, offset + mlen, skb_put(frag, plen), plen); lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len); ret = dev_queue_xmit(frag); return ret; } static int lowpan_skb_fragmentation(struct sk_buff *skb) { int err, header_length, payload_length, tag, offset = 0; u8 head[5]; header_length = lowpan_get_mac_header_length(skb); payload_length = skb->len - header_length; tag = fragment_tag++; /* first fragment header */ head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7); head[1] = (payload_length >> 3) & 0xff; head[2] = tag & 0xff; head[3] = tag >> 8; err = lowpan_fragment_xmit(skb, head, header_length, 0, 0); /* next fragment header */ head[0] &= ~LOWPAN_DISPATCH_FRAG1; head[0] |= LOWPAN_DISPATCH_FRAGN; while ((payload_length - offset > 0) && (err >= 0)) { int len = LOWPAN_FRAG_SIZE; head[4] = offset / 8; if (payload_length - offset < len) len = payload_length - offset; err = lowpan_fragment_xmit(skb, head, header_length, len, offset); offset += len; } return err; } static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) { int err = -1; pr_debug("(%s): package xmit\n", __func__); skb->dev = lowpan_dev_info(dev)->real_dev; if (skb->dev == NULL) { pr_debug("(%s) ERROR: no real wpan device found\n", __func__); goto error; } if (skb->len <= IEEE802154_MTU) { err = dev_queue_xmit(skb); goto out; } pr_debug("(%s): frame is too big, fragmentation is needed\n", __func__); err = lowpan_skb_fragmentation(skb); error: dev_kfree_skb(skb); out: if (err < 0) pr_debug("(%s): ERROR: xmit failed\n", __func__); return (err < 0 ? 
NETDEV_TX_BUSY : NETDEV_TX_OK); } static void lowpan_dev_free(struct net_device *dev) { dev_put(lowpan_dev_info(dev)->real_dev); free_netdev(dev); } static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_phy(real_dev); } static u16 lowpan_get_pan_id(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev); } static u16 lowpan_get_short_addr(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); } static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; static const struct net_device_ops lowpan_netdev_ops = { .ndo_start_xmit = lowpan_xmit, .ndo_set_mac_address = eth_mac_addr, }; static struct ieee802154_mlme_ops lowpan_mlme = { .get_pan_id = lowpan_get_pan_id, .get_phy = lowpan_get_phy, .get_short_addr = lowpan_get_short_addr, }; static void lowpan_setup(struct net_device *dev) { pr_debug("(%s)\n", __func__); dev->addr_len = IEEE802154_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); dev->type = ARPHRD_IEEE802154; /* Frame Control + Sequence Number + Address fields + Security Header */ dev->hard_header_len = 2 + 1 + 20 + 14; dev->needed_tailroom = 2; /* FCS */ dev->mtu = 1281; dev->tx_queue_len = 0; dev->flags = IFF_BROADCAST | IFF_MULTICAST; dev->watchdog_timeo = 0; dev->netdev_ops = &lowpan_netdev_ops; dev->header_ops = &lowpan_header_ops; dev->ml_priv = &lowpan_mlme; dev->destructor = lowpan_dev_free; } static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) { pr_debug("(%s)\n", __func__); if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) return -EINVAL; } return 0; } static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { if (!netif_running(dev)) goto drop; if (dev->type != ARPHRD_IEEE802154) goto drop; /* check that it's our buffer */ switch (skb->data[0] & 0xe0) { case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ lowpan_process_data(skb); break; default: break; } return NET_RX_SUCCESS; drop: kfree_skb(skb); return NET_RX_DROP; } static int lowpan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_device *real_dev; struct lowpan_dev_record *entry; pr_debug("(%s)\n", __func__); if (!tb[IFLA_LINK]) return -EINVAL; /* find and hold real wpan device */ real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; lowpan_dev_info(dev)->real_dev = real_dev; mutex_init(&lowpan_dev_info(dev)->dev_list_mtx); entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL); if (!entry) { dev_put(real_dev); lowpan_dev_info(dev)->real_dev = NULL; return -ENOMEM; } entry->ldev = dev; mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); INIT_LIST_HEAD(&entry->list); list_add_tail(&entry->list, &lowpan_devices); mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); spin_lock_init(&flist_lock); register_netdevice(dev); return 0; } static void lowpan_dellink(struct net_device *dev, struct list_head *head) { struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); struct net_device *real_dev = lowpan_dev->real_dev; struct lowpan_dev_record *entry, *tmp; 
struct lowpan_fragment *frame, *tframe; ASSERT_RTNL(); spin_lock(&flist_lock); list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) { del_timer(&frame->timer); list_del(&frame->list); dev_kfree_skb(frame->skb); kfree(frame); } spin_unlock(&flist_lock); mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (entry->ldev == dev) { list_del(&entry->list); kfree(entry); } } mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx); unregister_netdevice_queue(dev, head); dev_put(real_dev); } static struct rtnl_link_ops lowpan_link_ops __read_mostly = { .kind = "lowpan", .priv_size = sizeof(struct lowpan_dev_info), .setup = lowpan_setup, .newlink = lowpan_newlink, .dellink = lowpan_dellink, .validate = lowpan_validate, }; static inline int __init lowpan_netlink_init(void) { return rtnl_link_register(&lowpan_link_ops); } static inline void __init lowpan_netlink_fini(void) { rtnl_link_unregister(&lowpan_link_ops); } static struct packet_type lowpan_packet_type = { .type = __constant_htons(ETH_P_IEEE802154), .func = lowpan_rcv, }; static int __init lowpan_init_module(void) { int err = 0; pr_debug("(%s)\n", __func__); err = lowpan_netlink_init(); if (err < 0) goto out; dev_add_pack(&lowpan_packet_type); out: return err; } static void __exit lowpan_cleanup_module(void) { pr_debug("(%s)\n", __func__); lowpan_netlink_fini(); dev_remove_pack(&lowpan_packet_type); } module_init(lowpan_init_module); module_exit(lowpan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("lowpan");
Red680812/android_443_KitKat_kernel_htc_dlxub1
net/ieee802154/6lowpan.c
C
gpl-2.0
33,847
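The 6LoWPAN record above implements RFC 6282 (HC06) IPHC/NHC compression, and its densest detail is the UDP port encoding handled by lowpan_compress_udp_header() and lowpan_uncompress_udp_header(). The Python sketch below illustrates only that port encoding as a round trip; the numeric constants (the 0xF0 dispatch pattern, the 0xF0B0 and 0xF000 port bases) are the usual RFC 6282 values and are assumptions here, since the header defining LOWPAN_NHC_UDP_* is not part of this record, and the checksum bit is not modelled because the driver always carries the checksum inline.

```python
# Sketch of RFC 6282 UDP port compression, mirroring the decode switch in
# lowpan_uncompress_udp_header() above. Constant values are assumed.
NHC_UDP_ID = 0xF0          # 11110CPP dispatch byte (checksum bit ignored here)
CS_P_00, CS_P_01, CS_P_10, CS_P_11 = 0x00, 0x01, 0x02, 0x03
PORT_4BIT = 0xF0B0         # both ports in 0xF0B0..0xF0BF -> 4+4 bits
PORT_8BIT = 0xF000         # port in 0xF000..0xF0FF       -> 8 bits

def compress_ports(src, dst):
    """Return (first NHC byte, list of inline port bytes)."""
    if (src & 0xFFF0) == PORT_4BIT and (dst & 0xFFF0) == PORT_4BIT:
        # both ports fit in 4 bits each, packed into a single byte
        return NHC_UDP_ID | CS_P_11, [((src & 0x0F) << 4) | (dst & 0x0F)]
    if (dst & 0xFF00) == PORT_8BIT:
        # destination compressed to 8 bits, source carried in full
        return NHC_UDP_ID | CS_P_01, [src >> 8, src & 0xFF, dst & 0xFF]
    if (src & 0xFF00) == PORT_8BIT:
        # source compressed to 8 bits, destination carried in full
        return NHC_UDP_ID | CS_P_10, [src & 0xFF, dst >> 8, dst & 0xFF]
    # nothing compressible: both ports carried inline
    return NHC_UDP_ID | CS_P_00, [src >> 8, src & 0xFF, dst >> 8, dst & 0xFF]

def uncompress_ports(nhc, data):
    """Inverse of compress_ports(); mirrors the kernel's decode switch."""
    mode = nhc & 0x03
    if mode == CS_P_11:
        return PORT_4BIT + (data[0] >> 4), PORT_4BIT + (data[0] & 0x0F)
    if mode == CS_P_01:
        return (data[0] << 8) | data[1], PORT_8BIT + data[2]
    if mode == CS_P_10:
        return PORT_8BIT + data[0], (data[1] << 8) | data[2]
    return (data[0] << 8) | data[1], (data[2] << 8) | data[3]

assert uncompress_ports(*compress_ports(0xF0B1, 0xF0B2)) == (0xF0B1, 0xF0B2)
assert uncompress_ports(*compress_ports(5683, 0xF012)) == (5683, 0xF012)
```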
<?php /** * @file * Contains the CTools Export UI integration code. * * Note that this is only a partial integration. */ /** * CTools Export UI class handler for Views UI. */ class views_ui extends ctools_export_ui { function init($plugin) { // We modify the plugin info here so that we take the defaults and // twiddle, rather than completely override them. // Reset the edit path to match what we're really using. $plugin['menu']['items']['edit']['path'] = 'view/%ctools_export_ui/edit'; $plugin['menu']['items']['clone']['path'] = 'view/%ctools_export_ui/clone'; $plugin['menu']['items']['clone']['type'] = MENU_VISIBLE_IN_BREADCRUMB; $plugin['menu']['items']['export']['path'] = 'view/%ctools_export_ui/export'; $plugin['menu']['items']['export']['type'] = MENU_VISIBLE_IN_BREADCRUMB; $plugin['menu']['items']['enable']['path'] = 'view/%ctools_export_ui/enable'; $plugin['menu']['items']['disable']['path'] = 'view/%ctools_export_ui/disable'; $plugin['menu']['items']['delete']['path'] = 'view/%ctools_export_ui/delete'; $plugin['menu']['items']['delete']['type'] = MENU_VISIBLE_IN_BREADCRUMB; $plugin['menu']['items']['revert']['path'] = 'view/%ctools_export_ui/revert'; $plugin['menu']['items']['revert']['type'] = MENU_VISIBLE_IN_BREADCRUMB; $prefix_count = count(explode('/', $plugin['menu']['menu prefix'])); $plugin['menu']['items']['add-template'] = array( 'path' => 'template/%/add', 'title' => 'Add from template', 'page callback' => 'ctools_export_ui_switcher_page', 'page arguments' => array($plugin['name'], 'add_template', $prefix_count + 2), 'load arguments' => array($plugin['name']), 'access callback' => 'ctools_export_ui_task_access', 'access arguments' => array($plugin['name'], 'add_template', $prefix_count + 2), 'type' => MENU_CALLBACK, ); return parent::init($plugin); } function hook_menu(&$items) { // We are using our own 'edit' still, rather than having edit on this // object (maybe in the future) so unset the edit callbacks: // Store this so we can put them back as sometimes they're needed // again laster: $stored_items = $this->plugin['menu']['items']; // We leave these to make sure the operations still exist in the plugin so // that the path finder. unset($this->plugin['menu']['items']['edit']); unset($this->plugin['menu']['items']['add']); unset($this->plugin['menu']['items']['import']); unset($this->plugin['menu']['items']['edit callback']); parent::hook_menu($items); $this->plugin['menu']['items'] = $stored_items; } function load_item($item_name) { return views_ui_cache_load($item_name); } function list_form(&$form, &$form_state) { $row_class = 'container-inline'; if (!variable_get('views_ui_show_listing_filters', FALSE)) { $row_class .= " element-invisible"; } views_include('admin'); parent::list_form($form, $form_state); // ctools only has two rows. We want four. // That's why we create our own structure. $form['bottom row']['submit']['#attributes']['class'][] = 'js-hide'; $form['first row'] = array( '#prefix' => '<div class="' . $row_class . ' ctools-export-ui-row ctools-export-ui-first-row clearfix">', '#suffix' => '</div>', 'search' => $form['top row']['search'], 'submit' => $form['bottom row']['submit'], 'reset' => $form['bottom row']['reset'], ); $form['second row'] = array( '#prefix' => '<div class="' . $row_class . ' ctools-export-ui-row ctools-export-ui-second-row clearfix">', '#suffix' => '</div>', 'storage' => $form['top row']['storage'], 'disabled' => $form['top row']['disabled'], ); $form['third row'] = array( '#prefix' => '<div class="' . $row_class . 
' ctools-export-ui-row ctools-export-ui-third-row clearfix element-hidden">', '#suffix' => '</div>', 'order' => $form['bottom row']['order'], 'sort' => $form['bottom row']['sort'], ); unset($form['top row']); unset($form['bottom row']); // Modify the look and contents of existing form elements. $form['second row']['storage']['#title'] = ''; $form['second row']['storage']['#options'] = array( 'all' => t('All storage'), t('Normal') => t('In database'), t('Default') => t('In code'), t('Overridden') => t('Database overriding code'), ); $form['second row']['disabled']['#title'] = ''; $form['second row']['disabled']['#options']['all'] = t('All status'); $form['third row']['sort']['#title'] = ''; // And finally, add our own. $this->bases = array(); foreach (views_fetch_base_tables() as $table => $info) { $this->bases[$table] = $info['title']; } $form['second row']['base'] = array( '#type' => 'select', '#options' => array_merge(array('all' => t('All types')), $this->bases), '#default_value' => 'all', '#weight' => -1, ); $tags = array(); if (isset($form_state['object']->items)) { foreach ($form_state['object']->items as $name => $view) { if (!empty($view->tag)) { $view_tags = drupal_explode_tags($view->tag); foreach ($view_tags as $tag) { $tags[$tag] = $tag; } } } } asort($tags); $form['second row']['tag'] = array( '#type' => 'select', '#title' => t('Filter'), '#options' => array_merge(array('all' => t('All tags')), array('none' => t('No tags')), $tags), '#default_value' => 'all', '#weight' => -9, ); $displays = array(); foreach (views_fetch_plugin_data('display') as $id => $info) { if (!empty($info['admin'])) { $displays[$id] = $info['admin']; } } asort($displays); $form['second row']['display'] = array( '#type' => 'select', '#options' => array_merge(array('all' => t('All displays')), $displays), '#default_value' => 'all', '#weight' => -1, ); } function list_filter($form_state, $view) { // Don't filter by tags if all is set up. if ($form_state['values']['tag'] != 'all') { // If none is selected check whether the view has a tag. if ($form_state['values']['tag'] == 'none') { return !empty($view->tag); } else { // Check whether the tag can be found in the views tag. return strpos($view->tag, $form_state['values']['tag']) === FALSE; } } if ($form_state['values']['base'] != 'all' && $form_state['values']['base'] != $view->base_table) { return TRUE; } return parent::list_filter($form_state, $view); } function list_sort_options() { return array( 'disabled' => t('Enabled, name'), 'name' => t('Name'), 'path' => t('Path'), 'tag' => t('Tag'), 'storage' => t('Storage'), ); } function list_build_row($view, &$form_state, $operations) { if (!empty($view->human_name)) { $title = $view->human_name; } else { $title = $view->get_title(); if (empty($title)) { $title = $view->name; } } $paths = _views_ui_get_paths($view); $paths = implode(", ", $paths); $base = !empty($this->bases[$view->base_table]) ? $this->bases[$view->base_table] : t('Broken'); $info = theme('views_ui_view_info', array('view' => $view, 'base' => $base)); // Reorder the operations so that enable is the default action for a templatic views if (!empty($operations['enable'])) { $operations = array('enable' => $operations['enable']) + $operations; } // Set up sorting switch ($form_state['values']['order']) { case 'disabled': $this->sorts[$view->name] = strtolower(empty($view->disabled) . 
$title); break; case 'name': $this->sorts[$view->name] = strtolower($title); break; case 'path': $this->sorts[$view->name] = strtolower($paths); break; case 'tag': $this->sorts[$view->name] = strtolower($view->tag); break; case 'storage': $this->sorts[$view->name] = strtolower($view->type . $title); break; } $ops = theme('links__ctools_dropbutton', array('links' => $operations, 'attributes' => array('class' => array('links', 'inline')))); $this->rows[$view->name] = array( 'data' => array( array('data' => $info, 'class' => array('views-ui-name')), array('data' => check_plain($view->description), 'class' => array('views-ui-description')), array('data' => check_plain($view->tag), 'class' => array('views-ui-tag')), array('data' => $paths, 'class' => array('views-ui-path')), array('data' => $ops, 'class' => array('views-ui-operations')), ), 'title' => t('Machine name: ') . check_plain($view->name), 'class' => array(!empty($view->disabled) ? 'ctools-export-ui-disabled' : 'ctools-export-ui-enabled'), ); } function list_render(&$form_state) { views_include('admin'); views_ui_add_admin_css(); if (empty($_REQUEST['js'])) { views_ui_check_advanced_help(); } drupal_add_library('system', 'jquery.bbq'); views_add_js('views-list'); $this->active = $form_state['values']['order']; $this->order = $form_state['values']['sort']; $query = tablesort_get_query_parameters(); $header = array( $this->tablesort_link(t('View name'), 'name', 'views-ui-name'), array('data' => t('Description'), 'class' => array('views-ui-description')), $this->tablesort_link(t('Tag'), 'tag', 'views-ui-tag'), $this->tablesort_link(t('Path'), 'path', 'views-ui-path'), array('data' => t('Operations'), 'class' => array('views-ui-operations')), ); $table = array( 'header' => $header, 'rows' => $this->rows, 'empty' => t('No views match the search criteria.'), 'attributes' => array('id' => 'ctools-export-ui-list-items'), ); return theme('table', $table); } function tablesort_link($label, $field, $class) { $title = t('sort by @s', array('@s' => $label)); $initial = 'asc'; if ($this->active == $field) { $initial = ($this->order == 'asc') ? 'desc' : 'asc'; $label .= theme('tablesort_indicator', array('style' => $initial)); } $query['order'] = $field; $query['sort'] = $initial; $link_options = array( 'html' => TRUE, 'attributes' => array('title' => $title), 'query' => $query, ); $link = l($label, $_GET['q'], $link_options); if ($this->active == $field) { $class .= ' active'; } return array('data' => $link, 'class' => $class); } function clone_page($js, $input, $item, $step = NULL) { drupal_set_title($this->get_page_title('clone', $item)); $name = $item->{$this->plugin['export']['key']}; $form_state = array( 'plugin' => $this->plugin, 'object' => &$this, 'ajax' => $js, 'item' => $item, 'op' => 'add', 'form type' => 'clone', 'original name' => $name, 'rerender' => TRUE, 'no_redirect' => TRUE, 'step' => $step, // Store these in case additional args are needed. 
'function args' => func_get_args(), ); $output = drupal_build_form('views_ui_clone_form', $form_state); if (!empty($form_state['executed'])) { $item->name = $form_state['values']['name']; $item->human_name = $form_state['values']['human_name']; $item->vid = NULL; views_ui_cache_set($item); drupal_goto(ctools_export_ui_plugin_menu_path($this->plugin, 'edit', $item->name)); } return $output; } function add_template_page($js, $input, $name, $step = NULL) { $templates = views_get_all_templates(); if (empty($templates[$name])) { return MENU_NOT_FOUND; } $template = $templates[$name]; // The template description probably describes the template, not the // view that will be created from it, but users aren't that likely to // touch it. if (!empty($template->description)) { unset($template->description); } $template->is_template = TRUE; $template->type = t('Default'); $output = $this->clone_page($js, $input, $template, $step); drupal_set_title(t('Create view from template @template', array('@template' => $template->get_human_name()))); return $output; } function set_item_state($state, $js, $input, $item) { ctools_export_set_object_status($item, $state); menu_rebuild(); if (!$js) { drupal_goto(ctools_export_ui_plugin_base_path($this->plugin)); } else { return $this->list_page($js, $input); } } function list_page($js, $input) { // Remove filters values from session if filters are hidden. if (!variable_get('views_ui_show_listing_filters', FALSE) && isset($_SESSION['ctools_export_ui'][$this->plugin['name']])) { unset($_SESSION['ctools_export_ui'][$this->plugin['name']]); } // wrap output in a div for CSS $output = parent::list_page($js, $input); if (is_string($output)) { $output = '<div id="views-ui-list-page">' . $output . '</div>'; return $output; } } } /** * Form callback to edit an exportable item using the wizard * * This simply loads the object defined in the plugin and hands it off. */ function views_ui_clone_form($form, &$form_state) { $counter = 1; if (!isset($form_state['item'])) { $view = views_get_view($form_state['original name']); } else { $view = $form_state['item']; } do { if (empty($form_state['item']->is_template)) { $name = format_plural($counter, 'Clone of', 'Clone @count of') . ' ' . $view->get_human_name(); } else { $name = $view->get_human_name(); if ($counter > 1) { $name .= ' ' . $counter; } } $counter++; $machine_name = preg_replace('/[^a-z0-9_]+/', '_', drupal_strtolower($name)); } while (ctools_export_crud_load($form_state['plugin']['schema'], $machine_name)); $form['human_name'] = array( '#type' => 'textfield', '#title' => t('View name'), '#default_value' => $name, '#size' => 32, '#maxlength' => 255, ); $form['name'] = array( '#title' => t('View name'), '#type' => 'machine_name', '#required' => TRUE, '#maxlength' => 128, '#size' => 128, '#machine_name' => array( 'exists' => 'ctools_export_ui_edit_name_exists', 'source' => array('human_name'), ), ); $form['submit'] = array( '#type' => 'submit', '#value' => t('Continue'), ); return $form; }
vijayakoppineedi/tripal2d7
sites/all/modules/views/plugins/export_ui/views_ui.class.php
PHP
gpl-2.0
14,684
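In the Views UI record above, views_ui_clone_form() derives a unique machine name for a cloned view by proposing "Clone of <name>" / "Clone N of <name>" titles and squashing them into the [a-z0-9_] character set until no existing export uses the result. A rough Python equivalent of that loop follows; the `existing` set merely stands in for the ctools_export_crud_load() existence check and is not part of the real API.

```python
# Sketch of the clone-naming loop in views_ui_clone_form().
import re

def propose_clone_name(human_name, existing, is_template=False):
    counter = 1
    while True:
        if not is_template:
            prefix = "Clone of" if counter == 1 else "Clone %d of" % counter
            name = "%s %s" % (prefix, human_name)
        else:
            name = human_name if counter == 1 else "%s %d" % (human_name, counter)
        counter += 1
        # lowercase, then collapse anything outside [a-z0-9_] to underscores
        machine = re.sub(r"[^a-z0-9_]+", "_", name.lower())
        if machine not in existing:
            return name, machine

existing = {"clone_of_front_page"}
print(propose_clone_name("Front page", existing))
# -> ('Clone 2 of Front page', 'clone_2_of_front_page')
```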
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/gfp.h> #include "mthca_dev.h" #include "mthca_cmd.h" struct mthca_mgm { __be32 next_gid_index; u32 reserved[3]; u8 gid[16]; __be32 qp[MTHCA_QP_PER_MGM]; }; static const u8 zero_gid[16]; /* automatically initialized to 0 */ /* * Caller must hold MCG table semaphore. gid and mgm parameters must * be properly aligned for command interface. * * Returns 0 unless a firmware command error occurs. * * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 * and *mgm holds MGM entry. * * if GID is found in AMGM, *index = index in AMGM, *prev = index of * previous entry in hash chain and *mgm holds AMGM entry. * * If no AMGM exists for given gid, *index = -1, *prev = index of last * entry in hash chain and *mgm holds end of hash chain. 
*/ static int find_mgm(struct mthca_dev *dev, u8 *gid, struct mthca_mailbox *mgm_mailbox, u16 *hash, int *prev, int *index) { struct mthca_mailbox *mailbox; struct mthca_mgm *mgm = mgm_mailbox->buf; u8 *mgid; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return -ENOMEM; mgid = mailbox->buf; memcpy(mgid, gid, 16); err = mthca_MGID_HASH(dev, mailbox, hash); if (err) { mthca_err(dev, "MGID_HASH failed (%d)\n", err); goto out; } if (0) mthca_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash); *index = *hash; *prev = -1; do { err = mthca_READ_MGM(dev, *index, mgm_mailbox); if (err) { mthca_err(dev, "READ_MGM failed (%d)\n", err); goto out; } if (!memcmp(mgm->gid, zero_gid, 16)) { if (*index != *hash) { mthca_err(dev, "Found zero MGID in AMGM.\n"); err = -EINVAL; } goto out; } if (!memcmp(mgm->gid, gid, 16)) goto out; *prev = *index; *index = be32_to_cpu(mgm->next_gid_index) >> 6; } while (*index); *index = -1; out: mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int index, prev; int link = 0; int i; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&dev->mcg_table.mutex); err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; if (index != -1) { if (!memcmp(mgm->gid, zero_gid, 16)) memcpy(mgm->gid, gid->raw, 16); } else { link = 1; index = mthca_alloc(&dev->mcg_table.alloc); if (index == -1) { mthca_err(dev, "No AMGM entries left\n"); err = -ENOMEM; goto out; } err = mthca_READ_MGM(dev, index, mailbox); if (err) { mthca_err(dev, "READ_MGM failed (%d)\n", err); goto out; } memset(mgm, 0, sizeof *mgm); memcpy(mgm->gid, gid->raw, 16); } for (i = 0; i < MTHCA_QP_PER_MGM; ++i) if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) { mthca_dbg(dev, "QP %06x already a member of MGM\n", ibqp->qp_num); err = 0; goto out; } else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) { mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31)); break; } if (i == MTHCA_QP_PER_MGM) { mthca_err(dev, "MGM at index %x is full.\n", index); err = -ENOMEM; goto out; } err = mthca_WRITE_MGM(dev, index, mailbox); if (err) { mthca_err(dev, "WRITE_MGM failed %d\n", err); err = -EINVAL; goto out; } if (!link) goto out; err = mthca_READ_MGM(dev, prev, mailbox); if (err) { mthca_err(dev, "READ_MGM failed %d\n", err); goto out; } mgm->next_gid_index = cpu_to_be32(index << 6); err = mthca_WRITE_MGM(dev, prev, mailbox); if (err) mthca_err(dev, "WRITE_MGM returned %d\n", err); out: if (err && link && index != -1) { BUG_ON(index < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, index); } mutex_unlock(&dev->mcg_table.mutex); mthca_free_mailbox(dev, mailbox); return err; } int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_mailbox *mailbox; struct mthca_mgm *mgm; u16 hash; int prev, index; int i, loc; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); mgm = mailbox->buf; mutex_lock(&dev->mcg_table.mutex); err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); if (err) goto out; if (index == -1) { mthca_err(dev, "MGID %pI6 not found\n", gid->raw); err = -EINVAL; goto out; } for (loc = -1, i = 0; i < MTHCA_QP_PER_MGM; ++i) { if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 
31))) loc = i; if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) break; } if (loc == -1) { mthca_err(dev, "QP %06x not found in MGM\n", ibqp->qp_num); err = -EINVAL; goto out; } mgm->qp[loc] = mgm->qp[i - 1]; mgm->qp[i - 1] = 0; err = mthca_WRITE_MGM(dev, index, mailbox); if (err) { mthca_err(dev, "WRITE_MGM returned %d\n", err); goto out; } if (i != 1) goto out; if (prev == -1) { /* Remove entry from MGM */ int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6; if (amgm_index_to_free) { err = mthca_READ_MGM(dev, amgm_index_to_free, mailbox); if (err) { mthca_err(dev, "READ_MGM returned %d\n", err); goto out; } } else memset(mgm->gid, 0, 16); err = mthca_WRITE_MGM(dev, index, mailbox); if (err) { mthca_err(dev, "WRITE_MGM returned %d\n", err); goto out; } if (amgm_index_to_free) { BUG_ON(amgm_index_to_free < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, amgm_index_to_free); } } else { /* Remove entry from AMGM */ int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; err = mthca_READ_MGM(dev, prev, mailbox); if (err) { mthca_err(dev, "READ_MGM returned %d\n", err); goto out; } mgm->next_gid_index = cpu_to_be32(curr_next_index << 6); err = mthca_WRITE_MGM(dev, prev, mailbox); if (err) { mthca_err(dev, "WRITE_MGM returned %d\n", err); goto out; } BUG_ON(index < dev->limits.num_mgms); mthca_free(&dev->mcg_table.alloc, index); } out: mutex_unlock(&dev->mcg_table.mutex); mthca_free_mailbox(dev, mailbox); return err; } int mthca_init_mcg_table(struct mthca_dev *dev) { int err; int table_size = dev->limits.num_mgms + dev->limits.num_amgms; err = mthca_alloc_init(&dev->mcg_table.alloc, table_size, table_size - 1, dev->limits.num_mgms); if (err) return err; mutex_init(&dev->mcg_table.mutex); return 0; } void mthca_cleanup_mcg_table(struct mthca_dev *dev) { mthca_alloc_cleanup(&dev->mcg_table.alloc); }
VincentEmmanuel/android_kernel_pantech_e51k
drivers/infiniband/hw/mthca/mthca_mcg.c
C
gpl-2.0
8,111
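The mthca multicast code above keeps each group's QP list packed at the front of a fixed-size array: a used slot carries the QP number with bit 31 set, attach fills the first free slot, and detach moves the last valid entry into the freed hole. A small Python model of just that slot management follows; the slot count is an assumption (MTHCA_QP_PER_MGM is defined elsewhere), and the MGM/AMGM hash-chain handling is left out.

```python
# Sketch of the packed QP-slot bookkeeping used by mthca_multicast_attach()
# and mthca_multicast_detach(). Not driver code.
QP_PER_MGM = 8          # assumed slot count; the real value comes from mthca_dev.h
VALID = 1 << 31

def attach(slots, qpn):
    for i, s in enumerate(slots):
        if s == (qpn | VALID):
            return True               # already a member, nothing to do
        if not (s & VALID):
            slots[i] = qpn | VALID    # first free slot
            return True
    return False                      # group full -> -ENOMEM in the driver

def detach(slots, qpn):
    loc = -1
    for i, s in enumerate(slots):
        if s == (qpn | VALID):
            loc = i                   # remember where the member sits
        if not (s & VALID):
            break                     # i is now the index of the first free slot
    else:
        i = len(slots)                # every slot was in use
    if loc < 0:
        return False                  # QP not found in this group
    slots[loc] = slots[i - 1]         # move the last member into the hole
    slots[i - 1] = 0
    return True

slots = [0] * QP_PER_MGM
for q in (0x11, 0x22, 0x33):
    attach(slots, q)
detach(slots, 0x11)                   # 0x33 now occupies the first slot
assert slots[0] == (0x33 | VALID) and slots[2] == 0
```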
/* * linux/drivers/video/macmodes.c -- Standard MacOS video modes * * Copyright (C) 1998 Geert Uytterhoeven * * 2000 - Removal of OpenFirmware dependencies by: * - Ani Joshi * - Brad Douglas <brad@neruo.com> * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/errno.h> #include <linux/fb.h> #include <linux/string.h> #include <linux/module.h> #include "macmodes.h" /* * MacOS video mode definitions * * Order IS important! If you change these, don't forget to update * mac_modes[] below! */ #define DEFAULT_MODEDB_INDEX 0 static const struct fb_videomode mac_modedb[] = { { /* 512x384, 60Hz, Non-Interlaced (15.67 MHz dot clock) */ "mac2", 60, 512, 384, 63828, 80, 16, 19, 1, 32, 3, 0, FB_VMODE_NONINTERLACED }, { /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ "mac5", 60, 640, 480, 39722, 32, 32, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED }, { /* 640x480, 67Hz, Non-Interlaced (30.0 MHz dotclock) */ "mac6", 67, 640, 480, 33334, 80, 80, 39, 3, 64, 3, 0, FB_VMODE_NONINTERLACED }, { /* 640x870, 75Hz (portrait), Non-Interlaced (57.28 MHz dot clock) */ "mac7", 75, 640, 870, 17457, 80, 32, 42, 3, 80, 3, 0, FB_VMODE_NONINTERLACED }, { /* 800x600, 56 Hz, Non-Interlaced (36.00 MHz dotclock) */ "mac9", 56, 800, 600, 27778, 112, 40, 22, 1, 72, 2, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 800x600, 60 Hz, Non-Interlaced (40.00 MHz dotclock) */ "mac10", 60, 800, 600, 25000, 72, 56, 23, 1, 128, 4, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 800x600, 72 Hz, Non-Interlaced (50.00 MHz dotclock) */ "mac11", 72, 800, 600, 20000, 48, 72, 23, 37, 120, 6, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 800x600, 75 Hz, Non-Interlaced (49.50 MHz dotclock) */ "mac12", 75, 800, 600, 20203, 144, 32, 21, 1, 80, 3, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 832x624, 75Hz, Non-Interlaced (57.6 MHz dotclock) */ "mac13", 75, 832, 624, 17362, 208, 48, 39, 1, 64, 3, 0, FB_VMODE_NONINTERLACED }, { /* 1024x768, 60 Hz, Non-Interlaced (65.00 MHz dotclock) */ "mac14", 60, 1024, 768, 15385, 144, 40, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED }, { /* 1024x768, 72 Hz, Non-Interlaced (75.00 MHz dotclock) */ "mac15", 72, 1024, 768, 13334, 128, 40, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED }, { /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ "mac16", 75, 1024, 768, 12699, 176, 16, 28, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ "mac17", 75, 1024, 768, 12699, 160, 32, 28, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 1152x870, 75 Hz, Non-Interlaced (100.0 MHz dotclock) */ "mac18", 75, 1152, 870, 10000, 128, 48, 39, 3, 128, 3, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 1280x960, 75 Hz, Non-Interlaced (126.00 MHz dotclock) */ "mac19", 75, 1280, 960, 7937, 224, 32, 36, 1, 144, 3, 0, FB_VMODE_NONINTERLACED }, { /* 1280x1024, 75 Hz, Non-Interlaced (135.00 MHz dotclock) */ "mac20", 75, 1280, 1024, 7408, 232, 64, 38, 1, 112, 3, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, { /* 1152x768, 60 Hz, Titanium PowerBook */ "mac21", 60, 1152, 768, 15386, 158, 26, 29, 3, 136, 6, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, 
FB_VMODE_NONINTERLACED }, { /* 1600x1024, 60 Hz, Non-Interlaced (112.27 MHz dotclock) */ "mac22", 60, 1600, 1024, 8908, 88, 104, 1, 10, 16, 1, FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED } #if 0 /* Anyone who has timings for these? */ { /* VMODE_512_384_60I: 512x384, 60Hz, Interlaced (NTSC) */ "mac1", 60, 512, 384, pixclock, left, right, upper, lower, hslen, vslen, sync, FB_VMODE_INTERLACED }, { /* VMODE_640_480_50I: 640x480, 50Hz, Interlaced (PAL) */ "mac3", 50, 640, 480, pixclock, left, right, upper, lower, hslen, vslen, sync, FB_VMODE_INTERLACED }, { /* VMODE_640_480_60I: 640x480, 60Hz, Interlaced (NTSC) */ "mac4", 60, 640, 480, pixclock, left, right, upper, lower, hslen, vslen, sync, FB_VMODE_INTERLACED }, { /* VMODE_768_576_50I: 768x576, 50Hz (PAL full frame), Interlaced */ "mac8", 50, 768, 576, pixclock, left, right, upper, lower, hslen, vslen, sync, FB_VMODE_INTERLACED }, #endif }; /* * Mapping between MacOS video mode numbers and video mode definitions * * These MUST be ordered in * - increasing resolution * - decreasing pixel clock period */ static const struct mode_map { int vmode; const struct fb_videomode *mode; } mac_modes[] = { /* 512x384 */ { VMODE_512_384_60, &mac_modedb[0] }, /* 640x480 */ { VMODE_640_480_60, &mac_modedb[1] }, { VMODE_640_480_67, &mac_modedb[2] }, /* 640x870 */ { VMODE_640_870_75P, &mac_modedb[3] }, /* 800x600 */ { VMODE_800_600_56, &mac_modedb[4] }, { VMODE_800_600_60, &mac_modedb[5] }, { VMODE_800_600_75, &mac_modedb[7] }, { VMODE_800_600_72, &mac_modedb[6] }, /* 832x624 */ { VMODE_832_624_75, &mac_modedb[8] }, /* 1024x768 */ { VMODE_1024_768_60, &mac_modedb[9] }, { VMODE_1024_768_70, &mac_modedb[10] }, { VMODE_1024_768_75V, &mac_modedb[11] }, { VMODE_1024_768_75, &mac_modedb[12] }, /* 1152x768 */ { VMODE_1152_768_60, &mac_modedb[16] }, /* 1152x870 */ { VMODE_1152_870_75, &mac_modedb[13] }, /* 1280x960 */ { VMODE_1280_960_75, &mac_modedb[14] }, /* 1280x1024 */ { VMODE_1280_1024_75, &mac_modedb[15] }, /* 1600x1024 */ { VMODE_1600_1024_60, &mac_modedb[17] }, { -1, NULL } }; /* * Mapping between monitor sense values and MacOS video mode numbers */ static const struct monitor_map { int sense; int vmode; } mac_monitors[] = { { 0x000, VMODE_1280_1024_75 }, /* 21" RGB */ { 0x114, VMODE_640_870_75P }, /* Portrait Monochrome */ { 0x221, VMODE_512_384_60 }, /* 12" RGB*/ { 0x331, VMODE_1280_1024_75 }, /* 21" RGB (Radius) */ { 0x334, VMODE_1280_1024_75 }, /* 21" mono (Radius) */ { 0x335, VMODE_1280_1024_75 }, /* 21" mono */ { 0x40A, VMODE_640_480_60I }, /* NTSC */ { 0x51E, VMODE_640_870_75P }, /* Portrait RGB */ { 0x603, VMODE_832_624_75 }, /* 12"-16" multiscan */ { 0x60b, VMODE_1024_768_70 }, /* 13"-19" multiscan */ { 0x623, VMODE_1152_870_75 }, /* 13"-21" multiscan */ { 0x62b, VMODE_640_480_67 }, /* 13"/14" RGB */ { 0x700, VMODE_640_480_50I }, /* PAL */ { 0x714, VMODE_640_480_60I }, /* NTSC */ { 0x717, VMODE_800_600_75 }, /* VGA */ { 0x72d, VMODE_832_624_75 }, /* 16" RGB (Goldfish) */ { 0x730, VMODE_768_576_50I }, /* PAL (Alternate) */ { 0x73a, VMODE_1152_870_75 }, /* 3rd party 19" */ { 0x73f, VMODE_640_480_67 }, /* no sense lines connected at all */ { 0xBEEF, VMODE_1600_1024_60 }, /* 22" Apple Cinema Display */ { -1, VMODE_640_480_60 }, /* catch-all, must be last */ }; /** * mac_vmode_to_var - converts vmode/cmode pair to var structure * @vmode: MacOS video mode * @cmode: MacOS color mode * @var: frame buffer video mode structure * * Converts a MacOS vmode/cmode pair to a frame buffer video * mode structure. 
* * Returns negative errno on error, or zero for success. * */ int mac_vmode_to_var(int vmode, int cmode, struct fb_var_screeninfo *var) { const struct fb_videomode *mode = NULL; const struct mode_map *map; for (map = mac_modes; map->vmode != -1; map++) if (map->vmode == vmode) { mode = map->mode; break; } if (!mode) return -EINVAL; memset(var, 0, sizeof(struct fb_var_screeninfo)); switch (cmode) { case CMODE_8: var->bits_per_pixel = 8; var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; case CMODE_16: var->bits_per_pixel = 16; var->red.offset = 10; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 0; var->blue.length = 5; break; case CMODE_32: var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; default: return -EINVAL; } var->xres = mode->xres; var->yres = mode->yres; var->xres_virtual = mode->xres; var->yres_virtual = mode->yres; var->height = -1; var->width = -1; var->pixclock = mode->pixclock; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->hsync_len = mode->hsync_len; var->vsync_len = mode->vsync_len; var->sync = mode->sync; var->vmode = mode->vmode; return 0; } EXPORT_SYMBOL(mac_vmode_to_var); /** * mac_var_to_vmode - convert var structure to MacOS vmode/cmode pair * @var: frame buffer video mode structure * @vmode: MacOS video mode * @cmode: MacOS color mode * * Converts a frame buffer video mode structure to a MacOS * vmode/cmode pair. * * Returns negative errno on error, or zero for success. * */ int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode, int *cmode) { const struct mode_map *map; if (var->bits_per_pixel <= 8) *cmode = CMODE_8; else if (var->bits_per_pixel <= 16) *cmode = CMODE_16; else if (var->bits_per_pixel <= 32) *cmode = CMODE_32; else return -EINVAL; /* * Find the mac_mode with a matching resolution or failing that, the * closest larger resolution. Skip modes with a shorter pixel clock period. */ for (map = mac_modes; map->vmode != -1; map++) { const struct fb_videomode *mode = map->mode; if (var->xres > mode->xres || var->yres > mode->yres) continue; if (var->xres_virtual > mode->xres || var->yres_virtual > mode->yres) continue; if (var->pixclock > mode->pixclock) continue; if ((var->vmode & FB_VMODE_MASK) != mode->vmode) continue; *vmode = map->vmode; /* * Having found a good resolution, find the matching pixel clock * or failing that, the closest longer pixel clock period. */ map++; while (map->vmode != -1) { const struct fb_videomode *clk_mode = map->mode; if (mode->xres != clk_mode->xres || mode->yres != clk_mode->yres) break; if (var->pixclock > mode->pixclock) break; if (mode->vmode != clk_mode->vmode) continue; *vmode = map->vmode; map++; } return 0; } return -EINVAL; } /** * mac_map_monitor_sense - Convert monitor sense to vmode * @sense: Macintosh monitor sense number * * Converts a Macintosh monitor sense number to a MacOS * vmode number. * * Returns MacOS vmode video mode number. 
* */ int mac_map_monitor_sense(int sense) { const struct monitor_map *map; for (map = mac_monitors; map->sense != -1; map++) if (map->sense == sense) break; return map->vmode; } EXPORT_SYMBOL(mac_map_monitor_sense); /** * mac_find_mode - find a video mode * @var: frame buffer user defined part of display * @info: frame buffer info structure * @mode_option: video mode name (see mac_modedb[]) * @default_bpp: default color depth in bits per pixel * * Finds a suitable video mode. Tries to set mode specified * by @mode_option. If the name of the wanted mode begins with * 'mac', the Mac video mode database will be used, otherwise it * will fall back to the standard video mode database. * * Note: Function marked as __init and can only be used during * system boot. * * Returns error code from fb_find_mode (see fb_find_mode * function). * */ int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info, const char *mode_option, unsigned int default_bpp) { const struct fb_videomode *db = NULL; unsigned int dbsize = 0; if (mode_option && !strncmp(mode_option, "mac", 3)) { mode_option += 3; db = mac_modedb; dbsize = ARRAY_SIZE(mac_modedb); } return fb_find_mode(var, info, mode_option, db, dbsize, &mac_modedb[DEFAULT_MODEDB_INDEX], default_bpp); } EXPORT_SYMBOL(mac_find_mode); MODULE_LICENSE("GPL");
brymaster5000/m7-GPE-L
drivers/video/macmodes.c
C
gpl-2.0
12,698
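For the macmodes record above, the key invariant is that mac_modes[] must be ordered by increasing resolution and decreasing pixel-clock period, so mac_var_to_vmode() can scan it linearly: take the first mode whose resolution and clock satisfy the request, then walk forward within the same resolution toward the closest longer clock period. The Python sketch below illustrates that selection policy with timings taken from the mode table above; it drops the interlace and virtual-resolution checks and is not a line-for-line port of the kernel loop.

```python
# Illustration of the mode-selection policy described in mac_var_to_vmode().
modes = [  # (vmode name, xres, yres, pixclock period in ps), from mac_modedb[]
    ("VMODE_640_480_60",  640, 480, 39722),
    ("VMODE_640_480_67",  640, 480, 33334),
    ("VMODE_800_600_56",  800, 600, 27778),
    ("VMODE_800_600_75",  800, 600, 20203),
]

def var_to_vmode(xres, yres, pixclock):
    for i, (vmode, mx, my, mclk) in enumerate(modes):
        if xres > mx or yres > my:
            continue                  # mode too small for the request
        if pixclock > mclk:
            continue                  # mode's dot clock is faster than requested
        best = vmode                  # first fit: smallest adequate resolution
        # walk entries with the same resolution, keeping the shortest clock
        # period that is still no faster than the request
        for vmode2, nx, ny, nclk in modes[i + 1:]:
            if (nx, ny) != (mx, my) or pixclock > nclk:
                break
            best = vmode2
        return best
    return None                       # kernel returns -EINVAL here

print(var_to_vmode(640, 480, 39722))  # exact match -> VMODE_640_480_60
print(var_to_vmode(640, 480, 30000))  # closest longer period -> VMODE_640_480_67
print(var_to_vmode(700, 500, 25000))  # bumped to next resolution -> VMODE_800_600_56
```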
/* arch/arm/mach-msm/htc_BCM4335_wl_reg.c * * Copyright (C) 2012 HTC, Inc. * Author: assd bt <assd_bt@htc.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* Control bluetooth power for glacier platform */ #include <linux/gpio.h> #include <linux/mfd/pm8xxx/pm8921.h> #include <mach/htc_4335_wl_reg.h> #define PM8921_GPIO_BASE NR_GPIO_IRQS #define PM8921_GPIO_PM_TO_SYS(pm_gpio) (pm_gpio - 1 + PM8921_GPIO_BASE) static int htc_BCM4335_wl_reg_pin = -2; static int htc_BCM4335_wl_reg_state_wifi; static int htc_BCM4335_wl_reg_state_bt; static DEFINE_MUTEX(htc_w_b_mutex); int set_BCM4335_wl_reg_onoff(int on) { if (on) { printk(KERN_DEBUG "EN BCM4335 WL REG\n"); gpio_set_value(PM8921_GPIO_PM_TO_SYS(htc_BCM4335_wl_reg_pin), 1); } else { printk(KERN_DEBUG "DIS BCM4335 WL REG\n"); gpio_set_value(PM8921_GPIO_PM_TO_SYS(htc_BCM4335_wl_reg_pin), 0); } return 0; } int htc_BCM4335_wl_reg_ctl(int on, int id) { int err = 0; printk(KERN_DEBUG "%s ON=%d, ID=%d\n", __func__, on, id); if (htc_BCM4335_wl_reg_pin < 0) { printk(KERN_DEBUG "== ERR WL REG PIN=%d ==\n", htc_BCM4335_wl_reg_pin); return htc_BCM4335_wl_reg_pin; } mutex_lock(&htc_w_b_mutex); if (on) { if ((BCM4335_WL_REG_OFF == htc_BCM4335_wl_reg_state_wifi) && (BCM4335_WL_REG_OFF == htc_BCM4335_wl_reg_state_bt)) { err = set_BCM4335_wl_reg_onoff(BCM4335_WL_REG_ON); if (err) { mutex_unlock(&htc_w_b_mutex); return err; } } else if ((id == ID_WIFI) && ( BCM4335_WL_REG_ON == htc_BCM4335_wl_reg_state_bt)) { printk(KERN_ERR "[WLAN] Try to pull WL_REG off and on to reset Wi-Fi chip\n"); err = set_BCM4335_wl_reg_onoff(BCM4335_WL_REG_OFF); if (err) { printk(KERN_ERR "[WLAN] Failed to pull WL_REG off\n"); mutex_unlock(&htc_w_b_mutex); return err; } mdelay(1); err = set_BCM4335_wl_reg_onoff(BCM4335_WL_REG_ON); if (err) { printk(KERN_ERR "[WLAN] Failed to pull WL_REG on\n"); mutex_unlock(&htc_w_b_mutex); return err; } } if (id == ID_BT) htc_BCM4335_wl_reg_state_bt = BCM4335_WL_REG_ON; else htc_BCM4335_wl_reg_state_wifi = BCM4335_WL_REG_ON; } else { if (((id == ID_BT) && (BCM4335_WL_REG_OFF == htc_BCM4335_wl_reg_state_wifi)) || ((id == ID_WIFI) && (BCM4335_WL_REG_OFF == htc_BCM4335_wl_reg_state_bt))) { err = set_BCM4335_wl_reg_onoff(BCM4335_WL_REG_OFF); if (err) { mutex_unlock(&htc_w_b_mutex); return err; } } else { printk(KERN_DEBUG "KEEP BCM4335 WL REG ALIVE\n"); } if (id) htc_BCM4335_wl_reg_state_bt = BCM4335_WL_REG_OFF; else htc_BCM4335_wl_reg_state_wifi = BCM4335_WL_REG_OFF; } mutex_unlock(&htc_w_b_mutex); printk(KERN_DEBUG "%s ON=%d, ID=%d DONE\n", __func__, on, id); return 0; } int htc_BCM4335_wl_reg_init(int BCM4335_wl_reg_pin) { htc_BCM4335_wl_reg_pin = BCM4335_wl_reg_pin; htc_BCM4335_wl_reg_state_wifi = BCM4335_WL_REG_OFF; htc_BCM4335_wl_reg_state_bt = BCM4335_WL_REG_OFF; printk(KERN_DEBUG "%s, pin=%d\n", __func__, htc_BCM4335_wl_reg_pin); return true; }
bgcngm/802Xtreem
arch/arm/mach-msm/htc_4335_wl_reg.c
C
gpl-2.0
3,443
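The htc_BCM4335_wl_reg code above arbitrates a single WL_REG_ON GPIO between Wi-Fi and Bluetooth: the line goes high with the first user, stays high while either user is active, gets pulsed low/high to reset the Wi-Fi core when Wi-Fi starts while BT already holds it, and drops only when the last user stops. A condensed Python model of that state machine follows; the ID_WIFI/ID_BT values are inferred from the C code (the real definitions live in mach/htc_4335_wl_reg.h), set_reg() stands in for the GPIO write, and the mdelay(1) in the reset pulse is not modelled.

```python
# Model of the shared-regulator arbitration in htc_BCM4335_wl_reg_ctl().
ID_WIFI, ID_BT = 0, 1   # assumed values, consistent with the "if (id)" checks

class WlReg:
    def __init__(self):
        self.state = {ID_WIFI: False, ID_BT: False}
        self.reg_on = False

    def set_reg(self, on):
        self.reg_on = on
        print("WL_REG_ON =", int(on))   # stands in for gpio_set_value()

    def ctl(self, on, who):
        other = ID_BT if who == ID_WIFI else ID_WIFI
        if on:
            if not self.state[ID_WIFI] and not self.state[ID_BT]:
                self.set_reg(True)       # first user: power the chip
            elif who == ID_WIFI and self.state[ID_BT]:
                self.set_reg(False)      # BT already up: pulse the line
                self.set_reg(True)       # to reset the Wi-Fi core
        else:
            if not self.state[other]:
                self.set_reg(False)      # last user gone: power off
            # else: keep the regulator alive for the other user
        self.state[who] = on

reg = WlReg()
reg.ctl(True, ID_BT)      # WL_REG_ON = 1
reg.ctl(True, ID_WIFI)    # WL_REG_ON = 0 then 1 (reset pulse)
reg.ctl(False, ID_BT)     # no change: Wi-Fi still needs the regulator
reg.ctl(False, ID_WIFI)   # WL_REG_ON = 0
```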
// +-----------------------------------------------------------------------+ // | Copyright (c) 2002-2003, Richard Heyes, Harald Radi | // | All rights reserved. | // | | // | Redistribution and use in source and binary forms, with or without | // | modification, are permitted provided that the following conditions | // | are met: | // | | // | o Redistributions of source code must retain the above copyright | // | notice, this list of conditions and the following disclaimer. | // | o Redistributions in binary form must reproduce the above copyright | // | notice, this list of conditions and the following disclaimer in the | // | documentation and/or other materials provided with the distribution.| // | o The names of the authors may not be used to endorse or promote | // | products derived from this software without specific prior written | // | permission. | // | | // | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | // | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | // | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | // | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | // | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | // | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | // | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | // | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | // | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | // | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | // | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | // | | // +-----------------------------------------------------------------------+ // | Author: Richard Heyes <richard@phpguru.org> | // | Harald Radi <harald.radi@nme.at> | // +-----------------------------------------------------------------------+ /** * Function to create copies of objects which are * normally passed around by references (Arrays for example) */ function arrayCopy(input) { var output = new Array(input.length); for (i in input) { if (typeof(input[i]) == 'array') { output[i] = arrayCopy(input[i]); } else { output[i] = input[i]; } } return output; } /** * TreeMenu class */ function TreeMenu(iconpath, myname, linkTarget, defaultClass, usePersistence, noTopLevelImages) { // Properties this.iconpath = iconpath; this.myname = myname; this.linkTarget = linkTarget; this.defaultClass = defaultClass; this.usePersistence = usePersistence; this.noTopLevelImages = noTopLevelImages; this.n = new Array(); this.output = ''; this.nodeRefs = new Array(); this.branches = new Array(); this.branchStatus = new Array(); this.layerRelations = new Array(); this.childParents = new Array(); this.cookieStatuses = new Array(); this.preloadImages(); } /** * Adds a node to the tree */ TreeMenu.prototype.addItem = function (newNode) { newIndex = this.n.length; this.n[newIndex] = newNode; return this.n[newIndex]; } /** * Preload images hack for Mozilla */ TreeMenu.prototype.preloadImages = function () { var plustop = new Image; plustop.src = this.iconpath + '/plustop.gif'; var plusbottom = new Image; plusbottom.src = this.iconpath + '/plusbottom.gif'; var plus = new Image; plus.src = this.iconpath + '/plus.gif'; var minustop = new Image; minustop.src = this.iconpath + '/minustop.gif'; var minusbottom = new Image; minusbottom.src = this.iconpath + '/minusbottom.gif'; var minus = new Image; minus.src = this.iconpath + 
'/minus.gif'; var branchtop = new Image; branchtop.src = this.iconpath + '/branchtop.gif'; var branchbottom = new Image; branchbottom.src = this.iconpath + '/branchbottom.gif'; var branch = new Image; branch.src = this.iconpath + '/branch.gif'; var linebottom = new Image; linebottom.src = this.iconpath + '/linebottom.gif'; var line = new Image; line.src = this.iconpath + '/line.gif'; } /** * Main function that draws the menu and assigns it * to the layer (or document.write()s it) */ TreeMenu.prototype.drawMenu = function ()// OPTIONAL ARGS: nodes = [], level = [], prepend = '', expanded = false, visbility = 'inline', parentLayerID = null { /** * Necessary variables */ var output = ''; var modifier = ''; var layerID = ''; var parentLayerID = ''; /** * Parse any optional arguments */ var nodes = arguments[0] ? arguments[0] : this.n var level = arguments[1] ? arguments[1] : []; var prepend = arguments[2] ? arguments[2] : ''; var expanded = arguments[3] ? arguments[3] : false; var visibility = arguments[4] ? arguments[4] : 'inline'; var parentLayerID = arguments[5] ? arguments[5] : null; var currentlevel = level.length; for (var i=0; i<nodes.length; i++) { level[currentlevel] = i+1; layerID = this.myname + '_' + 'node_' + this.implode('_', level); /** * Store this object in the nodeRefs array */ this.nodeRefs[layerID] = nodes[i]; /** * Store the child/parent relationship */ this.childParents[layerID] = parentLayerID; /** * Gif modifier */ if (i == 0 && parentLayerID == null) { modifier = nodes.length > 1 ? "top" : 'single'; } else if(i == (nodes.length-1)) { modifier = "bottom"; } else { modifier = ""; } /** * Single root branch is always expanded */ if (!this.doesMenu() || (parentLayerID == null && (nodes.length == 1 || this.noTopLevelImages))) { expanded = true; } else if (nodes[i].expanded) { expanded = true; } else { expanded = false; } /** * Make sure visibility is correct based on parent status */ visibility = this.checkParentVisibility(layerID) ? visibility : 'none'; /** * Setup branch status and build an indexed array * of branch layer ids */ if (nodes[i].n.length > 0) { this.branchStatus[layerID] = expanded; this.branches[this.branches.length] = layerID; } /** * Setup toggle relationship */ if (!this.layerRelations[parentLayerID]) { this.layerRelations[parentLayerID] = new Array(); } this.layerRelations[parentLayerID][this.layerRelations[parentLayerID].length] = layerID; /** * Branch images */ var gifname = nodes[i].n.length && this.doesMenu() && nodes[i].isDynamic ? (expanded ? 'minus' : 'plus') : 'branch'; var iconName = expanded && nodes[i].expandedIcon ? nodes[i].expandedIcon : nodes[i].icon; var iconimg = nodes[i].icon ? this.stringFormat('<img src="{0}/{1}" width="20" height="20" align="top" id="icon_{2}">', this.iconpath, iconName, layerID) : ''; /** * Add event handlers */ var eventHandlers = ""; for (j in nodes[i].events) { eventHandlers += this.stringFormat('{0}="{1}" ', j, nodes[i].events[j]); } /** * Build the html to write to the document * IMPORTANT: * document.write()ing the string: '<div style="display:...' will screw up nn4.x */ var layerTag = this.doesMenu() ? this.stringFormat('<div id="{0}" style="display: {1}" class="{2}">', layerID, visibility, (nodes[i].cssClass ? nodes[i].cssClass : this.defaultClass)) : this.stringFormat('<div class="{0}">', nodes[i].cssClass ? nodes[i].cssClass : this.defaultClass); var onMDown = this.doesMenu() && nodes[i].n.length && nodes[i].isDynamic ? 
this.stringFormat('onmousedown="{0}.toggleBranch(\'{1}\', true)" style="cursor: pointer;"', this.myname, layerID) : ''; var imgTag = this.stringFormat('<img src="{0}/{1}{2}.gif" width="20" height="20" align="top" border="0" name="img_{3}" {4}>', this.iconpath, gifname, modifier, layerID, onMDown); var linkTarget= nodes[i].linkTarget ? nodes[i].linkTarget : this.linkTarget; var linkStart = nodes[i].link ? this.stringFormat('<a href="{0}" target="{1}">', nodes[i].link, linkTarget) : ''; var linkEnd = nodes[i].link ? '</a>' : ''; this.output += this.stringFormat('{0}<nobr>{1}{2}{3}{4}<span {5}>{6}</span>{7}</nobr><br></div>', layerTag, prepend, parentLayerID == null && (nodes.length == 1 || this.noTopLevelImages) ? '' : imgTag, iconimg, linkStart, eventHandlers, nodes[i].title, linkEnd); /** * Traverse sub nodes ? */ if (nodes[i].n.length) { /** * Determine what to prepend. If there is only one root * node then the prepend to pass to children is nothing. * Otherwise it depends on where we are in the tree. */ if (parentLayerID == null && (nodes.length == 1 || this.noTopLevelImages)) { var newPrepend = ''; } else if (i < (nodes.length - 1)) { var newPrepend = prepend + this.stringFormat('<img src="{0}/line.gif" width="20" height="20" align="top">', this.iconpath); } else { var newPrepend = prepend + this.stringFormat('<img src="{0}/linebottom.gif" width="20" height="20" align="top">', this.iconpath); } this.drawMenu(nodes[i].n, arrayCopy(level), newPrepend, nodes[i].expanded, expanded ? 'inline' : 'none', layerID); } } } /** * Writes the output generated by drawMenu() to the page */ TreeMenu.prototype.writeOutput = function () { document.write(this.output); } /** * Toggles a branches visible status. Called from resetBranches() * and also when a +/- graphic is clicked. */ TreeMenu.prototype.toggleBranch = function (layerID, updateStatus) // OPTIONAL ARGS: fireEvents = true { var currentDisplay = this.getLayer(layerID).style.display; var newDisplay = (this.branchStatus[layerID] && currentDisplay == 'inline') ? 'none' : 'inline'; var fireEvents = arguments[2] != null ? arguments[2] : true; for (var i=0; i<this.layerRelations[layerID].length; i++) { if (this.branchStatus[this.layerRelations[layerID][i]]) { this.toggleBranch(this.layerRelations[layerID][i], false); } this.getLayer(this.layerRelations[layerID][i]).style.display = newDisplay; } if (updateStatus) { this.branchStatus[layerID] = !this.branchStatus[layerID]; /** * Persistence */ if (this.doesPersistence() && !arguments[2] && this.usePersistence) { this.setExpandedStatusForCookie(layerID, this.branchStatus[layerID]); } /** * Fire custom events */ if (fireEvents) { nodeObject = this.nodeRefs[layerID]; if (nodeObject.ontoggle != null) { eval(nodeObject.ontoggle); } if (newDisplay == 'none' && nodeObject.oncollapse != null) { eval(nodeObject.oncollapse); } else if (newDisplay == 'inline' && nodeObject.onexpand != null){ eval(nodeObject.onexpand); } } // Swap image this.swapImage(layerID); } // Swap icon this.swapIcon(layerID); } /** * Swaps the plus/minus branch images */ TreeMenu.prototype.swapImage = function (layerID) { var imgSrc = document.images['img_' + layerID].src; var re = /^(.*)(plus|minus)(bottom|top|single)?.gif$/ if (matches = imgSrc.match(re)) { document.images['img_' + layerID].src = this.stringFormat('{0}{1}{2}{3}', matches[1], matches[2] == 'plus' ? 'minus' : 'plus', matches[3] ? matches[3] : '', '.gif'); } } /** * Swaps the icon for the expanded icon if one * has been supplied. 
*/ TreeMenu.prototype.swapIcon = function (layerID) { if (document.images['icon_' + layerID]) { var imgSrc = document.images['icon_' + layerID].src; if (this.nodeRefs[layerID].icon && this.nodeRefs[layerID].expandedIcon) { var newSrc = (imgSrc.indexOf(this.nodeRefs[layerID].expandedIcon) == -1 ? this.nodeRefs[layerID].expandedIcon : this.nodeRefs[layerID].icon); document.images['icon_' + layerID].src = this.iconpath + '/' + newSrc; } } } /** * Can the browser handle the dynamic menu? */ TreeMenu.prototype.doesMenu = function () { return (is_ie4up || is_nav6up || is_gecko || is_opera7); } /** * Can the browser handle save the branch status */ TreeMenu.prototype.doesPersistence = function () { return (is_ie4up || is_gecko || is_nav6up || is_opera7); } /** * Returns the appropriate layer accessor */ TreeMenu.prototype.getLayer = function (layerID) { if (is_ie4) { return document.all(layerID); } else if (document.getElementById(layerID)) { return document.getElementById(layerID); } else if (document.all(layerID)) { return document.all(layerID); } } /** * Save the status of the layer */ TreeMenu.prototype.setExpandedStatusForCookie = function (layerID, expanded) { this.cookieStatuses[layerID] = expanded; this.saveCookie(); } /** * Load the status of the layer */ TreeMenu.prototype.getExpandedStatusFromCookie = function (layerID) { if (this.cookieStatuses[layerID]) { return this.cookieStatuses[layerID]; } return false; } /** * Saves the cookie that holds which branches are expanded. * Only saves the details of the branches which are expanded. */ TreeMenu.prototype.saveCookie = function () { var cookieString = new Array(); for (var i in this.cookieStatuses) { if (this.cookieStatuses[i] == true) { cookieString[cookieString.length] = i; } } document.cookie = 'TreeMenuBranchStatus=' + cookieString.join(':'); } /** * Reads cookie parses it for status info and * stores that info in the class member. 
*/ TreeMenu.prototype.loadCookie = function () { var cookie = document.cookie.split('; '); for (var i=0; i < cookie.length; i++) { var crumb = cookie[i].split('='); if ('TreeMenuBranchStatus' == crumb[0] && crumb[1]) { var expandedBranches = crumb[1].split(':'); for (var j=0; j<expandedBranches.length; j++) { this.cookieStatuses[expandedBranches[j]] = true; } } } } /** * Reset branch status */ TreeMenu.prototype.resetBranches = function () { if (!this.doesPersistence()) { return false; } this.loadCookie(); for (var i=0; i<this.branches.length; i++) { var status = this.getExpandedStatusFromCookie(this.branches[i]); // Only update if it's supposed to be expanded and it's not already if (status == true && this.branchStatus[this.branches[i]] != true) { if (this.checkParentVisibility(this.branches[i])) { this.toggleBranch(this.branches[i], true, false); } else { this.branchStatus[this.branches[i]] = true; this.swapImage(this.branches[i]); } } } } /** * Checks whether a branch should be open * or not based on its parents' status */ TreeMenu.prototype.checkParentVisibility = function (layerID) { if (this.in_array(this.childParents[layerID], this.branches) && this.branchStatus[this.childParents[layerID]] && this.checkParentVisibility(this.childParents[layerID]) ) { return true; } else if (this.childParents[layerID] == null) { return true; } return false; } /** * New C# style string formatter */ TreeMenu.prototype.stringFormat = function (strInput) { var idx = 0; for (var i=1; i<arguments.length; i++) { while ((idx = strInput.indexOf('{' + (i - 1) + '}', idx)) != -1) { strInput = strInput.substring(0, idx) + arguments[i] + strInput.substr(idx + 3); } } return strInput; } /** * Also much adored, the PHP implode() function */ TreeMenu.prototype.implode = function (seperator, input) { var output = ''; for (var i=0; i<input.length; i++) { if (i == 0) { output += input[i]; } else { output += seperator + input[i]; } } return output; } /** * Aah, all the old favourites are coming out... */ TreeMenu.prototype.in_array = function (item, arr) { for (var i=0; i<arr.length; i++) { if (arr[i] == item) { return true; } } return false; } /** * TreeNode Class */ function TreeNode(title, icon, link, expanded, isDynamic, cssClass, linkTarget, expandedIcon) { this.title = title; this.icon = icon; this.expandedIcon = expandedIcon; this.link = link; this.expanded = expanded; this.isDynamic = isDynamic; this.cssClass = cssClass; this.linkTarget = linkTarget; this.n = new Array(); this.events = new Array(); this.handlers = null; this.oncollapse = null; this.onexpand = null; this.ontoggle = null; } /** * Adds a node to an already existing node */ TreeNode.prototype.addItem = function (newNode) { newIndex = this.n.length; this.n[newIndex] = newNode; return this.n[newIndex]; } /** * Sets an event for this particular node */ TreeNode.prototype.setEvent = function (eventName, eventHandler) { switch (eventName.toLowerCase()) { case 'onexpand': this.onexpand = eventHandler; break; case 'oncollapse': this.oncollapse = eventHandler; break; case 'ontoggle': this.ontoggle = eventHandler; break; default: this.events[eventName] = eventHandler; } } /** * That's the end of the tree classes. What follows is * the browser detection code. */ //<!-- // Ultimate client-side JavaScript client sniff. Version 3.03 // (C) Netscape Communications 1999-2001. Permission granted to reuse and distribute. // Revised 17 May 99 to add is_nav5up and is_ie5up (see below). 
// Revised 20 Dec 00 to add is_gecko and change is_nav5up to is_nav6up // also added support for IE5.5 Opera4&5 HotJava3 AOLTV // Revised 22 Feb 01 to correct Javascript Detection for IE 5.x, Opera 4, // correct Opera 5 detection // add support for winME and win2k // synch with browser-type-oo.js // Revised 26 Mar 01 to correct Opera detection // Revised 02 Oct 01 to add IE6 detection // Everything you always wanted to know about your JavaScript client // but were afraid to ask. Creates "is_" variables indicating: // (1) browser vendor: // is_nav, is_ie, is_opera, is_hotjava, is_webtv, is_TVNavigator, is_AOLTV // (2) browser version number: // is_major (integer indicating major version number: 2, 3, 4 ...) // is_minor (float indicating full version number: 2.02, 3.01, 4.04 ...) // (3) browser vendor AND major version number // is_nav2, is_nav3, is_nav4, is_nav4up, is_nav6, is_nav6up, is_gecko, is_ie3, // is_ie4, is_ie4up, is_ie5, is_ie5up, is_ie5_5, is_ie5_5up, is_ie6, is_ie6up, is_hotjava3, is_hotjava3up, // is_opera2, is_opera3, is_opera4, is_opera5, is_opera5up // (4) JavaScript version number: // is_js (float indicating full JavaScript version number: 1, 1.1, 1.2 ...) // (5) OS platform and version: // is_win, is_win16, is_win32, is_win31, is_win95, is_winnt, is_win98, is_winme, is_win2k // is_os2 // is_mac, is_mac68k, is_macppc // is_unix // is_sun, is_sun4, is_sun5, is_suni86 // is_irix, is_irix5, is_irix6 // is_hpux, is_hpux9, is_hpux10 // is_aix, is_aix1, is_aix2, is_aix3, is_aix4 // is_linux, is_sco, is_unixware, is_mpras, is_reliant // is_dec, is_sinix, is_freebsd, is_bsd // is_vms // // See http://www.it97.de/JavaScript/JS_tutorial/bstat/navobj.html and // http://www.it97.de/JavaScript/JS_tutorial/bstat/Browseraol.html // for detailed lists of userAgent strings. // // Note: you don't want your Nav4 or IE4 code to "turn off" or // stop working when new versions of browsers are released, so // in conditional code forks, use is_ie5up ("IE 5.0 or greater") // is_opera5up ("Opera 5.0 or greater") instead of is_ie5 or is_opera5 // to check version in code which you want to work on future // versions. /** * Severly curtailed all this as only certain elements * are required by TreeMenu, specifically: * o is_ie4up * o is_nav6up * o is_gecko */ // convert all characters to lowercase to simplify testing var agt=navigator.userAgent.toLowerCase(); // *** BROWSER VERSION *** // Note: On IE5, these return 4, so use is_ie5up to detect IE5. var is_major = parseInt(navigator.appVersion); var is_minor = parseFloat(navigator.appVersion); // Note: Opera and WebTV spoof Navigator. We do strict client detection. // If you want to allow spoofing, take out the tests for opera and webtv. var is_nav = ((agt.indexOf('mozilla')!=-1) && (agt.indexOf('spoofer')==-1) && (agt.indexOf('compatible') == -1) && (agt.indexOf('opera')==-1) && (agt.indexOf('webtv')==-1) && (agt.indexOf('hotjava')==-1)); var is_nav6up = (is_nav && (is_major >= 5)); var is_gecko = (agt.indexOf('gecko') != -1); var is_ie = ((agt.indexOf("msie") != -1) && (agt.indexOf("opera") == -1)); var is_ie4 = (is_ie && (is_major == 4) && (agt.indexOf("msie 4")!=-1) ); var is_ie4up = (is_ie && (is_major >= 4)); var is_opera = (agt.indexOf("opera") != -1); var is_opera7 = is_opera && (agt.indexOf("opera 7") != -1); // Patch from Harald Fielker if (agt.indexOf('konqueror') != -1) { var is_nav = false; var is_nav6up = false; var is_gecko = false; var is_ie = true; var is_ie4 = true; var is_ie4up = true; } //--> end hide JavaScript
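// --- Editor-added usage sketch (not part of the original TreeMenu.js) ---
// Illustrates how the classes above are wired together; the icon path,
// variable name and node contents are hypothetical. Note that the second
// constructor argument must match the name of the global variable holding
// the menu, because the generated onmousedown handlers refer to it by name.
var menu = new TreeMenu('/images/treemenu', 'menu', '_self', 'treeMenuDefault', true, false);
var docs = menu.addItem(new TreeNode('Documents', 'folder.gif', null, true, true, null, null, 'folder-expanded.gif'));
docs.addItem(new TreeNode('Readme', 'doc.gif', 'readme.html', false, true));
menu.addItem(new TreeNode('Home', 'doc.gif', 'index.html', false, true));
menu.drawMenu();      // builds the HTML into menu.output
menu.writeOutput();   // document.write()s the markup into the page
menu.resetBranches(); // restores expand/collapse state from the cookie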
Tate-ad/revive-adserver
www/devel/assets/js/TreeMenu.js
JavaScript
gpl-2.0
22,106
/* * gmidi.c -- USB MIDI Gadget Driver * * Copyright (C) 2006 Thumtronics Pty Ltd. * Developed for Thumtronics by Grey Innovation * Ben Williamson <ben.williamson@greyinnovation.com> * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. * * This code is based in part on: * * Gadget Zero driver, Copyright (C) 2003-2004 David Brownell. * USB Audio driver, Copyright (C) 2002 by Takashi Iwai. * USB MIDI driver, Copyright (C) 2002-2005 Clemens Ladisch. * * Refer to the USB Device Class Definition for MIDI Devices: * http://www.usb.org/developers/devclass_docs/midi10.pdf */ /* #define VERBOSE_DEBUG */ #include <linux/kernel.h> #include <linux/utsname.h> #include <linux/device.h> #include <sound/driver.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/audio.h> #include <linux/usb/midi.h> #include "gadget_chips.h" MODULE_AUTHOR("Ben Williamson"); MODULE_LICENSE("GPL v2"); #define DRIVER_VERSION "25 Jul 2006" static const char shortname[] = "g_midi"; static const char longname[] = "MIDI Gadget"; static int index = SNDRV_DEFAULT_IDX1; static char *id = SNDRV_DEFAULT_STR1; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for the USB MIDI Gadget adapter."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for the USB MIDI Gadget adapter."); /* Some systems will want different product identifers published in the * device descriptor, either numbers or strings or both. These string * parameters are in UTF-8 (superset of ASCII's 7 bit characters). */ static ushort idVendor; module_param(idVendor, ushort, S_IRUGO); MODULE_PARM_DESC(idVendor, "USB Vendor ID"); static ushort idProduct; module_param(idProduct, ushort, S_IRUGO); MODULE_PARM_DESC(idProduct, "USB Product ID"); static ushort bcdDevice; module_param(bcdDevice, ushort, S_IRUGO); MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); static char *iManufacturer; module_param(iManufacturer, charp, S_IRUGO); MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); static char *iProduct; module_param(iProduct, charp, S_IRUGO); MODULE_PARM_DESC(iProduct, "USB Product string"); static char *iSerialNumber; module_param(iSerialNumber, charp, S_IRUGO); MODULE_PARM_DESC(iSerialNumber, "SerialNumber"); /* * this version autoconfigures as much as possible, * which is reasonable for most "bulk-only" drivers. */ static const char *EP_IN_NAME; static const char *EP_OUT_NAME; /* big enough to hold our biggest descriptor */ #define USB_BUFSIZ 256 /* This is a gadget, and the IN/OUT naming is from the host's perspective. USB -> OUT endpoint -> rawmidi USB <- IN endpoint <- rawmidi */ struct gmidi_in_port { struct gmidi_device* dev; int active; uint8_t cable; /* cable number << 4 */ uint8_t state; #define STATE_UNKNOWN 0 #define STATE_1PARAM 1 #define STATE_2PARAM_1 2 #define STATE_2PARAM_2 3 #define STATE_SYSEX_0 4 #define STATE_SYSEX_1 5 #define STATE_SYSEX_2 6 uint8_t data[2]; }; struct gmidi_device { spinlock_t lock; struct usb_gadget *gadget; struct usb_request *req; /* for control responses */ u8 config; struct usb_ep *in_ep, *out_ep; struct snd_card *card; struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *in_substream; struct snd_rawmidi_substream *out_substream; /* For the moment we only support one port in each direction, but in_port is kept as a separate struct so we can have more later. 
*/ struct gmidi_in_port in_port; unsigned long out_triggered; struct tasklet_struct tasklet; }; static void gmidi_transmit(struct gmidi_device* dev, struct usb_request* req); #define DBG(d, fmt, args...) \ dev_dbg(&(d)->gadget->dev , fmt , ## args) #define VDBG(d, fmt, args...) \ dev_vdbg(&(d)->gadget->dev , fmt , ## args) #define ERROR(d, fmt, args...) \ dev_err(&(d)->gadget->dev , fmt , ## args) #define WARN(d, fmt, args...) \ dev_warn(&(d)->gadget->dev , fmt , ## args) #define INFO(d, fmt, args...) \ dev_info(&(d)->gadget->dev , fmt , ## args) static unsigned buflen = 256; static unsigned qlen = 32; module_param(buflen, uint, S_IRUGO); module_param(qlen, uint, S_IRUGO); /* Thanks to Grey Innovation for donating this product ID. * * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ #define DRIVER_VENDOR_NUM 0x17b3 /* Grey Innovation */ #define DRIVER_PRODUCT_NUM 0x0004 /* Linux-USB "MIDI Gadget" */ /* * DESCRIPTORS ... most are static, but strings and (full) * configuration descriptors are built on demand. */ #define STRING_MANUFACTURER 25 #define STRING_PRODUCT 42 #define STRING_SERIAL 101 #define STRING_MIDI_GADGET 250 /* We only have the one configuration, it's number 1. */ #define GMIDI_CONFIG 1 /* We have two interfaces- AudioControl and MIDIStreaming */ #define GMIDI_AC_INTERFACE 0 #define GMIDI_MS_INTERFACE 1 #define GMIDI_NUM_INTERFACES 2 DECLARE_USB_AC_HEADER_DESCRIPTOR(1); DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(1); /* B.1 Device Descriptor */ static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = __constant_cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_PER_INTERFACE, .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_NUM), .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_NUM), .iManufacturer = STRING_MANUFACTURER, .iProduct = STRING_PRODUCT, .bNumConfigurations = 1, }; /* B.2 Configuration Descriptor */ static struct usb_config_descriptor config_desc = { .bLength = USB_DT_CONFIG_SIZE, .bDescriptorType = USB_DT_CONFIG, /* compute wTotalLength on the fly */ .bNumInterfaces = GMIDI_NUM_INTERFACES, .bConfigurationValue = GMIDI_CONFIG, .iConfiguration = STRING_MIDI_GADGET, /* * FIXME: When embedding this driver in a device, * these need to be set to reflect the actual * power properties of the device. Is it selfpowered? 
*/ .bmAttributes = USB_CONFIG_ATT_ONE, .bMaxPower = 1, }; /* B.3.1 Standard AC Interface Descriptor */ static const struct usb_interface_descriptor ac_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = GMIDI_AC_INTERFACE, .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, .iInterface = STRING_MIDI_GADGET, }; /* B.3.2 Class-Specific AC Interface Descriptor */ static const struct usb_ac_header_descriptor_1 ac_header_desc = { .bLength = USB_DT_AC_HEADER_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = USB_MS_HEADER, .bcdADC = __constant_cpu_to_le16(0x0100), .wTotalLength = USB_DT_AC_HEADER_SIZE(1), .bInCollection = 1, .baInterfaceNr = { [0] = GMIDI_MS_INTERFACE, } }; /* B.4.1 Standard MS Interface Descriptor */ static const struct usb_interface_descriptor ms_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = GMIDI_MS_INTERFACE, .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING, .iInterface = STRING_MIDI_GADGET, }; /* B.4.2 Class-Specific MS Interface Descriptor */ static const struct usb_ms_header_descriptor ms_header_desc = { .bLength = USB_DT_MS_HEADER_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = USB_MS_HEADER, .bcdMSC = __constant_cpu_to_le16(0x0100), .wTotalLength = USB_DT_MS_HEADER_SIZE + 2*USB_DT_MIDI_IN_SIZE + 2*USB_DT_MIDI_OUT_SIZE(1), }; #define JACK_IN_EMB 1 #define JACK_IN_EXT 2 #define JACK_OUT_EMB 3 #define JACK_OUT_EXT 4 /* B.4.3 MIDI IN Jack Descriptors */ static const struct usb_midi_in_jack_descriptor jack_in_emb_desc = { .bLength = USB_DT_MIDI_IN_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = USB_MS_MIDI_IN_JACK, .bJackType = USB_MS_EMBEDDED, .bJackID = JACK_IN_EMB, }; static const struct usb_midi_in_jack_descriptor jack_in_ext_desc = { .bLength = USB_DT_MIDI_IN_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = USB_MS_MIDI_IN_JACK, .bJackType = USB_MS_EXTERNAL, .bJackID = JACK_IN_EXT, }; /* B.4.4 MIDI OUT Jack Descriptors */ static const struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc = { .bLength = USB_DT_MIDI_OUT_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK, .bJackType = USB_MS_EMBEDDED, .bJackID = JACK_OUT_EMB, .bNrInputPins = 1, .pins = { [0] = { .baSourceID = JACK_IN_EXT, .baSourcePin = 1, } } }; static const struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc = { .bLength = USB_DT_MIDI_OUT_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK, .bJackType = USB_MS_EXTERNAL, .bJackID = JACK_OUT_EXT, .bNrInputPins = 1, .pins = { [0] = { .baSourceID = JACK_IN_EMB, .baSourcePin = 1, } } }; /* B.5.1 Standard Bulk OUT Endpoint Descriptor */ static struct usb_endpoint_descriptor bulk_out_desc = { .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; /* B.5.2 Class-specific MS Bulk OUT Endpoint Descriptor */ static const struct usb_ms_endpoint_descriptor_1 ms_out_desc = { .bLength = USB_DT_MS_ENDPOINT_SIZE(1), .bDescriptorType = USB_DT_CS_ENDPOINT, .bDescriptorSubtype = USB_MS_GENERAL, .bNumEmbMIDIJack = 1, .baAssocJackID = { [0] = JACK_IN_EMB, } }; /* B.6.1 Standard Bulk IN Endpoint Descriptor */ static struct usb_endpoint_descriptor bulk_in_desc = { .bLength = 
USB_DT_ENDPOINT_AUDIO_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; /* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */ static const struct usb_ms_endpoint_descriptor_1 ms_in_desc = { .bLength = USB_DT_MS_ENDPOINT_SIZE(1), .bDescriptorType = USB_DT_CS_ENDPOINT, .bDescriptorSubtype = USB_MS_GENERAL, .bNumEmbMIDIJack = 1, .baAssocJackID = { [0] = JACK_OUT_EMB, } }; static const struct usb_descriptor_header *gmidi_function [] = { (struct usb_descriptor_header *)&ac_interface_desc, (struct usb_descriptor_header *)&ac_header_desc, (struct usb_descriptor_header *)&ms_interface_desc, (struct usb_descriptor_header *)&ms_header_desc, (struct usb_descriptor_header *)&jack_in_emb_desc, (struct usb_descriptor_header *)&jack_in_ext_desc, (struct usb_descriptor_header *)&jack_out_emb_desc, (struct usb_descriptor_header *)&jack_out_ext_desc, /* If you add more jacks, update ms_header_desc.wTotalLength */ (struct usb_descriptor_header *)&bulk_out_desc, (struct usb_descriptor_header *)&ms_out_desc, (struct usb_descriptor_header *)&bulk_in_desc, (struct usb_descriptor_header *)&ms_in_desc, NULL, }; static char manufacturer[50]; static char product_desc[40] = "MIDI Gadget"; static char serial_number[20]; /* static strings, in UTF-8 */ static struct usb_string strings [] = { { STRING_MANUFACTURER, manufacturer, }, { STRING_PRODUCT, product_desc, }, { STRING_SERIAL, serial_number, }, { STRING_MIDI_GADGET, longname, }, { } /* end of list */ }; static struct usb_gadget_strings stringtab = { .language = 0x0409, /* en-us */ .strings = strings, }; static int config_buf(struct usb_gadget *gadget, u8 *buf, u8 type, unsigned index) { int len; /* only one configuration */ if (index != 0) { return -EINVAL; } len = usb_gadget_config_buf(&config_desc, buf, USB_BUFSIZ, gmidi_function); if (len < 0) { return len; } ((struct usb_config_descriptor *)buf)->bDescriptorType = type; return len; } static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length) { struct usb_request *req; req = usb_ep_alloc_request(ep, GFP_ATOMIC); if (req) { req->length = length; req->buf = kmalloc(length, GFP_ATOMIC); if (!req->buf) { usb_ep_free_request(ep, req); req = NULL; } } return req; } static void free_ep_req(struct usb_ep *ep, struct usb_request *req) { kfree(req->buf); usb_ep_free_request(ep, req); } static const uint8_t gmidi_cin_length[] = { 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1 }; /* * Receives a chunk of MIDI data. */ static void gmidi_read_data(struct usb_ep *ep, int cable, uint8_t *data, int length) { struct gmidi_device *dev = ep->driver_data; /* cable is ignored, because for now we only have one. */ if (!dev->out_substream) { /* Nobody is listening - throw it on the floor. */ return; } if (!test_bit(dev->out_substream->number, &dev->out_triggered)) { return; } snd_rawmidi_receive(dev->out_substream, data, length); } static void gmidi_handle_out_data(struct usb_ep *ep, struct usb_request *req) { unsigned i; u8 *buf = req->buf; for (i = 0; i + 3 < req->actual; i += 4) { if (buf[i] != 0) { int cable = buf[i] >> 4; int length = gmidi_cin_length[buf[i] & 0x0f]; gmidi_read_data(ep, cable, &buf[i + 1], length); } } } static void gmidi_complete(struct usb_ep *ep, struct usb_request *req) { struct gmidi_device *dev = ep->driver_data; int status = req->status; switch (status) { case 0: /* normal completion */ if (ep == dev->out_ep) { /* we received stuff. 
req is queued again, below */ gmidi_handle_out_data(ep, req); } else if (ep == dev->in_ep) { /* our transmit completed. see if there's more to go. gmidi_transmit eats req, don't queue it again. */ gmidi_transmit(dev, req); return; } break; /* this endpoint is normally active while we're configured */ case -ECONNABORTED: /* hardware forced ep reset */ case -ECONNRESET: /* request dequeued */ case -ESHUTDOWN: /* disconnect from host */ VDBG(dev, "%s gone (%d), %d/%d\n", ep->name, status, req->actual, req->length); if (ep == dev->out_ep) { gmidi_handle_out_data(ep, req); } free_ep_req(ep, req); return; case -EOVERFLOW: /* buffer overrun on read means that * we didn't provide a big enough * buffer. */ default: DBG(dev, "%s complete --> %d, %d/%d\n", ep->name, status, req->actual, req->length); break; case -EREMOTEIO: /* short read */ break; } status = usb_ep_queue(ep, req, GFP_ATOMIC); if (status) { ERROR(dev, "kill %s: resubmit %d bytes --> %d\n", ep->name, req->length, status); usb_ep_set_halt(ep); /* FIXME recover later ... somehow */ } } static int set_gmidi_config(struct gmidi_device *dev, gfp_t gfp_flags) { int err = 0; struct usb_request *req; struct usb_ep *ep; unsigned i; err = usb_ep_enable(dev->in_ep, &bulk_in_desc); if (err) { ERROR(dev, "can't start %s: %d\n", dev->in_ep->name, err); goto fail; } dev->in_ep->driver_data = dev; err = usb_ep_enable(dev->out_ep, &bulk_out_desc); if (err) { ERROR(dev, "can't start %s: %d\n", dev->out_ep->name, err); goto fail; } dev->out_ep->driver_data = dev; /* allocate a bunch of read buffers and queue them all at once. */ ep = dev->out_ep; for (i = 0; i < qlen && err == 0; i++) { req = alloc_ep_req(ep, buflen); if (req) { req->complete = gmidi_complete; err = usb_ep_queue(ep, req, GFP_ATOMIC); if (err) { DBG(dev, "%s queue req: %d\n", ep->name, err); } } else { err = -ENOMEM; } } fail: /* caller is responsible for cleanup on error */ return err; } static void gmidi_reset_config(struct gmidi_device *dev) { if (dev->config == 0) { return; } DBG(dev, "reset config\n"); /* just disable endpoints, forcing completion of pending i/o. * all our completion handlers free their requests in this case. */ usb_ep_disable(dev->in_ep); usb_ep_disable(dev->out_ep); dev->config = 0; } /* change our operational config. this code must agree with the code * that returns config descriptors, and altsetting code. * * it's also responsible for power management interactions. some * configurations might not work with our current power sources. * * note that some device controller hardware will constrain what this * code can do, perhaps by disallowing more than one configuration or * by limiting configuration choices (like the pxa2xx). */ static int gmidi_set_config(struct gmidi_device *dev, unsigned number, gfp_t gfp_flags) { int result = 0; struct usb_gadget *gadget = dev->gadget; #if 0 /* FIXME */ /* Hacking this bit out fixes a bug where on receipt of two USB_REQ_SET_CONFIGURATION messages, we end up with no buffered OUT requests waiting for data. This is clearly hiding a bug elsewhere, because if the config didn't change then we really shouldn't do anything. */ /* Having said that, when we do "change" from config 1 to config 1, we at least gmidi_reset_config() which clears out any requests on endpoints, so it's not like we leak or anything. 
*/ if (number == dev->config) { return 0; } #endif if (gadget_is_sa1100(gadget) && dev->config) { /* tx fifo is full, but we can't clear it...*/ ERROR(dev, "can't change configurations\n"); return -ESPIPE; } gmidi_reset_config(dev); switch (number) { case GMIDI_CONFIG: result = set_gmidi_config(dev, gfp_flags); break; default: result = -EINVAL; /* FALL THROUGH */ case 0: return result; } if (!result && (!dev->in_ep || !dev->out_ep)) { result = -ENODEV; } if (result) { gmidi_reset_config(dev); } else { char *speed; switch (gadget->speed) { case USB_SPEED_LOW: speed = "low"; break; case USB_SPEED_FULL: speed = "full"; break; case USB_SPEED_HIGH: speed = "high"; break; default: speed = "?"; break; } dev->config = number; INFO(dev, "%s speed\n", speed); } return result; } static void gmidi_setup_complete(struct usb_ep *ep, struct usb_request *req) { if (req->status || req->actual != req->length) { DBG((struct gmidi_device *) ep->driver_data, "setup complete --> %d, %d/%d\n", req->status, req->actual, req->length); } } /* * The setup() callback implements all the ep0 functionality that's * not handled lower down, in hardware or the hardware driver (like * device and endpoint feature flags, and their status). It's all * housekeeping for the gadget function we're implementing. Most of * the work is in config-specific setup. */ static int gmidi_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) { struct gmidi_device *dev = get_gadget_data(gadget); struct usb_request *req = dev->req; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); /* usually this stores reply data in the pre-allocated ep0 buffer, * but config change events will reconfigure hardware. */ req->zero = 0; switch (ctrl->bRequest) { case USB_REQ_GET_DESCRIPTOR: if (ctrl->bRequestType != USB_DIR_IN) { goto unknown; } switch (w_value >> 8) { case USB_DT_DEVICE: value = min(w_length, (u16) sizeof(device_desc)); memcpy(req->buf, &device_desc, value); break; case USB_DT_CONFIG: value = config_buf(gadget, req->buf, w_value >> 8, w_value & 0xff); if (value >= 0) { value = min(w_length, (u16)value); } break; case USB_DT_STRING: /* wIndex == language code. * this driver only handles one language, you can * add string tables for other languages, using * any UTF-8 characters */ value = usb_gadget_get_string(&stringtab, w_value & 0xff, req->buf); if (value >= 0) { value = min(w_length, (u16)value); } break; } break; /* currently two configs, two speeds */ case USB_REQ_SET_CONFIGURATION: if (ctrl->bRequestType != 0) { goto unknown; } if (gadget->a_hnp_support) { DBG(dev, "HNP available\n"); } else if (gadget->a_alt_hnp_support) { DBG(dev, "HNP needs a different root port\n"); } else { VDBG(dev, "HNP inactive\n"); } spin_lock(&dev->lock); value = gmidi_set_config(dev, w_value, GFP_ATOMIC); spin_unlock(&dev->lock); break; case USB_REQ_GET_CONFIGURATION: if (ctrl->bRequestType != USB_DIR_IN) { goto unknown; } *(u8 *)req->buf = dev->config; value = min(w_length, (u16)1); break; /* until we add altsetting support, or other interfaces, * only 0/0 are possible. pxa2xx only supports 0/0 (poorly) * and already killed pending endpoint I/O. 
*/ case USB_REQ_SET_INTERFACE: if (ctrl->bRequestType != USB_RECIP_INTERFACE) { goto unknown; } spin_lock(&dev->lock); if (dev->config && w_index < GMIDI_NUM_INTERFACES && w_value == 0) { u8 config = dev->config; /* resets interface configuration, forgets about * previous transaction state (queued bufs, etc) * and re-inits endpoint state (toggle etc) * no response queued, just zero status == success. * if we had more than one interface we couldn't * use this "reset the config" shortcut. */ gmidi_reset_config(dev); gmidi_set_config(dev, config, GFP_ATOMIC); value = 0; } spin_unlock(&dev->lock); break; case USB_REQ_GET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) { goto unknown; } if (!dev->config) { break; } if (w_index >= GMIDI_NUM_INTERFACES) { value = -EDOM; break; } *(u8 *)req->buf = 0; value = min(w_length, (u16)1); break; default: unknown: VDBG(dev, "unknown control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* respond with data transfer before status phase? */ if (value >= 0) { req->length = value; req->zero = value < w_length; value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC); if (value < 0) { DBG(dev, "ep_queue --> %d\n", value); req->status = 0; gmidi_setup_complete(gadget->ep0, req); } } /* device either stalls (value < 0) or reports success */ return value; } static void gmidi_disconnect(struct usb_gadget *gadget) { struct gmidi_device *dev = get_gadget_data(gadget); unsigned long flags; spin_lock_irqsave(&dev->lock, flags); gmidi_reset_config(dev); /* a more significant application might have some non-usb * activities to quiesce here, saving resources like power * or pushing the notification up a network stack. */ spin_unlock_irqrestore(&dev->lock, flags); /* next we may get setup() calls to enumerate new connections; * or an unbind() during shutdown (including removing module). */ } static void /* __init_or_exit */ gmidi_unbind(struct usb_gadget *gadget) { struct gmidi_device *dev = get_gadget_data(gadget); struct snd_card *card; DBG(dev, "unbind\n"); card = dev->card; dev->card = NULL; if (card) { snd_card_free(card); } /* we've already been disconnected ... no i/o is active */ if (dev->req) { dev->req->length = USB_BUFSIZ; free_ep_req(gadget->ep0, dev->req); } kfree(dev); set_gadget_data(gadget, NULL); } static int gmidi_snd_free(struct snd_device *device) { return 0; } static void gmidi_transmit_packet(struct usb_request *req, uint8_t p0, uint8_t p1, uint8_t p2, uint8_t p3) { unsigned length = req->length; u8 *buf = (u8 *)req->buf + length; buf[0] = p0; buf[1] = p1; buf[2] = p2; buf[3] = p3; req->length = length + 4; } /* * Converts MIDI commands to USB MIDI packets. 
*/ static void gmidi_transmit_byte(struct usb_request *req, struct gmidi_in_port *port, uint8_t b) { uint8_t p0 = port->cable; if (b >= 0xf8) { gmidi_transmit_packet(req, p0 | 0x0f, b, 0, 0); } else if (b >= 0xf0) { switch (b) { case 0xf0: port->data[0] = b; port->state = STATE_SYSEX_1; break; case 0xf1: case 0xf3: port->data[0] = b; port->state = STATE_1PARAM; break; case 0xf2: port->data[0] = b; port->state = STATE_2PARAM_1; break; case 0xf4: case 0xf5: port->state = STATE_UNKNOWN; break; case 0xf6: gmidi_transmit_packet(req, p0 | 0x05, 0xf6, 0, 0); port->state = STATE_UNKNOWN; break; case 0xf7: switch (port->state) { case STATE_SYSEX_0: gmidi_transmit_packet(req, p0 | 0x05, 0xf7, 0, 0); break; case STATE_SYSEX_1: gmidi_transmit_packet(req, p0 | 0x06, port->data[0], 0xf7, 0); break; case STATE_SYSEX_2: gmidi_transmit_packet(req, p0 | 0x07, port->data[0], port->data[1], 0xf7); break; } port->state = STATE_UNKNOWN; break; } } else if (b >= 0x80) { port->data[0] = b; if (b >= 0xc0 && b <= 0xdf) port->state = STATE_1PARAM; else port->state = STATE_2PARAM_1; } else { /* b < 0x80 */ switch (port->state) { case STATE_1PARAM: if (port->data[0] < 0xf0) { p0 |= port->data[0] >> 4; } else { p0 |= 0x02; port->state = STATE_UNKNOWN; } gmidi_transmit_packet(req, p0, port->data[0], b, 0); break; case STATE_2PARAM_1: port->data[1] = b; port->state = STATE_2PARAM_2; break; case STATE_2PARAM_2: if (port->data[0] < 0xf0) { p0 |= port->data[0] >> 4; port->state = STATE_2PARAM_1; } else { p0 |= 0x03; port->state = STATE_UNKNOWN; } gmidi_transmit_packet(req, p0, port->data[0], port->data[1], b); break; case STATE_SYSEX_0: port->data[0] = b; port->state = STATE_SYSEX_1; break; case STATE_SYSEX_1: port->data[1] = b; port->state = STATE_SYSEX_2; break; case STATE_SYSEX_2: gmidi_transmit_packet(req, p0 | 0x04, port->data[0], port->data[1], b); port->state = STATE_SYSEX_0; break; } } } static void gmidi_transmit(struct gmidi_device *dev, struct usb_request *req) { struct usb_ep *ep = dev->in_ep; struct gmidi_in_port *port = &dev->in_port; if (!ep) { return; } if (!req) { req = alloc_ep_req(ep, buflen); } if (!req) { ERROR(dev, "gmidi_transmit: alloc_ep_request failed\n"); return; } req->length = 0; req->complete = gmidi_complete; if (port->active) { while (req->length + 3 < buflen) { uint8_t b; if (snd_rawmidi_transmit(dev->in_substream, &b, 1) != 1) { port->active = 0; break; } gmidi_transmit_byte(req, port, b); } } if (req->length > 0) { usb_ep_queue(ep, req, GFP_ATOMIC); } else { free_ep_req(ep, req); } } static void gmidi_in_tasklet(unsigned long data) { struct gmidi_device *dev = (struct gmidi_device *)data; gmidi_transmit(dev, NULL); } static int gmidi_in_open(struct snd_rawmidi_substream *substream) { struct gmidi_device *dev = substream->rmidi->private_data; VDBG(dev, "gmidi_in_open\n"); dev->in_substream = substream; dev->in_port.state = STATE_UNKNOWN; return 0; } static int gmidi_in_close(struct snd_rawmidi_substream *substream) { struct gmidi_device *dev = substream->rmidi->private_data; VDBG(dev, "gmidi_in_close\n"); return 0; } static void gmidi_in_trigger(struct snd_rawmidi_substream *substream, int up) { struct gmidi_device *dev = substream->rmidi->private_data; VDBG(dev, "gmidi_in_trigger %d\n", up); dev->in_port.active = up; if (up) { tasklet_hi_schedule(&dev->tasklet); } } static int gmidi_out_open(struct snd_rawmidi_substream *substream) { struct gmidi_device *dev = substream->rmidi->private_data; VDBG(dev, "gmidi_out_open\n"); dev->out_substream = substream; return 0; } static int 
gmidi_out_close(struct snd_rawmidi_substream *substream) { struct gmidi_device *dev = substream->rmidi->private_data; VDBG(dev, "gmidi_out_close\n"); return 0; } static void gmidi_out_trigger(struct snd_rawmidi_substream *substream, int up) { struct gmidi_device *dev = substream->rmidi->private_data; VDBG(dev, "gmidi_out_trigger %d\n", up); if (up) { set_bit(substream->number, &dev->out_triggered); } else { clear_bit(substream->number, &dev->out_triggered); } } static struct snd_rawmidi_ops gmidi_in_ops = { .open = gmidi_in_open, .close = gmidi_in_close, .trigger = gmidi_in_trigger, }; static struct snd_rawmidi_ops gmidi_out_ops = { .open = gmidi_out_open, .close = gmidi_out_close, .trigger = gmidi_out_trigger }; /* register as a sound "card" */ static int gmidi_register_card(struct gmidi_device *dev) { struct snd_card *card; struct snd_rawmidi *rmidi; int err; int out_ports = 1; int in_ports = 1; static struct snd_device_ops ops = { .dev_free = gmidi_snd_free, }; card = snd_card_new(index, id, THIS_MODULE, 0); if (!card) { ERROR(dev, "snd_card_new failed\n"); err = -ENOMEM; goto fail; } dev->card = card; err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, dev, &ops); if (err < 0) { ERROR(dev, "snd_device_new failed: error %d\n", err); goto fail; } strcpy(card->driver, longname); strcpy(card->longname, longname); strcpy(card->shortname, shortname); /* Set up rawmidi */ dev->in_port.dev = dev; dev->in_port.active = 0; snd_component_add(card, "MIDI"); err = snd_rawmidi_new(card, "USB MIDI Gadget", 0, out_ports, in_ports, &rmidi); if (err < 0) { ERROR(dev, "snd_rawmidi_new failed: error %d\n", err); goto fail; } dev->rmidi = rmidi; strcpy(rmidi->name, card->shortname); rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = dev; /* Yes, rawmidi OUTPUT = USB IN, and rawmidi INPUT = USB OUT. It's an upside-down world being a gadget. */ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops); snd_card_set_dev(card, &dev->gadget->dev); /* register it - we're ready to go */ err = snd_card_register(card); if (err < 0) { ERROR(dev, "snd_card_register failed\n"); goto fail; } VDBG(dev, "gmidi_register_card finished ok\n"); return 0; fail: if (dev->card) { snd_card_free(dev->card); dev->card = NULL; } return err; } /* * Creates an output endpoint, and initializes output ports. */ static int __devinit gmidi_bind(struct usb_gadget *gadget) { struct gmidi_device *dev; struct usb_ep *in_ep, *out_ep; int gcnum, err = 0; /* support optional vendor/distro customization */ if (idVendor) { if (!idProduct) { printk(KERN_ERR "idVendor needs idProduct!\n"); return -ENODEV; } device_desc.idVendor = cpu_to_le16(idVendor); device_desc.idProduct = cpu_to_le16(idProduct); if (bcdDevice) { device_desc.bcdDevice = cpu_to_le16(bcdDevice); } } if (iManufacturer) { strlcpy(manufacturer, iManufacturer, sizeof(manufacturer)); } else { snprintf(manufacturer, sizeof(manufacturer), "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); } if (iProduct) { strlcpy(product_desc, iProduct, sizeof(product_desc)); } if (iSerialNumber) { device_desc.iSerialNumber = STRING_SERIAL, strlcpy(serial_number, iSerialNumber, sizeof(serial_number)); } /* Bulk-only drivers like this one SHOULD be able to * autoconfigure on any sane usb controller driver, * but there may also be important quirks to address. 
*/ usb_ep_autoconfig_reset(gadget); in_ep = usb_ep_autoconfig(gadget, &bulk_in_desc); if (!in_ep) { autoconf_fail: printk(KERN_ERR "%s: can't autoconfigure on %s\n", shortname, gadget->name); return -ENODEV; } EP_IN_NAME = in_ep->name; in_ep->driver_data = in_ep; /* claim */ out_ep = usb_ep_autoconfig(gadget, &bulk_out_desc); if (!out_ep) { goto autoconf_fail; } EP_OUT_NAME = out_ep->name; out_ep->driver_data = out_ep; /* claim */ gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) { device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum); } else { /* gmidi is so simple (no altsettings) that * it SHOULD NOT have problems with bulk-capable hardware. * so warn about unrecognized controllers, don't panic. */ printk(KERN_WARNING "%s: controller '%s' not recognized\n", shortname, gadget->name); device_desc.bcdDevice = __constant_cpu_to_le16(0x9999); } /* ok, we made sense of the hardware ... */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { return -ENOMEM; } spin_lock_init(&dev->lock); dev->gadget = gadget; dev->in_ep = in_ep; dev->out_ep = out_ep; set_gadget_data(gadget, dev); tasklet_init(&dev->tasklet, gmidi_in_tasklet, (unsigned long)dev); /* preallocate control response and buffer */ dev->req = alloc_ep_req(gadget->ep0, USB_BUFSIZ); if (!dev->req) { err = -ENOMEM; goto fail; } dev->req->complete = gmidi_setup_complete; device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket; gadget->ep0->driver_data = dev; INFO(dev, "%s, version: " DRIVER_VERSION "\n", longname); INFO(dev, "using %s, OUT %s IN %s\n", gadget->name, EP_OUT_NAME, EP_IN_NAME); /* register as an ALSA sound card */ err = gmidi_register_card(dev); if (err < 0) { goto fail; } VDBG(dev, "gmidi_bind finished ok\n"); return 0; fail: gmidi_unbind(gadget); return err; } static void gmidi_suspend(struct usb_gadget *gadget) { struct gmidi_device *dev = get_gadget_data(gadget); if (gadget->speed == USB_SPEED_UNKNOWN) { return; } DBG(dev, "suspend\n"); } static void gmidi_resume(struct usb_gadget *gadget) { struct gmidi_device *dev = get_gadget_data(gadget); DBG(dev, "resume\n"); } static struct usb_gadget_driver gmidi_driver = { .speed = USB_SPEED_FULL, .function = (char *)longname, .bind = gmidi_bind, .unbind = gmidi_unbind, .setup = gmidi_setup, .disconnect = gmidi_disconnect, .suspend = gmidi_suspend, .resume = gmidi_resume, .driver = { .name = (char *)shortname, .owner = THIS_MODULE, }, }; static int __init gmidi_init(void) { return usb_gadget_register_driver(&gmidi_driver); } module_init(gmidi_init); static void __exit gmidi_cleanup(void) { usb_gadget_unregister_driver(&gmidi_driver); } module_exit(gmidi_cleanup);
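/* --- Editor-added illustration (not part of the original gmidi.c) ---
 * Standalone sketch of the 4-byte USB-MIDI event packing that
 * gmidi_transmit_byte() performs above: byte 0 carries the cable number
 * in the high nibble and the Code Index Number (CIN) in the low nibble,
 * bytes 1-3 carry the MIDI message itself. Builds with any hosted C
 * compiler; it does not touch the gadget or ALSA APIs.
 */
#include <stdio.h>
#include <stdint.h>

static void pack_note_on(uint8_t cable, uint8_t channel,
			 uint8_t note, uint8_t velocity, uint8_t out[4])
{
	out[0] = (uint8_t)((cable << 4) | 0x09);	/* CIN 0x9 = Note On */
	out[1] = (uint8_t)(0x90 | (channel & 0x0f));	/* MIDI status byte */
	out[2] = note;
	out[3] = velocity;
}

int main(void)
{
	uint8_t pkt[4];

	pack_note_on(0, 0, 60, 100, pkt);	/* middle C on cable 0, channel 1 */
	printf("%02x %02x %02x %02x\n", pkt[0], pkt[1], pkt[2], pkt[3]);
	return 0;
}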
loverlucia/linux-2.6.24
drivers/usb/gadget/gmidi.c
C
gpl-2.0
33,751
/* * Contiguous Memory Allocator for DMA mapping framework * Copyright (c) 2010-2011 by Samsung Electronics. * Written by: * Marek Szyprowski <m.szyprowski@samsung.com> * Michal Nazarewicz <mina86@mina86.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License or (at your optional) any later version of the license. * * The Linux Foundation chooses to take subject only to the GPLv2 license * terms, and distributes only under these terms. */ #define pr_fmt(fmt) "cma: " fmt #ifdef CONFIG_CMA_DEBUG #ifndef DEBUG # define DEBUG #endif #endif #include <asm/page.h> #include <asm/dma-contiguous.h> #include <linux/memblock.h> #include <linux/err.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/page-isolation.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/mm_types.h> #include <linux/dma-contiguous.h> #include <trace/events/kmem.h> #include <htc_debug/stability/htc_report_meminfo.h> #ifndef SZ_1M #define SZ_1M (1 << 20) #endif struct cma { unsigned long base_pfn; unsigned long count; unsigned long *bitmap; bool in_system; struct mutex lock; }; static DEFINE_MUTEX(cma_mutex); struct cma *dma_contiguous_def_area; phys_addr_t dma_contiguous_def_base; static struct cma_area { phys_addr_t base; unsigned long size; struct cma *cma; const char *name; bool to_system; } cma_areas[MAX_CMA_AREAS]; static unsigned cma_area_count; static struct cma_map { phys_addr_t base; struct device *dev; } cma_maps[MAX_CMA_AREAS] __initdata; static unsigned cma_map_count __initdata; static struct cma *cma_get_area(phys_addr_t base) { int i; for (i = 0; i < cma_area_count; i++) if (cma_areas[i].base == base) return cma_areas[i].cma; return NULL; } static struct cma *cma_get_area_by_name(const char *name) { int i; if (!name) return NULL; for (i = 0; i < cma_area_count; i++) if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0) return cma_areas[i].cma; return NULL; } #ifdef CONFIG_CMA_SIZE_MBYTES #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES #else #define CMA_SIZE_MBYTES 0 #endif /* * Default global CMA area size can be defined in kernel's .config. * This is usefull mainly for distro maintainers to create a kernel * that works correctly for most supported systems. * The size can be set in bytes or as a percentage of the total memory * in the system. * * Users, who want to set the size of global CMA area for their system * should use cma= kernel parameter. */ static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M; static phys_addr_t size_cmdline = -1; static int __init early_cma(char *p) { pr_debug("%s(%s)\n", __func__, p); size_cmdline = memparse(p, &p); return 0; } early_param("cma", early_cma); #ifdef CONFIG_CMA_SIZE_PERCENTAGE static phys_addr_t __init __maybe_unused cma_early_percent_memory(void) { struct memblock_region *reg; unsigned long total_pages = 0; /* * We cannot use memblock_phys_mem_size() here, because * memblock_analyze() has not been called yet. 
*/ for_each_memblock(memory, reg) total_pages += memblock_region_memory_end_pfn(reg) - memblock_region_memory_base_pfn(reg); return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT; } #else static inline __maybe_unused phys_addr_t cma_early_percent_memory(void) { return 0; } #endif static __init int cma_activate_area(unsigned long base_pfn, unsigned long count) { unsigned long pfn = base_pfn; unsigned i = count >> pageblock_order; struct zone *zone; WARN_ON_ONCE(!pfn_valid(pfn)); zone = page_zone(pfn_to_page(pfn)); do { unsigned j; base_pfn = pfn; for (j = pageblock_nr_pages; j; --j, pfn++) { WARN_ON_ONCE(!pfn_valid(pfn)); if (page_zone(pfn_to_page(pfn)) != zone) return -EINVAL; } init_cma_reserved_pageblock(pfn_to_page(base_pfn)); } while (--i); return 0; } static __init struct cma *cma_create_area(unsigned long base_pfn, unsigned long count, bool system) { int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); struct cma *cma; int ret = -ENOMEM; pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count); cma = kmalloc(sizeof *cma, GFP_KERNEL); if (!cma) return ERR_PTR(-ENOMEM); cma->base_pfn = base_pfn; cma->count = count; cma->in_system = system; cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); if (!cma->bitmap) goto no_mem; if (cma->in_system) { ret = cma_activate_area(base_pfn, count); if (ret) goto error; } mutex_init(&cma->lock); pr_debug("%s: returned %p\n", __func__, (void *)cma); return cma; error: kfree(cma->bitmap); no_mem: kfree(cma); return ERR_PTR(ret); } /*****************************************************************************/ #ifdef CONFIG_OF int __init cma_fdt_scan(unsigned long node, const char *uname, int depth, void *data) { phys_addr_t base, size; unsigned long len; __be32 *prop; char *name; bool in_system; phys_addr_t limit = MEMBLOCK_ALLOC_ANYWHERE; if (!of_get_flat_dt_prop(node, "linux,contiguous-region", NULL)) return 0; prop = of_get_flat_dt_prop(node, "reg", &len); if (!prop || (len != 2 * sizeof(unsigned long))) return 0; base = be32_to_cpu(prop[0]); size = be32_to_cpu(prop[1]); name = of_get_flat_dt_prop(node, "label", NULL); in_system = of_get_flat_dt_prop(node, "linux,reserve-region", NULL) ? 0 : 1; prop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL); if (prop) limit = be32_to_cpu(prop[0]); pr_info("Found %s, memory base %lx, size %ld MiB, limit %pa\n", uname, (unsigned long)base, (unsigned long)size / SZ_1M, &limit); dma_contiguous_reserve_area(size, &base, limit, name, in_system); return 0; } #endif /** * dma_contiguous_reserve() - reserve area for contiguous memory handling * @limit: End address of the reserved memory (optional, 0 for any). * * This function reserves memory from early allocator. It should be * called by arch specific code once the early allocator (memblock or bootmem) * has been activated and all other subsystems have already allocated/reserved * memory. It reserves contiguous areas for global, device independent * allocations and (optionally) all areas defined in device tree structures. 
*/ void __init dma_contiguous_reserve(phys_addr_t limit) { phys_addr_t sel_size = 0; pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); if (size_cmdline != -1) { sel_size = size_cmdline; } else { #ifdef CONFIG_CMA_SIZE_SEL_MBYTES sel_size = size_bytes; #elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE) sel_size = cma_early_percent_memory(); #elif defined(CONFIG_CMA_SIZE_SEL_MIN) sel_size = min(size_bytes, cma_early_percent_memory()); #elif defined(CONFIG_CMA_SIZE_SEL_MAX) sel_size = max(size_bytes, cma_early_percent_memory()); #endif } if (sel_size) { phys_addr_t base = 0; pr_debug("%s: reserving %ld MiB for global area\n", __func__, (unsigned long)sel_size / SZ_1M); if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL, true) == 0) dma_contiguous_def_base = base; } #ifdef CONFIG_OF of_scan_flat_dt(cma_fdt_scan, NULL); #endif }; /** * dma_contiguous_reserve_area() - reserve custom contiguous area * @size: Size of the reserved area (in bytes), * @base: Pointer to the base address of the reserved area, also used to return * base address of the actually reserved area, optional, use pointer to * 0 for any * @limit: End address of the reserved memory (optional, 0 for any). * * This function reserves memory from early allocator. It should be * called by arch specific code once the early allocator (memblock or bootmem) * has been activated and all other subsystems have already allocated/reserved * memory. This function allows to create custom reserved areas for specific * devices. */ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base, phys_addr_t limit, const char *name, bool to_system) { phys_addr_t base = *res_base; phys_addr_t alignment; int ret = 0; pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__, (unsigned long)size, (unsigned long)base, (unsigned long)limit); /* Sanity checks */ if (cma_area_count == ARRAY_SIZE(cma_areas)) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } if (!size) return -EINVAL; /* Sanitise input arguments */ alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); base = ALIGN(base, alignment); size = ALIGN(size, alignment); limit &= ~(alignment - 1); /* Reserve memory */ if (base) { if (memblock_is_region_reserved(base, size) || memblock_reserve(base, size) < 0) { ret = -EBUSY; goto err; } } else { /* * Use __memblock_alloc_base() since * memblock_alloc_base() panic()s. */ phys_addr_t addr = __memblock_alloc_base(size, alignment, limit); if (!addr) { ret = -ENOMEM; goto err; } else { base = addr; } } /* * Each reserved area must be initialised later, when more kernel * subsystems (like slab allocator) are available. */ cma_areas[cma_area_count].base = base; cma_areas[cma_area_count].size = size; cma_areas[cma_area_count].name = name; cma_areas[cma_area_count].to_system = to_system; cma_area_count++; *res_base = base; pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M, (unsigned long)base); /* Architecture specific contiguous memory fixup. */ dma_contiguous_early_fixup(base, size); return 0; err: pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M); return ret; } /** * dma_contiguous_add_device() - add device to custom contiguous reserved area * @dev: Pointer to device structure. * @base: Pointer to the base address of the reserved area returned by * dma_contiguous_reserve_area() function, also used to return * * This function assigns the given device to the contiguous memory area * reserved earlier by dma_contiguous_reserve_area() function. 
*/ int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base) { if (cma_map_count == ARRAY_SIZE(cma_maps)) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } cma_maps[cma_map_count].dev = dev; cma_maps[cma_map_count].base = base; cma_map_count++; return 0; } #ifdef CONFIG_OF static void cma_assign_device_from_dt(struct device *dev) { struct device_node *node; struct cma *cma; const char *name; u32 value; node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0); if (!node) return; if (of_property_read_u32(node, "reg", &value) && !value) return; if (of_property_read_string(node, "label", &name)) return; cma = cma_get_area_by_name(name); if (!cma) return; dev_set_cma_area(dev, cma); pr_info("Assigned CMA region at %lx to %s device\n", (unsigned long)value, dev_name(dev)); } static int cma_device_init_notifier_call(struct notifier_block *nb, unsigned long event, void *data) { struct device *dev = data; if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node) cma_assign_device_from_dt(dev); return NOTIFY_DONE; } static struct notifier_block cma_dev_init_nb = { .notifier_call = cma_device_init_notifier_call, }; #endif static int __init cma_init_reserved_areas(void) { struct cma *cma; int i; for (i = 0; i < cma_area_count; i++) { phys_addr_t base = PFN_DOWN(cma_areas[i].base); unsigned int count = cma_areas[i].size >> PAGE_SHIFT; bool system = cma_areas[i].to_system; cma = cma_create_area(base, count, system); if (!IS_ERR(cma)) cma_areas[i].cma = cma; } dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base); for (i = 0; i < cma_map_count; i++) { cma = cma_get_area(cma_maps[i].base); dev_set_cma_area(cma_maps[i].dev, cma); } #ifdef CONFIG_OF bus_register_notifier(&platform_bus_type, &cma_dev_init_nb); #endif return 0; } core_initcall(cma_init_reserved_areas); phys_addr_t cma_get_base(struct device *dev) { struct cma *cma = dev_get_cma_area(dev); return cma->base_pfn << PAGE_SHIFT; } bool cma_area_exist(struct device *dev) { struct cma *cma = dev_get_cma_area(dev); return (cma != dma_contiguous_def_area); } static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count) { mutex_lock(&cma->lock); bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); mutex_unlock(&cma->lock); } /** * dma_alloc_from_contiguous() - allocate pages from contiguous area * @dev: Pointer to device for which the allocation is performed. * @count: Requested number of pages. * @align: Requested alignment of pages (in PAGE_SIZE order). * * This function allocates memory buffer for specified device. It uses * device specific contiguous memory area if available or the default * global one. Requires architecture specific get_dev_cma_area() helper * function. */ struct page *dma_alloc_from_contiguous(struct device *dev, int count, unsigned int align) { unsigned long mask, pfn, pageno, start = 0; struct cma *cma = dev_get_cma_area(dev); struct page *page = NULL; int ret = 0; int tries = 0; if (!cma || !cma->count) return NULL; if (align > CONFIG_CMA_ALIGNMENT) align = CONFIG_CMA_ALIGNMENT; pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, count, align); if (!count) return NULL; mask = (1 << align) - 1; for (;;) { mutex_lock(&cma->lock); pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, start, count, mask); if (pageno >= cma->count) { mutex_unlock(&cma->lock); break; } bitmap_set(cma->bitmap, pageno, count); /* * It's safe to drop the lock here. We've marked this region for * our exclusive use. 
If the migration fails we will take the * lock again and unmark it. */ mutex_unlock(&cma->lock); pfn = cma->base_pfn + pageno; mutex_lock(&cma_mutex); if (cma->in_system) ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); mutex_unlock(&cma_mutex); if (ret == 0) { page = pfn_to_page(pfn); add_meminfo_total_pages(NR_DMA_PAGES, count); break; } else if (ret != -EBUSY) { pfn = 0; clear_cma_bitmap(cma, pfn, count); break; } clear_cma_bitmap(cma, pfn, count); tries++; trace_dma_alloc_contiguous_retry(tries); pr_debug("%s(): memory range at %p is busy, retrying\n", __func__, pfn_to_page(pfn)); /* try again with a bit different memory target */ start = pageno + mask + 1; } pr_debug("%s(): returned %p\n", __func__, page); return page; } /** * dma_release_from_contiguous() - release allocated pages * @dev: Pointer to device for which the pages were allocated. * @pages: Allocated pages. * @count: Number of allocated pages. * * This function releases memory allocated by dma_alloc_from_contiguous(). * It returns false when provided pages do not belong to contiguous area and * true otherwise. */ bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count) { struct cma *cma = dev_get_cma_area(dev); unsigned long pfn; if (!cma || !pages) return false; pr_debug("%s(page %p)\n", __func__, (void *)pages); pfn = page_to_pfn(pages); if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) return false; VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); sub_meminfo_total_pages(NR_DMA_PAGES, count); if (cma->in_system) free_contig_range(pfn, count); clear_cma_bitmap(cma, pfn, count); return true; }
xXminiWHOOPERxX/MSM8974-Mini-Reloaded-Sense-Kernel
drivers/base/dma-contiguous.c
C
gpl-2.0
15,625
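As a reading aid for the entry above: dma_alloc_from_contiguous() is, at its core, a bitmap search over the reserved area (bitmap_find_next_zero_area plus bitmap_set under cma->lock), followed by alloc_contig_range() on pfn = base_pfn + pageno, and dma_release_from_contiguous() undoes the booking via clear_cma_bitmap(). The stand-alone sketch below models only that bitmap/pfn bookkeeping in user-space C++; it is not kernel code, and every name, size and constant in it is illustrative.

// Illustrative model of the bitmap bookkeeping behind
// dma_alloc_from_contiguous()/dma_release_from_contiguous() above.
// Plain user-space C++, NOT kernel code: no cma_mutex, no page migration,
// no memblock reservation; names and sizes are made up for the example.
#include <cstdint>
#include <cstdio>
#include <vector>

struct CmaAreaModel
{
    uint64_t base_pfn;            // first page frame of the reserved area
    std::vector<bool> bitmap;     // one entry per page, true = allocated

    // Find 'count' consecutive free pages whose start index satisfies
    // 'align_mask' (mask = (1 << align) - 1, as in the real allocator),
    // mark them used and return base_pfn + start, or -1 if the area is full.
    long long alloc(std::size_t count, std::size_t align_mask)
    {
        std::size_t start = 0;
        while (true)
        {
            start = (start + align_mask) & ~align_mask;   // align candidate start
            if (start + count > bitmap.size())
                return -1;
            std::size_t used = 0;
            while (used < count && !bitmap[start + used])
                ++used;
            if (used == count)                            // free run found
            {
                for (std::size_t i = 0; i < count; ++i)
                    bitmap[start + i] = true;             // bitmap_set() equivalent
                return static_cast<long long>(base_pfn + start);
            }
            start += used + 1;                            // skip the busy page, retry
        }
    }

    // clear_cma_bitmap() equivalent: translate the pfn back to a bitmap offset.
    void release(uint64_t pfn, std::size_t count)
    {
        std::size_t offset = static_cast<std::size_t>(pfn - base_pfn);
        for (std::size_t i = 0; i < count; ++i)
            bitmap[offset + i] = false;
    }
};

int main()
{
    // 256 pages of 4 KiB (1 MiB) at an arbitrary base pfn - illustrative values.
    CmaAreaModel area{0x80000, std::vector<bool>(256, false)};

    long long pfn = area.alloc(8, (1u << 2) - 1);   // 8 pages, 4-page alignment
    if (pfn >= 0)
    {
        std::printf("allocated 8 pages at pfn 0x%llx\n",
                    static_cast<unsigned long long>(pfn));
        area.release(static_cast<uint64_t>(pfn), 8);
    }
    return 0;
}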
/*
 * Copyright (C) 2005-2008 MaNGOS <http://getmangos.com/>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ByteBuffer.h"
#include "ReactorAI.h"
#include "Errors.h"
#include "Creature.h"
#include "Log.h"
#include "ObjectAccessor.h"

#define REACTOR_VISIBLE_RANGE (26.46f)

int ReactorAI::Permissible(const Creature *creature)
{
    if( creature->isCivilian() || creature->IsNeutralToAll() )
        return PERMIT_BASE_REACTIVE;

    return PERMIT_BASE_NO;
}

void ReactorAI::MoveInLineOfSight(Unit *)
{
}

void ReactorAI::AttackStart(Unit *p)
{
    if(!p)
        return;

    if(i_creature.Attack(p,true))
    {
        DEBUG_LOG("Tag unit GUID: %u (TypeId: %u) as a victim", p->GetGUIDLow(), p->GetTypeId());

        i_creature.SetInCombatWith(p);
        p->SetInCombatWith(&i_creature);

        i_creature.AddThreat(p, 0.0f);
        i_victimGuid = p->GetGUID();
        i_creature.GetMotionMaster()->MoveChase(p);
    }
}

bool ReactorAI::IsVisible(Unit *) const
{
    return false;
}

void ReactorAI::UpdateAI(const uint32 /*time_diff*/)
{
    // update i_victimGuid if i_creature.getVictim() != 0 and changed
    if(!i_creature.SelectHostilTarget() || !i_creature.getVictim())
        return;

    i_victimGuid = i_creature.getVictim()->GetGUID();

    if( i_creature.isAttackReady() )
    {
        if( i_creature.IsWithinDistInMap(i_creature.getVictim(), ATTACK_DISTANCE))
        {
            i_creature.AttackerStateUpdate(i_creature.getVictim());
            i_creature.resetAttackTimer();
        }
    }
}

void ReactorAI::EnterEvadeMode()
{
    if( !i_creature.isAlive() )
    {
        DEBUG_LOG("Creature stopped attacking because it is dead [guid=%u]", i_creature.GetGUIDLow());
        i_creature.GetMotionMaster()->MovementExpired();
        i_creature.GetMotionMaster()->MoveIdle();

        i_victimGuid = 0;
        i_creature.CombatStop();
        i_creature.DeleteThreatList();
        return;
    }

    Unit* victim = ObjectAccessor::GetUnit(i_creature, i_victimGuid );

    if( !victim )
    {
        DEBUG_LOG("Creature stopped attacking because its victim does not exist [guid=%u]", i_creature.GetGUIDLow());
    }
    else if( victim->HasStealthAura() )
    {
        DEBUG_LOG("Creature stopped attacking because its victim is stealthed [guid=%u]", i_creature.GetGUIDLow());
    }
    else if( victim->isInFlight() )
    {
        DEBUG_LOG("Creature stopped attacking because its victim is in flight [guid=%u]", i_creature.GetGUIDLow());
    }
    else
    {
        DEBUG_LOG("Creature stopped attacking because its target %s [guid=%u]", victim->isAlive() ? "outran it" : "is dead", i_creature.GetGUIDLow());
    }

    i_creature.RemoveAllAuras();
    i_creature.DeleteThreatList();
    i_victimGuid = 0;
    i_creature.CombatStop();
    i_creature.SetLootRecipient(NULL);

    // Remove TargetedMovementGenerator from MotionMaster stack list, and add HomeMovementGenerator instead
    if( i_creature.GetMotionMaster()->GetCurrentMovementGeneratorType() == TARGETED_MOTION_TYPE )
        i_creature.GetMotionMaster()->MoveTargetedHome();
}
eyeless/mangos
src/game/ReactorAI.cpp
C++
gpl-2.0
3,734
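As a reading aid for ReactorAI.cpp above: Permissible() returns PERMIT_BASE_REACTIVE only for civilian or neutral-to-all creatures, which is how this purely reactive AI is offered to the core as a candidate; the sketch below models the "highest permit wins" selection that such a score enables. It is not MaNGOS code: the registry is a plain vector, the creature is a stub, and the numeric permit values are simplified stand-ins.

// Simplified, stand-alone model of permit-based AI selection.
// NOT MaNGOS code: registry, creature and enum values are illustrative.
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

struct CreatureStub                 // stand-in for the real Creature class
{
    bool civilian;
    bool neutral_to_all;
};

// Permit constants in the spirit of PERMIT_BASE_NO / PERMIT_BASE_REACTIVE;
// the actual enum values in MaNGOS differ.
enum Permit
{
    PERMIT_BASE_NO        = -1,
    PERMIT_BASE_REACTIVE  =  1,
    PERMIT_BASE_PROACTIVE =  2,
};

struct AICandidate
{
    std::string name;
    std::function<int(const CreatureStub&)> permissible;
};

// "Highest permit wins": the candidate whose Permissible() returns the
// largest value gets to drive the creature.
std::string SelectAI(const std::vector<AICandidate>& candidates, const CreatureStub& c)
{
    std::string best = "NullAI";
    int best_permit = PERMIT_BASE_NO;
    for (const AICandidate& cand : candidates)
    {
        int permit = cand.permissible(c);
        if (permit > best_permit)
        {
            best_permit = permit;
            best = cand.name;
        }
    }
    return best;
}

int main()
{
    std::vector<AICandidate> candidates =
    {
        { "ReactorAI", [](const CreatureStub& c)
            {
                // Same rule as ReactorAI::Permissible() above.
                return (c.civilian || c.neutral_to_all) ? PERMIT_BASE_REACTIVE
                                                        : PERMIT_BASE_NO;
            } },
        { "AggressorAI", [](const CreatureStub& c)
            {
                // Hypothetical counterpart that wants every hostile creature.
                return (!c.civilian && !c.neutral_to_all) ? PERMIT_BASE_PROACTIVE
                                                          : PERMIT_BASE_NO;
            } },
    };

    CreatureStub civilian{ true, false };
    CreatureStub guard{ false, false };
    std::printf("civilian -> %s\n", SelectAI(candidates, civilian).c_str());
    std::printf("guard    -> %s\n", SelectAI(candidates, guard).c_str());
    return 0;
}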
// Copyright 2008 Dolphin Emulator Project // Licensed under GPLv2+ // Refer to the license.txt file included. #include "Core/ConfigManager.h" #include "Core/HW/DSPHLE/MailHandler.h" #include "Core/HW/DSPHLE/UCodes/GBA.h" #include "Core/HW/DSPHLE/UCodes/UCodes.h" #include "Core/HW/DSPHLE/UCodes/Zelda.h" // Uncomment this to have a strict version of the HLE implementation, which // PanicAlerts on recoverable unknown behaviors instead of silently ignoring // them. Recommended for development. // #define STRICT_ZELDA_HLE 1 // These flags modify the behavior of the HLE implementation based on the UCode // version. When introducing a new flag, please recheck the behavior of each // UCode version. enum ZeldaUCodeFlag { // UCode for Wii where no ARAM is present. Instead of using ARAM, DMAs from // MRAM are used to transfer sound data. NO_ARAM = 0x00000001, // Multiply by two the computed Dolby positional volumes. Some UCodes do // not do that (Zelda TWW for example), others do (Zelda TP, SMG). MAKE_DOLBY_LOUDER = 0x00000002, // Light version of the UCode: no Dolby mixing, different synchronization // protocol, etc. LIGHT_PROTOCOL = 0x00000004, // If set, only consider 4 of the 6 non-Dolby mixing outputs. Early // versions of the Zelda UCode only had 4. FOUR_MIXING_DESTS = 0x00000008, // Handle smaller VPBs that are missing their 0x40-0x80 area. Very early // versions of the Zelda UCode used 0x80 sized VPBs. TINY_VPB = 0x00000010, // If set, interpret non-Dolby mixing parameters as step/current volume // instead of target/current volume. VOLUME_EXPLICIT_STEP = 0x00000020, // If set, handle synchronization per-frame instead of per-16-voices. SYNC_PER_FRAME = 0x00000040, // If set, does not support command 0D. TODO: rename. NO_CMD_0D = 0x00000080, // If set, command 0C is used for GBA crypto. This was used before the GBA // UCode and UCode switching was available. SUPPORTS_GBA_CRYPTO = 0x00000100, // If set, command 0C is used for an unknown purpose. TODO: rename. WEIRD_CMD_0C = 0x00000200, }; static const std::map<u32, u32> UCODE_FLAGS = { // GameCube IPL/BIOS, NTSC. { 0x24B22038, LIGHT_PROTOCOL | FOUR_MIXING_DESTS | TINY_VPB | VOLUME_EXPLICIT_STEP | NO_CMD_0D | WEIRD_CMD_0C }, // GameCube IPL/BIOS, PAL. { 0x6BA3B3EA, LIGHT_PROTOCOL | FOUR_MIXING_DESTS | NO_CMD_0D | WEIRD_CMD_0C }, // Pikmin 1 GC NTSC. // Animal Crossing. { 0x4BE6A5CB, LIGHT_PROTOCOL | NO_CMD_0D | SUPPORTS_GBA_CRYPTO }, // Luigi's Mansion. { 0x42F64AC4, LIGHT_PROTOCOL | NO_CMD_0D | WEIRD_CMD_0C }, // Pikmin 1 GC PAL. { 0x267FD05A, SYNC_PER_FRAME | NO_CMD_0D }, // Super Mario Sunshine. { 0x56D36052, SYNC_PER_FRAME | NO_CMD_0D }, // The Legend of Zelda: The Wind Waker. { 0x86840740, 0 }, // The Legend of Zelda: Four Swords Adventures. // Mario Kart: Double Dash. // Pikmin 2 GC NTSC. { 0x2FCDF1EC, MAKE_DOLBY_LOUDER }, // The Legend of Zelda: Twilight Princess / GC. // Donkey Kong Jungle Beat. // // TODO: These do additional filtering at frame rendering time. We don't // implement this yet. { 0x6CA33A6D, MAKE_DOLBY_LOUDER }, // The Legend of Zelda: Twilight Princess / Wii. { 0x6C3F6F94, NO_ARAM | MAKE_DOLBY_LOUDER }, // Super Mario Galaxy. // Super Mario Galaxy 2. { 0xD643001F, NO_ARAM | MAKE_DOLBY_LOUDER }, // Pikmin 1/2 New Play Control. { 0xEAEB38CC, NO_ARAM | MAKE_DOLBY_LOUDER }, // TODO: Other games that use this UCode (exhaustive list): // * Link's Crossbow Training // * The Legend of Zelda: Collector's Edition // * The Legend of Zelda: Twilight Princess / Wii (type ????, CRC ????) 
}; ZeldaUCode::ZeldaUCode(DSPHLE *dsphle, u32 crc) : UCodeInterface(dsphle, crc) { auto it = UCODE_FLAGS.find(crc); if (it == UCODE_FLAGS.end()) PanicAlert("No flags definition found for Zelda CRC %08x", crc); m_flags = it->second; m_renderer.SetFlags(m_flags); WARN_LOG(DSPHLE, "Zelda UCode loaded, crc=%08x, flags=%08x", crc, m_flags); if (m_flags & LIGHT_PROTOCOL) { m_mail_handler.PushMail(0x88881111); } else { m_mail_handler.PushMail(DSP_INIT, true); m_mail_handler.PushMail(0xF3551111); // handshake } } ZeldaUCode::~ZeldaUCode() { m_mail_handler.Clear(); } void ZeldaUCode::Update() { if (NeedsResumeMail()) { m_mail_handler.PushMail(DSP_RESUME, true); } } void ZeldaUCode::DoState(PointerWrap &p) { p.Do(m_flags); p.Do(m_mail_current_state); p.Do(m_mail_expected_cmd_mails); p.Do(m_sync_max_voice_id); p.Do(m_sync_voice_skip_flags); p.Do(m_sync_flags_second_half); p.Do(m_cmd_buffer); p.Do(m_read_offset); p.Do(m_write_offset); p.Do(m_pending_commands_count); p.Do(m_cmd_can_execute); p.Do(m_rendering_requested_frames); p.Do(m_rendering_voices_per_frame); p.Do(m_rendering_curr_frame); p.Do(m_rendering_curr_voice); m_renderer.DoState(p); DoStateShared(p); } void ZeldaUCode::HandleMail(u32 mail) { if (m_upload_setup_in_progress) // evaluated first! { PrepareBootUCode(mail); return; } if (m_flags & LIGHT_PROTOCOL) HandleMailLight(mail); else HandleMailDefault(mail); } void ZeldaUCode::HandleMailDefault(u32 mail) { switch (m_mail_current_state) { case MailState::WAITING: if (mail & 0x80000000) { if ((mail >> 16) != 0xCDD1) { PanicAlert("Rendering end mail without prefix CDD1: %08x", mail); } switch (mail & 0xFFFF) { case 1: NOTICE_LOG(DSPHLE, "UCode being replaced."); m_upload_setup_in_progress = true; SetMailState(MailState::HALTED); break; case 2: NOTICE_LOG(DSPHLE, "UCode being rebooted to ROM."); SetMailState(MailState::HALTED); m_dsphle->SetUCode(UCODE_ROM); break; case 3: m_cmd_can_execute = true; RunPendingCommands(); break; default: NOTICE_LOG(DSPHLE, "Unknown end rendering action. Halting."); case 0: NOTICE_LOG(DSPHLE, "UCode asked to halt. Stopping any processing."); SetMailState(MailState::HALTED); break; } } else if (!(mail & 0xFFFF)) { if (RenderingInProgress()) { SetMailState(MailState::RENDERING); } else { NOTICE_LOG(DSPHLE, "Sync mail (%08x) received when rendering was not active. Halting.", mail); SetMailState(MailState::HALTED); } } else { SetMailState(MailState::WRITING_CMD); m_mail_expected_cmd_mails = mail & 0xFFFF; } break; case MailState::RENDERING: if (m_flags & SYNC_PER_FRAME) { int base = m_sync_flags_second_half ? 
2 : 0; m_sync_voice_skip_flags[base] = mail >> 16; m_sync_voice_skip_flags[base + 1] = mail & 0xFFFF; if (m_sync_flags_second_half) m_sync_max_voice_id = 0xFFFF; RenderAudio(); if (m_sync_flags_second_half) SetMailState(MailState::WAITING); m_sync_flags_second_half = !m_sync_flags_second_half; } else { m_sync_max_voice_id = (((mail >> 16) & 0xF) + 1) << 4; m_sync_voice_skip_flags[(mail >> 16) & 0xFF] = mail & 0xFFFF; RenderAudio(); SetMailState(MailState::WAITING); } break; case MailState::WRITING_CMD: Write32(mail); if (--m_mail_expected_cmd_mails == 0) { m_pending_commands_count += 1; SetMailState(MailState::WAITING); RunPendingCommands(); } break; case MailState::HALTED: WARN_LOG(DSPHLE, "Received mail %08x while we're halted.", mail); break; } } void ZeldaUCode::HandleMailLight(u32 mail) { bool add_command = true; switch (m_mail_current_state) { case MailState::WAITING: if (!(mail & 0x80000000)) PanicAlert("Mail received in waiting state has MSB=0: %08x", mail); // Start of a command. We have to hardcode the number of mails required // for each command - the alternative is to rewrite command handling as // an asynchronous procedure, and we wouldn't want that, would we? Write32(mail); switch ((mail >> 24) & 0x7F) { case 0x00: m_mail_expected_cmd_mails = 0; break; case 0x01: m_mail_expected_cmd_mails = 4; break; case 0x02: m_mail_expected_cmd_mails = 2; break; // Doesn't even register as a command, just rejumps to the dispatcher. // TODO: That's true on 0x4BE6A5CB and 0x42F64AC4, what about others? case 0x03: add_command = false; break; case 0x0C: if (m_flags & SUPPORTS_GBA_CRYPTO) m_mail_expected_cmd_mails = 1; else if (m_flags & WEIRD_CMD_0C) m_mail_expected_cmd_mails = 2; else m_mail_expected_cmd_mails = 0; break; default: PanicAlert("Received unknown command in light protocol: %08x", mail); break; } if (m_mail_expected_cmd_mails) { SetMailState(MailState::WRITING_CMD); } else if (add_command) { m_pending_commands_count += 1; RunPendingCommands(); } break; case MailState::WRITING_CMD: Write32(mail); if (--m_mail_expected_cmd_mails == 0) { m_pending_commands_count += 1; SetMailState(MailState::WAITING); RunPendingCommands(); } break; case MailState::RENDERING: if (mail != 0) PanicAlert("Sync mail is not zero: %08x", mail); // No per-voice syncing in the light protocol. m_sync_max_voice_id = 0xFFFFFFFF; m_sync_voice_skip_flags.fill(0xFFFF); RenderAudio(); DSP::GenerateDSPInterruptFromDSPEmu(DSP::INT_DSP); break; case MailState::HALTED: WARN_LOG(DSPHLE, "Received mail %08x while we're halted.", mail); break; } } void ZeldaUCode::RunPendingCommands() { if (RenderingInProgress() || !m_cmd_can_execute) { // No commands can be ran while audio rendering is in progress or // waiting for an ACK. return; } while (m_pending_commands_count) { u32 cmd_mail = Read32(); if (!(cmd_mail & 0x80000000)) continue; u32 command = (cmd_mail >> 24) & 0x7f; u32 sync = cmd_mail >> 16; u32 extra_data = cmd_mail & 0xFFFF; m_pending_commands_count--; switch (command) { case 0x00: case 0x0A: case 0x0B: case 0x0F: // NOP commands. Log anyway in case we encounter a new version // where these are not NOPs anymore. NOTICE_LOG(DSPHLE, "Received a NOP command: %d", command); SendCommandAck(CommandAck::STANDARD, sync); break; case 0x03: // NOP on standard protocol but shouldn't ever happen on light protocol // since it's going directly back to the dispatcher with no ack. 
if (m_flags & LIGHT_PROTOCOL) { PanicAlert("Received a 03 command on light protocol."); break; } SendCommandAck(CommandAck::STANDARD, sync); break; case 0x04: case 0x05: case 0x06: case 0x07: case 0x08: case 0x09: // Commands that crash the DAC UCode on non-light protocols. Log and // enter HALTED mode. // // TODO: These are not crashes on light protocol, however I've never seen // them used so far. NOTICE_LOG(DSPHLE, "Received a crashy command: %d", command); SetMailState(MailState::HALTED); return; // Command 01: Setup/initialization command. Provides the address to // voice parameter blocks (VPBs) as well as some array of coefficients // used for mixing. case 0x01: { m_rendering_voices_per_frame = extra_data; m_renderer.SetVPBBaseAddress(Read32()); u16* data_ptr = (u16*)HLEMemory_Get_Pointer(Read32()); std::array<s16, 0x100> resampling_coeffs; for (size_t i = 0; i < 0x100; ++i) resampling_coeffs[i] = Common::swap16(data_ptr[i]); m_renderer.SetResamplingCoeffs(std::move(resampling_coeffs)); std::array<s16, 0x100> const_patterns; for (size_t i = 0; i < 0x100; ++i) const_patterns[i] = Common::swap16(data_ptr[0x100 + i]); m_renderer.SetConstPatterns(std::move(const_patterns)); std::array<s16, 0x80> sine_table; for (size_t i = 0; i < 0x80; ++i) sine_table[i] = Common::swap16(data_ptr[0x200 + i]); m_renderer.SetSineTable(std::move(sine_table)); u16* afc_coeffs_ptr = (u16*)HLEMemory_Get_Pointer(Read32()); std::array<s16, 0x20> afc_coeffs; for (size_t i = 0; i < 0x20; ++i) afc_coeffs[i] = Common::swap16(afc_coeffs_ptr[i]); m_renderer.SetAfcCoeffs(std::move(afc_coeffs)); m_renderer.SetReverbPBBaseAddress(Read32()); SendCommandAck(CommandAck::STANDARD, sync); break; } // Command 02: starts audio processing. NOTE: this handler uses return, // not break. This is because it hijacks the mail control flow and // stops processing of further commands until audio processing is done. case 0x02: m_rendering_requested_frames = (cmd_mail >> 16) & 0xFF; m_renderer.SetOutputVolume(cmd_mail & 0xFFFF); m_renderer.SetOutputLeftBufferAddr(Read32()); m_renderer.SetOutputRightBufferAddr(Read32()); m_rendering_curr_frame = 0; m_rendering_curr_voice = 0; if (m_flags & LIGHT_PROTOCOL) { SendCommandAck(CommandAck::STANDARD, m_rendering_requested_frames); SetMailState(MailState::RENDERING); } else { RenderAudio(); } return; // Command 0C: used for multiple purpose depending on the UCode version: // * IPL NTSC/PAL, Luigi's Mansion: TODO (unknown as of now). // * Pikmin/AC: GBA crypto. // * SMS and onwards: NOP. case 0x0C: if (m_flags & SUPPORTS_GBA_CRYPTO) { ProcessGBACrypto(Read32()); } else if (m_flags & WEIRD_CMD_0C) { // TODO NOTICE_LOG(DSPHLE, "Received an unhandled 0C command, params: %08x %08x", Read32(), Read32()); } else { WARN_LOG(DSPHLE, "Received a NOP 0C command. Flags=%08x", m_flags); } SendCommandAck(CommandAck::STANDARD, sync); break; // Command 0D: TODO: find a name and implement. case 0x0D: if (m_flags & NO_CMD_0D) { WARN_LOG(DSPHLE, "Received a 0D command which is NOP'd on this UCode."); SendCommandAck(CommandAck::STANDARD, sync); break; } WARN_LOG(DSPHLE, "CMD0D: %08x", Read32()); SendCommandAck(CommandAck::STANDARD, sync); break; // Command 0E: Sets the base address of the ARAM for Wii UCodes. Used // because the Wii does not have an ARAM, so it simulates it with MRAM // and DMAs. 
case 0x0E: if (!(m_flags & NO_ARAM)) PanicAlert("Setting base ARAM addr on non Wii DAC."); m_renderer.SetARAMBaseAddr(Read32()); SendCommandAck(CommandAck::STANDARD, sync); break; default: NOTICE_LOG(DSPHLE, "Received a non-existing command (%d), halting.", command); SetMailState(MailState::HALTED); return; } } } void ZeldaUCode::SendCommandAck(CommandAck ack_type, u16 sync_value) { if (m_flags & LIGHT_PROTOCOL) { // The light protocol uses the address of the command handler in the // DSP code instead of the command id... go figure. sync_value = 2 * ((sync_value >> 8) & 0x7F) + 0x62; m_mail_handler.PushMail(0x80000000 | sync_value); } else { u32 ack_mail = 0; switch (ack_type) { case CommandAck::STANDARD: ack_mail = DSP_SYNC; break; case CommandAck::DONE_RENDERING: ack_mail = DSP_FRAME_END; break; } m_mail_handler.PushMail(ack_mail, true); if (ack_type == CommandAck::STANDARD) m_mail_handler.PushMail(0xF3550000 | sync_value); } } void ZeldaUCode::RenderAudio() { if (!RenderingInProgress()) { WARN_LOG(DSPHLE, "Trying to render audio while no rendering should be happening."); return; } while (m_rendering_curr_frame < m_rendering_requested_frames) { if (m_rendering_curr_voice == 0) m_renderer.PrepareFrame(); while (m_rendering_curr_voice < m_rendering_voices_per_frame) { // If we are not meant to render this voice yet, go back to message // processing. if (m_rendering_curr_voice >= m_sync_max_voice_id) return; // Test the sync flag for this voice, skip it if not set. u16 flags = m_sync_voice_skip_flags[m_rendering_curr_voice >> 4]; u8 bit = 0xF - (m_rendering_curr_voice & 0xF); if (flags & (1 << bit)) m_renderer.AddVoice(m_rendering_curr_voice); m_rendering_curr_voice++; } if (!(m_flags & LIGHT_PROTOCOL)) SendCommandAck(CommandAck::STANDARD, 0xFF00 | m_rendering_curr_frame); m_renderer.FinalizeFrame(); m_rendering_curr_voice = 0; m_sync_max_voice_id = 0; m_rendering_curr_frame++; } if (!(m_flags & LIGHT_PROTOCOL)) { SendCommandAck(CommandAck::DONE_RENDERING, 0); m_cmd_can_execute = false; // Block command execution until ACK is received. } else { SetMailState(MailState::WAITING); } } // Utility to define 32 bit accessors/modifiers methods based on two 16 bit // fields named _l and _h. #define DEFINE_32BIT_ACCESSOR(field_name, name) \ u32 Get##name() const { return (field_name##_h << 16) | field_name##_l; } \ void Set##name(u32 v) \ { \ field_name##_h = v >> 16; \ field_name##_l = v & 0xFFFF; \ } #pragma pack(push, 1) struct ZeldaAudioRenderer::VPB { // If zero, skip processing this voice. u16 enabled; // If non zero, skip processing this voice. u16 done; // In 4.12 format. 1.0 (0x1000) means 0x50 raw samples from RAM/accelerator // will be "resampled" to 0x50 input samples. 2.0 (0x2000) means 2 raw // samples for one input samples. 0.5 (0x800) means one raw sample for 2 // input samples. u16 resampling_ratio; u16 unk_03; // If non zero, reset some value in the VPB when processing it. u16 reset_vpb; // If non zero, tells PCM8/PCM16 sample sources that the end of the voice // has been reached and looping should be considered if enabled. u16 end_reached; // If non zero, input samples to this VPB will be the fixed value from // VPB[33] (constant_sample_value). This is used when a voice is being // terminated in order to force silence. u16 use_constant_sample; // Number of samples that should be saved in the VPB for processing during // future frames. Should be at most TODO. u16 samples_to_keep_count; // Channel mixing information. 
Each voice can be mixed to 6 different // channels, with separate volume information. // // Used only if VPB[2C] (use_dolby_volume) is not set. Otherwise, the // values from VPB[0x20:0x2C] are used to mix to all available channels. struct Channel { // Can be treated as an ID, but in the real world this is actually the // address in DRAM of a DSP buffer. The game passes that information to // the DSP, which means the game must know the memory layout of the DSP // UCode... that's terrible. u16 id; s16 target_volume; s16 current_volume; u16 unk; }; Channel channels[6]; u16 unk_20_28[0x8]; // When using Dolby voice mixing (see VPB[2C] use_dolby_volume), the X // (left/right) and Y (front/back) coordinates of the sound. 0x00 is all // right/back, 0x7F is all left/front. Format is 0XXXXXXX0YYYYYYY. u16 dolby_voice_position; u8 GetDolbyVoiceX() const { return (dolby_voice_position >> 8) & 0x7F; } u8 GetDolbyVoiceY() const { return dolby_voice_position & 0x7F; } // How much reverbation to apply to the Dolby mixed voice. 0 is none, // 0x7FFF is the maximum value. s16 dolby_reverb_factor; // The volume for the 0x50 samples being mixed will ramp between current // and target. After the ramping is done, the current value is updated (to // match target, usually). s16 dolby_volume_current; s16 dolby_volume_target; // If non zero, use positional audio mixing. Instead of using the channels // information, use the 4 Dolby related VPB fields defined above. u16 use_dolby_volume; u16 unk_2D; u16 unk_2E; u16 unk_2F; // Fractional part of the current sample position, in 0.12 format (all // decimal part, 0x0800 = 0.5). The 4 top bits are unused. u16 current_pos_frac; u16 unk_31; // Number of remaining decoded AFC samples in the VPB internal buffer (see // VPB[0x58]) that haven't been output yet. u16 afc_remaining_decoded_samples; // Value used as the constant sample value if VPB[6] (use_constant_sample) // is set. Reset to the last sample value after each round of resampling. s16 constant_sample; // Current position in the voice. Not needed for accelerator based voice // types since the accelerator exposes a streaming based interface, but DMA // based voice types (PCM16_FROM_MRAM for example) require it to know where // to seek in the MRAM buffer. u16 current_position_h; u16 current_position_l; DEFINE_32BIT_ACCESSOR(current_position, CurrentPosition) // Number of samples that will be processed before the loop point of the // voice is reached. Maintained by the UCode and used by the game to // schedule some parameters updates. u16 samples_before_loop; u16 unk_37; // Current address used to stream samples for the ARAM sample source types. u16 current_aram_addr_h; u16 current_aram_addr_l; DEFINE_32BIT_ACCESSOR(current_aram_addr, CurrentARAMAddr) // Remaining number of samples to load before considering the voice // rendering complete and setting the done flag. Note that this is an // absolute value that does not take into account loops. If a loop of 100 // samples is played 4 times, remaining_length will have decreased by 400. u16 remaining_length_h; u16 remaining_length_l; DEFINE_32BIT_ACCESSOR(remaining_length, RemainingLength) // Stores the last 4 resampled input samples after each frame, so that they // can be used for future linear interpolation. s16 resample_buffer[4]; // TODO: document and implement. s16 prev_input_samples[0x18]; // Values from the last decoded AFC block. 
The last two values are // especially important since AFC decoding - as a variant of ADPCM - // requires the two latest sample values to be able to decode future // samples. s16 afc_remaining_samples[0x10]; s16* AFCYN2() { return &afc_remaining_samples[0xE]; } s16* AFCYN1() { return &afc_remaining_samples[0xF]; } u16 unk_68_80[0x80 - 0x68]; enum SamplesSourceType { // Simple square wave at 50% amplitude and frequency controlled via the // resampling ratio. SRC_SQUARE_WAVE = 0, // Simple saw wave at 100% amplitude and frequency controlled via the // resampling ratio. SRC_SAW_WAVE = 1, // Same "square" wave as SRC_SQUARE_WAVE but using a 0.25 duty cycle // instead of 0.5. SRC_SQUARE_WAVE_25PCT = 3, // Breaking the numerical ordering for these, but they are all related. // Simple pattern stored in the data downloaded by command 01. Playback // frequency is controlled by the resampling ratio. SRC_CONST_PATTERN_0 = 7, SRC_CONST_PATTERN_0_VARIABLE_STEP = 10, SRC_CONST_PATTERN_1 = 4, SRC_CONST_PATTERN_2 = 11, SRC_CONST_PATTERN_3 = 12, // Samples stored in ARAM at a rate of 16 samples/5 bytes, AFC encoded, // at an arbitrary sample rate (resampling is applied). SRC_AFC_LQ_FROM_ARAM = 5, // Samples stored in ARAM in PCM8 format, at an arbitrary sampling rate // (resampling is applied). SRC_PCM8_FROM_ARAM = 8, // Samples stored in ARAM at a rate of 16 samples/9 bytes, AFC encoded, // at an arbitrary sample rate (resampling is applied). SRC_AFC_HQ_FROM_ARAM = 9, // Samples stored in ARAM in PCM16 format, at an arbitrary sampling // rate (resampling is applied). SRC_PCM16_FROM_ARAM = 16, // Samples stored in MRAM at an arbitrary sample rate (resampling is // applied, unlike PCM16_FROM_MRAM_RAW). SRC_PCM16_FROM_MRAM = 33, }; u16 samples_source_type; // If non zero, indicates that the sound should loop. u16 is_looping; // For AFC looping voices, the values of the last 2 samples before the // start of the loop, in order to be able to decode samples after looping. s16 loop_yn1; s16 loop_yn2; u16 unk_84; // If true, ramp down quickly to a volume of zero, and end the voice (by // setting VPB[1] done) when it reaches zero. u16 end_requested; u16 unk_86; u16 unk_87; // Base address used to download samples data after the loop point of the // voice has been reached. u16 loop_address_h; u16 loop_address_l; DEFINE_32BIT_ACCESSOR(loop_address, LoopAddress) // Offset (in number of raw samples) of the start of the loop area in the // voice. Note: some sample sources only use the _h part of this. // // TODO: rename to length? confusion with remaining_length... u16 loop_start_position_h; u16 loop_start_position_l; DEFINE_32BIT_ACCESSOR(loop_start_position, LoopStartPosition) // Base address used to download samples data before the loop point of the // voice has been reached. u16 base_address_h; u16 base_address_l; DEFINE_32BIT_ACCESSOR(base_address, BaseAddress) u16 padding[0xC0]; // These next two functions are terrible hacks used in order to support two // different VPB sizes. // Transforms from an NTSC-IPL type 0x80-sized VPB to a full size VPB. void Uncompress() { u16* words = (u16*)this; // RO part of the VPB is from 0x40-0x80 instead of 0x80-0xC0. for (int i = 0; i < 0x40; ++i) { words[0x80 + i] = words[0x40 + i]; words[0x40 + i] = 0; } // AFC decoded samples are offset by 0x28. for (int i = 0; i < 0x10; ++i) { words[0x58 + i] = words[0x30 + i]; words[0x30 + i] = 0; } // Most things are offset by 0x18 because no Dolby mixing. 
for (int i = 0; i < 0x18; ++i) { words[0x30 + i] = words[0x18 + i]; words[0x18 + i] = 0; } } // Transforms from a full size VPB to an NTSC-IPL 0x80-sized VPB. void Compress() { u16* words = (u16*)this; for (int i = 0; i < 0x18; ++i) { words[0x18 + i] = words[0x30 + i]; words[0x30 + i] = 0; } for (int i = 0; i < 0x10; ++i) { words[0x30 + i] = words[0x58 + i]; words[0x58 + i] = 0; } for (int i = 0; i < 0x40; ++i) { words[0x40 + i] = words[0x80 + i]; words[0x80 + i] = 0; } } }; struct ReverbPB { // If zero, skip this reverb PB. u16 enabled; // Size of the circular buffer in MRAM, expressed in number of 0x50 samples // blocks (0xA0 bytes). u16 circular_buffer_size; // Base address of the circular buffer in MRAM. u16 circular_buffer_base_h; u16 circular_buffer_base_l; struct Destination { u16 buffer_id; // See VPB::Channel::id. u16 volume; // 1.15 format. }; Destination dest[2]; // Coefficients for an 8-tap filter applied to each reverb buffer before // adding its data to the destination. s16 filter_coeffs[8]; }; #pragma pack(pop) void ZeldaAudioRenderer::PrepareFrame() { if (m_prepared) return; m_buf_front_left.fill(0); m_buf_front_right.fill(0); ApplyVolumeInPlace_1_15(&m_buf_back_left, 0x6784); ApplyVolumeInPlace_1_15(&m_buf_back_right, 0x6784); // TODO: Back left and back right should have a filter applied to them, // then get mixed into front left and front right. In practice, TWW never // uses this AFAICT. PanicAlert to help me find places that use this. #ifdef STRICT_ZELDA_HLE if (!(m_flags & LIGHT_PROTOCOL) && (m_buf_back_left[0] != 0 || m_buf_back_right[0] != 0)) PanicAlert("Zelda HLE using back mixing buffers"); #endif // Add reverb data from previous frame. ApplyReverb(false); AddBuffersWithVolume(m_buf_front_left_reverb.data(), m_buf_back_left_reverb.data(), 0x50, 0x7FFF); AddBuffersWithVolume(m_buf_front_right_reverb.data(), m_buf_back_left_reverb.data(), 0x50, 0xB820); AddBuffersWithVolume(m_buf_front_left_reverb.data(), m_buf_back_right_reverb.data() + 0x28, 0x28, 0xB820); AddBuffersWithVolume(m_buf_front_right_reverb.data(), m_buf_back_left_reverb.data() + 0x28, 0x28, 0x7FFF); m_buf_back_left_reverb.fill(0); m_buf_back_right_reverb.fill(0); // Prepare patterns 2/3 - they are not constant unlike 0/1. s16* pattern2 = m_const_patterns.data() + 2 * 0x40; s32 yn2 = pattern2[0x40 - 2], yn1 = pattern2[0x40 - 1], v; for (int i = 0; i < 0x40; i += 2) { v = yn2 * yn1 - (pattern2[i] << 16); yn2 = yn1; yn1 = pattern2[i]; pattern2[i] = v >> 16; v = 2 * (yn2 * yn1 + (pattern2[i + 1] << 16)); yn2 = yn1; yn1 = pattern2[i + 1]; pattern2[i + 1] = v >> 16; } s16* pattern3 = m_const_patterns.data() + 3 * 0x40; yn2 = pattern3[0x40 - 2]; yn1 = pattern3[0x40 - 1]; s16 acc = yn1; s16 step = pattern3[0] + ((yn1 * yn2 + ((yn2 << 16) + yn1)) >> 16); step = (step & 0x1FF) | 0x2000; for (s32 i = 0; i < 0x40; ++i) pattern3[i] = acc + (i + 1) * step; m_prepared = true; } void ZeldaAudioRenderer::ApplyReverb(bool post_rendering) { if (!m_reverb_pb_base_addr) { #ifdef STRICT_ZELDA_HLE PanicAlert("Trying to apply reverb without available parameters."); #endif return; } // Each of the 4 RPBs maps to one of these buffers. 
MixingBuffer* reverb_buffers[4] = { &m_buf_unk0_reverb, &m_buf_unk1_reverb, &m_buf_front_left_reverb, &m_buf_front_right_reverb, }; std::array<s16, 8>* last8_samples_buffers[4] = { &m_buf_unk0_reverb_last8, &m_buf_unk1_reverb_last8, &m_buf_front_left_reverb_last8, &m_buf_front_right_reverb_last8, }; u16* rpb_base_ptr = (u16*)HLEMemory_Get_Pointer(m_reverb_pb_base_addr); for (u16 rpb_idx = 0; rpb_idx < 4; ++rpb_idx) { ReverbPB rpb; u16* rpb_raw_ptr = reinterpret_cast<u16*>(&rpb); for (size_t i = 0; i < sizeof (ReverbPB) / 2; ++i) rpb_raw_ptr[i] = Common::swap16(rpb_base_ptr[rpb_idx * sizeof (ReverbPB) / 2 + i]); if (!rpb.enabled) continue; u16 mram_buffer_idx = m_reverb_pb_frames_count[rpb_idx]; u32 mram_addr = ((rpb.circular_buffer_base_h << 16) | rpb.circular_buffer_base_l) + mram_buffer_idx * 0x50 * sizeof (s16); s16* mram_ptr = (s16*)HLEMemory_Get_Pointer(mram_addr); if (!post_rendering) { // 8 more samples because of the filter order. The first 8 samples // are the last 8 samples of the previous frame. std::array<s16, 0x58> buffer; for (u16 i = 0; i < 8; ++i) buffer[i] = (*last8_samples_buffers[rpb_idx])[i]; for (u16 i = 0; i < 0x50; ++i) buffer[8 + i] = Common::swap16(mram_ptr[i]); for (u16 i = 0; i < 8; ++i) (*last8_samples_buffers[rpb_idx])[i] = buffer[0x50 + i]; auto ApplyFilter = [&]() { // Filter the buffer using provided coefficients. for (u16 i = 0; i < 0x50; ++i) { s32 sample = 0; for (u16 j = 0; j < 8; ++j) sample += (s32)buffer[i + j] * rpb.filter_coeffs[j]; sample >>= 15; buffer[i] = MathUtil::Clamp(sample, -0x8000, 0x7FFF); } }; // LSB set -> pre-filtering. if (rpb.enabled & 1) ApplyFilter(); for (const auto& dest : rpb.dest) { if (dest.buffer_id == 0) continue; MixingBuffer* dest_buffer = BufferForID(dest.buffer_id); if (!dest_buffer) { #ifdef STRICT_ZELDA_HLE PanicAlert("RPB mixing to an unknown buffer: %04x", dest.buffer_id); #endif continue; } AddBuffersWithVolume(dest_buffer->data(), buffer.data(), 0x50, dest.volume); } // LSB not set, bit 1 set -> post-filtering. if (rpb.enabled & 2) ApplyFilter(); for (u16 i = 0; i < 0x50; ++i) (*reverb_buffers[rpb_idx])[i] = buffer[i]; } else { MixingBuffer* buffer = reverb_buffers[rpb_idx]; // Upload the reverb data to RAM. for (auto sample : *buffer) *mram_ptr++ = Common::swap16(sample); mram_buffer_idx = (mram_buffer_idx + 1) % rpb.circular_buffer_size; m_reverb_pb_frames_count[rpb_idx] = mram_buffer_idx; } } } ZeldaAudioRenderer::MixingBuffer* ZeldaAudioRenderer::BufferForID(u16 buffer_id) { switch (buffer_id) { case 0x0D00: return &m_buf_front_left; case 0x0D60: return &m_buf_front_right; case 0x0F40: return &m_buf_back_left; case 0x0CA0: return &m_buf_back_right; case 0x0E80: return &m_buf_front_left_reverb; case 0x0EE0: return &m_buf_front_right_reverb; case 0x0C00: return &m_buf_back_left_reverb; case 0x0C50: return &m_buf_back_right_reverb; case 0x0DC0: return &m_buf_unk0_reverb; case 0x0E20: return &m_buf_unk1_reverb; case 0x09A0: return &m_buf_unk0; // Used by the GC IPL as a reverb dest. case 0x0FA0: return &m_buf_unk1; // Used by the GC IPL as a mixing dest. case 0x0B00: return &m_buf_unk2; // Used by Pikmin 1 as a mixing dest. default: return nullptr; } } void ZeldaAudioRenderer::AddVoice(u16 voice_id) { VPB vpb; FetchVPB(voice_id, &vpb); if (!vpb.enabled || vpb.done) return; MixingBuffer input_samples; LoadInputSamples(&input_samples, &vpb); // TODO: In place effects. // TODO: IIR filter. 
if (vpb.use_dolby_volume) { if (vpb.end_requested) { vpb.dolby_volume_target = vpb.dolby_volume_current / 2; if (vpb.dolby_volume_target == 0) vpb.done = true; } // Each of these volumes is in 1.15 fixed format. s16 right_volume = m_sine_table[vpb.GetDolbyVoiceX()]; s16 back_volume = m_sine_table[vpb.GetDolbyVoiceY()]; s16 left_volume = m_sine_table[vpb.GetDolbyVoiceX() ^ 0x7F]; s16 front_volume = m_sine_table[vpb.GetDolbyVoiceY() ^ 0x7F]; // Compute volume for each quadrant. u16 shift_factor = (m_flags & MAKE_DOLBY_LOUDER) ? 15 : 16; s16 quadrant_volumes[4] = { (s16)((left_volume * front_volume) >> shift_factor), (s16)((left_volume * back_volume) >> shift_factor), (s16)((right_volume * front_volume) >> shift_factor), (s16)((right_volume * back_volume) >> shift_factor), }; // Compute the volume delta for each sample to match the difference // between current and target volume. s16 delta = vpb.dolby_volume_target - vpb.dolby_volume_current; s16 volume_deltas[4]; for (size_t i = 0; i < 4; ++i) volume_deltas[i] = ((u16)quadrant_volumes[i] * delta) >> shift_factor; // Apply master volume to each quadrant. for (size_t i = 0; i < 4; ++i) quadrant_volumes[i] = (quadrant_volumes[i] * vpb.dolby_volume_current) >> shift_factor; // Compute reverb volume and ramp deltas. s16 reverb_volumes[4], reverb_volume_deltas[4]; s16 reverb_volume_factor = (vpb.dolby_volume_current * vpb.dolby_reverb_factor) >> (shift_factor - 1); for (size_t i = 0; i < 4; ++i) { reverb_volumes[i] = (quadrant_volumes[i] * reverb_volume_factor) >> shift_factor; reverb_volume_deltas[i] = (volume_deltas[i] * vpb.dolby_reverb_factor) >> shift_factor; } struct { MixingBuffer* buffer; s16 volume; s16 volume_delta; } buffers[8] = { { &m_buf_front_left, quadrant_volumes[0], volume_deltas[0] }, { &m_buf_back_left, quadrant_volumes[1], volume_deltas[1] }, { &m_buf_front_right, quadrant_volumes[2], volume_deltas[2] }, { &m_buf_back_right, quadrant_volumes[3], volume_deltas[3] }, { &m_buf_front_left_reverb, reverb_volumes[0], reverb_volume_deltas[0] }, { &m_buf_back_left_reverb, reverb_volumes[1], reverb_volume_deltas[1] }, { &m_buf_front_right_reverb, reverb_volumes[2], reverb_volume_deltas[2] }, { &m_buf_back_right_reverb, reverb_volumes[3], reverb_volume_deltas[3] }, }; for (const auto& buffer : buffers) { AddBuffersWithVolumeRamp(buffer.buffer, input_samples, buffer.volume << 16, (buffer.volume_delta << 16) / (s32)buffer.buffer->size()); } vpb.dolby_volume_current = vpb.dolby_volume_target; } else { // TODO: Store input samples if requested by the VPB. int num_channels = (m_flags & FOUR_MIXING_DESTS) ? 4 : 6; if (vpb.end_requested) { bool all_mute = true; for (int i = 0; i < num_channels; ++i) { vpb.channels[i].target_volume = vpb.channels[i].current_volume / 2; all_mute &= (vpb.channels[i].target_volume == 0); } if (all_mute) vpb.done = true; } for (int i = 0; i < num_channels; ++i) { if (!vpb.channels[i].id) continue; // Some UCode versions provide the delta directly instead of // providing a target volume. s16 volume_delta; if (m_flags & VOLUME_EXPLICIT_STEP) volume_delta = (vpb.channels[i].target_volume << 16); else volume_delta = vpb.channels[i].target_volume - vpb.channels[i].current_volume; s32 volume_step = (volume_delta << 16) / (s32)input_samples.size(); // In 1.31 format. // TODO: The last value of each channel structure is used to // determine whether a channel should be skipped or not. Not // implemented yet. 
if (!vpb.channels[i].current_volume && !volume_step) continue; MixingBuffer* dst_buffer = BufferForID(vpb.channels[i].id); if (!dst_buffer) { #ifdef STRICT_ZELDA_HLE PanicAlert("Mixing to an unmapped buffer: %04x", vpb.channels[i].id); #endif continue; } s32 new_volume = AddBuffersWithVolumeRamp( dst_buffer, input_samples, vpb.channels[i].current_volume << 16, volume_step); vpb.channels[i].current_volume = new_volume >> 16; } } // By then the VPB has been reset, unless we're in the "constant sample" / // silence mode. if (!vpb.use_constant_sample) vpb.reset_vpb = false; StoreVPB(voice_id, &vpb); } void ZeldaAudioRenderer::FinalizeFrame() { // TODO: Dolby mixing. ApplyVolumeInPlace_4_12(&m_buf_front_left, m_output_volume); ApplyVolumeInPlace_4_12(&m_buf_front_right, m_output_volume); u16* ram_left_buffer = (u16*)HLEMemory_Get_Pointer(m_output_lbuf_addr); u16* ram_right_buffer = (u16*)HLEMemory_Get_Pointer(m_output_rbuf_addr); for (size_t i = 0; i < m_buf_front_left.size(); ++i) { ram_left_buffer[i] = Common::swap16(m_buf_front_left[i]); ram_right_buffer[i] = Common::swap16(m_buf_front_right[i]); } m_output_lbuf_addr += sizeof (u16) * (u32)m_buf_front_left.size(); m_output_rbuf_addr += sizeof (u16) * (u32)m_buf_front_right.size(); // TODO: Some more Dolby mixing. ApplyReverb(true); m_prepared = false; } void ZeldaAudioRenderer::FetchVPB(u16 voice_id, VPB* vpb) { u16* vpb_words = (u16*)vpb; u16* ram_vpbs = (u16*)HLEMemory_Get_Pointer(m_vpb_base_addr); // A few versions of the UCode have VPB of size 0x80 (vs. the standard // 0xC0). The whole 0x40-0x80 part is gone. Handle that by moving things // around. size_t vpb_size = (m_flags & TINY_VPB) ? 0x80 : 0xC0; size_t base_idx = voice_id * vpb_size; for (size_t i = 0; i < vpb_size; ++i) vpb_words[i] = Common::swap16(ram_vpbs[base_idx + i]); if (m_flags & TINY_VPB) vpb->Uncompress(); } void ZeldaAudioRenderer::StoreVPB(u16 voice_id, VPB* vpb) { u16* vpb_words = (u16*)vpb; u16* ram_vpbs = (u16*)HLEMemory_Get_Pointer(m_vpb_base_addr); size_t vpb_size = (m_flags & TINY_VPB) ? 0x80 : 0xC0; size_t base_idx = voice_id * vpb_size; if (m_flags & TINY_VPB) vpb->Compress(); // Only the first 0x80 words are transferred back - the rest is read-only. for (size_t i = 0; i < vpb_size - 0x40; ++i) ram_vpbs[base_idx + i] = Common::swap16(vpb_words[i]); } void ZeldaAudioRenderer::LoadInputSamples(MixingBuffer* buffer, VPB* vpb) { // Input data pre-resampling. Resampled into the mixing buffer parameter at // the end of processing, if needed. // // Maximum of 0x500 samples here - see NeededRawSamplesCount to understand // this practical limit (resampling_ratio = 0xFFFF -> 0x500 samples). Add a // margin of 4 that is needed for samples source that do resampling. std::array<s16, 0x500 + 4> raw_input_samples; for (size_t i = 0; i < 4; ++i) raw_input_samples[i] = vpb->resample_buffer[i]; if (vpb->use_constant_sample) { buffer->fill(vpb->constant_sample); return; } switch (vpb->samples_source_type) { case VPB::SRC_SQUARE_WAVE: case VPB::SRC_SQUARE_WAVE_25PCT: { u32 shift; if (vpb->samples_source_type == VPB::SRC_SQUARE_WAVE) shift = 1; else shift = 2; u32 mask = (1 << shift) - 1; u32 pos = vpb->current_pos_frac << shift; for (size_t i = 0; i < buffer->size(); ++i) { (*buffer)[i] = ((pos >> 16) & mask) ? 
0xC000 : 0x4000; pos += vpb->resampling_ratio; } vpb->current_pos_frac = (pos >> shift) & 0xFFFF; break; } case VPB::SRC_SAW_WAVE: { u32 pos = vpb->current_pos_frac; for (size_t i = 0; i < buffer->size(); ++i) { (*buffer)[i] = pos & 0xFFFF; pos += (vpb->resampling_ratio) >> 1; } vpb->current_pos_frac = pos & 0xFFFF; break; } case VPB::SRC_CONST_PATTERN_0: case VPB::SRC_CONST_PATTERN_0_VARIABLE_STEP: case VPB::SRC_CONST_PATTERN_1: case VPB::SRC_CONST_PATTERN_2: case VPB::SRC_CONST_PATTERN_3: { const u16 PATTERN_SIZE = 0x40; struct PatternInfo { u16 idx; bool variable_step; }; std::map<u16, PatternInfo> samples_source_to_pattern = { { VPB::SRC_CONST_PATTERN_0, {0, false} }, { VPB::SRC_CONST_PATTERN_0_VARIABLE_STEP, {0, true} }, { VPB::SRC_CONST_PATTERN_1, {1, false} }, { VPB::SRC_CONST_PATTERN_2, {2, false} }, { VPB::SRC_CONST_PATTERN_3, {3, false} }, }; auto& pattern_info = samples_source_to_pattern[vpb->samples_source_type]; u16 pattern_offset = pattern_info.idx * PATTERN_SIZE; s16* pattern = m_const_patterns.data() + pattern_offset; u32 pos = vpb->current_pos_frac << 6; // log2(PATTERN_SIZE) u32 step = vpb->resampling_ratio << 5; for (size_t i = 0; i < buffer->size(); ++i) { (*buffer)[i] = pattern[pos >> 16]; pos = (pos + step) % (PATTERN_SIZE << 16); if (pattern_info.variable_step) pos = ((pos << 10) + m_buf_back_right[i] * vpb->resampling_ratio) >> 10; } vpb->current_pos_frac = pos >> 6; break; } case VPB::SRC_PCM8_FROM_ARAM: DownloadPCMSamplesFromARAM<s8>(raw_input_samples.data() + 4, vpb, NeededRawSamplesCount(*vpb)); Resample(vpb, raw_input_samples.data(), buffer); break; case VPB::SRC_AFC_HQ_FROM_ARAM: case VPB::SRC_AFC_LQ_FROM_ARAM: DownloadAFCSamplesFromARAM(raw_input_samples.data() + 4, vpb, NeededRawSamplesCount(*vpb)); Resample(vpb, raw_input_samples.data(), buffer); break; case VPB::SRC_PCM16_FROM_ARAM: DownloadPCMSamplesFromARAM<s16>(raw_input_samples.data() + 4, vpb, NeededRawSamplesCount(*vpb)); Resample(vpb, raw_input_samples.data(), buffer); break; case VPB::SRC_PCM16_FROM_MRAM: DownloadRawSamplesFromMRAM(raw_input_samples.data() + 4, vpb, NeededRawSamplesCount(*vpb)); Resample(vpb, raw_input_samples.data(), buffer); break; default: PanicAlert("Using an unknown/unimplemented sample source: %04x", vpb->samples_source_type); buffer->fill(0); return; } } u16 ZeldaAudioRenderer::NeededRawSamplesCount(const VPB& vpb) { // Both of these are 4.12 fixed point, so shift by 12 to get the int part. return (vpb.current_pos_frac + 0x50 * vpb.resampling_ratio) >> 12; } void ZeldaAudioRenderer::Resample(VPB* vpb, const s16* src, MixingBuffer* dst) { // Both in 20.12 format. u32 ratio = vpb->resampling_ratio; u32 pos = vpb->current_pos_frac; // Check if we need to do some interpolation. If the resampling ratio is // more than 4:1, it's not worth it. if ((ratio >> 12) >= 4) { for (s16& dst_sample : *dst) { pos += ratio; dst_sample = src[pos >> 12]; } } else { for (auto& dst_sample : *dst) { // We have 0x40 * 4 coeffs that need to be selected based on the // most significant bits of the fractional part of the position. 12 // bits >> 6 = 6 bits = 0x40. Multiply by 4 since there are 4 // consecutive coeffs. 
u32 coeffs_idx = ((pos & 0xFFF) >> 6) * 4; const s16* coeffs = &m_resampling_coeffs[coeffs_idx]; const s16* input = &src[pos >> 12]; s64 dst_sample_unclamped = 0; for (size_t i = 0; i < 4; ++i) dst_sample_unclamped += (s64)2 * coeffs[i] * input[i]; dst_sample_unclamped >>= 16; dst_sample = (s16)MathUtil::Clamp<s64>(dst_sample_unclamped, -0x8000, 0x7FFF); pos += ratio; } } for (u32 i = 0; i < 4; ++i) vpb->resample_buffer[i] = src[(pos >> 12) + i]; vpb->constant_sample = (*dst)[dst->size() - 1]; vpb->current_pos_frac = pos & 0xFFF; } void* ZeldaAudioRenderer::GetARAMPtr() const { if (m_aram_base_addr) return HLEMemory_Get_Pointer(m_aram_base_addr); else return DSP::GetARAMPtr(); } template <typename T> void ZeldaAudioRenderer::DownloadPCMSamplesFromARAM(s16* dst, VPB* vpb, u16 requested_samples_count) { if (vpb->done) { for (u16 i = 0; i < requested_samples_count; ++i) dst[i] = 0; return; } if (vpb->reset_vpb) { vpb->SetRemainingLength( vpb->GetLoopStartPosition() - vpb->GetCurrentPosition()); vpb->SetCurrentARAMAddr( vpb->GetBaseAddress() + vpb->GetCurrentPosition() * sizeof (T)); } vpb->end_reached = false; while (requested_samples_count) { if (vpb->end_reached) { vpb->end_reached = false; if (!vpb->is_looping) { for (u16 i = 0; i < requested_samples_count; ++i) dst[i] = 0; vpb->done = true; break; } vpb->SetCurrentPosition(vpb->GetLoopAddress()); vpb->SetRemainingLength( vpb->GetLoopStartPosition() - vpb->GetCurrentPosition()); vpb->SetCurrentARAMAddr( vpb->GetBaseAddress() + vpb->GetCurrentPosition() * sizeof (T)); } T* src_ptr = (T*)((u8*)GetARAMPtr() + vpb->GetCurrentARAMAddr()); u16 samples_to_download = std::min(vpb->GetRemainingLength(), (u32)requested_samples_count); for (u16 i = 0; i < samples_to_download; ++i) *dst++ = Common::FromBigEndian<T>(*src_ptr++) << (16 - 8 * sizeof (T)); vpb->SetRemainingLength(vpb->GetRemainingLength() - samples_to_download); vpb->SetCurrentARAMAddr(vpb->GetCurrentARAMAddr() + samples_to_download * sizeof (T)); requested_samples_count -= samples_to_download; if (!vpb->GetRemainingLength()) vpb->end_reached = true; } } void ZeldaAudioRenderer::DownloadAFCSamplesFromARAM( s16* dst, VPB* vpb, u16 requested_samples_count) { if (vpb->reset_vpb) { *vpb->AFCYN1() = 0; *vpb->AFCYN2() = 0; vpb->afc_remaining_decoded_samples = 0; vpb->SetRemainingLength(vpb->GetLoopStartPosition()); vpb->SetCurrentARAMAddr(vpb->GetBaseAddress()); } if (vpb->done) { for (u16 i = 0; i < requested_samples_count; ++i) dst[i] = 0; return; } // Try several things until we have output enough samples. while (true) { // Try to push currently cached/already decoded samples. u16 remaining_to_output = std::min(vpb->afc_remaining_decoded_samples, requested_samples_count); s16* base = &vpb->afc_remaining_samples[0x10 - vpb->afc_remaining_decoded_samples]; for (size_t i = 0; i < remaining_to_output; ++i) *dst++ = base[i]; vpb->afc_remaining_decoded_samples -= remaining_to_output; requested_samples_count -= remaining_to_output; if (requested_samples_count == 0) { return; // We have output everything we needed. } else if (requested_samples_count <= vpb->GetRemainingLength()) { // Each AFC block is 16 samples. 
u16 requested_blocks_count = (requested_samples_count + 0xF) >> 4; u16 decoded_samples_count = requested_blocks_count << 4; if (decoded_samples_count < vpb->GetRemainingLength()) { vpb->afc_remaining_decoded_samples = decoded_samples_count - requested_samples_count; vpb->SetRemainingLength(vpb->GetRemainingLength() - decoded_samples_count); } else { vpb->afc_remaining_decoded_samples = vpb->GetRemainingLength() - requested_samples_count; vpb->SetRemainingLength(0); } DecodeAFC(vpb, dst, requested_blocks_count); if (vpb->afc_remaining_decoded_samples) { for (size_t i = 0; i < 0x10; ++i) vpb->afc_remaining_samples[i] = dst[decoded_samples_count - 0x10 + i]; if (!vpb->GetRemainingLength() && vpb->GetLoopStartPosition()) { // Adjust remaining samples to account for the future loop iteration. base = vpb->afc_remaining_samples + ((vpb->GetLoopStartPosition() + 0xF) & 0xF); for (size_t i = 0; i < vpb->afc_remaining_decoded_samples; ++i) vpb->afc_remaining_samples[0x10 - i - 1] = *base--; } } return; } else { // More samples asked than available. Either complete the sound, or // start looping. if (vpb->GetRemainingLength()) // Skip if we cannot load anything. { requested_samples_count -= vpb->GetRemainingLength(); u16 requested_blocks_count = (vpb->GetRemainingLength() + 0xF) >> 4; DecodeAFC(vpb, dst, requested_blocks_count); dst += vpb->GetRemainingLength(); } if (!vpb->is_looping) { vpb->done = true; for (size_t i = 0; i < requested_samples_count; ++i) *dst++ = 0; return; } else { // We need to loop. Compute the new position, decode a block, // and loop back to the beginning of the download logic. // Use the fact that the sample source number also represents // the number of bytes per 16 samples. u32 loop_off_in_bytes = (vpb->GetLoopAddress() >> 4) * vpb->samples_source_type; u32 loop_start_addr = vpb->GetBaseAddress() + loop_off_in_bytes; vpb->SetCurrentARAMAddr(loop_start_addr); *vpb->AFCYN2() = vpb->loop_yn2; *vpb->AFCYN1() = vpb->loop_yn1; DecodeAFC(vpb, vpb->afc_remaining_samples, 1); // Realign and recompute the number of internally cached // samples and the current position. 
vpb->afc_remaining_decoded_samples = 0x10 - (vpb->GetLoopAddress() & 0xF); u32 remaining_length = vpb->GetLoopStartPosition(); remaining_length -= vpb->afc_remaining_decoded_samples; remaining_length -= vpb->GetLoopAddress(); vpb->SetRemainingLength(remaining_length); continue; } } } } void ZeldaAudioRenderer::DecodeAFC(VPB* vpb, s16* dst, size_t block_count) { u32 addr = vpb->GetCurrentARAMAddr(); u8* src = (u8*)GetARAMPtr() + addr; vpb->SetCurrentARAMAddr(addr + (u32)block_count * vpb->samples_source_type); for (size_t b = 0; b < block_count; ++b) { s16 nibbles[16]; s16 delta = 1 << ((*src >> 4) & 0xF); s16 idx = (*src & 0xF); src++; if (vpb->samples_source_type == VPB::SRC_AFC_HQ_FROM_ARAM) { for (size_t i = 0; i < 16; i += 2) { nibbles[i + 0] = *src >> 4; nibbles[i + 1] = *src & 0xF; src++; } for (auto& nibble : nibbles) { if (nibble >= 8) nibble -= 16; nibble <<= 11; } } else { for (size_t i = 0; i < 16; i += 4) { nibbles[i + 0] = (*src >> 6) & 3; nibbles[i + 1] = (*src >> 4) & 3; nibbles[i + 2] = (*src >> 2) & 3; nibbles[i + 3] = (*src >> 0) & 3; src++; } for (auto& nibble : nibbles) { if (nibble >= 2) nibble -= 4; nibble <<= 13; } } s32 yn1 = *vpb->AFCYN1(), yn2 = *vpb->AFCYN2(); for (size_t i = 0; i < 16; ++i) { s32 sample = delta * nibbles[i] + yn1 * m_afc_coeffs[idx * 2] + yn2 * m_afc_coeffs[idx * 2 + 1]; sample >>= 11; sample = MathUtil::Clamp(sample, -0x8000, 0x7fff); *dst++ = (s16)sample; yn2 = yn1; yn1 = sample; } *vpb->AFCYN2() = yn2; *vpb->AFCYN1() = yn1; } } void ZeldaAudioRenderer::DownloadRawSamplesFromMRAM( s16* dst, VPB* vpb, u16 requested_samples_count) { u32 addr = vpb->GetBaseAddress() + vpb->current_position_h * sizeof (u16); s16* src_ptr = (s16*)HLEMemory_Get_Pointer(addr); if (requested_samples_count > vpb->GetRemainingLength()) { s16 last_sample = 0; for (u16 i = 0; i < vpb->GetRemainingLength(); ++i) *dst++ = last_sample = Common::swap16(*src_ptr++); for (u16 i = vpb->GetRemainingLength(); i < requested_samples_count; ++i) *dst++ = last_sample; vpb->current_position_h += vpb->GetRemainingLength(); vpb->SetRemainingLength(0); vpb->done = true; } else { vpb->SetRemainingLength(vpb->GetRemainingLength() - requested_samples_count); vpb->samples_before_loop = vpb->loop_start_position_h - vpb->current_position_h; if (requested_samples_count <= vpb->samples_before_loop) { for (u16 i = 0; i < requested_samples_count; ++i) *dst++ = Common::swap16(*src_ptr++); vpb->current_position_h += requested_samples_count; } else { for (u16 i = 0; i < vpb->samples_before_loop; ++i) *dst++ = Common::swap16(*src_ptr++); vpb->SetBaseAddress(vpb->GetLoopAddress()); src_ptr = (s16*)HLEMemory_Get_Pointer(vpb->GetLoopAddress()); for (u16 i = vpb->samples_before_loop; i < requested_samples_count; ++i) *dst++ = Common::swap16(*src_ptr++); vpb->current_position_h = requested_samples_count - vpb->samples_before_loop; } } } void ZeldaAudioRenderer::DoState(PointerWrap& p) { p.Do(m_flags); p.Do(m_prepared); p.Do(m_output_lbuf_addr); p.Do(m_output_rbuf_addr); p.Do(m_output_volume); p.Do(m_buf_front_left); p.Do(m_buf_front_right); p.Do(m_buf_back_left); p.Do(m_buf_back_right); p.Do(m_buf_front_left_reverb); p.Do(m_buf_front_right_reverb); p.Do(m_buf_back_left_reverb); p.Do(m_buf_back_right_reverb); p.Do(m_buf_unk0_reverb); p.Do(m_buf_unk1_reverb); p.Do(m_buf_unk0); p.Do(m_buf_unk1); p.Do(m_buf_unk2); p.Do(m_resampling_coeffs); p.Do(m_const_patterns); p.Do(m_sine_table); p.Do(m_afc_coeffs); p.Do(m_aram_base_addr); p.Do(m_vpb_base_addr); p.Do(m_reverb_pb_base_addr); p.Do(m_reverb_pb_frames_count); 
p.Do(m_buf_unk0_reverb_last8); p.Do(m_buf_unk1_reverb_last8); p.Do(m_buf_front_left_reverb_last8); p.Do(m_buf_front_right_reverb_last8); }
XeSquirmy/dolphin-7840-memorywatcher
Source/Core/Core/HW/DSPHLE/UCodes/Zelda.cpp
C++
gpl-2.0
52,380
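As a reading aid for Zelda.cpp above: the VPB comments define resampling_ratio in 4.12 fixed point (0x1000 means one raw sample per output sample, 0x0800 one raw sample per two outputs), NeededRawSamplesCount() takes the integer part of current_pos_frac + 0x50 * resampling_ratio, and Resample() steps a 20.12 position by the ratio once per output sample, using pos >> 12 as the sample index and pos & 0xFFF as the fraction carried to the next frame. The stand-alone sketch below walks through that arithmetic with nearest-neighbour sampling only; it is not Dolphin code, the input data is made up, and it omits the 4-tap coefficient filter the real resampler applies.

// Stand-alone walk-through of the 4.12 fixed-point position arithmetic
// described by the VPB above. NOT Dolphin code: nearest-neighbour only.
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr int kFrameSamples = 0x50;   // samples mixed per voice per frame

// Same arithmetic as NeededRawSamplesCount(): integer part of
// current_pos_frac + 0x50 * resampling_ratio, both in x.12 fixed point.
static uint16_t NeededRawSamples(uint16_t pos_frac, uint16_t ratio)
{
    return static_cast<uint16_t>((pos_frac + kFrameSamples * ratio) >> 12);
}

int main()
{
    const uint16_t ratio = 0x0800;    // 0.5 in 4.12: one raw sample per two outputs
    uint16_t pos_frac = 0x0000;       // VPB current_pos_frac (0.12 fraction)

    const uint16_t needed = NeededRawSamples(pos_frac, ratio);
    std::printf("raw samples needed for one frame: %u\n",
                static_cast<unsigned>(needed));          // 0x28 for these values

    // Fake raw input, as if it had just been downloaded from ARAM/MRAM.
    // The +4 mirrors the margin kept for the resample_buffer history.
    std::vector<int16_t> raw(needed + 4);
    for (std::size_t i = 0; i < raw.size(); ++i)
        raw[i] = static_cast<int16_t>(i * 0x100);

    // Step a 20.12 position by the ratio once per output sample, in the same
    // spirit as Resample(): pos >> 12 picks the raw sample, pos & 0xFFF is
    // the fraction carried into the next frame.
    uint32_t pos = pos_frac;
    std::vector<int16_t> out(kFrameSamples);
    for (int i = 0; i < kFrameSamples; ++i)
    {
        out[i] = raw[pos >> 12];
        pos += ratio;
    }

    std::printf("first outputs: %d %d %d %d\n", out[0], out[1], out[2], out[3]);
    std::printf("next current_pos_frac = 0x%03x\n",
                static_cast<unsigned>(pos & 0xFFF));
    return 0;
}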
<?php
/**
 * @package     Joomla.Plugin
 * @subpackage  System.updatenotification
 *
 * @copyright   Copyright (C) 2005 - 2019 Open Source Matters, Inc. All rights reserved.
 * @license     GNU General Public License version 2 or later; see LICENSE.txt
 */

/**
 * Checks if the com_installer cache timeout is set to 0 hours and the updatenotification plugin is enabled
 *
 * @return  boolean
 *
 * @since   3.6.3
 */
function updatecachetime_postinstall_condition()
{
	$cacheTimeout = (int) JComponentHelper::getComponent('com_installer')->params->get('cachetimeout', 6);

	// Check if the cache timeout is equal to zero
	if ($cacheTimeout === 0 && JPluginHelper::isEnabled('system', 'updatenotification'))
	{
		return true;
	}

	return false;
}

/**
 * Sets the cache timeout back to the default (6 hours)
 *
 * @return  void
 *
 * @since   3.6.3
 */
function updatecachetime_postinstall_action()
{
	$installer = JComponentHelper::getComponent('com_installer');

	// Sets the cache timeout back to the default (6 hours)
	$installer->params->set('cachetimeout', 6);

	// Save the new parameters back to com_installer
	$table = JTable::getInstance('extension');
	$table->load($installer->id);
	$table->bind(array('params' => $installer->params->toString()));

	// Store the changes
	if (!$table->store())
	{
		// If there is an error show it to the admin
		JFactory::getApplication()->enqueueMessage($table->getError(), 'error');
	}
}
andrepereiradasilva/joomla-cms
plugins/system/updatenotification/postinstall/updatecachetime.php
PHP
gpl-2.0
1,424
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/errno.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sort.h> #include "mdss_fb.h" #include "mdss_mdp.h" #include "mdss_debug.h" #include "mdss_mdp_trace.h" #include "mdss_debug.h" static void mdss_mdp_xlog_mixer_reg(struct mdss_mdp_ctl *ctl); static inline u64 fudge_factor(u64 val, u32 numer, u32 denom) { u64 result = (val * (u64)numer); do_div(result, denom); return result; } static inline u64 apply_fudge_factor(u64 val, struct mdss_fudge_factor *factor) { return fudge_factor(val, factor->numer, factor->denom); } static DEFINE_MUTEX(mdss_mdp_ctl_lock); static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer); static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer); static inline void mdp_mixer_write(struct mdss_mdp_mixer *mixer, u32 reg, u32 val) { writel_relaxed(val, mixer->base + reg); } static inline u32 mdp_mixer_read(struct mdss_mdp_mixer *mixer, u32 reg) { return readl_relaxed(mixer->base + reg); } static inline u32 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl) { struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info; return (ctl->intf_type == MDSS_INTF_DSI) ? pinfo->mipi.dsi_pclk_rate : pinfo->clk_rate; } static inline u32 mdss_mdp_clk_fudge_factor(struct mdss_mdp_mixer *mixer, u32 rate) { struct mdss_panel_info *pinfo = &mixer->ctl->panel_data->panel_info; rate = apply_fudge_factor(rate, &mdss_res->clk_factor); /* * If the panel is video mode and its back porch period is * small, the workaround of increasing mdp clk is needed to * avoid underrun. */ if (mixer->ctl->is_video_mode && pinfo && (pinfo->lcdc.v_back_porch < MDP_MIN_VBP)) rate = apply_fudge_factor(rate, &mdss_res->clk_factor); return rate; } struct mdss_mdp_prefill_params { u32 smp_bytes; u32 xres; u32 src_w; u32 dst_w; u32 src_h; u32 dst_h; u32 dst_y; u32 bpp; bool is_yuv; bool is_caf; bool is_fbc; bool is_bwc; bool is_tile; bool is_hflip; }; static inline bool mdss_mdp_perf_is_caf(struct mdss_mdp_pipe *pipe) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); /* * CAF mode filter is enabled when format is yuv and * upscaling. Post processing had the decision to use CAF * under these conditions. */ return ((mdata->mdp_rev >= MDSS_MDP_HW_REV_102) && pipe->src_fmt->is_yuv && ((pipe->src.h >> pipe->vert_deci) <= pipe->dst.h)); } static inline u32 mdss_mdp_calc_y_scaler_bytes(struct mdss_mdp_prefill_params *params, struct mdss_prefill_data *prefill) { u32 y_scaler_bytes = 0, y_scaler_lines = 0; if (params->is_yuv) { if (params->src_h != params->dst_h) { y_scaler_lines = (params->is_caf) ? 
prefill->y_scaler_lines_caf : prefill->y_scaler_lines_bilinear; /* * y is src_width, u is src_width/2 and v is * src_width/2, so the total is scaler_lines * * src_w * 2 */ y_scaler_bytes = y_scaler_lines * params->src_w * 2; } } else { if (params->src_h != params->dst_h) { y_scaler_lines = prefill->y_scaler_lines_bilinear; y_scaler_bytes = y_scaler_lines * params->src_w * params->bpp; } } return y_scaler_bytes; } static inline u32 mdss_mdp_calc_latency_buf_bytes(struct mdss_mdp_prefill_params *params, struct mdss_prefill_data *prefill) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); u32 latency_lines, latency_buf_bytes; if (params->is_yuv) { if (params->is_bwc) { latency_lines = 4; latency_buf_bytes = params->src_w * params->bpp * latency_lines; } else { latency_lines = 2; latency_buf_bytes = ALIGN(params->src_w * params->bpp * latency_lines, mdata->smp_mb_size) * 2; } } else { if (params->is_tile) { latency_lines = 8; latency_buf_bytes = params->src_w * params->bpp * latency_lines; } else if (params->is_bwc) { latency_lines = 4; latency_buf_bytes = params->src_w * params->bpp * latency_lines; } else { latency_lines = 2; latency_buf_bytes = ALIGN(params->src_w * params->bpp * latency_lines, mdata->smp_mb_size); } } return latency_buf_bytes; } static inline u32 mdss_mdp_calc_scaling_w_h(u32 val, u32 src_h, u32 dst_h, u32 src_w, u32 dst_w) { if (dst_h) val = mult_frac(val, src_h, dst_h); if (dst_w) val = mult_frac(val, src_w, dst_w); return val; } static u32 mdss_mdp_perf_calc_pipe_prefill_video(struct mdss_mdp_prefill_params *params) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_prefill_data *prefill = &mdata->prefill_data; u32 prefill_bytes; u32 latency_buf_bytes; u32 y_buf_bytes = 0; u32 y_scaler_bytes; u32 pp_bytes = 0, pp_lines = 0; u32 post_scaler_bytes; u32 fbc_bytes = 0; prefill_bytes = prefill->ot_bytes; latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(params, prefill); prefill_bytes += latency_buf_bytes; pr_debug("latency_buf_bytes bw_calc=%d actual=%d\n", latency_buf_bytes, params->smp_bytes); if (params->is_yuv) y_buf_bytes = prefill->y_buf_bytes; y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill); prefill_bytes += y_buf_bytes + y_scaler_bytes; post_scaler_bytes = prefill->post_scaler_pixels * params->bpp; post_scaler_bytes = mdss_mdp_calc_scaling_w_h(post_scaler_bytes, params->src_h, params->dst_h, params->src_w, params->dst_w); prefill_bytes += post_scaler_bytes; if (params->xres) pp_lines = DIV_ROUND_UP(prefill->pp_pixels, params->xres); if (params->xres && params->dst_h && (params->dst_y <= pp_lines)) pp_bytes = ((params->src_w * params->bpp * prefill->pp_pixels / params->xres) * params->src_h) / params->dst_h; prefill_bytes += pp_bytes; if (params->is_fbc) { fbc_bytes = prefill->fbc_lines * params->bpp; fbc_bytes = mdss_mdp_calc_scaling_w_h(fbc_bytes, params->src_h, params->dst_h, params->src_w, params->dst_w); } prefill_bytes += fbc_bytes; pr_debug("ot=%d y_buf=%d pp_lines=%d pp=%d post_sc=%d fbc_bytes=%d\n", prefill->ot_bytes, y_buf_bytes, pp_lines, pp_bytes, post_scaler_bytes, fbc_bytes); return prefill_bytes; } static u32 mdss_mdp_perf_calc_pipe_prefill_cmd(struct mdss_mdp_prefill_params *params) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_prefill_data *prefill = &mdata->prefill_data; u32 prefill_bytes; u32 ot_bytes = 0; u32 latency_lines, latency_buf_bytes; u32 y_buf_bytes = 0; u32 y_scaler_bytes; u32 fbc_cmd_lines = 0, fbc_cmd_bytes = 0; u32 post_scaler_bytes = 0; /* y_scaler_bytes are same for the first 
or non first line */ y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill); prefill_bytes = y_scaler_bytes; /* 1st line if fbc is not enabled and 2nd line if fbc is enabled */ if (((params->dst_y == 0) && !params->is_fbc) || ((params->dst_y <= 1) && params->is_fbc)) { if (params->is_bwc || params->is_tile) latency_lines = 4; else if (!params->is_caf && params->is_hflip) latency_lines = 1; else latency_lines = 0; latency_buf_bytes = params->src_w * params->bpp * latency_lines; prefill_bytes += latency_buf_bytes; fbc_cmd_lines++; if (params->is_fbc) fbc_cmd_lines++; fbc_cmd_bytes = params->bpp * params->dst_w * fbc_cmd_lines; fbc_cmd_bytes = mdss_mdp_calc_scaling_w_h(fbc_cmd_bytes, params->src_h, params->dst_h, params->src_w, params->dst_w); prefill_bytes += fbc_cmd_bytes; } else { ot_bytes = prefill->ot_bytes; prefill_bytes += ot_bytes; latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(params, prefill); prefill_bytes += latency_buf_bytes; if (params->is_yuv) y_buf_bytes = prefill->y_buf_bytes; prefill_bytes += y_buf_bytes; post_scaler_bytes = prefill->post_scaler_pixels * params->bpp; post_scaler_bytes = mdss_mdp_calc_scaling_w_h(post_scaler_bytes, params->src_h, params->dst_h, params->src_w, params->dst_w); prefill_bytes += post_scaler_bytes; } pr_debug("ot=%d bwc=%d smp=%d y_buf=%d fbc=%d\n", ot_bytes, params->is_bwc, latency_buf_bytes, y_buf_bytes, fbc_cmd_bytes); return prefill_bytes; } #ifdef VIDEO_PLAYBACK_AB_1_1_G3 #define SIZE_720P 1280*720 static u32 mdss_mdp_get_bw_vote_mode(struct mdss_mdp_pipe *pipe) { u32 bw_mode = MDSS_MDP_BW_MODE_NONE; if (pipe->src_fmt->is_yuv) { if ((pipe->horz_deci == 0) && (pipe->vert_deci == 0)) { u32 size = pipe->img_width * pipe->img_height; if (size >= SIZE_720P) bw_mode = MDSS_MDP_BW_MODE_VIDEO; } } return bw_mode; } #endif /** * mdss_mdp_perf_calc_pipe() - calculate performance numbers required by pipe * @pipe: Source pipe struct containing updated pipe params * @perf: Structure containing values that should be updated for * performance tuning * @apply_fudge: Boolean to determine if mdp clock fudge is applicable * * Function calculates the minimum required performance calculations in order * to avoid MDP underflow. The calculations are based on the way MDP * fetches (bandwidth requirement) and processes data through MDP pipeline * (MDP clock requirement) based on frame size and scaling requirements. */ int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe, struct mdss_mdp_perf_params *perf, struct mdss_mdp_img_rect *roi, bool apply_fudge) { struct mdss_mdp_mixer *mixer; int fps = DEFAULT_FRAME_RATE; u32 quota, rate, v_total, src_h, xres = 0; struct mdss_mdp_img_rect src, dst; bool is_fbc = false; struct mdss_mdp_prefill_params prefill_params; if (!pipe || !perf || !pipe->mixer) return -EINVAL; mixer = pipe->mixer; dst = pipe->dst; src = pipe->src; if (mixer->rotator_mode) { v_total = pipe->flags & MDP_ROT_90 ? 
pipe->dst.w : pipe->dst.h; } else if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) { struct mdss_panel_info *pinfo; pinfo = &mixer->ctl->panel_data->panel_info; if (pinfo->type == MIPI_VIDEO_PANEL) { fps = pinfo->panel_max_fps; v_total = pinfo->panel_max_vtotal; } else { fps = mdss_panel_get_framerate(pinfo); v_total = mdss_panel_get_vtotal(pinfo); } xres = pinfo->xres; is_fbc = pinfo->fbc.enabled; } else { v_total = mixer->height; xres = mixer->width; } if (roi) mdss_mdp_crop_rect(&src, &dst, roi); pr_debug("v_total=%d, xres=%d fps=%d\n", v_total, xres, fps); /* * when doing vertical decimation lines will be skipped, hence there is * no need to account for these lines in MDP clock or request bus * bandwidth to fetch them. */ src_h = src.h >> pipe->vert_deci; quota = fps * src.w * src_h; pr_debug("src(w,h)(%d,%d) dst(w,h)(%d,%d) dst_y=%d bpp=%d yuv=%d\n", pipe->src.w, src_h, pipe->dst.w, pipe->dst.h, pipe->dst.y, pipe->src_fmt->bpp, pipe->src_fmt->is_yuv); if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) /* * with decimation, chroma is not downsampled, this means we * need to allocate bw for extra lines that will be fetched */ if (pipe->vert_deci) quota *= 2; else quota = (quota * 3) / 2; else quota *= pipe->src_fmt->bpp; rate = dst.w; if (src_h > dst.h) rate = (rate * src_h) / dst.h; rate *= v_total * fps; if (mixer->rotator_mode) { rate /= 4; /* block mode fetch at 4 pix/clk */ quota *= 2; /* bus read + write */ perf->bw_overlap = quota; } else { perf->bw_overlap = (quota / dst.h) * v_total; } if (apply_fudge) perf->mdp_clk_rate = mdss_mdp_clk_fudge_factor(mixer, rate); else perf->mdp_clk_rate = rate; prefill_params.smp_bytes = mdss_mdp_smp_get_size(pipe); prefill_params.xres = xres; prefill_params.src_w = src.w; prefill_params.src_h = src_h; prefill_params.dst_w = dst.w; prefill_params.dst_h = dst.h; prefill_params.dst_y = dst.y; prefill_params.bpp = pipe->src_fmt->bpp; prefill_params.is_yuv = pipe->src_fmt->is_yuv; prefill_params.is_caf = mdss_mdp_perf_is_caf(pipe); prefill_params.is_fbc = is_fbc; prefill_params.is_bwc = pipe->bwc_mode; prefill_params.is_tile = pipe->src_fmt->tile; prefill_params.is_hflip = pipe->flags & MDP_FLIP_LR; #ifdef VIDEO_PLAYBACK_AB_1_1_G3 if (mixer->ctl->is_video_mode) perf->bw_vote_mode = mdss_mdp_get_bw_vote_mode(pipe); #endif if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) { perf->prefill_bytes = (mixer->ctl->is_video_mode) ? mdss_mdp_perf_calc_pipe_prefill_video(&prefill_params) : mdss_mdp_perf_calc_pipe_prefill_cmd(&prefill_params); } else perf->prefill_bytes = 0; pr_debug("mixer=%d pnum=%d clk_rate=%u bw_overlap=%llu prefill=%d\n", mixer->num, pipe->num, perf->mdp_clk_rate, perf->bw_overlap, perf->prefill_bytes); return 0; } static inline int mdss_mdp_perf_is_overlap(u32 y00, u32 y01, u32 y10, u32 y11) { return (y10 < y00 && y11 >= y01) || (y10 >= y00 && y10 < y01); } static inline int cmpu32(const void *a, const void *b) { return (*(u32 *)a < *(u32 *)b) ? 
-1 : 0; } static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer, struct mdss_mdp_perf_params *perf, struct mdss_mdp_pipe **pipe_list, int num_pipes) { struct mdss_mdp_pipe *pipe; struct mdss_panel_info *pinfo = NULL; int fps = DEFAULT_FRAME_RATE; u32 v_total = 0; int i; u32 max_clk_rate = 0; u64 bw_overlap_max = 0; u64 bw_overlap[MDSS_MDP_MAX_STAGE] = { 0 }; u32 v_region[MDSS_MDP_MAX_STAGE * 2] = { 0 }; u32 prefill_bytes = 0; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); bool apply_fudge = true; #ifdef VIDEO_PLAYBACK_AB_1_1_G3 u32 bw_vote_mode = MDSS_MDP_BW_MODE_NONE; #endif BUG_ON(num_pipes > MDSS_MDP_MAX_STAGE); memset(perf, 0, sizeof(*perf)); if (!mixer->rotator_mode) { if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) { pinfo = &mixer->ctl->panel_data->panel_info; if (pinfo->type == MIPI_VIDEO_PANEL) { fps = pinfo->panel_max_fps; v_total = pinfo->panel_max_vtotal; } else { fps = mdss_panel_get_framerate(pinfo); v_total = mdss_panel_get_vtotal(pinfo); } if (pinfo->type == WRITEBACK_PANEL) pinfo = NULL; } else { v_total = mixer->height; } perf->mdp_clk_rate = mixer->width * v_total * fps; perf->mdp_clk_rate = mdss_mdp_clk_fudge_factor(mixer, perf->mdp_clk_rate); if (!pinfo) /* perf for bus writeback */ perf->bw_overlap = fps * mixer->width * mixer->height * 3; } memset(bw_overlap, 0, sizeof(u64) * MDSS_MDP_MAX_STAGE); memset(v_region, 0, sizeof(u32) * MDSS_MDP_MAX_STAGE * 2); /* * Apply this logic only for 8x26 to reduce clock rate * for single video playback use case */ if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_101) && mixer->type == MDSS_MDP_MIXER_TYPE_INTF) { u32 npipes = 0; for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) { pipe = mixer->stage_pipe[i]; if (pipe) { if (npipes) { apply_fudge = true; break; } npipes++; apply_fudge = !(pipe->src_fmt->is_yuv) || !(pipe->flags & MDP_SOURCE_ROTATED_90); } } } for (i = 0; i < num_pipes; i++) { struct mdss_mdp_perf_params tmp; #ifdef VIDEO_PLAYBACK_AB_1_1_G3 memset(&tmp, 0, sizeof(tmp)); #endif pipe = pipe_list[i]; if (pipe == NULL) continue; if (mdss_mdp_perf_calc_pipe(pipe, &tmp, &mixer->roi, apply_fudge)) continue; #ifdef VIDEO_PLAYBACK_AB_1_1_G3 bw_vote_mode |= tmp.bw_vote_mode; #endif prefill_bytes += tmp.prefill_bytes; bw_overlap[i] = tmp.bw_overlap; v_region[2*i] = pipe->dst.y; v_region[2*i + 1] = pipe->dst.y + pipe->dst.h; if (tmp.mdp_clk_rate > max_clk_rate) max_clk_rate = tmp.mdp_clk_rate; } /* * Sort the v_region array so the total display area can be * divided into individual regions. Check how many pipes fetch * data for each region and sum them up, then the worst case * of all regions is ib request. 
*/ sort(v_region, num_pipes * 2, sizeof(u32), cmpu32, NULL); for (i = 1; i < num_pipes * 2; i++) { int j; u64 bw_max_region = 0; u32 y0, y1; pr_debug("v_region[%d]%d\n", i, v_region[i]); if (v_region[i] == v_region[i-1]) continue; y0 = v_region[i-1]; y1 = v_region[i]; for (j = 0; j < num_pipes; j++) { if (!bw_overlap[j]) continue; pipe = pipe_list[j]; if (mdss_mdp_perf_is_overlap(y0, y1, pipe->dst.y, (pipe->dst.y + pipe->dst.h))) bw_max_region += bw_overlap[j]; pr_debug("v[%d](%d,%d)pipe[%d](%d,%d)bw(%llu %llu)\n", i, y0, y1, j, pipe->dst.y, pipe->dst.y + pipe->dst.h, bw_overlap[j], bw_max_region); } bw_overlap_max = max(bw_overlap_max, bw_max_region); } perf->bw_overlap += bw_overlap_max; perf->prefill_bytes += prefill_bytes; #ifdef VIDEO_PLAYBACK_AB_1_1_G3 perf->bw_vote_mode = bw_vote_mode; #endif if (max_clk_rate > perf->mdp_clk_rate) perf->mdp_clk_rate = max_clk_rate; pr_debug("final mixer=%d video=%d clk_rate=%u bw=%llu prefill=%d\n", mixer->num, mixer->ctl->is_video_mode, perf->mdp_clk_rate, perf->bw_overlap, perf->prefill_bytes); } static u32 mdss_mdp_get_vbp_factor(struct mdss_mdp_ctl *ctl) { u32 fps, v_total, vbp, vbp_fac; struct mdss_panel_info *pinfo; if (!ctl || !ctl->panel_data) return 0; pinfo = &ctl->panel_data->panel_info; fps = mdss_panel_get_framerate(pinfo); v_total = mdss_panel_get_vtotal(pinfo); vbp = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width; vbp_fac = (vbp) ? fps * v_total / vbp : 0; pr_debug("vbp_fac=%d vbp=%d v_total=%d\n", vbp_fac, vbp, v_total); return vbp_fac; } static u32 mdss_mdp_get_vbp_factor_max(struct mdss_mdp_ctl *ctl) { u32 vbp_max = 0; int i; struct mdss_data_type *mdata; if (!ctl || !ctl->mdata) return 0; mdata = ctl->mdata; for (i = 0; i < mdata->nctl; i++) { struct mdss_mdp_ctl *ctl = mdata->ctl_off + i; u32 vbp_fac; if (ctl->power_on) { vbp_fac = mdss_mdp_get_vbp_factor(ctl); vbp_max = max(vbp_max, vbp_fac); } } return vbp_max; } #ifndef BW_CHECK_AGAIN_FOR_UNDERRUN static bool mdss_mdp_video_mode_intf_connected(struct mdss_mdp_ctl *ctl) { int i; struct mdss_data_type *mdata; if (!ctl || !ctl->mdata) return 0; mdata = ctl->mdata; for (i = 0; i < mdata->nctl; i++) { struct mdss_mdp_ctl *ctl = mdata->ctl_off + i; if (ctl->is_video_mode && ctl->power_on) { pr_debug("video interface connected ctl:%d\n", ctl->num); return true; } } return false; } #endif static void __mdss_mdp_perf_calc_ctl_helper(struct mdss_mdp_ctl *ctl, struct mdss_mdp_perf_params *perf, struct mdss_mdp_pipe **left_plist, int left_cnt, struct mdss_mdp_pipe **right_plist, int right_cnt) { struct mdss_mdp_perf_params tmp; memset(perf, 0, sizeof(*perf)); if (left_cnt && ctl->mixer_left) { mdss_mdp_perf_calc_mixer(ctl->mixer_left, &tmp, left_plist, left_cnt); #ifdef VIDEO_PLAYBACK_AB_1_1_G3 perf->bw_vote_mode = tmp.bw_vote_mode; #endif perf->bw_overlap += tmp.bw_overlap; perf->prefill_bytes += tmp.prefill_bytes; perf->mdp_clk_rate = tmp.mdp_clk_rate; } if (right_cnt && ctl->mixer_right) { mdss_mdp_perf_calc_mixer(ctl->mixer_right, &tmp, right_plist, right_cnt); #ifdef VIDEO_PLAYBACK_AB_1_1_G3 perf->bw_vote_mode |= tmp.bw_vote_mode; #endif perf->bw_overlap += tmp.bw_overlap; perf->prefill_bytes += tmp.prefill_bytes; if (tmp.mdp_clk_rate > perf->mdp_clk_rate) perf->mdp_clk_rate = tmp.mdp_clk_rate; if (ctl->intf_type) { u32 clk_rate = mdss_mdp_get_pclk_rate(ctl); /* minimum clock rate due to inefficiency in 3dmux */ clk_rate = mult_frac(clk_rate >> 1, 9, 8); if (clk_rate > perf->mdp_clk_rate) perf->mdp_clk_rate = clk_rate; } } /* request minimum bandwidth to have bus clock on when 
display is on */ if (perf->bw_overlap == 0) perf->bw_overlap = SZ_16M; if (ctl->intf_type != MDSS_MDP_NO_INTF) { u32 vbp_fac = mdss_mdp_get_vbp_factor_max(ctl); perf->bw_prefill = perf->prefill_bytes; /* * Prefill bandwidth equals the amount of data (number * of prefill_bytes) divided by the the amount time * available (blanking period). It is equivalent that * prefill bytes times a factor in unit Hz, which is * the reciprocal of time. */ perf->bw_prefill *= vbp_fac; } perf->bw_ctl = max(perf->bw_prefill, perf->bw_overlap); } int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl, struct mdss_mdp_pipe **left_plist, int left_cnt, struct mdss_mdp_pipe **right_plist, int right_cnt) { struct mdss_data_type *mdata = ctl->mdata; struct mdss_mdp_perf_params perf; u32 bw, threshold; /* we only need bandwidth check on real-time clients (interfaces) */ if (ctl->intf_type == MDSS_MDP_NO_INTF) return 0; __mdss_mdp_perf_calc_ctl_helper(ctl, &perf, left_plist, left_cnt, right_plist, right_cnt); /* convert bandwidth to kb */ bw = DIV_ROUND_UP_ULL(perf.bw_ctl, 1000); pr_debug("calculated bandwidth=%uk\n", bw); threshold = ctl->is_video_mode ? mdata->max_bw_low : mdata->max_bw_high; if (bw > threshold) { pr_debug("exceeds bandwidth: %ukb > %ukb\n", bw, threshold); return -E2BIG; } return 0; } static void mdss_mdp_perf_calc_ctl(struct mdss_mdp_ctl *ctl, struct mdss_mdp_perf_params *perf) { struct mdss_mdp_pipe **left_plist, **right_plist; left_plist = ctl->mixer_left ? ctl->mixer_left->stage_pipe : NULL; right_plist = ctl->mixer_right ? ctl->mixer_right->stage_pipe : NULL; __mdss_mdp_perf_calc_ctl_helper(ctl, perf, left_plist, (left_plist ? MDSS_MDP_MAX_STAGE : 0), right_plist, (right_plist ? MDSS_MDP_MAX_STAGE : 0)); #ifdef BW_CHECK_AGAIN_FOR_UNDERRUN if (ctl->is_video_mode) { if (perf->bw_overlap > perf->bw_prefill) perf->bw_ctl = apply_fudge_factor(perf->bw_ctl, &mdss_res->ib_factor_overlap); else perf->bw_ctl = apply_fudge_factor(perf->bw_ctl, &mdss_res->ib_factor); if (DIV_ROUND_UP_ULL(perf->bw_ctl, 1000) > 3200000) { perf->bw_ctl = max(apply_fudge_factor(perf->bw_overlap, &mdss_res->ib_factor_overlap), apply_fudge_factor(perf->bw_prefill, &mdss_res->ib_factor)); } #else if (ctl->is_video_mode || ((ctl->intf_type != MDSS_MDP_NO_INTF) && mdss_mdp_video_mode_intf_connected(ctl))) { perf->bw_ctl = max(apply_fudge_factor(perf->bw_overlap, &mdss_res->ib_factor_overlap), apply_fudge_factor(perf->bw_prefill, &mdss_res->ib_factor)); #endif } pr_debug("ctl=%d clk_rate=%u\n", ctl->num, perf->mdp_clk_rate); pr_debug("bw_overlap=%llu bw_prefill=%llu prefill_bytes=%d\n", perf->bw_overlap, perf->bw_prefill, perf->prefill_bytes); } static void set_status(u32 *value, bool status, u32 bit_num) { if (status) *value |= BIT(bit_num); else *value &= ~BIT(bit_num); } /** * @ mdss_mdp_ctl_perf_set_transaction_status() - * Set the status of the on-going operations * for the command mode panels. * @ctl - pointer to a ctl * * This function is called to set the status bit in the perf_transaction_status * according to the operation that it is on-going for the command mode * panels, where: * * PERF_SW_COMMIT_STATE: * 1 - If SW operation has been commited and bw * has been requested (HW transaction have not started yet). * 0 - If there is no SW operation pending * PERF_HW_MDP_STATE: * 1 - If HW transaction is on-going * 0 - If there is no HW transaction on going (ping-pong interrupt * has finished) * Only if both states are zero there are no pending operations and * BW could be released. 
* State can be queried calling "mdss_mdp_ctl_perf_get_transaction_status" */ void mdss_mdp_ctl_perf_set_transaction_status(struct mdss_mdp_ctl *ctl, enum mdss_mdp_perf_state_type component, bool new_status) { u32 previous_transaction; bool previous_status; unsigned long flags; if (!ctl || !ctl->panel_data || (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL)) return; spin_lock_irqsave(&ctl->spin_lock, flags); previous_transaction = ctl->perf_transaction_status; previous_status = previous_transaction & BIT(component) ? PERF_STATUS_BUSY : PERF_STATUS_DONE; /* * If we set "done" state when previous state was not "busy", * we want to print a warning since maybe there is a state * that we are not considering */ WARN((PERF_STATUS_DONE == new_status) && (PERF_STATUS_BUSY != previous_status), "unexpected previous state for component: %d\n", component); set_status(&ctl->perf_transaction_status, new_status, (u32)component); pr_debug("component:%d previous_transaction:%d transaction_status:%d\n", component, previous_transaction, ctl->perf_transaction_status); pr_debug("new_status:%d prev_status:%d\n", new_status, previous_status); spin_unlock_irqrestore(&ctl->spin_lock, flags); } /** * @ mdss_mdp_ctl_perf_get_transaction_status() - * Get the status of the on-going operations * for the command mode panels. * @ctl - pointer to a ctl * * Return: * The status of the transactions for the command mode panels, * note that the bandwidth can be released only if all transaction * status bits are zero. */ u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl) { unsigned long flags; u32 transaction_status; /* * If Video Mode or not valid data to determine the status, return busy * status, so the bandwidth cannot be freed by the caller */ if (!ctl || !ctl->panel_data || (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL)) { return PERF_STATUS_BUSY; } spin_lock_irqsave(&ctl->spin_lock, flags); transaction_status = ctl->perf_transaction_status; spin_unlock_irqrestore(&ctl->spin_lock, flags); return transaction_status; } static inline void mdss_mdp_ctl_perf_update_bus(struct mdss_mdp_ctl *ctl) { u64 bw_sum_of_intfs = 0; u64 bus_ab_quota, bus_ib_quota; struct mdss_data_type *mdata; int i; #ifdef VIDEO_PLAYBACK_AB_1_1_G3 u32 bw_vote_mode = MDSS_MDP_BW_MODE_NONE; #endif #ifdef MDP_BW_LIMIT_AB struct mdss_overlay_private *mdp5_data = NULL; if (ctl->mfd) mdp5_data = mfd_to_mdp5_data(ctl->mfd); #endif if (!ctl || !ctl->mdata) return; ATRACE_BEGIN(__func__); mdata = ctl->mdata; for (i = 0; i < mdata->nctl; i++) { struct mdss_mdp_ctl *ctl; ctl = mdata->ctl_off + i; if (ctl->power_on) { #ifdef VIDEO_PLAYBACK_AB_1_1_G3 if (ctl->cur_perf.bw_vote_mode) bw_vote_mode |= ctl->cur_perf.bw_vote_mode; #endif bw_sum_of_intfs += ctl->cur_perf.bw_ctl; pr_debug("c=%d bw=%llu\n", ctl->num, ctl->cur_perf.bw_ctl); } } bus_ib_quota = bw_sum_of_intfs; #if defined(VIDEO_PLAYBACK_AB_1_1_G3) if (bw_vote_mode == MDSS_MDP_BW_MODE_NONE) { #ifdef MDP_BW_LIMIT_AB if (mdp5_data && mdp5_data->bw_limit) { /* change the value as you want but should not cause underrun */ pr_debug(" B/W limited !!!\n"); if(bus_ib_quota < 2400000000UL) bus_ib_quota = 2400000000UL; bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs, &mdss_res->ab_factor_limit); } else bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs, &mdss_res->ab_factor); #else bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs, &mdss_res->ab_factor); #endif } else bus_ab_quota = fudge_factor(bw_sum_of_intfs, (u32)11, (u32)10); #else #ifdef MDP_BW_LIMIT_AB if (mdp5_data && 
mdp5_data->bw_limit) { /* change the value as you want but should not cause underrun */ pr_debug(" B/W limited !!!\n"); if(bus_ib_quota < 2400000000UL) bus_ib_quota = 2400000000UL; bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs, &mdss_res->ab_factor_limit); } else bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs, &mdss_res->ab_factor); #else bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs, &mdss_res->ab_factor); #endif #endif trace_mdp_perf_update_bus(bus_ab_quota, bus_ib_quota); ATRACE_INT("bus_quota", bus_ib_quota); mdss_bus_scale_set_quota(MDSS_HW_MDP, bus_ab_quota, bus_ib_quota); pr_debug("ab=%llu ib=%llu\n", bus_ab_quota, bus_ib_quota); ATRACE_END(__func__); } /** * @mdss_mdp_ctl_perf_release_bw() - request zero bandwidth * @ctl - pointer to a ctl * * Function checks a state variable for the ctl, if all pending commit * requests are done, meaning no more bandwidth is needed, release * bandwidth request. */ void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl) { int transaction_status; struct mdss_data_type *mdata; int i; /* only do this for command panel */ if (!ctl || !ctl->mdata || !ctl->panel_data || (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL)) return; mutex_lock(&mdss_mdp_ctl_lock); mdata = ctl->mdata; /* * If video interface present, cmd panel bandwidth cannot be * released. */ for (i = 0; i < mdata->nctl; i++) { struct mdss_mdp_ctl *ctl = mdata->ctl_off + i; if (ctl->power_on && ctl->is_video_mode) goto exit; } transaction_status = mdss_mdp_ctl_perf_get_transaction_status(ctl); pr_debug("transaction_status=0x%x\n", transaction_status); /*Release the bandwidth only if there are no transactions pending*/ if (!transaction_status) { trace_mdp_cmd_release_bw(ctl->num); ctl->cur_perf.bw_ctl = 0; ctl->new_perf.bw_ctl = 0; pr_debug("Release BW ctl=%d\n", ctl->num); mdss_mdp_ctl_perf_update_bus(ctl); } exit: mutex_unlock(&mdss_mdp_ctl_lock); } static int mdss_mdp_select_clk_lvl(struct mdss_mdp_ctl *ctl, u32 clk_rate) { int i; struct mdss_data_type *mdata; if (!ctl) return -ENODEV; mdata = ctl->mdata; for (i = 0; i < mdata->nclk_lvl; i++) { if (clk_rate > mdata->clock_levels[i]) { continue; } else { clk_rate = mdata->clock_levels[i]; break; } } return clk_rate; } static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl, int params_changed) { struct mdss_mdp_perf_params *new, *old; int update_bus = 0, update_clk = 0; struct mdss_data_type *mdata; bool is_bw_released; if (!ctl || !ctl->mdata) return; ATRACE_BEGIN(__func__); mutex_lock(&mdss_mdp_ctl_lock); mdata = ctl->mdata; old = &ctl->cur_perf; new = &ctl->new_perf; /* * We could have released the bandwidth if there were no transactions * pending, so we want to re-calculate the bandwidth in this situation */ is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl); if (ctl->power_on) { if (is_bw_released || params_changed) mdss_mdp_perf_calc_ctl(ctl, new); /* * if params have just changed delay the update until * later once the hw configuration has been flushed to * MDP */ #if defined(VIDEO_PLAYBACK_AB_1_1_G3) if ((params_changed && (new->bw_ctl > old->bw_ctl)) || (!params_changed && (new->bw_ctl < old->bw_ctl)) || (new->bw_vote_mode != MDSS_MDP_BW_MODE_NONE)) { pr_debug("c=%d p=%d new_bw=%llu,old_bw=%llu\n", ctl->num, params_changed, new->bw_ctl, old->bw_ctl); old->bw_ctl = new->bw_ctl; old->bw_vote_mode = new->bw_vote_mode; update_bus = 1; } #else if ((params_changed && (new->bw_ctl > old->bw_ctl)) || (!params_changed && (new->bw_ctl < old->bw_ctl))) { pr_debug("c=%d p=%d new_bw=%llu,old_bw=%llu\n", 
ctl->num, params_changed, new->bw_ctl, old->bw_ctl); old->bw_ctl = new->bw_ctl; update_bus = 1; } #endif if ((params_changed && (new->mdp_clk_rate > old->mdp_clk_rate)) || (!params_changed && (new->mdp_clk_rate < old->mdp_clk_rate))) { old->mdp_clk_rate = new->mdp_clk_rate; update_clk = 1; } } else { memset(old, 0, sizeof(old)); memset(new, 0, sizeof(new)); update_bus = 1; update_clk = 1; } if (update_bus) mdss_mdp_ctl_perf_update_bus(ctl); if (update_clk) { u32 clk_rate = 0; int i; for (i = 0; i < mdata->nctl; i++) { struct mdss_mdp_ctl *ctl; ctl = mdata->ctl_off + i; if (ctl->power_on) clk_rate = max(ctl->cur_perf.mdp_clk_rate, clk_rate); } clk_rate = mdss_mdp_select_clk_lvl(ctl, clk_rate); ATRACE_INT("mdp_clk", clk_rate); mdss_mdp_set_clk_rate(clk_rate); pr_debug("update clk rate = %d HZ\n", clk_rate); } mutex_unlock(&mdss_mdp_ctl_lock); ATRACE_END(__func__); } static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata, u32 off) { struct mdss_mdp_ctl *ctl = NULL; u32 cnum; u32 nctl = mdata->nctl; mutex_lock(&mdss_mdp_ctl_lock); if (!mdata->has_wfd_blk) nctl++; for (cnum = off; cnum < nctl; cnum++) { ctl = mdata->ctl_off + cnum; if (ctl->ref_cnt == 0) { ctl->ref_cnt++; ctl->mdata = mdata; mutex_init(&ctl->lock); spin_lock_init(&ctl->spin_lock); BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head); pr_debug("alloc ctl_num=%d\n", ctl->num); break; } ctl = NULL; } mutex_unlock(&mdss_mdp_ctl_lock); return ctl; } static int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl) { if (!ctl) return -ENODEV; pr_debug("free ctl_num=%d ref_cnt=%d\n", ctl->num, ctl->ref_cnt); if (!ctl->ref_cnt) { pr_err("called with ref_cnt=0\n"); return -EINVAL; } if (ctl->mixer_left) { mdss_mdp_mixer_free(ctl->mixer_left); ctl->mixer_left = NULL; } if (ctl->mixer_right) { mdss_mdp_mixer_free(ctl->mixer_right); ctl->mixer_right = NULL; } mutex_lock(&mdss_mdp_ctl_lock); ctl->ref_cnt--; ctl->intf_num = MDSS_MDP_NO_INTF; ctl->intf_type = MDSS_MDP_NO_INTF; ctl->is_secure = false; ctl->power_on = false; ctl->start_fnc = NULL; ctl->stop_fnc = NULL; ctl->prepare_fnc = NULL; ctl->display_fnc = NULL; ctl->wait_fnc = NULL; ctl->read_line_cnt_fnc = NULL; ctl->add_vsync_handler = NULL; ctl->remove_vsync_handler = NULL; ctl->panel_data = NULL; ctl->config_fps_fnc = NULL; mutex_unlock(&mdss_mdp_ctl_lock); return 0; } static struct mdss_mdp_mixer *mdss_mdp_mixer_alloc( struct mdss_mdp_ctl *ctl, u32 type, int mux) { struct mdss_mdp_mixer *mixer = NULL, *alt_mixer = NULL; u32 nmixers_intf; u32 nmixers_wb; u32 i; u32 nmixers; struct mdss_mdp_mixer *mixer_pool = NULL; if (!ctl || !ctl->mdata) return NULL; mutex_lock(&mdss_mdp_ctl_lock); nmixers_intf = ctl->mdata->nmixers_intf; nmixers_wb = ctl->mdata->nmixers_wb; switch (type) { case MDSS_MDP_MIXER_TYPE_INTF: mixer_pool = ctl->mdata->mixer_intf; nmixers = nmixers_intf; /* * try to reserve first layer mixer for write back if * assertive display needs to be supported through wfd */ if (ctl->mdata->has_wb_ad && ctl->intf_num) { alt_mixer = mixer_pool; mixer_pool++; nmixers--; } break; case MDSS_MDP_MIXER_TYPE_WRITEBACK: mixer_pool = ctl->mdata->mixer_wb; nmixers = nmixers_wb; break; default: nmixers = 0; pr_err("invalid pipe type %d\n", type); break; } /* early mdp revision only supports mux of dual pipe on mixers 0 and 1, * need to ensure that these pipes are readily available by using * mixer 2 if available and mux is not required */ if (!mux && (ctl->mdata->mdp_rev == MDSS_MDP_HW_REV_100) && (type == MDSS_MDP_MIXER_TYPE_INTF) && (nmixers >= MDSS_MDP_INTF_LAYERMIXER2) && 
(mixer_pool[MDSS_MDP_INTF_LAYERMIXER2].ref_cnt == 0)) mixer_pool += MDSS_MDP_INTF_LAYERMIXER2; /*Allocate virtual wb mixer if no dedicated wfd wb blk is present*/ if (!ctl->mdata->has_wfd_blk && (type == MDSS_MDP_MIXER_TYPE_WRITEBACK)) nmixers += 1; for (i = 0; i < nmixers; i++) { mixer = mixer_pool + i; if (mixer->ref_cnt == 0) { mixer->ref_cnt++; mixer->params_changed++; mixer->ctl = ctl; pr_debug("alloc mixer num %d for ctl=%d\n", mixer->num, ctl->num); break; } mixer = NULL; } if (!mixer && alt_mixer && (alt_mixer->ref_cnt == 0)) mixer = alt_mixer; mutex_unlock(&mdss_mdp_ctl_lock); return mixer; } static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer) { if (!mixer) return -ENODEV; pr_debug("free mixer_num=%d ref_cnt=%d\n", mixer->num, mixer->ref_cnt); if (!mixer->ref_cnt) { pr_err("called with ref_cnt=0\n"); return -EINVAL; } mutex_lock(&mdss_mdp_ctl_lock); mixer->ref_cnt--; mutex_unlock(&mdss_mdp_ctl_lock); return 0; } struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator) { struct mdss_mdp_ctl *ctl = NULL; struct mdss_mdp_mixer *mixer = NULL; ctl = mdss_mdp_ctl_alloc(mdss_res, mdss_res->nmixers_intf); if (!ctl) { pr_debug("unable to allocate wb ctl\n"); return NULL; } mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK, false); if (!mixer) { pr_debug("unable to allocate wb mixer\n"); goto error; } mixer->rotator_mode = rotator; switch (mixer->num) { case MDSS_MDP_WB_LAYERMIXER0: ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT0_MODE : MDSS_MDP_CTL_OP_WB0_MODE); break; case MDSS_MDP_WB_LAYERMIXER1: ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT1_MODE : MDSS_MDP_CTL_OP_WB1_MODE); break; default: pr_err("invalid layer mixer=%d\n", mixer->num); goto error; } ctl->mixer_left = mixer; ctl->start_fnc = mdss_mdp_writeback_start; ctl->power_on = true; ctl->wb_type = (rotator ? 
MDSS_MDP_WB_CTL_TYPE_BLOCK : MDSS_MDP_WB_CTL_TYPE_LINE); mixer->ctl = ctl; if (ctl->start_fnc) ctl->start_fnc(ctl); return mixer; error: if (mixer) mdss_mdp_mixer_free(mixer); if (ctl) mdss_mdp_ctl_free(ctl); return NULL; } int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer) { struct mdss_mdp_ctl *ctl; if (!mixer || !mixer->ctl) { pr_err("invalid ctl handle\n"); return -ENODEV; } ctl = mixer->ctl; mixer->rotator_mode = 0; pr_debug("destroy ctl=%d mixer=%d\n", ctl->num, mixer->num); if (ctl->stop_fnc) ctl->stop_fnc(ctl); mdss_mdp_ctl_free(ctl); mdss_mdp_ctl_perf_update(ctl, 0); return 0; } int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff) { struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl); if (sctl) sctl->panel_data->panel_info.cont_splash_enabled = 0; switch (ctl->panel_data->panel_info.type) { case MIPI_VIDEO_PANEL: case EDP_PANEL: return mdss_mdp_video_reconfigure_splash_done(ctl, handoff); case MIPI_CMD_PANEL: return mdss_mdp_cmd_reconfigure_splash_done(ctl, handoff); default: return 0; } } static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl, struct mdss_mdp_ctl *split_ctl) { if (!ctl || !split_ctl) return -ENODEV; /* setup split ctl mixer as right mixer of original ctl so that * original ctl can work the same way as dual pipe solution */ ctl->mixer_right = split_ctl->mixer_left; return 0; } static int mdss_mdp_ctl_fbc_enable(int enable, struct mdss_mdp_mixer *mixer, struct mdss_panel_info *pdata) { struct fbc_panel_info *fbc; u32 mode = 0, budget_ctl = 0, lossy_mode = 0; if (!pdata) { pr_err("Invalid pdata\n"); return -EINVAL; } fbc = &pdata->fbc; if (!fbc || !fbc->enabled) { pr_err("Invalid FBC structure\n"); return -EINVAL; } if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) pr_debug("Mixer supports FBC.\n"); else { pr_debug("Mixer doesn't support FBC.\n"); return -EINVAL; } if (enable) { mode = ((pdata->xres) << 16) | ((fbc->comp_mode) << 8) | ((fbc->qerr_enable) << 7) | ((fbc->cd_bias) << 4) | ((fbc->pat_enable) << 3) | ((fbc->vlc_enable) << 2) | ((fbc->bflc_enable) << 1) | enable; budget_ctl = ((fbc->line_x_budget) << 12) | ((fbc->block_x_budget) << 8) | fbc->block_budget; lossy_mode = ((fbc->lossless_mode_thd) << 16) | ((fbc->lossy_mode_thd) << 8) | ((fbc->lossy_rgb_thd) << 3) | fbc->lossy_mode_idx; } mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_MODE, mode); mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_BUDGET_CTL, budget_ctl); mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_LOSSY_MODE, lossy_mode); return 0; } int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl) { struct mdss_mdp_ctl *split_ctl; u32 width, height; int split_fb; if (!ctl || !ctl->panel_data) { pr_err("invalid ctl handle\n"); return -ENODEV; } split_ctl = mdss_mdp_get_split_ctl(ctl); width = ctl->panel_data->panel_info.xres; height = ctl->panel_data->panel_info.yres; split_fb = (ctl->mfd->split_fb_left && ctl->mfd->split_fb_right && (ctl->mfd->split_fb_left <= MAX_MIXER_WIDTH) && (ctl->mfd->split_fb_right <= MAX_MIXER_WIDTH)) ? 
1 : 0; pr_debug("max=%d xres=%d left=%d right=%d\n", MAX_MIXER_WIDTH, width, ctl->mfd->split_fb_left, ctl->mfd->split_fb_right); if ((split_ctl && (width > MAX_MIXER_WIDTH)) || (width > (2 * MAX_MIXER_WIDTH))) { pr_err("Unsupported panel resolution: %dx%d\n", width, height); return -ENOTSUPP; } ctl->width = width; ctl->height = height; ctl->roi = (struct mdss_mdp_img_rect) {0, 0, width, height}; if (!ctl->mixer_left) { ctl->mixer_left = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF, ((width > MAX_MIXER_WIDTH) || split_fb)); if (!ctl->mixer_left) { pr_err("unable to allocate layer mixer\n"); return -ENOMEM; } } if (split_fb) width = ctl->mfd->split_fb_left; else if (width > MAX_MIXER_WIDTH) width /= 2; ctl->mixer_left->width = width; ctl->mixer_left->height = height; ctl->mixer_left->roi = (struct mdss_mdp_img_rect) {0, 0, width, height}; if (split_ctl) { pr_debug("split display detected\n"); return 0; } if (split_fb) width = ctl->mfd->split_fb_right; if (width < ctl->width) { if (ctl->mixer_right == NULL) { ctl->mixer_right = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF, true); if (!ctl->mixer_right) { pr_err("unable to allocate right mixer\n"); if (ctl->mixer_left) mdss_mdp_mixer_free(ctl->mixer_left); return -ENOMEM; } } ctl->mixer_right->width = width; ctl->mixer_right->height = height; ctl->mixer_right->roi = (struct mdss_mdp_img_rect) {0, 0, width, height}; } else if (ctl->mixer_right) { mdss_mdp_mixer_free(ctl->mixer_right); ctl->mixer_right = NULL; } if (ctl->mixer_right) { ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE | MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT; } else { ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE | MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT); } return 0; } static int mdss_mdp_ctl_setup_wfd(struct mdss_mdp_ctl *ctl) { struct mdss_data_type *mdata = ctl->mdata; struct mdss_mdp_mixer *mixer; int mixer_type; /* if WB2 is supported, try to allocate it first */ if (mdata->nmixers_intf >= MDSS_MDP_INTF_LAYERMIXER2) mixer_type = MDSS_MDP_MIXER_TYPE_INTF; else mixer_type = MDSS_MDP_MIXER_TYPE_WRITEBACK; mixer = mdss_mdp_mixer_alloc(ctl, mixer_type, false); if (!mixer && mixer_type == MDSS_MDP_MIXER_TYPE_INTF) mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK, false); if (!mixer) { pr_err("Unable to allocate writeback mixer\n"); return -ENOMEM; } if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) { ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE; } else { switch (mixer->num) { case MDSS_MDP_WB_LAYERMIXER0: ctl->opmode = MDSS_MDP_CTL_OP_WB0_MODE; break; case MDSS_MDP_WB_LAYERMIXER1: ctl->opmode = MDSS_MDP_CTL_OP_WB1_MODE; break; default: pr_err("Incorrect writeback config num=%d\n", mixer->num); mdss_mdp_mixer_free(mixer); return -EINVAL; } ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_LINE; } ctl->mixer_left = mixer; return 0; } struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata, struct msm_fb_data_type *mfd) { struct mdss_mdp_ctl *ctl; int ret = 0; struct mdss_data_type *mdata = mfd_to_mdata(mfd); ctl = mdss_mdp_ctl_alloc(mdata, MDSS_MDP_CTL0); if (!ctl) { pr_err("unable to allocate ctl\n"); return ERR_PTR(-ENOMEM); } ctl->mfd = mfd; ctl->panel_data = pdata; ctl->is_video_mode = false; switch (pdata->panel_info.type) { case EDP_PANEL: ctl->is_video_mode = true; ctl->intf_num = MDSS_MDP_INTF0; ctl->intf_type = MDSS_INTF_EDP; ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE; ctl->start_fnc = mdss_mdp_video_start; break; case MIPI_VIDEO_PANEL: ctl->is_video_mode = true; if (pdata->panel_info.pdest == DISPLAY_1) ctl->intf_num = MDSS_MDP_INTF1; else ctl->intf_num = MDSS_MDP_INTF2; 
ctl->intf_type = MDSS_INTF_DSI; ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE; ctl->start_fnc = mdss_mdp_video_start; break; case MIPI_CMD_PANEL: if (pdata->panel_info.pdest == DISPLAY_1) ctl->intf_num = MDSS_MDP_INTF1; else ctl->intf_num = MDSS_MDP_INTF2; ctl->intf_type = MDSS_INTF_DSI; ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE; ctl->start_fnc = mdss_mdp_cmd_start; break; case DTV_PANEL: ctl->is_video_mode = true; ctl->intf_num = MDSS_MDP_INTF3; ctl->intf_type = MDSS_INTF_HDMI; ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE; ctl->start_fnc = mdss_mdp_video_start; ret = mdss_mdp_limited_lut_igc_config(ctl); if (ret) pr_err("Unable to config IGC LUT data"); break; case WRITEBACK_PANEL: ctl->intf_num = MDSS_MDP_NO_INTF; ctl->start_fnc = mdss_mdp_writeback_start; ret = mdss_mdp_ctl_setup_wfd(ctl); if (ret) goto ctl_init_fail; break; default: pr_err("unsupported panel type (%d)\n", pdata->panel_info.type); ret = -EINVAL; goto ctl_init_fail; } ctl->opmode |= (ctl->intf_num << 4); if (ctl->intf_num == MDSS_MDP_NO_INTF) { ctl->dst_format = pdata->panel_info.out_format; } else { struct mdp_dither_cfg_data dither = { .block = mfd->index + MDP_LOGICAL_BLOCK_DISP_0, .flags = MDP_PP_OPS_DISABLE, }; switch (pdata->panel_info.bpp) { case 18: if (ctl->intf_type == MDSS_INTF_DSI) ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666 | MDSS_MDP_PANEL_FORMAT_PACK_ALIGN_MSB; else ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666; dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE; dither.g_y_depth = 2; dither.r_cr_depth = 2; dither.b_cb_depth = 2; break; case 24: default: ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB888; break; } mdss_mdp_dither_config(&dither, NULL); } return ctl; ctl_init_fail: mdss_mdp_ctl_free(ctl); return ERR_PTR(ret); } int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl, struct mdss_panel_data *pdata) { struct mdss_mdp_ctl *sctl; struct mdss_mdp_mixer *mixer; if (!ctl || !pdata) return -ENODEV; if (pdata->panel_info.xres > MAX_MIXER_WIDTH) { pr_err("Unsupported second panel resolution: %dx%d\n", pdata->panel_info.xres, pdata->panel_info.yres); return -ENOTSUPP; } if (ctl->mixer_right) { pr_err("right mixer already setup for ctl=%d\n", ctl->num); return -EPERM; } sctl = mdss_mdp_ctl_init(pdata, ctl->mfd); if (!sctl) { pr_err("unable to setup split display\n"); return -ENODEV; } sctl->width = pdata->panel_info.xres; sctl->height = pdata->panel_info.yres; ctl->mixer_left = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF, false); if (!ctl->mixer_left) { pr_err("unable to allocate layer mixer\n"); mdss_mdp_ctl_destroy(sctl); return -ENOMEM; } mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF, false); if (!mixer) { pr_err("unable to allocate layer mixer\n"); mdss_mdp_ctl_destroy(sctl); return -ENOMEM; } mixer->width = sctl->width; mixer->height = sctl->height; mixer->roi = (struct mdss_mdp_img_rect) {0, 0, mixer->width, mixer->height}; sctl->mixer_left = mixer; return mdss_mdp_set_split_ctl(ctl, sctl); } static void mdss_mdp_ctl_split_display_enable(int enable, struct mdss_mdp_ctl *main_ctl, struct mdss_mdp_ctl *slave_ctl) { u32 upper = 0, lower = 0; pr_debug("split main ctl=%d intf=%d slave ctl=%d intf=%d\n", main_ctl->num, main_ctl->intf_num, slave_ctl->num, slave_ctl->intf_num); if (enable) { if (main_ctl->opmode & MDSS_MDP_CTL_OP_CMD_MODE) { upper |= BIT(1); lower |= BIT(1); /* interface controlling sw trigger */ if (main_ctl->intf_num == MDSS_MDP_INTF2) upper |= BIT(4); else upper |= BIT(8); } else { /* video mode */ if (main_ctl->intf_num == MDSS_MDP_INTF2) lower |= BIT(4); else lower 
|= BIT(8); } } MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper); MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower); MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_EN, enable); } int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl) { struct mdss_mdp_ctl *sctl; int rc; rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL); WARN(rc, "unable to close panel for intf=%d\n", ctl->intf_num); sctl = mdss_mdp_get_split_ctl(ctl); if (sctl) { pr_debug("destroying split display ctl=%d\n", sctl->num); if (sctl->mixer_left) mdss_mdp_mixer_free(sctl->mixer_left); mdss_mdp_ctl_free(sctl); } else if (ctl->mixer_right) { mdss_mdp_mixer_free(ctl->mixer_right); ctl->mixer_right = NULL; } if (ctl->mixer_left) { mdss_mdp_mixer_free(ctl->mixer_left); ctl->mixer_left = NULL; } mdss_mdp_ctl_free(ctl); return 0; } int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg) { struct mdss_panel_data *pdata; int rc = 0; if (!ctl || !ctl->panel_data) return -ENODEV; pdata = ctl->panel_data; pr_debug("sending ctl=%d event=%d\n", ctl->num, event); do { if (pdata->event_handler) rc = pdata->event_handler(pdata, event, arg); pdata = pdata->next; } while (rc == 0 && pdata); return rc; } /* * mdss_mdp_ctl_restore() - restore mdp ctl path * @ctl: mdp controller. * * This function is called whenever MDP comes out of a power collapse as * a result of a screen update when DSI ULPS mode is enabled. It restores * the MDP controller's software state to the hardware registers. */ void mdss_mdp_ctl_restore(struct mdss_mdp_ctl *ctl) { u32 temp; temp = readl_relaxed(ctl->mdata->mdp_base + MDSS_MDP_REG_DISP_INTF_SEL); temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8)); writel_relaxed(temp, ctl->mdata->mdp_base + MDSS_MDP_REG_DISP_INTF_SEL); } static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff) { struct mdss_mdp_mixer *mixer; u32 outsize, temp; int ret = 0; int i, nmixers; pr_debug("ctl_num=%d\n", ctl->num); /* * Need start_fnc in 2 cases: * (1) handoff * (2) continuous splash finished. */ if (handoff || !ctl->panel_data->panel_info.cont_splash_enabled) { if (ctl->start_fnc) ret = ctl->start_fnc(ctl); else pr_warn("no start function for ctl=%d type=%d\n", ctl->num, ctl->panel_data->panel_info.type); if (ret) { pr_err("unable to start intf\n"); return ret; } } if (!ctl->panel_data->panel_info.cont_splash_enabled) { nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER + MDSS_MDP_WB_MAX_LAYERMIXER; for (i = 0; i < nmixers; i++) mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(i), 0); } mixer = ctl->mixer_left; mdss_mdp_pp_resume(ctl, mixer->num); mixer->params_changed++; temp = MDSS_MDP_REG_READ(MDSS_MDP_REG_DISP_INTF_SEL); temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8)); MDSS_MDP_REG_WRITE(MDSS_MDP_REG_DISP_INTF_SEL, temp); outsize = (mixer->height << 16) | mixer->width; mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize); if (ctl->panel_data->panel_info.fbc.enabled) { ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left, &ctl->panel_data->panel_info); } return ret; } int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff) { struct mdss_mdp_ctl *sctl; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); int ret = 0; if (ctl->power_on) { pr_debug("%d: panel already on!\n", __LINE__); return 0; } ret = mdss_mdp_ctl_setup(ctl); if (ret) return ret; sctl = mdss_mdp_get_split_ctl(ctl); mutex_lock(&ctl->lock); /* * keep power_on false during handoff to avoid unexpected * operations to overlay. 
*/ if (!handoff) ctl->power_on = true; memset(&ctl->cur_perf, 0, sizeof(ctl->cur_perf)); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_RESET, NULL); if (ret) { pr_err("panel power on failed ctl=%d\n", ctl->num); goto error; } ret = mdss_mdp_ctl_start_sub(ctl, handoff); if (ret == 0) { if (sctl) { /* split display is available */ ret = mdss_mdp_ctl_start_sub(sctl, handoff); if (!ret) mdss_mdp_ctl_split_display_enable(1, ctl, sctl); } else if (ctl->mixer_right) { struct mdss_mdp_mixer *mixer = ctl->mixer_right; u32 out, off; mdss_mdp_pp_resume(ctl, mixer->num); mixer->params_changed++; out = (mixer->height << 16) | mixer->width; off = MDSS_MDP_REG_LM_OFFSET(mixer->num); MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, out); mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0); } } mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); error: mutex_unlock(&ctl->lock); return ret; } int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl) { struct mdss_mdp_ctl *sctl; int ret = 0; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); u32 off; if (!ctl->power_on) { pr_debug("%s %d already off!\n", __func__, __LINE__); return 0; } sctl = mdss_mdp_get_split_ctl(ctl); pr_debug("ctl_num=%d\n", ctl->num); mutex_lock(&ctl->lock); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND); if (ctl->stop_fnc) ret = ctl->stop_fnc(ctl); else pr_warn("no stop func for ctl=%d\n", ctl->num); if (sctl && sctl->stop_fnc) { ret = sctl->stop_fnc(sctl); mdss_mdp_ctl_split_display_enable(0, ctl, sctl); } if (ret) { pr_warn("error powering off intf ctl=%d\n", ctl->num); } else { mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, 0); if (sctl) mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, 0); if (ctl->mixer_left) { off = __mdss_mdp_ctl_get_mixer_off(ctl->mixer_left); mdss_mdp_ctl_write(ctl, off, 0); } if (ctl->mixer_right) { off = __mdss_mdp_ctl_get_mixer_off(ctl->mixer_right); mdss_mdp_ctl_write(ctl, off, 0); } ctl->power_on = false; ctl->play_cnt = 0; mdss_mdp_ctl_perf_update(ctl, 0); } mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); mutex_unlock(&ctl->lock); return ret; } void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl, struct mdp_display_commit *data) { struct mdss_mdp_img_rect temp_roi, mixer_roi; temp_roi.x = data->roi.x; temp_roi.y = data->roi.y; temp_roi.w = data->roi.w; temp_roi.h = data->roi.h; /* * No Partial Update for: * 1) dual DSI panels * 2) non-cmd mode panels */ if (!temp_roi.w || !temp_roi.h || ctl->mixer_right || (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL) || !ctl->panel_data->panel_info.partial_update_enabled) { temp_roi = (struct mdss_mdp_img_rect) {0, 0, ctl->mixer_left->width, ctl->mixer_left->height}; } ctl->roi_changed = 0; if (((temp_roi.x != ctl->roi.x) || (temp_roi.y != ctl->roi.y)) || ((temp_roi.w != ctl->roi.w) || (temp_roi.h != ctl->roi.h))) { ctl->roi = temp_roi; ctl->roi_changed++; mixer_roi = ctl->mixer_left->roi; if ((mixer_roi.w != temp_roi.w) || (mixer_roi.h != temp_roi.h)) { ctl->mixer_left->roi = temp_roi; ctl->mixer_left->params_changed++; } } pr_debug("ROI requested: [%d, %d, %d, %d]\n", ctl->roi.x, ctl->roi.y, ctl->roi.w, ctl->roi.h); } /* * mdss_mdp_ctl_reset() - reset mdp ctl path. * @ctl: mdp controller. * this function called when underflow happen, * it will reset mdp ctl path and poll for its completion * * Note: called within atomic context. 
*/ int mdss_mdp_ctl_reset(struct mdss_mdp_ctl *ctl) { u32 status = 1; int cnt = 20; mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_SW_RESET, 1); /* * it takes around 30us to have mdp finish resetting its ctl path * poll every 50us so that reset should be completed at 1st poll */ do { udelay(50); status = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_SW_RESET); status &= 0x01; pr_debug("status=%x\n", status); cnt--; if (cnt == 0) { pr_err("timeout\n"); return -EAGAIN; } } while (status); return 0; } static int mdss_mdp_mixer_setup(struct mdss_mdp_ctl *ctl, struct mdss_mdp_mixer *mixer) { struct mdss_mdp_pipe *pipe; u32 off, blend_op, blend_stage; u32 mixercfg = 0, blend_color_out = 0, bg_alpha_enable = 0; u32 fg_alpha = 0, bg_alpha = 0; int stage, secure = 0; int screen_state; int outsize = 0; u32 op_mode; screen_state = ctl->force_screen_state; if (!mixer) return -ENODEV; trace_mdp_mixer_update(mixer->num); pr_debug("setup mixer=%d\n", mixer->num); outsize = (mixer->roi.h << 16) | mixer->roi.w; mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize); if (screen_state == MDSS_SCREEN_FORCE_BLANK) { mixercfg = MDSS_MDP_LM_BORDER_COLOR; goto update_mixer; } pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE]; if (pipe == NULL) { mixercfg = MDSS_MDP_LM_BORDER_COLOR; } else { if (pipe->num == MDSS_MDP_SSPP_VIG3 || pipe->num == MDSS_MDP_SSPP_RGB3) { /* Add 2 to account for Cursor & Border bits */ mixercfg = 1 << ((3 * pipe->num)+2); } else { mixercfg = 1 << (3 * pipe->num); } if (pipe->src_fmt->alpha_enable) bg_alpha_enable = 1; secure = pipe->flags & MDP_SECURE_OVERLAY_SESSION; } for (stage = MDSS_MDP_STAGE_0; stage < MDSS_MDP_MAX_STAGE; stage++) { pipe = mixer->stage_pipe[stage]; if (pipe == NULL) continue; if (stage != pipe->mixer_stage) { mixer->stage_pipe[stage] = NULL; continue; } blend_stage = stage - MDSS_MDP_STAGE_0; off = MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage); blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST | MDSS_MDP_BLEND_BG_ALPHA_BG_CONST); fg_alpha = pipe->alpha; bg_alpha = 0xFF - pipe->alpha; /* keep fg alpha */ blend_color_out |= 1 << (blend_stage + 1); switch (pipe->blend_op) { case BLEND_OP_OPAQUE: blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST | MDSS_MDP_BLEND_BG_ALPHA_BG_CONST); pr_debug("pnum=%d stg=%d op=OPAQUE\n", pipe->num, stage); break; case BLEND_OP_PREMULTIPLIED: if (pipe->src_fmt->alpha_enable) { blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST | MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL); if (fg_alpha != 0xff) { bg_alpha = fg_alpha; blend_op |= MDSS_MDP_BLEND_BG_MOD_ALPHA | MDSS_MDP_BLEND_BG_INV_MOD_ALPHA; } else { blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA; } } pr_debug("pnum=%d stg=%d op=PREMULTIPLIED\n", pipe->num, stage); break; case BLEND_OP_COVERAGE: if (pipe->src_fmt->alpha_enable) { blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL | MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL); if (fg_alpha != 0xff) { bg_alpha = fg_alpha; blend_op |= MDSS_MDP_BLEND_FG_MOD_ALPHA | MDSS_MDP_BLEND_FG_INV_MOD_ALPHA | MDSS_MDP_BLEND_BG_MOD_ALPHA | MDSS_MDP_BLEND_BG_INV_MOD_ALPHA; } else { blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA; } } pr_debug("pnum=%d stg=%d op=COVERAGE\n", pipe->num, stage); break; default: blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST | MDSS_MDP_BLEND_BG_ALPHA_BG_CONST); pr_debug("pnum=%d stg=%d op=NONE\n", pipe->num, stage); break; } if (!pipe->src_fmt->alpha_enable && bg_alpha_enable) blend_color_out = 0; mixercfg |= stage << (3 * pipe->num); trace_mdp_sspp_change(pipe); pr_debug("stg=%d op=%x fg_alpha=%x bg_alpha=%x\n", stage, blend_op, fg_alpha, bg_alpha); mdp_mixer_write(mixer, off + 
MDSS_MDP_REG_LM_OP_MODE, blend_op); mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA, fg_alpha); mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA, bg_alpha); } if (mixer->cursor_enabled) mixercfg |= MDSS_MDP_LM_CURSOR_OUT; update_mixer: pr_debug("mixer=%d mixer_cfg=%x\n", mixer->num, mixercfg); if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3) ctl->flush_bits |= BIT(20); else if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) ctl->flush_bits |= BIT(9) << mixer->num; else ctl->flush_bits |= BIT(6) << mixer->num; op_mode = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_OP_MODE); /* Read GC enable/disable status on LM */ op_mode = (op_mode & BIT(0)); blend_color_out |= op_mode; mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OP_MODE, blend_color_out); off = __mdss_mdp_ctl_get_mixer_off(mixer); mdss_mdp_ctl_write(ctl, off, mixercfg); return 0; } int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata, u32 *mixer_offsets, u32 *dspp_offsets, u32 *pingpong_offsets, u32 type, u32 len) { struct mdss_mdp_mixer *head; u32 i; int rc = 0; u32 size = len; if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && !mdata->has_wfd_blk) size++; head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_mixer) * size, GFP_KERNEL); if (!head) { pr_err("unable to setup mixer type=%d :kzalloc fail\n", type); return -ENOMEM; } for (i = 0; i < len; i++) { head[i].type = type; head[i].base = mdata->mdp_base + mixer_offsets[i]; head[i].ref_cnt = 0; head[i].num = i; if (type == MDSS_MDP_MIXER_TYPE_INTF) { head[i].dspp_base = mdata->mdp_base + dspp_offsets[i]; head[i].pingpong_base = mdata->mdp_base + pingpong_offsets[i]; } } /* * Duplicate the last writeback mixer for concurrent line and block mode * operations */ if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && !mdata->has_wfd_blk) head[len] = head[len - 1]; switch (type) { case MDSS_MDP_MIXER_TYPE_INTF: mdata->mixer_intf = head; break; case MDSS_MDP_MIXER_TYPE_WRITEBACK: mdata->mixer_wb = head; break; default: pr_err("Invalid mixer type=%d\n", type); rc = -EINVAL; break; } return rc; } int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata, u32 *ctl_offsets, u32 *wb_offsets, u32 len) { struct mdss_mdp_ctl *head; struct mutex *shared_lock = NULL; u32 i; u32 size = len; if (!mdata->has_wfd_blk) { size++; shared_lock = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mutex), GFP_KERNEL); if (!shared_lock) { pr_err("unable to allocate mem for mutex\n"); return -ENOMEM; } mutex_init(shared_lock); } head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_ctl) * size, GFP_KERNEL); if (!head) { pr_err("unable to setup ctl and wb: kzalloc fail\n"); return -ENOMEM; } for (i = 0; i < len; i++) { head[i].num = i; head[i].base = (mdata->mdp_base) + ctl_offsets[i]; head[i].wb_base = (mdata->mdp_base) + wb_offsets[i]; head[i].ref_cnt = 0; } if (!mdata->has_wfd_blk) { head[len - 1].shared_lock = shared_lock; /* * Allocate a virtual ctl to be able to perform simultaneous * line mode and block mode operations on the same * writeback block */ head[len] = head[len - 1]; head[len].num = head[len - 1].num; } mdata->ctl_off = head; return 0; } struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux) { struct mdss_mdp_mixer *mixer = NULL; struct mdss_overlay_private *mdp5_data = NULL; if (!ctl || !ctl->mfd) { pr_err("ctl not initialized\n"); return NULL; } mdp5_data = mfd_to_mdp5_data(ctl->mfd); if (!mdp5_data) { pr_err("ctl not initialized\n"); return NULL; } switch (mux) { case MDSS_MDP_MIXER_MUX_DEFAULT: case MDSS_MDP_MIXER_MUX_LEFT: mixer = mdp5_data->mixer_swap ? 
ctl->mixer_right : ctl->mixer_left; break; case MDSS_MDP_MIXER_MUX_RIGHT: mixer = mdp5_data->mixer_swap ? ctl->mixer_left : ctl->mixer_right; break; } return mixer; } struct mdss_mdp_pipe *mdss_mdp_mixer_stage_pipe(struct mdss_mdp_ctl *ctl, int mux, int stage) { struct mdss_mdp_pipe *pipe = NULL; struct mdss_mdp_mixer *mixer; if (!ctl) return NULL; if (mutex_lock_interruptible(&ctl->lock)) return NULL; mixer = mdss_mdp_mixer_get(ctl, mux); if (mixer) pipe = mixer->stage_pipe[stage]; mutex_unlock(&ctl->lock); return pipe; } int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe, int params_changed) { struct mdss_mdp_ctl *ctl; struct mdss_mdp_mixer *mixer; int i; if (!pipe) return -EINVAL; mixer = pipe->mixer; if (!mixer) return -EINVAL; ctl = mixer->ctl; if (!ctl) return -EINVAL; if (pipe->mixer_stage >= MDSS_MDP_MAX_STAGE) { pr_err("invalid mixer stage\n"); return -EINVAL; } pr_debug("pnum=%x mixer=%d stage=%d\n", pipe->num, mixer->num, pipe->mixer_stage); if (mutex_lock_interruptible(&ctl->lock)) return -EINTR; if (params_changed) { mixer->params_changed++; for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) { if (i == pipe->mixer_stage) mixer->stage_pipe[i] = pipe; else if (mixer->stage_pipe[i] == pipe) mixer->stage_pipe[i] = NULL; } } if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA) ctl->flush_bits |= BIT(pipe->num) << 5; else if (pipe->num == MDSS_MDP_SSPP_VIG3 || pipe->num == MDSS_MDP_SSPP_RGB3) ctl->flush_bits |= BIT(pipe->num) << 10; else /* RGB/VIG 0-2 pipes */ ctl->flush_bits |= BIT(pipe->num); mutex_unlock(&ctl->lock); return 0; } /** * mdss_mdp_mixer_unstage_all() - Unstage all pipes from mixer * @mixer: Mixer from which to unstage all pipes * * Unstage any pipes that are currently attached to mixer. * * NOTE: this will not update the pipe structure, and thus a full * deinitialization or reconfiguration of all pipes is expected after this call. 
*/ void mdss_mdp_mixer_unstage_all(struct mdss_mdp_mixer *mixer) { struct mdss_mdp_pipe *tmp; int i; if (!mixer) return; for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) { tmp = mixer->stage_pipe[i]; if (tmp) { mixer->stage_pipe[i] = NULL; mixer->params_changed++; tmp->params_changed++; } } } int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe) { struct mdss_mdp_ctl *ctl; struct mdss_mdp_mixer *mixer; if (!pipe) return -EINVAL; mixer = pipe->mixer; if (!mixer) return -EINVAL; ctl = mixer->ctl; if (!ctl) return -EINVAL; pr_debug("unstage pnum=%d stage=%d mixer=%d\n", pipe->num, pipe->mixer_stage, mixer->num); if (mutex_lock_interruptible(&ctl->lock)) return -EINTR; if (pipe == mixer->stage_pipe[pipe->mixer_stage]) { mixer->params_changed++; mixer->stage_pipe[pipe->mixer_stage] = NULL; } mutex_unlock(&ctl->lock); return 0; } static int mdss_mdp_mixer_update(struct mdss_mdp_mixer *mixer) { u32 off = 0; if (!mixer) return -EINVAL; mixer->params_changed = 0; /* skip mixer setup for rotator */ if (!mixer->rotator_mode) { mdss_mdp_mixer_setup(mixer->ctl, mixer); } else { off = __mdss_mdp_ctl_get_mixer_off(mixer); mdss_mdp_ctl_write(mixer->ctl, off, 0); } return 0; } int mdss_mdp_ctl_update_fps(struct mdss_mdp_ctl *ctl, int fps) { int ret = 0; struct mdss_mdp_ctl *sctl = NULL; #ifdef CONFIG_LGE_DEVFREQ_DFPS pr_debug("%s + : trying to fps = %d\n", __func__, fps); #endif sctl = mdss_mdp_get_split_ctl(ctl); if (ctl->config_fps_fnc) ret = ctl->config_fps_fnc(ctl, sctl, fps); #ifdef CONFIG_LGE_DEVFREQ_DFPS pr_debug("%s - : fps = %d\n", __func__, fps); #endif return ret; } int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl, ktime_t *wakeup_time) { struct mdss_panel_info *pinfo; u32 clk_rate, clk_period; u32 current_line, total_line; u32 time_of_line, time_to_vsync; ktime_t current_time = ktime_get(); if (!ctl->read_line_cnt_fnc) return -ENOSYS; pinfo = &ctl->panel_data->panel_info; if (!pinfo) return -ENODEV; clk_rate = mdss_mdp_get_pclk_rate(ctl); clk_rate /= 1000; /* in kHz */ if (!clk_rate) return -EINVAL; /* * calculate clk_period as pico second to maintain good * accuracy with high pclk rate and this number is in 17 bit * range. 
*/ clk_period = 1000000000 / clk_rate; if (!clk_period) return -EINVAL; time_of_line = (pinfo->lcdc.h_back_porch + pinfo->lcdc.h_front_porch + pinfo->lcdc.h_pulse_width + pinfo->xres) * clk_period; time_of_line /= 1000; /* in nano second */ if (!time_of_line) return -EINVAL; current_line = ctl->read_line_cnt_fnc(ctl); total_line = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_front_porch + pinfo->lcdc.v_pulse_width + pinfo->yres; if (current_line > total_line) return -EINVAL; time_to_vsync = time_of_line * (total_line - current_line); if (!time_to_vsync) return -EINVAL; *wakeup_time = ktime_add_ns(current_time, time_to_vsync); pr_debug("clk_rate=%dkHz clk_period=%d cur_line=%d tot_line=%d\n", clk_rate, clk_period, current_line, total_line); pr_debug("time_to_vsync=%d current_time=%d wakeup_time=%d\n", time_to_vsync, (int)ktime_to_ms(current_time), (int)ktime_to_ms(*wakeup_time)); return 0; } int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl) { int ret; if (!ctl) { pr_err("invalid ctl\n"); return -ENODEV; } ret = mutex_lock_interruptible(&ctl->lock); if (ret) return ret; if (!ctl->power_on) { mutex_unlock(&ctl->lock); return 0; } ATRACE_BEGIN("wait_fnc"); if (ctl->wait_fnc) ret = ctl->wait_fnc(ctl, NULL); ATRACE_END("wait_fnc"); trace_mdp_commit(ctl); #ifdef VIDEO_PLAYBACK_AB_1_1_G3 if (ctl->mixer_left && !ctl->mixer_left->rotator_mode) #endif mdss_mdp_ctl_perf_update(ctl, 0); mutex_unlock(&ctl->lock); return ret; } int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl) { int ret; ret = mutex_lock_interruptible(&ctl->lock); if (ret) return ret; if (!ctl->power_on) { mutex_unlock(&ctl->lock); return 0; } if (ctl->wait_pingpong) ret = ctl->wait_pingpong(ctl, NULL); mutex_unlock(&ctl->lock); return ret; } int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg) { struct mdss_mdp_ctl *sctl = NULL; int mixer1_changed, mixer2_changed; int ret = 0; bool is_bw_released; if (!ctl) { pr_err("display function not set\n"); return -ENODEV; } mutex_lock(&ctl->lock); pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt); if (!ctl->power_on) { mutex_unlock(&ctl->lock); return 0; } sctl = mdss_mdp_get_split_ctl(ctl); mixer1_changed = (ctl->mixer_left && ctl->mixer_left->params_changed); mixer2_changed = (ctl->mixer_right && ctl->mixer_right->params_changed); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); /* * We could have released the bandwidth if there were no transactions * pending, so we want to re-calculate the bandwidth in this situation */ is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl); mdss_mdp_ctl_perf_set_transaction_status(ctl, PERF_SW_COMMIT_STATE, PERF_STATUS_BUSY); if (is_bw_released || mixer1_changed || mixer2_changed || ctl->force_screen_state) { ATRACE_BEGIN("prepare_fnc"); if (ctl->prepare_fnc) ret = ctl->prepare_fnc(ctl, arg); ATRACE_END("prepare_fnc"); if (ret) { pr_err("error preparing display\n"); goto done; } ATRACE_BEGIN("mixer_programming"); mdss_mdp_ctl_perf_update(ctl, 1); if (mixer1_changed) mdss_mdp_mixer_update(ctl->mixer_left); if (mixer2_changed) mdss_mdp_mixer_update(ctl->mixer_right); mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, ctl->opmode); ctl->flush_bits |= BIT(17); /* CTL */ if (sctl) { mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, sctl->opmode); sctl->flush_bits |= BIT(17); } ATRACE_END("mixer_programming"); } ATRACE_BEGIN("frame_ready"); if (!ctl->shared_lock) mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY); ATRACE_END("frame_ready"); ATRACE_BEGIN("wait_pingpong"); if (ctl->wait_pingpong) ctl->wait_pingpong(ctl, NULL); 
ATRACE_END("wait_pingpong"); ctl->roi_bkup.w = ctl->roi.w; ctl->roi_bkup.h = ctl->roi.h; ATRACE_BEGIN("postproc_programming"); if (ctl->mfd && ctl->mfd->dcm_state != DTM_ENTER) /* postprocessing setup, including dspp */ mdss_mdp_pp_setup_locked(ctl); ATRACE_END("postproc_programming"); ATRACE_BEGIN("flush_kickoff"); mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl->flush_bits); if (sctl) { mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH, sctl->flush_bits); } wmb(); ctl->flush_bits = 0; mdss_mdp_xlog_mixer_reg(ctl); if (ctl->display_fnc) ret = ctl->display_fnc(ctl, arg); /* kickoff */ if (ret) pr_warn("error displaying frame\n"); ctl->play_cnt++; ATRACE_END("flush_kickoff"); done: mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); mutex_unlock(&ctl->lock); return ret; } void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl, struct notifier_block *notifier) { blocking_notifier_chain_register(&ctl->notifier_head, notifier); } void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl, struct notifier_block *notifier) { blocking_notifier_chain_unregister(&ctl->notifier_head, notifier); } int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event) { return blocking_notifier_call_chain(&ctl->notifier_head, event, ctl); } int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id) { int i; struct mdss_mdp_ctl *ctl; struct mdss_data_type *mdata; u32 mixer_cnt = 0; mutex_lock(&mdss_mdp_ctl_lock); mdata = mdss_mdp_get_mdata(); for (i = 0; i < mdata->nctl; i++) { ctl = mdata->ctl_off + i; if ((ctl->power_on) && (ctl->mfd) && (ctl->mfd->index == fb_num)) { if (ctl->mixer_left) { mixer_id[mixer_cnt] = ctl->mixer_left->num; mixer_cnt++; } if (mixer_cnt && ctl->mixer_right) { mixer_id[mixer_cnt] = ctl->mixer_right->num; mixer_cnt++; } if (mixer_cnt) break; } } mutex_unlock(&mdss_mdp_ctl_lock); return mixer_cnt; } /** * @mdss_mdp_ctl_mixer_switch() - return ctl mixer of @return_type * @ctl: Pointer to ctl structure to be switched. * @return_type: wb_type of the ctl to be switched to. * * Virtual mixer switch should be performed only when there is no * dedicated wfd block and writeback block is shared. 
*/ struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl, u32 return_type) { int i; struct mdss_data_type *mdata = ctl->mdata; if (ctl->wb_type == return_type) { mdata->mixer_switched = false; return ctl; } for (i = 0; i <= mdata->nctl; i++) { if (mdata->ctl_off[i].wb_type == return_type) { pr_debug("switching mixer from ctl=%d to ctl=%d\n", ctl->num, mdata->ctl_off[i].num); mdata->mixer_switched = true; return mdata->ctl_off + i; } } pr_err("unable to switch mixer to type=%d\n", return_type); return NULL; } static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer) { if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) { if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3) return MDSS_MDP_CTL_X_LAYER_5; else return MDSS_MDP_REG_CTL_LAYER(mixer->num); } else { return MDSS_MDP_REG_CTL_LAYER(mixer->num + MDSS_MDP_INTF_LAYERMIXER3); } } static int __mdss_mdp_mixer_handoff_helper(struct mdss_mdp_mixer *mixer, struct mdss_mdp_pipe *pipe) { int rc = 0; if (!mixer) { rc = -EINVAL; goto error; } if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] != NULL) { pr_err("More than one pipe staged on mixer num %d\n", mixer->num); rc = -EINVAL; goto error; } pr_debug("Staging pipe num %d on mixer num %d\n", pipe->num, mixer->num); mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] = pipe; pipe->mixer = mixer; pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED; error: return rc; } /** * mdss_mdp_mixer_handoff() - Stages a given pipe on the appropriate mixer * @ctl: pointer to the control structure associated with the overlay device. * @num: the mixer number on which the pipe needs to be staged. * @pipe: pointer to the pipe to be staged. * * Function stages a given pipe on either the left mixer or the right mixer * for the control structre based on the mixer number. If the input mixer * number does not match either of the mixers then an error is returned. * This function is called during overlay handoff when certain pipes are * already staged by the bootloader. */ int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num, struct mdss_mdp_pipe *pipe) { int rc = 0; struct mdss_mdp_mixer *mx_left = ctl->mixer_left; struct mdss_mdp_mixer *mx_right = ctl->mixer_right; /* * For performance calculations, stage the handed off pipe * as MDSS_MDP_STAGE_UNUSED */ if (mx_left && (mx_left->num == num)) { rc = __mdss_mdp_mixer_handoff_helper(mx_left, pipe); } else if (mx_right && (mx_right->num == num)) { rc = __mdss_mdp_mixer_handoff_helper(mx_right, pipe); } else { pr_err("pipe num %d staged on unallocated mixer num %d\n", pipe->num, num); rc = -EINVAL; } return rc; } static void mdss_mdp_xlog_mixer_reg(struct mdss_mdp_ctl *ctl) { int i, off; u32 data[MDSS_MDP_INTF_MAX_LAYERMIXER]; for (i = 0; i < MDSS_MDP_INTF_MAX_LAYERMIXER; i++) { off = MDSS_MDP_REG_CTL_LAYER(i); data[i] = mdss_mdp_ctl_read(ctl, off); } MDSS_XLOG(data[MDSS_MDP_INTF_LAYERMIXER0], data[MDSS_MDP_INTF_LAYERMIXER1], data[MDSS_MDP_INTF_LAYERMIXER2], data[MDSS_MDP_INTF_LAYERMIXER3], off); }
davidmueller13/android_kernel_lge_msm8974
drivers/video/msm/mdss/mdss_mdp_ctl.c
C
gpl-2.0
76,385
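The mdss_mdp_display_wakeup_time() helper in the mdss_mdp_ctl.c row above derives the time remaining until the next vsync from the pixel clock and the panel's line/frame geometry, deliberately computing the clock period in picoseconds to keep precision at high pixel clocks. A minimal standalone sketch of that same arithmetic is below; it is not driver code, and every timing value in it (pixel clock, porch widths, current line) is a made-up assumption rather than a real panel's parameters.

/*
 * Standalone sketch of the time-to-vsync arithmetic used by
 * mdss_mdp_display_wakeup_time(). All timing numbers below are
 * illustrative assumptions, not values from any real panel.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* assumed panel timing */
    uint32_t pclk_khz  = 130000;              /* 130 MHz pixel clock, in kHz */
    uint32_t h_total   = 16 + 16 + 8 + 1080;  /* h porches + pulse width + xres */
    uint32_t v_total   = 4 + 8 + 2 + 1920;    /* v porches + pulse width + yres */
    uint32_t cur_line  = 500;                 /* assumed line-counter readback */

    /* period in picoseconds keeps precision at high pixel clocks */
    uint32_t clk_period_ps = 1000000000u / pclk_khz;            /* ps per pixel */
    uint32_t line_ns       = (h_total * clk_period_ps) / 1000;  /* ns per line */
    uint64_t to_vsync_ns   = (uint64_t)line_ns * (v_total - cur_line);

    printf("line time %u ns, time to vsync %llu ns\n",
           line_ns, (unsigned long long)to_vsync_ns);
    return 0;
}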
<?php /** * @package Joomla.Administrator * @subpackage com_finder * * @copyright Copyright (C) 2005 - 2017 Open Source Matters, Inc. All rights reserved. * @license GNU General Public License version 2 or later; see LICENSE.txt */ defined('_JEXEC') or die; /** * Index model class for Finder. * * @since 2.5 */ class FinderModelIndex extends JModelList { /** * The event to trigger after deleting the data. * * @var string * @since 2.5 */ protected $event_after_delete = 'onContentAfterDelete'; /** * The event to trigger before deleting the data. * * @var string * @since 2.5 */ protected $event_before_delete = 'onContentBeforeDelete'; /** * Constructor. * * @param array $config An associative array of configuration settings. [optional] * * @since 2.5 * @see JControllerLegacy */ public function __construct($config = array()) { if (empty($config['filter_fields'])) { $config['filter_fields'] = array( 'state', 'published', 'l.published', 'title', 'l.title', 'type', 'type_id', 'l.type_id', 't.title', 't_title', 'url', 'l.url', 'indexdate', 'l.indexdate', 'content_map', ); } parent::__construct($config); } /** * Method to test whether a record can be deleted. * * @param object $record A record object. * * @return boolean True if allowed to delete the record. Defaults to the permission for the component. * * @since 2.5 */ protected function canDelete($record) { return JFactory::getUser()->authorise('core.delete', $this->option); } /** * Method to test whether a record can have its state changed. * * @param object $record A record object. * * @return boolean True if allowed to change the state of the record. Defaults to the permission for the component. * * @since 2.5 */ protected function canEditState($record) { return JFactory::getUser()->authorise('core.edit.state', $this->option); } /** * Method to delete one or more records. * * @param array &$pks An array of record primary keys. * * @return boolean True if successful, false if an error occurs. * * @since 2.5 */ public function delete(&$pks) { $dispatcher = JEventDispatcher::getInstance(); $pks = (array) $pks; $table = $this->getTable(); // Include the content plugins for the on delete events. JPluginHelper::importPlugin('content'); // Iterate the items to delete each one. foreach ($pks as $i => $pk) { if ($table->load($pk)) { if ($this->canDelete($table)) { $context = $this->option . '.' . $this->name; // Trigger the onContentBeforeDelete event. $result = $dispatcher->trigger($this->event_before_delete, array($context, $table)); if (in_array(false, $result, true)) { $this->setError($table->getError()); return false; } if (!$table->delete($pk)) { $this->setError($table->getError()); return false; } // Trigger the onContentAfterDelete event. $dispatcher->trigger($this->event_after_delete, array($context, $table)); } else { // Prune items that you can't change. unset($pks[$i]); $error = $this->getError(); if ($error) { $this->setError($error); } else { $this->setError(JText::_('JLIB_APPLICATION_ERROR_DELETE_NOT_PERMITTED')); } } } else { $this->setError($table->getError()); return false; } } // Clear the component's cache $this->cleanCache(); return true; } /** * Build an SQL query to load the list data. * * @return JDatabaseQuery A JDatabaseQuery object * * @since 2.5 */ protected function getListQuery() { $db = $this->getDbo(); $query = $db->getQuery(true) ->select('l.*') ->select($db->quoteName('t.title', 't_title')) ->from($db->quoteName('#__finder_links', 'l')) ->join('INNER', $db->quoteName('#__finder_types', 't') . ' ON ' . 
$db->quoteName('t.id') . ' = ' . $db->quoteName('l.type_id')); // Check the type filter. $type = $this->getState('filter.type'); if (is_numeric($type)) { $query->where($db->quoteName('l.type_id') . ' = ' . (int) $type); } // Check the map filter. $contentMapId = $this->getState('filter.content_map'); if (is_numeric($contentMapId)) { $query->join('INNER', $db->quoteName('#__finder_taxonomy_map', 'm') . ' ON ' . $db->quoteName('m.link_id') . ' = ' . $db->quoteName('l.link_id')) ->where($db->quoteName('m.node_id') . ' = ' . (int) $contentMapId); } // Check for state filter. $state = $this->getState('filter.state'); if (is_numeric($state)) { $query->where($db->quoteName('l.published') . ' = ' . (int) $state); } // Check the search phrase. $search = $this->getState('filter.search'); if (!empty($search)) { $search = $db->quote('%' . str_replace(' ', '%', $db->escape(trim($search), true) . '%')); $orSearchSql = $db->quoteName('l.title') . ' LIKE ' . $search . ' OR ' . $db->quoteName('l.url') . ' LIKE ' . $search; // Filter by indexdate only if $search doesn't contains non-ascii characters if (!preg_match('/[^\x00-\x7F]/', $search)) { $orSearchSql .= ' OR ' . $query->castAsChar($db->quoteName('l.indexdate')) . ' LIKE ' . $search; } $query->where('(' . $orSearchSql . ')'); } // Handle the list ordering. $listOrder = $this->getState('list.ordering', 'l.title'); $listDir = $this->getState('list.direction', 'ASC'); if ($listOrder == 't.title') { $ordering = $db->quoteName('t.title') . ' ' . $db->escape($listDir) . ', ' . $db->quoteName('l.title') . ' ' . $db->escape($listDir); } else { $ordering = $db->escape($listOrder) . ' ' . $db->escape($listDir); } $query->order($ordering); return $query; } /** * Method to get the state of the Smart Search Plugins. * * @return array Array of relevant plugins and whether they are enabled or not. * * @since 2.5 */ public function getPluginState() { $db = $this->getDbo(); $query = $db->getQuery(true) ->select('name, enabled') ->from($db->quoteName('#__extensions')) ->where($db->quoteName('type') . ' = ' . $db->quote('plugin')) ->where($db->quoteName('folder') . ' IN (' . $db->quote('system') . ',' . $db->quote('content') . ')') ->where($db->quoteName('element') . ' = ' . $db->quote('finder')); $db->setQuery($query); return $db->loadObjectList('name'); } /** * Method to get a store id based on model configuration state. * * This is necessary because the model is used by the component and * different modules that might need different sets of data or different * ordering requirements. * * @param string $id A prefix for the store id. [optional] * * @return string A store id. * * @since 2.5 */ protected function getStoreId($id = '') { // Compile the store id. $id .= ':' . $this->getState('filter.search'); $id .= ':' . $this->getState('filter.state'); $id .= ':' . $this->getState('filter.type'); $id .= ':' . $this->getState('filter.content_map'); return parent::getStoreId($id); } /** * Gets the total of indexed items. * * @return int The total of indexed items. * * @since 3.6.0 */ public function getTotalIndexed() { $db = $this->getDbo(); $query = $db->getQuery(true) ->select('COUNT(link_id)') ->from($db->quoteName('#__finder_links')); $db->setQuery($query); $db->execute(); return (int) $db->loadResult(); } /** * Returns a JTable object, always creating it. * * @param string $type The table type to instantiate. [optional] * @param string $prefix A prefix for the table class name. [optional] * @param array $config Configuration array for model. 
[optional] * * @return JTable A database object * * @since 2.5 */ public function getTable($type = 'Link', $prefix = 'FinderTable', $config = array()) { return JTable::getInstance($type, $prefix, $config); } /** * Method to purge the index, deleting all links. * * @return boolean True on success, false on failure. * * @since 2.5 * @throws Exception on database error */ public function purge() { $db = $this->getDbo(); // Truncate the links table. $db->truncateTable('#__finder_links'); // Truncate the links terms tables. for ($i = 0; $i <= 15; $i++) { // Get the mapping table suffix. $suffix = dechex($i); $db->truncateTable('#__finder_links_terms' . $suffix); } // Truncate the terms table. $db->truncateTable('#__finder_terms'); // Truncate the taxonomy map table. $db->truncateTable('#__finder_taxonomy_map'); // Delete all the taxonomy nodes except the root. $query = $db->getQuery(true) ->delete($db->quoteName('#__finder_taxonomy')) ->where($db->quoteName('id') . ' > 1'); $db->setQuery($query); $db->execute(); // Truncate the tokens tables. $db->truncateTable('#__finder_tokens'); // Truncate the tokens aggregate table. $db->truncateTable('#__finder_tokens_aggregate'); return true; } /** * Method to auto-populate the model state. Calling getState in this method will result in recursion. * * @param string $ordering An optional ordering field. [optional] * @param string $direction An optional direction. [optional] * * @return void * * @since 2.5 */ protected function populateState($ordering = 'l.title', $direction = 'asc') { // Load the filter state. $this->setState('filter.search', $this->getUserStateFromRequest($this->context . '.filter.search', 'filter_search', '', 'string')); $this->setState('filter.state', $this->getUserStateFromRequest($this->context . '.filter.state', 'filter_state', '', 'cmd')); $this->setState('filter.type', $this->getUserStateFromRequest($this->context . '.filter.type', 'filter_type', '', 'cmd')); $this->setState('filter.content_map', $this->getUserStateFromRequest($this->context . '.filter.content_map', 'filter_content_map', '', 'cmd')); // Load the parameters. $params = JComponentHelper::getParams('com_finder'); $this->setState('params', $params); // List state information. parent::populateState($ordering, $direction); } /** * Method to change the published state of one or more records. * * @param array &$pks A list of the primary keys to change. * @param integer $value The value of the published state. [optional] * * @return boolean True on success. * * @since 2.5 */ public function publish(&$pks, $value = 1) { $dispatcher = JEventDispatcher::getInstance(); $user = JFactory::getUser(); $table = $this->getTable(); $pks = (array) $pks; // Include the content plugins for the change of state event. JPluginHelper::importPlugin('content'); // Access checks. foreach ($pks as $i => $pk) { $table->reset(); if ($table->load($pk)) { if (!$this->canEditState($table)) { // Prune items that you can't change. unset($pks[$i]); $this->setError(JText::_('JLIB_APPLICATION_ERROR_EDITSTATE_NOT_PERMITTED')); return false; } } } // Attempt to change the state of the records. if (!$table->publish($pks, $value, $user->get('id'))) { $this->setError($table->getError()); return false; } $context = $this->option . '.' . $this->name; // Trigger the onContentChangeState event. 
$result = $dispatcher->trigger('onContentChangeState', array($context, $pks, $value)); if (in_array(false, $result, true)) { $this->setError($table->getError()); return false; } // Clear the component's cache $this->cleanCache(); return true; } }
amsv/jm37
administrator/components/com_finder/models/index.php
PHP
gpl-2.0
11,725
<?php /** * File containing the updateisbn13.php bin script * * @copyright Copyright (C) eZ Systems AS. All rights reserved. * @license For full copyright and license information view LICENSE file distributed with this source code. * @version //autogentag// * @package kernel */ /** * This script updates the different ranges used by the ISBN standard to * calculate the length of Registration group, Registrant and Publication element * * It gets the values from xml file normally provided by International ISBN Agency * http://www.isbn-international.org/agency?rmxml=1 */ require_once 'autoload.php'; $url = ''; // http://www.isbn-international.org/agency?rmxml=1 url with the xml. $cli = eZCLI::instance(); $script = eZScript::instance( array( 'description' => "eZ Publish ISBN-13 update\n\n" . "Update the database with new updated ISBN data to the database.", 'use-session' => false, 'use-modules' => true, 'use-extensions' => true ) ); $script->startup(); $options = $script->getOptions( "[url:][db-host:][db-user:][db-password:][db-database:][db-driver:]", "", array( 'url' => "URL containing the xml file for the different ranges", 'db-host' => "Database host.", 'db-user' => "Database user.", 'db-password' => "Database password.", 'db-database' => "Database name.", 'db-driver' => "Database driver." ) ); $script->initialize(); if ( isset( $options['url'] ) ) { $url = $options['url']; } else { $cli->error( 'Error: you need to specify a url to the xml file containing the ranges' ); $script->shutdown( 1 ); } $db = eZDB::instance(); if( !$db->IsConnected ) { // default settings are not valid // try user-defined settings $dbUser = $options['db-user'] ? $options['db-user'] : false; $dbPassword = $options['db-password'] ? $options['db-password'] : false; $dbHost = $options['db-host'] ? $options['db-host'] : false; $dbName = $options['db-database'] ? $options['db-database'] : false; $dbImpl = $options['db-driver'] ? $options['db-driver'] : false; if ( $dbHost or $dbName or $dbUser or $dbImpl ) { $params = array(); if ( $dbHost !== false ) $params['server'] = $dbHost; if ( $dbUser !== false ) { $params['user'] = $dbUser; $params['password'] = ''; } if ( $dbPassword !== false ) $params['password'] = $dbPassword; if ( $dbName !== false ) $params['database'] = $dbName; $db = eZDB::instance( $dbImpl, $params, true ); eZDB::setInstance( $db ); } // still no success? if( !$db->IsConnected ) { $cli->error( "Error: couldn't connect to database '" . $db->DB . "'" ); $cli->error( ' for mysql try: ' ); $cli->error( ' mysql -e "create database ' . $db->DB . ';"' ); $cli->error( ' mysql tmp < kernel/sql/mysql/kernel_schema.sql' ); $cli->error( ' or use --help for more info' ); $script->shutdown( 1 ); } } $cli->output( "Using database '" . $cli->terminalStyle( 'red' ) . $db->DB . $cli->terminalStyle( 'normal' ) . "'" ); $xml = simplexml_load_file( $url ); if ( $xml === false ) { $cli->error( "Error retrieving '$url'" ); $script->shutdown( 1 ); } // Clean up all tables to add everything from the start. eZISBNGroup::cleanAll(); eZISBNGroupRange::cleanAll(); eZISBNRegistrantRange::cleanAll(); // Get registration groups. 
$registrationGroups = $xml->xpath( 'RegistrationGroups/Group' ); foreach ( $registrationGroups as $group ) { // Prefix is always 978 or 979 followed by an hyphen (-) and up to 5 digits // Explode it in order to get the group number $prefixArray = explode( '-', $group->Prefix ); $groupNumber = $prefixArray[1]; $description = $group->Agency; // name $isbnGroup = eZISBNGroup::create( $groupNumber, $description ); $isbnGroup->store(); $isbnGroupID = $isbnGroup->attribute( 'id' ); // look for the rules $rules = $group->Rules[0]->Rule; foreach ( $rules as $rule ) { $length = (int)$rule->Length; // if length is 0 there is no need to add to the database if( $length > 0 ) { $rangeArray = explode( '-', $rule->Range ); $fromValue = substr( $rangeArray[0], 0, 5 ); $toValue = substr( $rangeArray[1], 0, 5 ); $registrantFrom = substr( $rangeArray[0], 0, $length ); $registrantTo = substr( $rangeArray[1], 0, $length ); $registrationGroup = eZISBNRegistrantRange::create( $isbnGroupID, $fromValue, $toValue, $registrantFrom, $registrantTo, $length ); $registrationGroup->store(); } } } // get group ranges $groupRanges = $xml->xpath( '///EAN.UCC/Rules/Rule' ); foreach( $groupRanges as $groupRange ) { $registrationGroupItemLength = (int)$groupRange->Length; // if length is 0 there is no need to add to the database if( $registrationGroupItemLength > 0 ) { $rangeArray = explode( '-', $groupRange->Range ); $fromValue = substr( $rangeArray[0], 0, 5 ); $toValue = substr( $rangeArray[1], 0, 5 ); $groupFrom = substr( $rangeArray[0], 0, $registrationGroupItemLength ); $groupTo = substr( $rangeArray[1], 0, $registrationGroupItemLength ); $registrationGroupRange = eZISBNGroupRange::create( $fromValue, $toValue, $groupFrom, $groupTo, $registrationGroupItemLength ); $registrationGroupRange->store(); } } $cli->output( 'Complete' ); $script->shutdown(); ?>
Alexnder/ezpublish-legacy
bin/php/updateisbn13.php
PHP
gpl-2.0
6,552
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "TotemAI.h" #include "Totem.h" #include "Creature.h" #include "DBCStores.h" #include "ObjectAccessor.h" #include "SpellMgr.h" #include "GridNotifiers.h" #include "GridNotifiersImpl.h" #include "CellImpl.h" int TotemAI::Permissible(Creature const* creature) { if (creature->IsTotem()) return PERMIT_BASE_PROACTIVE; return PERMIT_BASE_NO; } TotemAI::TotemAI(Creature* c) : CreatureAI(c), i_victimGuid(0) { ASSERT(c->IsTotem()); } void TotemAI::MoveInLineOfSight(Unit* /*who*/) { } void TotemAI::EnterEvadeMode() { me->CombatStop(true); } void TotemAI::UpdateAI(uint32 /*diff*/) { if (me->ToTotem()->GetTotemType() != TOTEM_ACTIVE) return; if (!me->IsAlive() || me->IsNonMeleeSpellCasted(false)) return; // Search spell SpellInfo const* spellInfo = sSpellMgr->GetSpellInfo(me->ToTotem()->GetSpell()); if (!spellInfo) return; // Get spell range float max_range = spellInfo->GetMaxRange(false); // SPELLMOD_RANGE not applied in this place just because not existence range mods for attacking totems // pointer to appropriate target if found any Unit* victim = i_victimGuid ? ObjectAccessor::GetUnit(*me, i_victimGuid) : NULL; // Search victim if no, not attackable, or out of range, or friendly (possible in case duel end) if (!victim || !victim->isTargetableForAttack() || !me->IsWithinDistInMap(victim, max_range) || me->IsFriendlyTo(victim) || !me->CanSeeOrDetect(victim)) { victim = NULL; Trinity::NearestAttackableUnitInObjectRangeCheck u_check(me, me, max_range); Trinity::UnitLastSearcher<Trinity::NearestAttackableUnitInObjectRangeCheck> checker(me, victim, u_check); me->VisitNearbyObject(max_range, checker); } // If have target if (victim) { // remember i_victimGuid = victim->GetGUID(); // attack me->SetInFront(victim); // client change orientation by self me->CastSpell(victim, me->ToTotem()->GetSpell(), false); } else i_victimGuid = 0; } void TotemAI::AttackStart(Unit* /*victim*/) { // Sentry totem sends ping on attack if (me->GetEntry() == SENTRY_TOTEM_ENTRY) if (Unit* owner = me->GetOwner()) if (Player* player = owner->ToPlayer()) { WorldPacket data(MSG_MINIMAP_PING, (8+4+4)); data << me->GetGUID(); data << me->GetPositionX(); data << me->GetPositionY(); player->GetSession()->SendPacket(&data); } }
OrizonNetworkCore/Orizon-Core
src/server/game/AI/CoreAI/TotemAI.cpp
C++
gpl-2.0
3,401
/* kernel/power/earlysuspend.c * * Copyright (C) 2005-2008 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/earlysuspend.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/rtc.h> #include <linux/wakelock.h> #include <linux/workqueue.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include "power.h" #ifdef CONFIG_TRACING_IRQ_PWR #include "../drivers/gpio/gpio-msm-common.h" #define PWR_KEY_MSMz 26 #endif enum { DEBUG_USER_STATE = 1U << 0, DEBUG_SUSPEND = 1U << 2, DEBUG_VERBOSE = 1U << 3, DEBUG_NO_SUSPEND = 1U << 4, }; #ifdef CONFIG_NO_SUSPEND static int debug_mask = DEBUG_USER_STATE; #else static int debug_mask = DEBUG_USER_STATE; #endif module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); static DEFINE_MUTEX(early_suspend_lock); static LIST_HEAD(early_suspend_handlers); static void early_suspend(struct work_struct *work); static void late_resume(struct work_struct *work); static DECLARE_WORK(early_suspend_work, early_suspend); static DECLARE_WORK(late_resume_work, late_resume); static DEFINE_SPINLOCK(state_lock); enum { SUSPEND_REQUESTED = 0x1, SUSPENDED = 0x2, SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED, }; static int state; #ifdef CONFIG_HTC_ONMODE_CHARGING static LIST_HEAD(onchg_suspend_handlers); static void onchg_suspend(struct work_struct *work); static void onchg_resume(struct work_struct *work); static DECLARE_WORK(onchg_suspend_work, onchg_suspend); static DECLARE_WORK(onchg_resume_work, onchg_resume); static int state_onchg; #endif #ifdef CONFIG_EARLYSUSPEND_BOOST_CPU_SPEED extern int skip_cpu_offline; int has_boost_cpu_func = 0; static void __ref boost_cpu_speed(int boost) { unsigned long max_wait; unsigned int cpu = 0, isfound = 0; if (!has_boost_cpu_func) return; if (boost) { skip_cpu_offline = 1; for(cpu = 1; cpu < NR_CPUS; cpu++) { if (cpu_online(cpu)) { isfound = 1; break; } } cpu = isfound ? 
cpu : 1; if (!isfound) { max_wait = jiffies + msecs_to_jiffies(50); cpu_hotplug_driver_lock(); cpu_up(cpu); cpu_hotplug_driver_unlock(); while (!cpu_active(cpu) && jiffies < max_wait) ; } #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND ondemand_boost_cpu(1); #endif } else { #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND ondemand_boost_cpu(0); #endif skip_cpu_offline = 0; } } #else static void boost_cpu_speed(int boost) { return; } #endif void register_early_suspend(struct early_suspend *handler) { struct list_head *pos; mutex_lock(&early_suspend_lock); list_for_each(pos, &early_suspend_handlers) { struct early_suspend *e; e = list_entry(pos, struct early_suspend, link); if (e->level > handler->level) break; } list_add_tail(&handler->link, pos); if ((state & SUSPENDED) && handler->suspend) handler->suspend(handler); mutex_unlock(&early_suspend_lock); } EXPORT_SYMBOL(register_early_suspend); void unregister_early_suspend(struct early_suspend *handler) { mutex_lock(&early_suspend_lock); list_del(&handler->link); mutex_unlock(&early_suspend_lock); } EXPORT_SYMBOL(unregister_early_suspend); #define EARLY_SUSPEND_TIMEOUT_VALUE 5 static void early_suspend_handlers_timeout(unsigned long data) { printk(KERN_EMERG "**** early_suspend_handlers %d secs timeout: %pf ****\n", \ EARLY_SUSPEND_TIMEOUT_VALUE, (void *)data); pr_info("### Show Blocked State in ###\n"); show_state_filter(TASK_UNINTERRUPTIBLE); BUG(); } static void early_suspend(struct work_struct *work) { struct early_suspend *pos; struct timer_list timer; unsigned long irqflags; int abort = 0; pr_info("[R] early_suspend start\n"); mutex_lock(&early_suspend_lock); spin_lock_irqsave(&state_lock, irqflags); if (state == SUSPEND_REQUESTED) { state |= SUSPENDED; #ifdef CONFIG_HTC_ONMODE_CHARGING state_onchg = SUSPEND_REQUESTED_AND_SUSPENDED; #endif } else abort = 1; spin_unlock_irqrestore(&state_lock, irqflags); if (abort) { if (debug_mask & DEBUG_SUSPEND) pr_info("early_suspend: abort, state %d\n", state); mutex_unlock(&early_suspend_lock); goto abort; } boost_cpu_speed(1); init_timer_on_stack(&timer); timer.function = early_suspend_handlers_timeout; if (debug_mask & DEBUG_SUSPEND) pr_info("early_suspend: call handlers\n"); list_for_each_entry(pos, &early_suspend_handlers, link) { if (pos->suspend != NULL) { timer.expires = jiffies + HZ * EARLY_SUSPEND_TIMEOUT_VALUE; timer.data = (unsigned long)pos->suspend; add_timer(&timer); if (debug_mask & DEBUG_VERBOSE) pr_info("early_suspend: calling %pf\n", pos->suspend); pos->suspend(pos); del_timer_sync(&timer); } } destroy_timer_on_stack(&timer); boost_cpu_speed(0); mutex_unlock(&early_suspend_lock); if (debug_mask & DEBUG_SUSPEND) pr_info("early_suspend: sync\n"); suspend_sys_sync_queue(); if (debug_mask & DEBUG_NO_SUSPEND) { pr_info("DEBUG_NO_SUSPEND set, will not suspend\n"); wake_lock(&no_suspend_wake_lock); } abort: spin_lock_irqsave(&state_lock, irqflags); if (state == SUSPEND_REQUESTED_AND_SUSPENDED) wake_unlock(&main_wake_lock); spin_unlock_irqrestore(&state_lock, irqflags); pr_info("[R] early_suspend end\n"); } static void late_resume(struct work_struct *work) { struct early_suspend *pos; struct timer_list timer; unsigned long irqflags; int abort = 0; pr_info("[R] late_resume start\n"); mutex_lock(&early_suspend_lock); spin_lock_irqsave(&state_lock, irqflags); if (state == SUSPENDED) { state &= ~SUSPENDED; #ifdef CONFIG_HTC_ONMODE_CHARGING state_onchg &= ~SUSPEND_REQUESTED_AND_SUSPENDED; #endif } else abort = 1; spin_unlock_irqrestore(&state_lock, irqflags); if (abort) { if (debug_mask & 
DEBUG_SUSPEND) pr_info("late_resume: abort, state %d\n", state); goto abort; } boost_cpu_speed(1); init_timer_on_stack(&timer); timer.function = early_suspend_handlers_timeout; if (debug_mask & DEBUG_SUSPEND) pr_info("late_resume: call handlers\n"); list_for_each_entry_reverse(pos, &early_suspend_handlers, link) { if (pos->resume != NULL) { timer.expires = jiffies + HZ * EARLY_SUSPEND_TIMEOUT_VALUE; timer.data = (unsigned long)pos->suspend; add_timer(&timer); if (debug_mask & DEBUG_VERBOSE) pr_info("late_resume: calling %pf\n", pos->resume); pos->resume(pos); del_timer_sync(&timer); } } destroy_timer_on_stack(&timer); boost_cpu_speed(0); if (debug_mask & DEBUG_SUSPEND) pr_info("late_resume: done\n"); if (debug_mask & DEBUG_NO_SUSPEND) wake_unlock(&no_suspend_wake_lock); abort: mutex_unlock(&early_suspend_lock); pr_info("[R] late_resume end\n"); } #ifdef CONFIG_HTC_ONMODE_CHARGING void register_onchg_suspend(struct early_suspend *handler) { struct list_head *pos; mutex_lock(&early_suspend_lock); list_for_each(pos, &onchg_suspend_handlers) { struct early_suspend *e; e = list_entry(pos, struct early_suspend, link); if (e->level > handler->level) break; } list_add_tail(&handler->link, pos); mutex_unlock(&early_suspend_lock); } EXPORT_SYMBOL(register_onchg_suspend); void unregister_onchg_suspend(struct early_suspend *handler) { mutex_lock(&early_suspend_lock); list_del(&handler->link); mutex_unlock(&early_suspend_lock); } EXPORT_SYMBOL(unregister_onchg_suspend); static void onchg_suspend(struct work_struct *work) { struct early_suspend *pos; unsigned long irqflags; int abort = 0; pr_info("[R] onchg_suspend start\n"); mutex_lock(&early_suspend_lock); spin_lock_irqsave(&state_lock, irqflags); if (state == SUSPEND_REQUESTED_AND_SUSPENDED && state_onchg == SUSPEND_REQUESTED) state_onchg |= SUSPENDED; else abort = 1; spin_unlock_irqrestore(&state_lock, irqflags); if (abort) { if (debug_mask & DEBUG_SUSPEND) pr_info("onchg_suspend: abort, state %d, state_onchg: %d\n", state, state_onchg); mutex_unlock(&early_suspend_lock); goto abort; } if (debug_mask & DEBUG_SUSPEND) pr_info("onchg_suspend: call handlers\n"); list_for_each_entry(pos, &onchg_suspend_handlers, link) { if (pos->suspend != NULL) pos->suspend(pos); } mutex_unlock(&early_suspend_lock); abort: pr_info("[R] onchg_suspend end\n"); } static void onchg_resume(struct work_struct *work) { struct early_suspend *pos; unsigned long irqflags; int abort = 0; pr_info("[R] onchg_resume start\n"); mutex_lock(&early_suspend_lock); spin_lock_irqsave(&state_lock, irqflags); if (state == SUSPEND_REQUESTED_AND_SUSPENDED && state_onchg == SUSPENDED) state_onchg &= ~SUSPENDED; else abort = 1; spin_unlock_irqrestore(&state_lock, irqflags); if (abort) { if (debug_mask & DEBUG_SUSPEND) pr_info("onchg_resume: abort, state %d, state_onchg: %d\n", state, state_onchg); goto abort; } if (debug_mask & DEBUG_SUSPEND) pr_info("onchg_resume: call handlers\n"); list_for_each_entry_reverse(pos, &onchg_suspend_handlers, link) if (pos->resume != NULL) pos->resume(pos); if (debug_mask & DEBUG_SUSPEND) pr_info("onchg_resume: done\n"); abort: mutex_unlock(&early_suspend_lock); pr_info("[R] onchg_resume end\n"); } void request_onchg_state(int on) { unsigned long irqflags; int old_sleep; spin_lock_irqsave(&state_lock, irqflags); if (debug_mask & DEBUG_USER_STATE) { struct timespec ts; struct rtc_time tm; getnstimeofday(&ts); rtc_time_to_tm(ts.tv_sec, &tm); pr_info("request_onchg_state: %s (%d.%d)->%d at %lld " "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", on == 1 ? 
"on" : "off", state, !(state_onchg & SUSPEND_REQUESTED), on, ktime_to_ns(ktime_get()), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); } if (state == SUSPEND_REQUESTED_AND_SUSPENDED) { old_sleep = state_onchg & SUSPEND_REQUESTED; if (!old_sleep && on == 0) { state_onchg |= SUSPEND_REQUESTED; queue_work(suspend_work_queue, &onchg_suspend_work); } else if (old_sleep && on == 1) { state_onchg &= ~SUSPEND_REQUESTED; queue_work(suspend_work_queue, &onchg_resume_work); } } spin_unlock_irqrestore(&state_lock, irqflags); } int get_onchg_state(void) { return state_onchg; } #endif void request_suspend_state(suspend_state_t new_state) { unsigned long irqflags; int old_sleep; spin_lock_irqsave(&state_lock, irqflags); old_sleep = state & SUSPEND_REQUESTED; if (debug_mask & DEBUG_USER_STATE) { struct timespec ts; struct rtc_time tm; getnstimeofday(&ts); rtc_time_to_tm(ts.tv_sec, &tm); pr_info("request_suspend_state: %s (%d->%d) at %lld " "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", new_state != PM_SUSPEND_ON ? "sleep" : "wakeup", requested_suspend_state, new_state, ktime_to_ns(ktime_get()), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); } if (!old_sleep && new_state != PM_SUSPEND_ON) { state |= SUSPEND_REQUESTED; queue_work(suspend_work_queue, &early_suspend_work); } else if (old_sleep && new_state == PM_SUSPEND_ON) { state &= ~SUSPEND_REQUESTED; wake_lock(&main_wake_lock); #ifdef CONFIG_TRACING_IRQ_PWR pr_info("%s : PWR KEY INT ENABLE : %d\n", __func__, (__msm_gpio_get_intr_config(PWR_KEY_MSMz)&0x1)); #endif queue_work(suspend_work_queue, &late_resume_work); } requested_suspend_state = new_state; spin_unlock_irqrestore(&state_lock, irqflags); } suspend_state_t get_suspend_state(void) { return requested_suspend_state; }
brymaster5000/m7-501
kernel/power/earlysuspend.c
C
gpl-2.0
11,792
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2011, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. * * lustre/ldlm/ldlm_inodebits.c * * Author: Peter Braam <braam@clusterfs.com> * Author: Phil Schwan <phil@clusterfs.com> */ /** * This file contains implementation of IBITS lock type * * IBITS lock type contains a bit mask determining various properties of an * object. The meanings of specific bits are specific to the caller and are * opaque to LDLM code. * * Locks with intersecting bitmasks and conflicting lock modes (e.g. LCK_PW) * are considered conflicting. See the lock mode compatibility matrix * in lustre_dlm.h. */ #define DEBUG_SUBSYSTEM S_LDLM #include "../include/lustre_dlm.h" #include "../include/obd_support.h" #include "../include/lustre_lib.h" #include "ldlm_internal.h" void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy, ldlm_policy_data_t *lpolicy) { memset(lpolicy, 0, sizeof(*lpolicy)); lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits; } void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy, ldlm_wire_policy_data_t *wpolicy) { memset(wpolicy, 0, sizeof(*wpolicy)); wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits; }
jimbojr/linux
drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
C
gpl-2.0
2,423
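The ldlm_inodebits.c header above states the IBITS conflict rule: two locks collide only when their bit masks intersect and their lock modes are incompatible. The toy sketch below restates that rule in isolation; the two-mode compatibility check is an assumption standing in for the full matrix in lustre_dlm.h, and none of the names are real LDLM identifiers.

/*
 * Toy illustration of the IBITS conflict rule described above:
 * locks conflict only if their bitmasks intersect AND their modes
 * are incompatible. The two-mode "matrix" here is an assumption,
 * not the real LDLM compatibility matrix.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum toy_mode { TOY_PR, TOY_PW };   /* stand-ins for shared-read / write modes */

struct toy_ibits_lock {
    uint64_t bits;
    enum toy_mode mode;
};

static bool modes_compatible(enum toy_mode a, enum toy_mode b)
{
    /* assumption: only two shared-read locks are compatible */
    return a == TOY_PR && b == TOY_PR;
}

static bool ibits_conflict(const struct toy_ibits_lock *a,
                           const struct toy_ibits_lock *b)
{
    if (!(a->bits & b->bits))
        return false;               /* disjoint bit masks never conflict */
    return !modes_compatible(a->mode, b->mode);
}

int main(void)
{
    struct toy_ibits_lock lookup = { .bits = 0x1, .mode = TOY_PR };
    struct toy_ibits_lock update = { .bits = 0x2, .mode = TOY_PW };
    struct toy_ibits_lock both   = { .bits = 0x3, .mode = TOY_PW };

    printf("lookup vs update conflict: %d\n", ibits_conflict(&lookup, &update)); /* 0 */
    printf("lookup vs both conflict:   %d\n", ibits_conflict(&lookup, &both));   /* 1 */
    return 0;
}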
/* Copyright 2013-2014 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the above-listed copyright holders nor the * names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "../include/mc-sys.h" #include "../include/mc-cmd.h" #include "../include/dpmng.h" #include "dpmng-cmd.h" /** * mc_get_version() - Retrieves the Management Complex firmware * version information * @mc_io: Pointer to opaque I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @mc_ver_info: Returned version information structure * * Return: '0' on Success; Error code otherwise. */ int mc_get_version(struct fsl_mc_io *mc_io, u32 cmd_flags, struct mc_version *mc_ver_info) { struct mc_command cmd = { 0 }; int err; /* prepare command */ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION, cmd_flags, 0); /* send command to mc*/ err = mc_send_command(mc_io, &cmd); if (err) return err; /* retrieve response parameters */ mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32); mc_ver_info->major = mc_dec(cmd.params[0], 32, 32); mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32); return 0; } /** * dpmng_get_container_id() - Get container ID associated with a given portal. * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @container_id: Requested container ID * * Return: '0' on Success; Error code otherwise. */ int dpmng_get_container_id(struct fsl_mc_io *mc_io, u32 cmd_flags, int *container_id) { struct mc_command cmd = { 0 }; int err; /* prepare command */ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID, cmd_flags, 0); /* send command to mc*/ err = mc_send_command(mc_io, &cmd); if (err) return err; /* retrieve response parameters */ *container_id = mc_dec(cmd.params[0], 0, 32); return 0; }
linuxium/ubuntu-xenial
drivers/staging/fsl-mc/bus/dpmng.c
C
gpl-2.0
3,481
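mc_get_version() in the dpmng.c row above unpacks the firmware revision, major and minor numbers out of the 64-bit response words with mc_dec(word, offset, width). The sketch below shows that style of mask-and-shift field extraction on its own; the field layout mirrors what the calls above imply, but the helper name and the sample value are illustrative assumptions, not part of the fsl-mc API.

/*
 * Illustration of extracting bit fields from a 64-bit response word,
 * in the style of the mc_dec(word, offset, width) calls above.
 * dec_field() and the sample values are assumptions for this sketch.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t dec_field(uint64_t word, int offset, int width)
{
    uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);
    return (word >> offset) & mask;
}

int main(void)
{
    /* pretend response: revision in bits 0..31, major in bits 32..63 */
    uint64_t params0 = ((uint64_t)10 << 32) | 3;
    uint64_t params1 = 7; /* minor in bits 0..31 of the next word */

    printf("revision=%llu major=%llu minor=%llu\n",
           (unsigned long long)dec_field(params0, 0, 32),
           (unsigned long long)dec_field(params0, 32, 32),
           (unsigned long long)dec_field(params1, 0, 32));
    return 0;
}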
/* * Filename: dev.c * * * Authors: Joshua Morris <josh.h.morris@us.ibm.com> * Philip Kelleher <pjk1939@linux.vnet.ibm.com> * * (C) Copyright 2013 IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/hdreg.h> #include <linux/genhd.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/fs.h> #include "rsxx_priv.h" static unsigned int blkdev_minors = 64; module_param(blkdev_minors, uint, 0444); MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)"); /* * For now I'm making this tweakable in case any applications hit this limit. * If you see a "bio too big" error in the log you will need to raise this * value. */ static unsigned int blkdev_max_hw_sectors = 1024; module_param(blkdev_max_hw_sectors, uint, 0444); MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO"); static unsigned int enable_blkdev = 1; module_param(enable_blkdev , uint, 0444); MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces"); struct rsxx_bio_meta { struct bio *bio; atomic_t pending_dmas; atomic_t error; unsigned long start_time; }; static struct kmem_cache *bio_meta_pool; /*----------------- Block Device Operations -----------------*/ static int rsxx_blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct rsxx_cardinfo *card = bdev->bd_disk->private_data; switch (cmd) { case RSXX_GETREG: return rsxx_reg_access(card, (void __user *)arg, 1); case RSXX_SETREG: return rsxx_reg_access(card, (void __user *)arg, 0); } return -ENOTTY; } static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct rsxx_cardinfo *card = bdev->bd_disk->private_data; u64 blocks = card->size8 >> 9; /* * get geometry: Fake it. I haven't found any drivers that set * geo->start, so we won't either. 
*/ if (card->size8) { geo->heads = 64; geo->sectors = 16; do_div(blocks, (geo->heads * geo->sectors)); geo->cylinders = blocks; } else { geo->heads = 0; geo->sectors = 0; geo->cylinders = 0; } return 0; } static const struct block_device_operations rsxx_fops = { .owner = THIS_MODULE, .getgeo = rsxx_getgeo, .ioctl = rsxx_blkdev_ioctl, }; static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio) { generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio), &card->gendisk->part0); } static void disk_stats_complete(struct rsxx_cardinfo *card, struct bio *bio, unsigned long start_time) { generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0, start_time); } static void bio_dma_done_cb(struct rsxx_cardinfo *card, void *cb_data, unsigned int error) { struct rsxx_bio_meta *meta = cb_data; if (error) atomic_set(&meta->error, 1); if (atomic_dec_and_test(&meta->pending_dmas)) { if (!card->eeh_state && card->gendisk) disk_stats_complete(card, meta->bio, meta->start_time); if (atomic_read(&meta->error)) bio_io_error(meta->bio); else bio_endio(meta->bio); kmem_cache_free(bio_meta_pool, meta); } } static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) { struct rsxx_cardinfo *card = q->queuedata; struct rsxx_bio_meta *bio_meta; int st = -EINVAL; blk_queue_split(q, &bio, q->bio_split); might_sleep(); if (!card) goto req_err; if (bio_end_sector(bio) > get_capacity(card->gendisk)) goto req_err; if (unlikely(card->halt)) { st = -EFAULT; goto req_err; } if (unlikely(card->dma_fault)) { st = (-EFAULT); goto req_err; } if (bio->bi_iter.bi_size == 0) { dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); goto req_err; } bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL); if (!bio_meta) { st = -ENOMEM; goto req_err; } bio_meta->bio = bio; atomic_set(&bio_meta->error, 0); atomic_set(&bio_meta->pending_dmas, 0); bio_meta->start_time = jiffies; if (!unlikely(card->halt)) disk_stats_start(card, bio); dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", bio_data_dir(bio) ? 'W' : 'R', bio_meta, (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size); st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, bio_dma_done_cb, bio_meta); if (st) goto queue_err; return BLK_QC_T_NONE; queue_err: kmem_cache_free(bio_meta_pool, bio_meta); req_err: if (st) bio->bi_error = st; bio_endio(bio); return BLK_QC_T_NONE; } /*----------------- Device Setup -------------------*/ static bool rsxx_discard_supported(struct rsxx_cardinfo *card) { unsigned char pci_rev; pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev); return (pci_rev >= RSXX_DISCARD_SUPPORT); } int rsxx_attach_dev(struct rsxx_cardinfo *card) { mutex_lock(&card->dev_lock); /* The block device requires the stripe size from the config. 
*/ if (enable_blkdev) { if (card->config_valid) set_capacity(card->gendisk, card->size8 >> 9); else set_capacity(card->gendisk, 0); add_disk(card->gendisk); card->bdev_attached = 1; } mutex_unlock(&card->dev_lock); return 0; } void rsxx_detach_dev(struct rsxx_cardinfo *card) { mutex_lock(&card->dev_lock); if (card->bdev_attached) { del_gendisk(card->gendisk); card->bdev_attached = 0; } mutex_unlock(&card->dev_lock); } int rsxx_setup_dev(struct rsxx_cardinfo *card) { unsigned short blk_size; mutex_init(&card->dev_lock); if (!enable_blkdev) return 0; card->major = register_blkdev(0, DRIVER_NAME); if (card->major < 0) { dev_err(CARD_TO_DEV(card), "Failed to get major number\n"); return -ENOMEM; } card->queue = blk_alloc_queue(GFP_KERNEL); if (!card->queue) { dev_err(CARD_TO_DEV(card), "Failed queue alloc\n"); unregister_blkdev(card->major, DRIVER_NAME); return -ENOMEM; } card->gendisk = alloc_disk(blkdev_minors); if (!card->gendisk) { dev_err(CARD_TO_DEV(card), "Failed disk alloc\n"); blk_cleanup_queue(card->queue); unregister_blkdev(card->major, DRIVER_NAME); return -ENOMEM; } if (card->config_valid) { blk_size = card->config.data.block_size; blk_queue_dma_alignment(card->queue, blk_size - 1); blk_queue_logical_block_size(card->queue, blk_size); } blk_queue_make_request(card->queue, rsxx_make_request); blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue); if (rsxx_discard_supported(card)) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue); blk_queue_max_discard_sectors(card->queue, RSXX_HW_BLK_SIZE >> 9); card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE; card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE; card->queue->limits.discard_zeroes_data = 1; } card->queue->queuedata = card; snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name), "rsxx%d", card->disk_id); card->gendisk->driverfs_dev = &card->dev->dev; card->gendisk->major = card->major; card->gendisk->first_minor = 0; card->gendisk->fops = &rsxx_fops; card->gendisk->private_data = card; card->gendisk->queue = card->queue; return 0; } void rsxx_destroy_dev(struct rsxx_cardinfo *card) { if (!enable_blkdev) return; put_disk(card->gendisk); card->gendisk = NULL; blk_cleanup_queue(card->queue); card->queue->queuedata = NULL; unregister_blkdev(card->major, DRIVER_NAME); } int rsxx_dev_init(void) { bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN); if (!bio_meta_pool) return -ENOMEM; return 0; } void rsxx_dev_cleanup(void) { kmem_cache_destroy(bio_meta_pool); }
alsandeep/kernel-4.4
drivers/block/rsxx/dev.c
C
gpl-2.0
8,494
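rsxx_getgeo() in the dev.c row above fakes a CHS geometry: heads and sectors are fixed at 64 and 16, and the cylinder count is the 512-byte block count divided by heads * sectors. The worked sketch below only reproduces that division; the card size is an arbitrary assumption and the fixed-width kernel hd_geometry fields are not modelled.

/*
 * Worked example of the faked geometry in rsxx_getgeo():
 * heads and sectors are fixed, cylinders = blocks / (heads * sectors).
 * The card size below is an arbitrary assumption.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t size_bytes = 512ULL * 1024 * 1024 * 1024; /* assume a 512 GiB card */
    uint64_t blocks     = size_bytes >> 9;             /* 512-byte sectors */
    unsigned heads      = 64;
    unsigned sectors    = 16;
    uint64_t cylinders  = blocks / (heads * sectors);

    printf("blocks=%llu cylinders=%llu\n",
           (unsigned long long)blocks, (unsigned long long)cylinders);
    return 0;
}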
<?php namespace Drupal\Core\Cache; /** * Defines a memory cache implementation. * * Stores cache items in memory using a PHP array. * * Should be used for unit tests and specialist use-cases only, does not * store cached items between requests. * * @ingroup cache */ class MemoryBackend implements CacheBackendInterface, CacheTagsInvalidatorInterface { /** * Array to store cache objects. */ protected $cache = array(); /** * Constructs a MemoryBackend object. * * @param string $bin * The cache bin for which the object is created. */ public function __construct($bin) { } /** * {@inheritdoc} */ public function get($cid, $allow_invalid = FALSE) { if (isset($this->cache[$cid])) { return $this->prepareItem($this->cache[$cid], $allow_invalid); } else { return FALSE; } } /** * {@inheritdoc} */ public function getMultiple(&$cids, $allow_invalid = FALSE) { $ret = array(); $items = array_intersect_key($this->cache, array_flip($cids)); foreach ($items as $item) { $item = $this->prepareItem($item, $allow_invalid); if ($item) { $ret[$item->cid] = $item; } } $cids = array_diff($cids, array_keys($ret)); return $ret; } /** * Prepares a cached item. * * Checks that items are either permanent or did not expire, and returns data * as appropriate. * * @param object $cache * An item loaded from cache_get() or cache_get_multiple(). * @param bool $allow_invalid * (optional) If TRUE, cache items may be returned even if they have expired * or been invalidated. * * @return mixed * The item with data as appropriate or FALSE if there is no * valid item to load. */ protected function prepareItem($cache, $allow_invalid) { if (!isset($cache->data)) { return FALSE; } // The object passed into this function is the one stored in $this->cache. // We must clone it as part of the preparation step so that the actual // cache object is not affected by the unserialize() call or other // manipulations of the returned object. $prepared = clone $cache; $prepared->data = unserialize($prepared->data); // Check expire time. $prepared->valid = $prepared->expire == Cache::PERMANENT || $prepared->expire >= $this->getRequestTime(); if (!$allow_invalid && !$prepared->valid) { return FALSE; } return $prepared; } /** * {@inheritdoc} */ public function set($cid, $data, $expire = Cache::PERMANENT, array $tags = array()) { assert('\Drupal\Component\Assertion\Inspector::assertAllStrings($tags)', 'Cache Tags must be strings.'); $tags = array_unique($tags); // Sort the cache tags so that they are stored consistently in the database. sort($tags); $this->cache[$cid] = (object) array( 'cid' => $cid, 'data' => serialize($data), 'created' => $this->getRequestTime(), 'expire' => $expire, 'tags' => $tags, ); } /** * {@inheritdoc} */ public function setMultiple(array $items = array()) { foreach ($items as $cid => $item) { $this->set($cid, $item['data'], isset($item['expire']) ? $item['expire'] : CacheBackendInterface::CACHE_PERMANENT, isset($item['tags']) ? 
$item['tags'] : array()); } } /** * {@inheritdoc} */ public function delete($cid) { unset($this->cache[$cid]); } /** * {@inheritdoc} */ public function deleteMultiple(array $cids) { $this->cache = array_diff_key($this->cache, array_flip($cids)); } /** * {@inheritdoc} */ public function deleteAll() { $this->cache = array(); } /** * {@inheritdoc} */ public function invalidate($cid) { if (isset($this->cache[$cid])) { $this->cache[$cid]->expire = $this->getRequestTime() - 1; } } /** * {@inheritdoc} */ public function invalidateMultiple(array $cids) { foreach ($cids as $cid) { $this->cache[$cid]->expire = $this->getRequestTime() - 1; } } /** * {@inheritdoc} */ public function invalidateTags(array $tags) { foreach ($this->cache as $cid => $item) { if (array_intersect($tags, $item->tags)) { $this->cache[$cid]->expire = $this->getRequestTime() - 1; } } } /** * {@inheritdoc} */ public function invalidateAll() { foreach ($this->cache as $cid => $item) { $this->cache[$cid]->expire = $this->getRequestTime() - 1; } } /** * {@inheritdoc} */ public function garbageCollection() { } /** * {@inheritdoc} */ public function removeBin() { $this->cache = []; } /** * Wrapper method for REQUEST_TIME constant. * * @return int */ protected function getRequestTime() { return defined('REQUEST_TIME') ? REQUEST_TIME : (int) $_SERVER['REQUEST_TIME']; } /** * Prevents data stored in memory backends from being serialized. */ public function __sleep() { return []; } /** * Reset statically cached variables. * * This is only used by tests. */ public function reset() { $this->cache = []; } }
ClickAndLike/radio1
core/lib/Drupal/Core/Cache/MemoryBackend.php
PHP
gpl-2.0
5,169
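MemoryBackend stores each item as a serialized copy plus an expire timestamp, so tag or cache-ID invalidation is simply a matter of rewinding that timestamp. A short usage sketch follows, assuming a bootstrapped Drupal (or its test suite) so the Drupal\Core\Cache classes autoload; the bin name 'test', the cache ID 'greeting' and the tag 'node:1' are arbitrary examples:

<?php

use Drupal\Core\Cache\Cache;
use Drupal\Core\Cache\MemoryBackend;

$backend = new MemoryBackend('test');

// Data is serialized on set(), so the cached copy cannot be mutated
// through the original variable afterwards.
$backend->set('greeting', ['text' => 'hello'], Cache::PERMANENT, ['node:1']);

$item = $backend->get('greeting');
// $item->data is the unserialized payload; $item->valid is TRUE here.

// Tag invalidation rewinds the expire timestamp, so a normal get() misses...
$backend->invalidateTags(['node:1']);
var_dump($backend->get('greeting'));              // bool(false)

// ...but the stale copy is still retrievable when explicitly allowed.
var_dump($backend->get('greeting', TRUE)->valid); // bool(false)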
/* * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * - Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/pci.h> #include <linux/delay.h> #include "hfi.h" #include "common.h" #include "sdma.h" /** * format_hwmsg - format a single hwerror message * @msg message buffer * @msgl length of message buffer * @hwmsg message to add to message buffer */ static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg) { strlcat(msg, "[", msgl); strlcat(msg, hwmsg, msgl); strlcat(msg, "]", msgl); } /** * hfi1_format_hwerrors - format hardware error messages for display * @hwerrs hardware errors bit vector * @hwerrmsgs hardware error descriptions * @nhwerrmsgs number of hwerrmsgs * @msg message buffer * @msgl message buffer length */ void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs, size_t nhwerrmsgs, char *msg, size_t msgl) { int i; for (i = 0; i < nhwerrmsgs; i++) if (hwerrs & hwerrmsgs[i].mask) format_hwmsg(msg, msgl, hwerrmsgs[i].msg); } static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev) { struct ib_event event; struct hfi1_devdata *dd = ppd->dd; /* * Only call ib_dispatch_event() if the IB device has been * registered. HFI1_INITED is set iff the driver has successfully * registered with the IB core. */ if (!(dd->flags & HFI1_INITTED)) return; event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = ppd->port; event.event = ev; ib_dispatch_event(&event); } /* * Handle a linkup or link down notification. 
* This is called outside an interrupt. */ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) { struct hfi1_pportdata *ppd = &dd->pport[0]; enum ib_event_type ev; if (!(ppd->linkup ^ !!linkup)) return; /* no change, nothing to do */ if (linkup) { /* * Quick linkup and all link up on the simulator does not * trigger or implement: * - VerifyCap interrupt * - VerifyCap frames * But rather moves directly to LinkUp. * * Do the work of the VerifyCap interrupt handler, * handle_verify_cap(), but do not try moving the state to * LinkUp as we are already there. * * NOTE: This uses this device's vAU, vCU, and vl15_init for * the remote values. Both sides must be using the values. */ if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { set_up_vl15(dd, dd->vau, dd->vl15_init); assign_remote_cm_au_table(dd, dd->vcu); ppd->neighbor_guid = read_csr(dd, DC_DC8051_STS_REMOTE_GUID); ppd->neighbor_type = read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) & DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK; ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) & DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK; dd_dev_info(dd, "Neighbor GUID: %llx Neighbor type %d\n", ppd->neighbor_guid, ppd->neighbor_type); } /* physical link went up */ ppd->linkup = 1; ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); /* link widths are not available until the link is fully up */ get_linkup_link_widths(ppd); } else { /* physical link went down */ ppd->linkup = 0; /* clear HW details of the previous connection */ reset_link_credits(dd); /* freeze after a link down to guarantee a clean egress */ start_freeze_handling(ppd, FREEZE_SELF | FREEZE_LINK_DOWN); ev = IB_EVENT_PORT_ERR; hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LINKDOWN_BIT); /* if we are down, the neighbor is down */ ppd->neighbor_normal = 0; /* notify IB of the link change */ signal_ib_event(ppd, ev); } } /* * Handle receive or urgent interrupts for user contexts. This means a user * process was waiting for a packet to arrive, and didn't want to poll. */ void handle_user_interrupt(struct hfi1_ctxtdata *rcd) { struct hfi1_devdata *dd = rcd->dd; unsigned long flags; spin_lock_irqsave(&dd->uctxt_lock, flags); if (!rcd->cnt) goto done; if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) { wake_up_interruptible(&rcd->wait); hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd->ctxt); } else if (test_and_clear_bit(HFI1_CTXT_WAITING_URG, &rcd->event_flags)) { rcd->urgent++; wake_up_interruptible(&rcd->wait); } done: spin_unlock_irqrestore(&dd->uctxt_lock, flags); }
bgly/ibmvscsi_tgt
drivers/infiniband/hw/hfi1/intr.c
C
gpl-2.0
6,412
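The intr.c excerpt's format_hwmsg()/hfi1_format_hwerrors() pair turns a hardware-error bit vector plus a table of {mask, message} entries into one bracketed diagnostic string. The same technique is easy to demonstrate outside the kernel; the standalone program below is illustrative only and uses bounded strncat() instead of the kernel's strlcat(), with an invented error table:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Same idea as the driver's hfi1_hwerror_msgs table: one entry per error bit. */
struct hwerror_msg {
	uint64_t mask;
	const char *msg;
};

static const struct hwerror_msg msgs[] = {
	{ 1ULL << 0, "pcie parity" },
	{ 1ULL << 1, "sdma timeout" },
	{ 1ULL << 4, "firmware fault" },
};

/* Append "[msg]" for every set bit, bounded by the destination size. */
static void format_hwerrors(uint64_t hwerrs, char *buf, size_t len)
{
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++) {
		if (!(hwerrs & msgs[i].mask))
			continue;
		strncat(buf, "[", len - strlen(buf) - 1);
		strncat(buf, msgs[i].msg, len - strlen(buf) - 1);
		strncat(buf, "]", len - strlen(buf) - 1);
	}
}

int main(void)
{
	char buf[128];

	format_hwerrors((1ULL << 0) | (1ULL << 4), buf, sizeof(buf));
	printf("%s\n", buf);	/* prints: [pcie parity][firmware fault] */
	return 0;
}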
<?php /** * @file * Generic transliteration data for the PhpTransliteration class. */ $base = array( 0x00 => 'fan', 'yi', 'tan', 'lei', 'yong', NULL, 'jin', 'she', 'yin', 'ji', NULL, 'su', NULL, NULL, NULL, 'wang', 0x10 => 'mian', 'su', 'yi', 'shai', 'xi', 'ji', 'luo', 'you', 'mao', 'zha', 'sui', 'zhi', 'bian', 'li', NULL, NULL, 0x20 => NULL, NULL, NULL, NULL, NULL, 'qiao', 'guan', 'xi', 'zhen', 'yong', 'nie', 'jun', 'xie', 'yao', 'xie', 'zhi', 0x30 => 'neng', NULL, 'si', 'long', 'chen', 'mi', 'que', 'dan', 'shan', NULL, NULL, NULL, 'su', 'xie', 'bo', 'ding', 0x40 => 'zu', NULL, 'shu', 'she', 'han', 'tan', 'gao', NULL, NULL, NULL, 'na', 'mi', 'xun', 'men', 'jian', 'cui', 0x50 => 'jue', 'he', 'fei', 'shi', 'che', 'shen', 'nu', 'ping', 'man', NULL, NULL, NULL, NULL, 'yi', 'chou', NULL, 0x60 => 'ku', 'bao', 'lei', 'ke', 'sha', 'bi', 'sui', 'ge', 'pi', 'yi', 'xian', 'ni', 'ying', 'zhu', 'chun', 'feng', 0x70 => 'xu', 'piao', 'wu', 'liao', 'cang', 'zou', 'zuo', 'bian', 'yao', 'huan', 'pai', 'xiu', NULL, 'lei', 'qing', 'xiao', 0x80 => 'jiao', 'guo', NULL, NULL, 'yan', 'xue', 'zhu', 'heng', 'ying', 'xi', NULL, NULL, 'lian', 'xian', 'huan', 'yin', 0x90 => NULL, 'lian', 'shan', 'cang', 'bei', 'jian', 'shu', 'fan', 'dian', NULL, 'ba', 'yu', NULL, NULL, 'nang', 'lei', 0xA0 => 'yi', 'dai', NULL, 'chan', 'chao', 'gan', 'jin', 'nen', NULL, NULL, NULL, 'liao', 'mo', 'you', NULL, 'liu', 0xB0 => 'han', NULL, 'yong', 'jin', 'chi', 'ren', 'nong', NULL, NULL, 'hong', 'tian', NULL, 'ai', 'gua', 'biao', 'bo', 0xC0 => 'qiong', NULL, 'shu', 'chui', 'hui', 'chao', 'fu', 'hui', 'e', 'wei', 'fen', 'tan', NULL, 'lun', 'he', 'yong', 0xD0 => 'hui', NULL, 'yu', 'zong', 'yan', 'qiu', 'zhao', 'jiong', 'tai', NULL, NULL, NULL, NULL, NULL, NULL, 'tui', 0xE0 => 'lin', 'jiong', 'zha', 'xing', 'hu', NULL, 'xu', NULL, NULL, NULL, 'cui', 'qing', 'mo', NULL, 'zao', 'beng', 0xF0 => 'chi', NULL, NULL, 'yan', 'ge', 'mo', 'bei', 'juan', 'die', 'zhao', NULL, 'wu', 'yan', NULL, 'jue', 'xian', );
havran/Drupal.sk
docroot/drupal/core/lib/Drupal/Component/Transliteration/data/x3d.php
PHP
gpl-2.0
2,020
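These generic data files each cover one 256-code-point bank: the file name carries the high byte of the Unicode code point (x3d.php corresponds to U+3D00 through U+3DFF) and the array index is the low byte, with NULL meaning no generic transliteration is available. The sketch below shows how such a bank file could be consumed; the load_bank() and transliterate_char() helpers and the data directory path are illustrative stand-ins, not Drupal's actual PhpTransliteration API:

<?php

/**
 * Load one 256-entry transliteration bank, e.g. bank 0x3d => data/x3d.php.
 * Illustrative only; Drupal wraps this lookup inside PhpTransliteration.
 */
function load_bank($bank, $dir = __DIR__ . '/data')
{
    $base = array();
    $file = sprintf('%s/x%02x.php', $dir, $bank);
    if (is_file($file)) {
        include $file;   // The included file populates $base.
    }
    return $base;
}

function transliterate_char($code_point, $unknown = '?')
{
    $bank = load_bank($code_point >> 8);        // high byte selects the file
    $offset = $code_point & 0xFF;               // low byte indexes the array
    $result = isset($bank[$offset]) ? $bank[$offset] : NULL;
    return $result === NULL ? $unknown : $result;
}

// U+3D07 sits in bank 0x3d at offset 0x07, i.e. 'she' in the table above.
echo transliterate_char(0x3D07), "\n";

The sibling files that follow (xaf.php for Hangul syllables, xfc.php for Arabic presentation forms) use the same bank-per-file layout.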
<?php /** * @file * Generic transliteration data for the PhpTransliteration class. */ $base = array( 0x00 => 'kkyeols', 'kkyeolt', 'kkyeolp', 'kkyeolh', 'kkyeom', 'kkyeob', 'kkyeobs', 'kkyeos', 'kkyeoss', 'kkyeong', 'kkyeoj', 'kkyeoch', 'kkyeok', 'kkyeot', 'kkyeop', 'kkyeoh', 0x10 => 'kkye', 'kkyeg', 'kkyekk', 'kkyegs', 'kkyen', 'kkyenj', 'kkyenh', 'kkyed', 'kkyel', 'kkyelg', 'kkyelm', 'kkyelb', 'kkyels', 'kkyelt', 'kkyelp', 'kkyelh', 0x20 => 'kkyem', 'kkyeb', 'kkyebs', 'kkyes', 'kkyess', 'kkyeng', 'kkyej', 'kkyech', 'kkyek', 'kkyet', 'kkyep', 'kkyeh', 'kko', 'kkog', 'kkokk', 'kkogs', 0x30 => 'kkon', 'kkonj', 'kkonh', 'kkod', 'kkol', 'kkolg', 'kkolm', 'kkolb', 'kkols', 'kkolt', 'kkolp', 'kkolh', 'kkom', 'kkob', 'kkobs', 'kkos', 0x40 => 'kkoss', 'kkong', 'kkoj', 'kkoch', 'kkok', 'kkot', 'kkop', 'kkoh', 'kkwa', 'kkwag', 'kkwakk', 'kkwags', 'kkwan', 'kkwanj', 'kkwanh', 'kkwad', 0x50 => 'kkwal', 'kkwalg', 'kkwalm', 'kkwalb', 'kkwals', 'kkwalt', 'kkwalp', 'kkwalh', 'kkwam', 'kkwab', 'kkwabs', 'kkwas', 'kkwass', 'kkwang', 'kkwaj', 'kkwach', 0x60 => 'kkwak', 'kkwat', 'kkwap', 'kkwah', 'kkwae', 'kkwaeg', 'kkwaekk', 'kkwaegs', 'kkwaen', 'kkwaenj', 'kkwaenh', 'kkwaed', 'kkwael', 'kkwaelg', 'kkwaelm', 'kkwaelb', 0x70 => 'kkwaels', 'kkwaelt', 'kkwaelp', 'kkwaelh', 'kkwaem', 'kkwaeb', 'kkwaebs', 'kkwaes', 'kkwaess', 'kkwaeng', 'kkwaej', 'kkwaech', 'kkwaek', 'kkwaet', 'kkwaep', 'kkwaeh', 0x80 => 'kkoe', 'kkoeg', 'kkoekk', 'kkoegs', 'kkoen', 'kkoenj', 'kkoenh', 'kkoed', 'kkoel', 'kkoelg', 'kkoelm', 'kkoelb', 'kkoels', 'kkoelt', 'kkoelp', 'kkoelh', 0x90 => 'kkoem', 'kkoeb', 'kkoebs', 'kkoes', 'kkoess', 'kkoeng', 'kkoej', 'kkoech', 'kkoek', 'kkoet', 'kkoep', 'kkoeh', 'kkyo', 'kkyog', 'kkyokk', 'kkyogs', 0xA0 => 'kkyon', 'kkyonj', 'kkyonh', 'kkyod', 'kkyol', 'kkyolg', 'kkyolm', 'kkyolb', 'kkyols', 'kkyolt', 'kkyolp', 'kkyolh', 'kkyom', 'kkyob', 'kkyobs', 'kkyos', 0xB0 => 'kkyoss', 'kkyong', 'kkyoj', 'kkyoch', 'kkyok', 'kkyot', 'kkyop', 'kkyoh', 'kku', 'kkug', 'kkukk', 'kkugs', 'kkun', 'kkunj', 'kkunh', 'kkud', 0xC0 => 'kkul', 'kkulg', 'kkulm', 'kkulb', 'kkuls', 'kkult', 'kkulp', 'kkulh', 'kkum', 'kkub', 'kkubs', 'kkus', 'kkuss', 'kkung', 'kkuj', 'kkuch', 0xD0 => 'kkuk', 'kkut', 'kkup', 'kkuh', 'kkwo', 'kkwog', 'kkwokk', 'kkwogs', 'kkwon', 'kkwonj', 'kkwonh', 'kkwod', 'kkwol', 'kkwolg', 'kkwolm', 'kkwolb', 0xE0 => 'kkwols', 'kkwolt', 'kkwolp', 'kkwolh', 'kkwom', 'kkwob', 'kkwobs', 'kkwos', 'kkwoss', 'kkwong', 'kkwoj', 'kkwoch', 'kkwok', 'kkwot', 'kkwop', 'kkwoh', 0xF0 => 'kkwe', 'kkweg', 'kkwekk', 'kkwegs', 'kkwen', 'kkwenj', 'kkwenh', 'kkwed', 'kkwel', 'kkwelg', 'kkwelm', 'kkwelb', 'kkwels', 'kkwelt', 'kkwelp', 'kkwelh', );
urisavka/Drupal8
core/lib/Drupal/Component/Transliteration/data/xaf.php
PHP
gpl-2.0
2,691
<?php /** * @file * Generic transliteration data for the PhpTransliteration class. */ $base = array( 0x00 => 'yj', 'yh', 'ym', 'yy', 'yy', 'bj', 'bh', 'bkh', 'bm', 'by', 'by', 'tj', 'th', 'tkh', 'tm', 'ty', 0x10 => 'ty', 'thj', 'thm', 'thy', 'thy', 'jh', 'jm', 'hj', 'hm', 'khj', 'khh', 'khm', 'sj', 'sh', 'skh', 'sm', 0x20 => 'sh', 'sm', 'dj', 'dh', 'dkh', 'dm', 'th', 'tm', 'zm', '', '', 'ghj', 'ghm', 'fj', 'fh', 'fkh', 0x30 => 'fm', 'fy', 'fy', 'qh', 'qm', 'qy', 'qy', 'ka', 'kj', 'kh', 'kkh', 'kl', 'km', 'ky', 'ky', 'lj', 0x40 => 'lh', 'lkh', 'lm', 'ly', 'ly', 'mj', 'mh', 'mkh', 'mm', 'my', 'my', 'nj', 'nh', 'nkh', 'nm', 'ny', 0x50 => 'ny', 'hj', 'hm', 'hy', 'hy', 'yj', 'yh', 'ykh', 'ym', 'yy', 'yy', 'dh', 'r', 'y', '', '', 0x60 => ' a', ' u', ' i', ' ', 'yr', 'yz', 'ym', 'yn', 'yy', 'yy', 'br', 'bz', 'bm', 'bn', 'by', 'by', 0x70 => 'tr', 'tz', 'tm', 'tn', 'ty', 'ty', 'thr', 'thz', 'thm', 'thn', 'thy', 'thy', 'fy', 'fy', 'qy', 'qy', 0x80 => 'ka', 'kl', 'km', 'ky', 'ky', 'lm', 'ly', 'ly', 'ma', 'mm', 'nr', 'nz', 'nm', 'nn', 'ny', 'ny', 0x90 => 'y', 'yr', 'yz', 'ym', 'yn', 'yy', 'yy', 'yj', 'yh', 'ykh', 'ym', 'yh', 'bj', 'bh', 'bkh', 'bm', 0xA0 => 'bh', 'tj', 'th', 'tkh', 'tm', 'th', 'thm', 'jh', 'jm', 'hj', 'hm', 'khj', 'khm', 'sj', 'sh', 'skh', 0xB0 => 'sm', 'sh', 'skh', 'sm', 'dj', 'dh', 'dkh', 'dm', 'th', 'zm', '', '', 'ghj', 'ghm', 'fj', 'fh', 0xC0 => 'fkh', 'fm', 'qh', 'qm', 'kj', 'kh', 'kkh', 'kl', 'km', 'lj', 'lh', 'lkh', 'lm', 'lh', 'mj', 'mh', 0xD0 => 'mkh', 'mm', 'nj', 'nh', 'nkh', 'nm', 'nh', 'hj', 'hm', 'h', 'yj', 'yh', 'ykh', 'ym', 'yh', 'ym', 0xE0 => 'yh', 'bm', 'bh', 'tm', 'th', 'thm', 'thh', 'sm', 'sh', 'shm', 'shh', 'kl', 'km', 'lm', 'nm', 'nh', 0xF0 => 'ym', 'yh', 'a', 'u', 'i', 'ty', 'ty', '', '', 'ghy', 'ghy', 'sy', 'sy', 'shy', 'shy', 'hy', );
nag50/d8training
core/lib/Drupal/Component/Transliteration/data/xfc.php
PHP
gpl-2.0
1,831
/* * Copyright (C) 2013 STMicroelectronics Limited * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <media/rc-core.h> #include <linux/pinctrl/consumer.h> struct st_rc_device { struct device *dev; int irq; int irq_wake; struct clk *sys_clock; volatile void __iomem *base; /* Register base address */ volatile void __iomem *rx_base;/* RX Register base address */ struct rc_dev *rdev; bool overclocking; int sample_mult; int sample_div; bool rxuhfmode; struct reset_control *rstc; }; /* Registers */ #define IRB_SAMPLE_RATE_COMM 0x64 /* sample freq divisor*/ #define IRB_CLOCK_SEL 0x70 /* clock select */ #define IRB_CLOCK_SEL_STATUS 0x74 /* clock status */ /* IRB IR/UHF receiver registers */ #define IRB_RX_ON 0x40 /* pulse time capture */ #define IRB_RX_SYS 0X44 /* sym period capture */ #define IRB_RX_INT_EN 0x48 /* IRQ enable (R/W) */ #define IRB_RX_INT_STATUS 0x4c /* IRQ status (R/W) */ #define IRB_RX_EN 0x50 /* Receive enable */ #define IRB_MAX_SYM_PERIOD 0x54 /* max sym value */ #define IRB_RX_INT_CLEAR 0x58 /* overrun status */ #define IRB_RX_STATUS 0x6c /* receive status */ #define IRB_RX_NOISE_SUPPR 0x5c /* noise suppression */ #define IRB_RX_POLARITY_INV 0x68 /* polarity inverter */ /** * IRQ set: Enable full FIFO 1 -> bit 3; * Enable overrun IRQ 1 -> bit 2; * Enable last symbol IRQ 1 -> bit 1: * Enable RX interrupt 1 -> bit 0; */ #define IRB_RX_INTS 0x0f #define IRB_RX_OVERRUN_INT 0x04 /* maximum symbol period (microsecs),timeout to detect end of symbol train */ #define MAX_SYMB_TIME 0x5000 #define IRB_SAMPLE_FREQ 10000000 #define IRB_FIFO_NOT_EMPTY 0xff00 #define IRB_OVERFLOW 0x4 #define IRB_TIMEOUT 0xffff #define IR_ST_NAME "st-rc" static void st_rc_send_lirc_timeout(struct rc_dev *rdev) { DEFINE_IR_RAW_EVENT(ev); ev.timeout = true; ir_raw_event_store(rdev, &ev); } /** * RX graphical example to better understand the difference between ST IR block * output and standard definition used by LIRC (and most of the world!) * * mark mark * |-IRB_RX_ON-| |-IRB_RX_ON-| * ___ ___ ___ ___ ___ ___ _ * | | | | | | | | | | | | | * | | | | | | space 0 | | | | | | space 1 | * _____| |__| |__| |____________________________| |__| |__| |_____________| * * |--------------- IRB_RX_SYS -------------|------ IRB_RX_SYS -------| * * |------------- encoding bit 0 -----------|---- encoding bit 1 -----| * * ST hardware returns mark (IRB_RX_ON) and total symbol time (IRB_RX_SYS), so * convert to standard mark/space we have to calculate space=(IRB_RX_SYS-mark) * The mark time represents the amount of time the carrier (usually 36-40kHz) * is detected.The above examples shows Pulse Width Modulation encoding where * bit 0 is represented by space>mark. 
*/ static irqreturn_t st_rc_rx_interrupt(int irq, void *data) { unsigned int symbol, mark = 0; struct st_rc_device *dev = data; int last_symbol = 0; u32 status; DEFINE_IR_RAW_EVENT(ev); if (dev->irq_wake) pm_wakeup_event(dev->dev, 0); status = readl(dev->rx_base + IRB_RX_STATUS); while (status & (IRB_FIFO_NOT_EMPTY | IRB_OVERFLOW)) { u32 int_status = readl(dev->rx_base + IRB_RX_INT_STATUS); if (unlikely(int_status & IRB_RX_OVERRUN_INT)) { /* discard the entire collection in case of errors! */ ir_raw_event_reset(dev->rdev); dev_info(dev->dev, "IR RX overrun\n"); writel(IRB_RX_OVERRUN_INT, dev->rx_base + IRB_RX_INT_CLEAR); continue; } symbol = readl(dev->rx_base + IRB_RX_SYS); mark = readl(dev->rx_base + IRB_RX_ON); if (symbol == IRB_TIMEOUT) last_symbol = 1; /* Ignore any noise */ if ((mark > 2) && (symbol > 1)) { symbol -= mark; if (dev->overclocking) { /* adjustments to timings */ symbol *= dev->sample_mult; symbol /= dev->sample_div; mark *= dev->sample_mult; mark /= dev->sample_div; } ev.duration = US_TO_NS(mark); ev.pulse = true; ir_raw_event_store(dev->rdev, &ev); if (!last_symbol) { ev.duration = US_TO_NS(symbol); ev.pulse = false; ir_raw_event_store(dev->rdev, &ev); } else { st_rc_send_lirc_timeout(dev->rdev); } } last_symbol = 0; status = readl(dev->rx_base + IRB_RX_STATUS); } writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_CLEAR); /* Empty software fifo */ ir_raw_event_handle(dev->rdev); return IRQ_HANDLED; } static void st_rc_hardware_init(struct st_rc_device *dev) { int baseclock, freqdiff; unsigned int rx_max_symbol_per = MAX_SYMB_TIME; unsigned int rx_sampling_freq_div; /* Enable the IP */ if (dev->rstc) reset_control_deassert(dev->rstc); clk_prepare_enable(dev->sys_clock); baseclock = clk_get_rate(dev->sys_clock); /* IRB input pins are inverted internally from high to low. 
*/ writel(1, dev->rx_base + IRB_RX_POLARITY_INV); rx_sampling_freq_div = baseclock / IRB_SAMPLE_FREQ; writel(rx_sampling_freq_div, dev->base + IRB_SAMPLE_RATE_COMM); freqdiff = baseclock - (rx_sampling_freq_div * IRB_SAMPLE_FREQ); if (freqdiff) { /* over clocking, workout the adjustment factors */ dev->overclocking = true; dev->sample_mult = 1000; dev->sample_div = baseclock / (10000 * rx_sampling_freq_div); rx_max_symbol_per = (rx_max_symbol_per * 1000)/dev->sample_div; } writel(rx_max_symbol_per, dev->rx_base + IRB_MAX_SYM_PERIOD); } static int st_rc_remove(struct platform_device *pdev) { struct st_rc_device *rc_dev = platform_get_drvdata(pdev); clk_disable_unprepare(rc_dev->sys_clock); rc_unregister_device(rc_dev->rdev); return 0; } static int st_rc_open(struct rc_dev *rdev) { struct st_rc_device *dev = rdev->priv; unsigned long flags; local_irq_save(flags); /* enable interrupts and receiver */ writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_EN); writel(0x01, dev->rx_base + IRB_RX_EN); local_irq_restore(flags); return 0; } static void st_rc_close(struct rc_dev *rdev) { struct st_rc_device *dev = rdev->priv; /* disable interrupts and receiver */ writel(0x00, dev->rx_base + IRB_RX_EN); writel(0x00, dev->rx_base + IRB_RX_INT_EN); } static int st_rc_probe(struct platform_device *pdev) { int ret = -EINVAL; struct rc_dev *rdev; struct device *dev = &pdev->dev; struct resource *res; struct st_rc_device *rc_dev; struct device_node *np = pdev->dev.of_node; const char *rx_mode; rc_dev = devm_kzalloc(dev, sizeof(struct st_rc_device), GFP_KERNEL); if (!rc_dev) return -ENOMEM; rdev = rc_allocate_device(); if (!rdev) return -ENOMEM; if (np && !of_property_read_string(np, "rx-mode", &rx_mode)) { if (!strcmp(rx_mode, "uhf")) { rc_dev->rxuhfmode = true; } else if (!strcmp(rx_mode, "infrared")) { rc_dev->rxuhfmode = false; } else { dev_err(dev, "Unsupported rx mode [%s]\n", rx_mode); goto err; } } else { goto err; } rc_dev->sys_clock = devm_clk_get(dev, NULL); if (IS_ERR(rc_dev->sys_clock)) { dev_err(dev, "System clock not found\n"); ret = PTR_ERR(rc_dev->sys_clock); goto err; } rc_dev->irq = platform_get_irq(pdev, 0); if (rc_dev->irq < 0) { ret = rc_dev->irq; goto err; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rc_dev->base = devm_ioremap_resource(dev, res); if (IS_ERR((__force void *)rc_dev->base)) { ret = PTR_ERR((__force void *)rc_dev->base); goto err; } if (rc_dev->rxuhfmode) rc_dev->rx_base = rc_dev->base + 0x40; else rc_dev->rx_base = rc_dev->base; rc_dev->rstc = reset_control_get_optional(dev, NULL); if (IS_ERR(rc_dev->rstc)) rc_dev->rstc = NULL; rc_dev->dev = dev; platform_set_drvdata(pdev, rc_dev); st_rc_hardware_init(rc_dev); rdev->driver_type = RC_DRIVER_IR_RAW; rdev->allowed_protocols = RC_BIT_ALL; /* rx sampling rate is 10Mhz */ rdev->rx_resolution = 100; rdev->timeout = US_TO_NS(MAX_SYMB_TIME); rdev->priv = rc_dev; rdev->open = st_rc_open; rdev->close = st_rc_close; rdev->driver_name = IR_ST_NAME; rdev->map_name = RC_MAP_LIRC; rdev->input_name = "ST Remote Control Receiver"; /* enable wake via this device */ device_set_wakeup_capable(dev, true); device_set_wakeup_enable(dev, true); ret = rc_register_device(rdev); if (ret < 0) goto clkerr; rc_dev->rdev = rdev; if (devm_request_irq(dev, rc_dev->irq, st_rc_rx_interrupt, IRQF_NO_SUSPEND, IR_ST_NAME, rc_dev) < 0) { dev_err(dev, "IRQ %d register failed\n", rc_dev->irq); ret = -EINVAL; goto rcerr; } /** * for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW * lircd expects a long space first before a signal train to sync. 
*/ st_rc_send_lirc_timeout(rdev); dev_info(dev, "setup in %s mode\n", rc_dev->rxuhfmode ? "UHF" : "IR"); return ret; rcerr: rc_unregister_device(rdev); rdev = NULL; clkerr: clk_disable_unprepare(rc_dev->sys_clock); err: rc_free_device(rdev); dev_err(dev, "Unable to register device (%d)\n", ret); return ret; } #ifdef CONFIG_PM static int st_rc_suspend(struct device *dev) { struct st_rc_device *rc_dev = dev_get_drvdata(dev); if (device_may_wakeup(dev)) { if (!enable_irq_wake(rc_dev->irq)) rc_dev->irq_wake = 1; else return -EINVAL; } else { pinctrl_pm_select_sleep_state(dev); writel(0x00, rc_dev->rx_base + IRB_RX_EN); writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN); clk_disable_unprepare(rc_dev->sys_clock); if (rc_dev->rstc) reset_control_assert(rc_dev->rstc); } return 0; } static int st_rc_resume(struct device *dev) { struct st_rc_device *rc_dev = dev_get_drvdata(dev); struct rc_dev *rdev = rc_dev->rdev; if (rc_dev->irq_wake) { disable_irq_wake(rc_dev->irq); rc_dev->irq_wake = 0; } else { pinctrl_pm_select_default_state(dev); st_rc_hardware_init(rc_dev); if (rdev->users) { writel(IRB_RX_INTS, rc_dev->rx_base + IRB_RX_INT_EN); writel(0x01, rc_dev->rx_base + IRB_RX_EN); } } return 0; } #endif static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume); #ifdef CONFIG_OF static struct of_device_id st_rc_match[] = { { .compatible = "st,comms-irb", }, {}, }; MODULE_DEVICE_TABLE(of, st_rc_match); #endif static struct platform_driver st_rc_driver = { .driver = { .name = IR_ST_NAME, .of_match_table = of_match_ptr(st_rc_match), .pm = &st_rc_pm_ops, }, .probe = st_rc_probe, .remove = st_rc_remove, }; module_platform_driver(st_rc_driver); MODULE_DESCRIPTION("RC Transceiver driver for STMicroelectronics platforms"); MODULE_AUTHOR("STMicroelectronics (R&D) Ltd"); MODULE_LICENSE("GPL");
ZeroInfinityXDA/HelixKernel_Nougat
drivers/media/rc/st_rc.c
C
gpl-2.0
11,221
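The block comment in st_rc.c explains that the ST hardware reports each symbol as a mark time (IRB_RX_ON) and a total symbol time (IRB_RX_SYS), so the driver derives space = symbol - mark and, when the base clock does not divide exactly to the 10 MHz sampling rate, rescales both values by sample_mult/sample_div. The small standalone program below works through that arithmetic with made-up register readings, not captured hardware values:

#include <stdio.h>

int main(void)
{
	/* Example raw readings: carrier detected for 560 counts out of a
	 * 1125-count symbol period. */
	unsigned int symbol = 1125;	/* IRB_RX_SYS: whole symbol period */
	unsigned int mark   = 560;	/* IRB_RX_ON:  carrier-on portion  */

	/* Overclocking correction, as in st_rc_rx_interrupt(): with a 48 MHz
	 * base clock and divisor 4, st_rc_hardware_init() computes
	 * sample_div = 48e6 / (10000 * 4) = 1200 and sample_mult = 1000. */
	unsigned int sample_mult = 1000;
	unsigned int sample_div  = 1200;

	unsigned int space = symbol - mark;	/* space = total - mark */

	space = space * sample_mult / sample_div;
	mark  = mark  * sample_mult / sample_div;

	printf("mark %u us, space %u us\n", mark, space);	/* 466 us, 470 us */
	return 0;
}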
<?php /** * Install theme administration panel. * * @package WordPress * @subpackage Administration */ /** WordPress Administration Bootstrap */ require_once( dirname( __FILE__ ) . '/admin.php' ); require( ABSPATH . 'wp-admin/includes/theme-install.php' ); wp_reset_vars( array( 'tab' ) ); if ( ! current_user_can('install_themes') ) wp_die( __( 'You do not have sufficient permissions to install themes on this site.' ) ); if ( is_multisite() && ! is_network_admin() ) { wp_redirect( network_admin_url( 'theme-install.php' ) ); exit(); } $title = __( 'Add Themes' ); $parent_file = 'themes.php'; if ( ! is_network_admin() ) { $submenu_file = 'themes.php'; } $sections = array( 'featured' => __( 'Featured Themes' ), 'popular' => __( 'Popular Themes' ), 'new' => __( 'Newest Themes' ), ); $installed_themes = search_theme_directories(); foreach ( $installed_themes as $k => $v ) { if ( false !== strpos( $k, '/' ) ) { unset( $installed_themes[ $k ] ); } } wp_localize_script( 'theme', '_wpThemeSettings', array( 'themes' => false, 'settings' => array( 'isInstall' => true, 'canInstall' => current_user_can( 'install_themes' ), 'installURI' => current_user_can( 'install_themes' ) ? self_admin_url( 'theme-install.php' ) : null, 'adminUrl' => parse_url( self_admin_url(), PHP_URL_PATH ) ), 'l10n' => array( 'addNew' => __( 'Add New Theme' ), 'search' => __( 'Search Themes' ), 'searchPlaceholder' => __( 'Search themes...' ), // placeholder (no ellipsis) 'upload' => __( 'Upload Theme' ), 'back' => __( 'Back' ), 'error' => __( 'An unexpected error occurred. Something may be wrong with WordPress.org or this server&#8217;s configuration. If you continue to have problems, please try the <a href="https://wordpress.org/support/">support forums</a>.' ) ), 'installedThemes' => array_keys( $installed_themes ), 'browse' => array( 'sections' => $sections, ), ) ); wp_enqueue_script( 'theme' ); /** * Fires before each of the tabs are rendered on the Install Themes page. * * The dynamic portion of the hook name, $tab, refers to the current * theme install tab. Possible values are 'dashboard', 'search', 'upload', * 'featured', 'new', or 'updated'. * * @since 2.8.0 */ if ( $tab ) { do_action( "install_themes_pre_{$tab}" ); } $help_overview = '<p>' . sprintf(__('You can find additional themes for your site by using the Theme Browser/Installer on this screen, which will display themes from the <a href="%s" target="_blank">WordPress.org Theme Directory</a>. These themes are designed and developed by third parties, are available free of charge, and are compatible with the license WordPress uses.'), 'https://wordpress.org/themes/') . '</p>' . '<p>' . __('You can Search for themes by keyword, author, or tag, or can get more specific and search by criteria listed in the feature filter. Alternately, you can browse the themes that are Featured, Newest, or Recently Updated. When you find a theme you like, you can preview it or install it.') . '</p>' . '<p>' . __('You can Upload a theme manually if you have already downloaded its ZIP archive onto your computer (make sure it is from a trusted and original source). You can also do it the old-fashioned way and copy a downloaded theme&#8217;s folder via FTP into your <code>/wp-content/themes</code> directory.') . '</p>'; get_current_screen()->add_help_tab( array( 'id' => 'overview', 'title' => __('Overview'), 'content' => $help_overview ) ); $help_installing = '<p>' . __('Once you have generated a list of themes, you can preview and install any of them. 
Click on the thumbnail of the theme you&#8217;re interested in previewing. It will open up in a full-screen Preview page to give you a better idea of how that theme will look.') . '</p>' . '<p>' . __('To install the theme so you can preview it with your site&#8217;s content and customize its theme options, click the "Install" button at the top of the left-hand pane. The theme files will be downloaded to your website automatically. When this is complete, the theme is now available for activation, which you can do by clicking the "Activate" link, or by navigating to your Manage Themes screen and clicking the "Live Preview" link under any installed theme&#8217;s thumbnail image.') . '</p>'; get_current_screen()->add_help_tab( array( 'id' => 'installing', 'title' => __('Previewing and Installing'), 'content' => $help_installing ) ); get_current_screen()->set_help_sidebar( '<p><strong>' . __('For more information:') . '</strong></p>' . '<p>' . __('<a href="http://codex.wordpress.org/Using_Themes#Adding_New_Themes" target="_blank">Documentation on Adding New Themes</a>') . '</p>' . '<p>' . __('<a href="https://wordpress.org/support/" target="_blank">Support Forums</a>') . '</p>' ); include(ABSPATH . 'wp-admin/admin-header.php'); ?> <div class="wrap"> <h2> <?php echo esc_html( $title ); ?> <a href="#" class="upload add-new-h2"><?php _e( 'Upload Theme' ); ?></a> <a href="#" class="browse-themes add-new-h2"><?php _ex( 'Browse', 'themes' ); ?></a> </h2> <div class="upload-theme"> <?php install_themes_upload(); ?> </div> <div class="theme-navigation"> <span class="theme-count"></span> <a class="theme-section" href="#" data-sort="featured"><?php _ex( 'Featured', 'themes' ); ?></a> <a class="theme-section" href="#" data-sort="popular"><?php _ex( 'Popular', 'themes' ); ?></a> <a class="theme-section" href="#" data-sort="new"><?php _ex( 'Latest', 'themes' ); ?></a> <div class="theme-top-filters"> <!-- <span class="theme-filter" data-filter="photoblogging">Photography</span> <span class="theme-filter" data-filter="responsive-layout">Responsive</span> --> <a class="more-filters" href="#"><?php _e( 'Feature Filter' ); ?></a> </div> <div class="more-filters-container"> <a class="apply-filters button button-secondary" href="#"><?php _e( 'Apply Filters' ); ?><span></span></a> <a class="clear-filters button button-secondary" href="#"><?php _e( 'Clear' ); ?></a> <br class="clear" /> <?php $feature_list = get_theme_feature_list(); foreach ( $feature_list as $feature_name => $features ) { if ( $feature_name === 'Features' || $feature_name === __( 'Features' ) ) { // hack hack hack echo '<div class="filters-group wide-filters-group">'; } else { echo '<div class="filters-group">'; } $feature_name = esc_html( $feature_name ); echo '<h4 class="feature-name">' . $feature_name . '</h4>'; echo '<ol class="feature-group">'; foreach ( $features as $feature => $feature_name ) { $feature = esc_attr( $feature ); echo '<li><input type="checkbox" id="feature-id-' . $feature . '" value="' . $feature . '" /> '; echo '<label for="feature-id-' . $feature . '">' . $feature_name . '</label></li>'; } echo '</ol>'; echo '</div>'; } ?> <div class="filtering-by"> <span><?php _e( 'Filtering by:' ); ?></span> <div class="tags"></div> <a href="#"><?php _e( 'Edit' ); ?></a> </div> </div> </div> <div class="theme-browser"></div> <div class="theme-install-overlay wp-full-overlay expanded"></div> <p class="no-themes"><?php _e( 'No themes found. Try a different search.' 
); ?></p> <span class="spinner"></span> <br class="clear" /> <?php /** * Fires at the top of each of the tabs on the Install Themes page. * * The dynamic portion of the hook name, $tab, refers to the current * theme install tab. Possible values are 'dashboard', 'search', 'upload', * 'featured', 'new', or 'updated'. * * @since 2.8.0 * * @param int $paged Number of the current page of results being viewed. */ if ( $tab ) { do_action( "install_themes_{$tab}", $paged ); } ?> </div> <script id="tmpl-theme" type="text/template"> <# if ( data.screenshot_url ) { #> <div class="theme-screenshot"> <img src="{{ data.screenshot_url }}" alt="" /> </div> <# } else { #> <div class="theme-screenshot blank"></div> <# } #> <span class="more-details"><?php _ex( 'Details &amp; Preview', 'theme' ); ?></span> <div class="theme-author"><?php printf( __( 'By %s' ), '{{ data.author }}' ); ?></div> <h3 class="theme-name">{{ data.name }}</h3> <div class="theme-actions"> <a class="button button-primary" href="{{ data.install_url }}"><?php esc_html_e( 'Install' ); ?></a> <a class="button button-secondary preview install-theme-preview" href="#"><?php esc_html_e( 'Preview' ); ?></a> </div> <# if ( data.installed ) { #> <div class="theme-installed"><?php _e( 'Already Installed' ); ?></div> <# } #> </script> <script id="tmpl-theme-preview" type="text/template"> <div class="wp-full-overlay-sidebar"> <div class="wp-full-overlay-header"> <a href="#" class="close-full-overlay button-secondary"><?php _e( 'Close' ); ?></a> <# if ( data.installed ) { #> <a href="#" class="button button-primary theme-install disabled"><?php _e( 'Installed' ); ?></a> <# } else { #> <a href="{{ data.install_url }}" class="button button-primary theme-install"><?php _e( 'Install' ); ?></a> <# } #> </div> <div class="wp-full-overlay-sidebar-content"> <div class="install-theme-info"> <h3 class="theme-name">{{ data.name }}</h3> <span class="theme-by"><?php printf( __( 'By %s' ), '{{ data.author }}' ); ?></span> <img class="theme-screenshot" src="{{ data.screenshot_url }}" alt="" /> <div class="theme-details"> <div class="rating rating-{{ Math.round( data.rating / 10 ) * 10 }}"> <span class="one"></span> <span class="two"></span> <span class="three"></span> <span class="four"></span> <span class="five"></span> <# if ( data.num_ratings ) { #> <p class="ratings">{{ data.num_ratings }}</p> <# } else { #> <p class="ratings"><?php _e( 'No ratings.' ); ?></p> <# } #> </div> <div class="theme-version"><?php printf( __( 'Version: %s' ), '{{ data.version }}' ); ?></div> <div class="theme-description">{{{ data.description }}}</div> </div> </div> </div> <div class="wp-full-overlay-footer"> <a href="#" class="collapse-sidebar" title="<?php esc_attr_e( 'Collapse Sidebar' ); ?>"> <span class="collapse-sidebar-label"><?php _e( 'Collapse' ); ?></span> <span class="collapse-sidebar-arrow"></span> </a> <div class="theme-navigation"> <a class="previous-theme button" href="#"><?php _e( 'Previous' ); ?></a> <a class="next-theme button" href="#"><?php _e( 'Next' ); ?></a> </div> </div> </div> <div class="wp-full-overlay-main"> <iframe src="{{ data.preview_url }}" /> </div> </script> <?php include(ABSPATH . 'wp-admin/admin-footer.php');
damasiorafael/faneh
wp-admin/theme-install.php
PHP
gpl-2.0
10,668
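theme-install.php drives its JavaScript theme browser almost entirely from data handed over with wp_localize_script(): the PHP side gathers settings, translated strings and the installed-theme list into one array that the 'theme' script reads as _wpThemeSettings. A minimal sketch of the same hand-off for a hypothetical admin page follows; the 'my-plugin-admin' handle, the js/admin.js path, the myPluginSettings object name and the hook suffix check are placeholders of mine, not WordPress core code:

<?php
/**
 * Plugin Name: My Admin Data Hand-off (illustrative sketch)
 */

function myplugin_enqueue_admin_assets( $hook ) {
	// Only load on our own admin screen; this hook suffix is an assumption.
	if ( 'toplevel_page_my-plugin' !== $hook ) {
		return;
	}

	wp_enqueue_script(
		'my-plugin-admin',
		plugins_url( 'js/admin.js', __FILE__ ),
		array( 'jquery' ),
		'1.0',
		true
	);

	// Everything the JS needs in one object, mirroring _wpThemeSettings.
	wp_localize_script( 'my-plugin-admin', 'myPluginSettings', array(
		'settings' => array(
			'canInstall' => current_user_can( 'install_themes' ),
			'adminUrl'   => parse_url( self_admin_url(), PHP_URL_PATH ),
		),
		'l10n' => array(
			'search' => __( 'Search Items' ),
			'error'  => __( 'An unexpected error occurred.' ),
		),
	) );
}
add_action( 'admin_enqueue_scripts', 'myplugin_enqueue_admin_assets' );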
/* * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Dave Airlie * Jerome Glisse <glisse@freedesktop.org> */ #include <drm/drmP.h> #include "radeon.h" #include <drm/radeon_drm.h> #if __OS_HAS_AGP struct radeon_agpmode_quirk { u32 hostbridge_vendor; u32 hostbridge_device; u32 chip_vendor; u32 chip_device; u32 subsys_vendor; u32 subsys_device; u32 default_mode; }; static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */ { PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4}, /* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */ { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4}, /* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */ { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964, 0x148c, 0x2073, 4}, /* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_IBM, 0x052f, 1}, /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, PCI_VENDOR_ID_IBM, 0x0550, 1}, /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, PCI_VENDOR_ID_IBM, 0x0530, 1}, /* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54, PCI_VENDOR_ID_IBM, 0x054f, 2}, /* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61, PCI_VENDOR_ID_SONY, 0x816b, 2}, /* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61, PCI_VENDOR_ID_SONY, 0x8195, 8}, /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_DELL, 0x00e3, 2}, /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_DELL, 0x0149, 1}, /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 
for suspend/resume */ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_IBM, 0x0531, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0061, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0064, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, PCI_VENDOR_ID_ASUSTEK, 0x1942, 1}, /* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x10cf, 0x127f, 1}, /* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */ { 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960, 0x1787, 0x5960, 4}, /* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */ { PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960, 0x17af, 0x2020, 4}, /* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */ { PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153, PCI_VENDOR_ID_ASUSTEK, 0x003c, 4}, /* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */ { PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c, PCI_VENDOR_ID_ATI, 0x013a, 2}, /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960, PCI_VENDOR_ID_ASUSTEK, 0x004c, 2}, /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960, PCI_VENDOR_ID_ASUSTEK, 0x0054, 2}, /* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d, 0x174b, 0x7149, 4}, /* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960, 0x1462, 0x0380, 4}, /* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */ { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964, 0x148c, 0x2073, 4}, /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */ { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61, PCI_VENDOR_ID_SONY, 0x8175, 1}, { 0, 0, 0, 0, 0, 0, 0 }, }; #endif int radeon_agp_init(struct radeon_device *rdev) { #if __OS_HAS_AGP struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list; struct drm_agp_mode mode; struct drm_agp_info info; uint32_t agp_status; int default_mode; bool is_v3; int ret; /* Acquire AGP. */ ret = drm_agp_acquire(rdev->ddev); if (ret) { DRM_ERROR("Unable to acquire AGP: %d\n", ret); return ret; } ret = drm_agp_info(rdev->ddev, &info); if (ret) { drm_agp_release(rdev->ddev); DRM_ERROR("Unable to get AGP info: %d\n", ret); return ret; } if (rdev->ddev->agp->agp_info.aper_size < 32) { drm_agp_release(rdev->ddev); dev_warn(rdev->dev, "AGP aperture too small (%zuM) " "need at least 32M, disabling AGP\n", rdev->ddev->agp->agp_info.aper_size); return -EINVAL; } mode.mode = info.mode; /* chips with the agp to pcie bridge don't have the AGP_STATUS register * Just use the whatever mode the host sets up. */ if (rdev->family <= CHIP_RV350) agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; else agp_status = mode.mode; is_v3 = !!(agp_status & RADEON_AGPv3_MODE); if (is_v3) { default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 
8 : 4; } else { if (agp_status & RADEON_AGP_4X_MODE) { default_mode = 4; } else if (agp_status & RADEON_AGP_2X_MODE) { default_mode = 2; } else { default_mode = 1; } } /* Apply AGPMode Quirks */ while (p && p->chip_device != 0) { if (info.id_vendor == p->hostbridge_vendor && info.id_device == p->hostbridge_device && rdev->pdev->vendor == p->chip_vendor && rdev->pdev->device == p->chip_device && rdev->pdev->subsystem_vendor == p->subsys_vendor && rdev->pdev->subsystem_device == p->subsys_device) { default_mode = p->default_mode; } ++p; } if (radeon_agpmode > 0) { if ((radeon_agpmode < (is_v3 ? 4 : 1)) || (radeon_agpmode > (is_v3 ? 8 : 4)) || (radeon_agpmode & (radeon_agpmode - 1))) { DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n", radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4", default_mode); radeon_agpmode = default_mode; } else { DRM_INFO("AGP mode requested: %d\n", radeon_agpmode); } } else { radeon_agpmode = default_mode; } mode.mode &= ~RADEON_AGP_MODE_MASK; if (is_v3) { switch (radeon_agpmode) { case 8: mode.mode |= RADEON_AGPv3_8X_MODE; break; case 4: default: mode.mode |= RADEON_AGPv3_4X_MODE; break; } } else { switch (radeon_agpmode) { case 4: mode.mode |= RADEON_AGP_4X_MODE; break; case 2: mode.mode |= RADEON_AGP_2X_MODE; break; case 1: default: mode.mode |= RADEON_AGP_1X_MODE; break; } } mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */ ret = drm_agp_enable(rdev->ddev, mode); if (ret) { DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); drm_agp_release(rdev->ddev); return ret; } rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base; rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20; rdev->mc.gtt_start = rdev->mc.agp_base; rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1; dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end); /* workaround some hw issues */ if (rdev->family < CHIP_R200) { WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000); } return 0; #else return 0; #endif } void radeon_agp_resume(struct radeon_device *rdev) { #if __OS_HAS_AGP int r; if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); if (r) dev_warn(rdev->dev, "radeon AGP reinit failed\n"); } #endif } void radeon_agp_fini(struct radeon_device *rdev) { #if __OS_HAS_AGP if (rdev->ddev->agp && rdev->ddev->agp->acquired) { drm_agp_release(rdev->ddev); } #endif } void radeon_agp_suspend(struct radeon_device *rdev) { radeon_agp_fini(rdev); }
alice-gh/linux
drivers/gpu/drm/radeon/radeon_agp.c
C
gpl-2.0
10,011
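radeon_agp.c keeps its workarounds in a zero-terminated quirk table that is walked linearly to override the default AGP mode for known-bad host-bridge/GPU/subsystem combinations, and the mode requested by the user is then validated as a power of two inside the AGP-v3 or pre-v3 range. The standalone program below illustrates the same table-walk and power-of-two check; the PCI IDs in it are invented, not taken from the real quirk list:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct agp_quirk {
	uint16_t host_vendor, host_device;
	uint16_t sub_vendor, sub_device;
	int forced_mode;			/* 0 terminates the table */
};

static const struct agp_quirk quirks[] = {
	{ 0x8086, 0x3340, 0x1014, 0x0531, 1 },	/* invented example entry */
	{ 0x1106, 0x3189, 0x1462, 0x0380, 4 },	/* invented example entry */
	{ 0, 0, 0, 0, 0 },
};

static int apply_quirks(uint16_t hv, uint16_t hd, uint16_t sv, uint16_t sd,
			int default_mode)
{
	const struct agp_quirk *p;

	for (p = quirks; p->forced_mode != 0; p++)
		if (p->host_vendor == hv && p->host_device == hd &&
		    p->sub_vendor == sv && p->sub_device == sd)
			return p->forced_mode;
	return default_mode;
}

/* Valid AGP modes are single set bits: 1x/2x/4x (pre-v3) or 4x/8x (v3),
 * matching the driver's (mode & (mode - 1)) rejection of other values. */
static bool mode_is_valid(int mode, bool is_v3)
{
	if (mode & (mode - 1))
		return false;
	return is_v3 ? (mode == 4 || mode == 8)
		     : (mode >= 1 && mode <= 4);
}

int main(void)
{
	int mode = apply_quirks(0x8086, 0x3340, 0x1014, 0x0531, 4);

	printf("mode %dx, valid(v3)=%d, valid(pre-v3)=%d\n",
	       mode, mode_is_valid(mode, true), mode_is_valid(mode, false));
	return 0;
}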
<?php /** * Comment template functions * * These functions are meant to live inside of the WordPress loop. * * @package WordPress * @subpackage Template */ /** * Retrieve the author of the current comment. * * If the comment has an empty comment_author field, then 'Anonymous' person is * assumed. * * @since 1.5.0 * * @param int $comment_ID Optional. The ID of the comment for which to retrieve the author. Default current comment. * @return string The comment author */ function get_comment_author( $comment_ID = 0 ) { $comment = get_comment( $comment_ID ); if ( empty( $comment->comment_author ) ) { if ( $comment->user_id && $user = get_userdata( $comment->user_id ) ) $author = $user->display_name; else $author = __('Anonymous'); } else { $author = $comment->comment_author; } /** * Filter the returned comment author name. * * @since 1.5.0 * * @param string $author The comment author's username. */ return apply_filters( 'get_comment_author', $author ); } /** * Displays the author of the current comment. * * @since 0.71 * * @param int $comment_ID Optional. The ID of the comment for which to print the author. Default current comment. */ function comment_author( $comment_ID = 0 ) { $author = get_comment_author( $comment_ID ); /** * Filter the comment author's name for display. * * @since 1.2.0 * * @param string $author The comment author's username. */ $author = apply_filters( 'comment_author', $author ); echo $author; } /** * Retrieve the email of the author of the current comment. * * @since 1.5.0 * * @param int $comment_ID Optional. The ID of the comment for which to get the author's email. Default current comment. * @return string The current comment author's email */ function get_comment_author_email( $comment_ID = 0 ) { $comment = get_comment( $comment_ID ); /** * Filter the comment author's returned email address. * * @since 1.5.0 * * @param string $comment_author_email The comment author's email address. */ return apply_filters( 'get_comment_author_email', $comment->comment_author_email ); } /** * Display the email of the author of the current global $comment. * * Care should be taken to protect the email address and assure that email * harvesters do not capture your commentors' email address. Most assume that * their email address will not appear in raw form on the blog. Doing so will * enable anyone, including those that people don't want to get the email * address and use it for their own means good and bad. * * @since 0.71 * * @param int $comment_ID Optional. The ID of the comment for which to print the author's email. Default current comment. */ function comment_author_email( $comment_ID = 0 ) { $author_email = get_comment_author_email( $comment_ID ); /** * Filter the comment author's email for display. * * @since 1.2.0 * * @param string $author_email The comment author's email address. */ echo apply_filters( 'author_email', $author_email ); } /** * Display the html email link to the author of the current comment. * * Care should be taken to protect the email address and assure that email * harvesters do not capture your commentors' email address. Most assume that * their email address will not appear in raw form on the blog. Doing so will * enable anyone, including those that people don't want to get the email * address and use it for their own means good and bad. * * @since 0.71 * * @param string $linktext Optional. Text to display instead of the comment author's email address. * Default empty. * @param string $before Optional. Text or HTML to display before the email link. 
Default empty. * @param string $after Optional. Text or HTML to display after the email link. Default empty. */ function comment_author_email_link( $linktext = '', $before = '', $after = '' ) { if ( $link = get_comment_author_email_link( $linktext, $before, $after ) ) echo $link; } /** * Return the html email link to the author of the current comment. * * Care should be taken to protect the email address and assure that email * harvesters do not capture your commentors' email address. Most assume that * their email address will not appear in raw form on the blog. Doing so will * enable anyone, including those that people don't want to get the email * address and use it for their own means good and bad. * * @global object $comment The current Comment row object. * * @since 2.7.0 * * @param string $linktext Optional. Text to display instead of the comment author's email address. * Default empty. * @param string $before Optional. Text or HTML to display before the email link. Default empty. * @param string $after Optional. Text or HTML to display after the email link. Default empty. */ function get_comment_author_email_link( $linktext = '', $before = '', $after = '' ) { global $comment; /** * Filter the comment author's email for display. * * Care should be taken to protect the email address and assure that email * harvesters do not capture your commenters' email address. * * @since 1.2.0 * * @param string $comment_author_email The comment author's email address. */ $email = apply_filters( 'comment_email', $comment->comment_author_email ); if ((!empty($email)) && ($email != '@')) { $display = ($linktext != '') ? $linktext : $email; $return = $before; $return .= "<a href='mailto:$email'>$display</a>"; $return .= $after; return $return; } else { return ''; } } /** * Retrieve the HTML link to the URL of the author of the current comment. * * Both get_comment_author_url() and get_comment_author() rely on get_comment(), * which falls back to the global comment variable if the $comment_ID argument is empty. * * @since 1.5.0 * * @param int $comment_ID ID of the comment for which to get the author's link. * Default current comment. * @return string The comment author name or HTML link for author's URL. */ function get_comment_author_link( $comment_ID = 0 ) { $url = get_comment_author_url( $comment_ID ); $author = get_comment_author( $comment_ID ); if ( empty( $url ) || 'http://' == $url ) $return = $author; else $return = "<a href='$url' rel='external nofollow' class='url'>$author</a>"; /** * Filter the comment author's link for display. * * @since 1.5.0 * * @param string $return The HTML-formatted comment author link. * Empty for an invalid URL. */ return apply_filters( 'get_comment_author_link', $return ); } /** * Display the html link to the url of the author of the current comment. * * @since 0.71 * * @see get_comment_author_link() Echoes result * * @param int $comment_ID ID of the comment for which to print the author's * link. Default current comment. */ function comment_author_link( $comment_ID = 0 ) { echo get_comment_author_link( $comment_ID ); } /** * Retrieve the IP address of the author of the current comment. * * @since 1.5.0 * * @param int $comment_ID ID of the comment for which to get the author's IP * address. Default current comment. * @return string Comment author's IP address. */ function get_comment_author_IP( $comment_ID = 0 ) { $comment = get_comment( $comment_ID ); /** * Filter the comment author's returned IP address. 
* * @since 1.5.0 * * @param string $comment_author_IP The comment author's IP address. */ return apply_filters( 'get_comment_author_IP', $comment->comment_author_IP ); } /** * Display the IP address of the author of the current comment. * * @since 0.71 * * @param int $comment_ID ID of the comment for which to print the author's IP * address. Default current comment. */ function comment_author_IP( $comment_ID = 0 ) { echo get_comment_author_IP( $comment_ID ); } /** * Retrieve the url of the author of the current comment. * * @since 1.5.0 * * @param int $comment_ID ID of the comment for which to get the author's URL. * Default current comment. * @return string */ function get_comment_author_url( $comment_ID = 0 ) { $comment = get_comment( $comment_ID ); $url = ('http://' == $comment->comment_author_url) ? '' : $comment->comment_author_url; $url = esc_url( $url, array('http', 'https') ); /** * Filter the comment author's URL. * * @since 1.5.0 * * @param string $url The comment author's URL. */ return apply_filters( 'get_comment_author_url', $url ); } /** * Display the url of the author of the current comment. * * @since 0.71 * * @param int $comment_ID ID of the comment for which to print the author's URL. * Default current comment. */ function comment_author_url( $comment_ID = 0 ) { $author_url = get_comment_author_url( $comment_ID ); /** * Filter the comment author's URL for display. * * @since 1.2.0 * * @param string $author_url The comment author's URL. */ echo apply_filters( 'comment_url', $author_url ); } /** * Retrieves the HTML link of the url of the author of the current comment. * * $linktext parameter is only used if the URL does not exist for the comment * author. If the URL does exist then the URL will be used and the $linktext * will be ignored. * * Encapsulate the HTML link between the $before and $after. So it will appear * in the order of $before, link, and finally $after. * * @since 1.5.0 * * @param string $linktext Optional. The text to display instead of the comment * author's email address. Default empty. * @param string $before Optional. The text or HTML to display before the email link. * Default empty. * @param string $after Optional. The text or HTML to display after the email link. * Default empty. * @return string The HTML link between the $before and $after parameters. */ function get_comment_author_url_link( $linktext = '', $before = '', $after = '' ) { $url = get_comment_author_url(); $display = ($linktext != '') ? $linktext : $url; $display = str_replace( 'http://www.', '', $display ); $display = str_replace( 'http://', '', $display ); if ( '/' == substr($display, -1) ) $display = substr($display, 0, -1); $return = "$before<a href='$url' rel='external'>$display</a>$after"; /** * Filter the comment author's returned URL link. * * @since 1.5.0 * * @param string $return The HTML-formatted comment author URL link. */ return apply_filters( 'get_comment_author_url_link', $return ); } /** * Displays the HTML link of the url of the author of the current comment. * * @since 0.71 * * @param string $linktext Optional. Text to display instead of the comment author's * email address. Default empty. * @param string $before Optional. Text or HTML to display before the email link. * Default empty. * @param string $after Optional. Text or HTML to display after the email link. * Default empty. 
*/ function comment_author_url_link( $linktext = '', $before = '', $after = '' ) { echo get_comment_author_url_link( $linktext, $before, $after ); } /** * Generates semantic classes for each comment element. * * @since 2.7.0 * * @param string|array $class Optional. One or more classes to add to the class list. * Default empty. * @param int $comment_id Comment ID. Default current comment. * @param int|WP_Post $post_id Post ID or WP_Post object. Default current post. * @param bool $echo Optional. Whether to cho or return the output. * Default true. */ function comment_class( $class = '', $comment_id = null, $post_id = null, $echo = true ) { // Separates classes with a single space, collates classes for comment DIV $class = 'class="' . join( ' ', get_comment_class( $class, $comment_id, $post_id ) ) . '"'; if ( $echo) echo $class; else return $class; } /** * Returns the classes for the comment div as an array. * * @since 2.7.0 * * @param string|array $class Optional. One or more classes to add to the class list. Default empty. * @param int $comment_id Comment ID. Default current comment. * @param int|WP_Post $post_id Post ID or WP_Post object. Default current post. * @return array An array of classes. */ function get_comment_class( $class = '', $comment_id = null, $post_id = null ) { global $comment_alt, $comment_depth, $comment_thread_alt; $comment = get_comment($comment_id); $classes = array(); // Get the comment type (comment, trackback), $classes[] = ( empty( $comment->comment_type ) ) ? 'comment' : $comment->comment_type; // If the comment author has an id (registered), then print the log in name if ( $comment->user_id > 0 && $user = get_userdata($comment->user_id) ) { // For all registered users, 'byuser' $classes[] = 'byuser'; $classes[] = 'comment-author-' . sanitize_html_class($user->user_nicename, $comment->user_id); // For comment authors who are the author of the post if ( $post = get_post($post_id) ) { if ( $comment->user_id === $post->post_author ) $classes[] = 'bypostauthor'; } } if ( empty($comment_alt) ) $comment_alt = 0; if ( empty($comment_depth) ) $comment_depth = 1; if ( empty($comment_thread_alt) ) $comment_thread_alt = 0; if ( $comment_alt % 2 ) { $classes[] = 'odd'; $classes[] = 'alt'; } else { $classes[] = 'even'; } $comment_alt++; // Alt for top-level comments if ( 1 == $comment_depth ) { if ( $comment_thread_alt % 2 ) { $classes[] = 'thread-odd'; $classes[] = 'thread-alt'; } else { $classes[] = 'thread-even'; } $comment_thread_alt++; } $classes[] = "depth-$comment_depth"; if ( !empty($class) ) { if ( !is_array( $class ) ) $class = preg_split('#\s+#', $class); $classes = array_merge($classes, $class); } $classes = array_map('esc_attr', $classes); /** * Filter the returned CSS classes for the current comment. * * @since 2.7.0 * * @param array $classes An array of comment classes. * @param string $class A comma-separated list of additional classes added to the list. * @param int $comment_id The comment id. * @param int|WP_Post $post_id The post ID or WP_Post object. */ return apply_filters( 'comment_class', $classes, $class, $comment_id, $post_id ); } /** * Retrieve the comment date of the current comment. * * @since 1.5.0 * * @param string $d Optional. The format of the date. Default user's setting. * @param int $comment_ID ID of the comment for which to get the date. Default current comment. * @return string The comment's date. 
*/ function get_comment_date( $d = '', $comment_ID = 0 ) { $comment = get_comment( $comment_ID ); if ( '' == $d ) $date = mysql2date(get_option('date_format'), $comment->comment_date); else $date = mysql2date($d, $comment->comment_date); /** * Filter the returned comment date. * * @since 1.5.0 * * @param string|int $date Formatted date string or Unix timestamp. * @param string $d The format of the date. * @param object $comment The comment object. */ return apply_filters( 'get_comment_date', $date, $d, $comment ); } /** * Display the comment date of the current comment. * * @since 0.71 * * @param string $d Optional. The format of the date. Default user's settings. * @param int $comment_ID ID of the comment for which to print the date. Default current comment. */ function comment_date( $d = '', $comment_ID = 0 ) { echo get_comment_date( $d, $comment_ID ); } /** * Retrieve the excerpt of the current comment. * * Will cut each word and only output the first 20 words with '&hellip;' at the end. * If the word count is less than 20, then no truncating is done and no '&hellip;' * will appear. * * @since 1.5.0 * * @param int $comment_ID ID of the comment for which to get the excerpt. * Default current comment. * @return string The maybe truncated comment with 20 words or less. */ function get_comment_excerpt( $comment_ID = 0 ) { $comment = get_comment( $comment_ID ); $comment_text = strip_tags($comment->comment_content); $blah = explode(' ', $comment_text); if (count($blah) > 20) { $k = 20; $use_dotdotdot = 1; } else { $k = count($blah); $use_dotdotdot = 0; } $excerpt = ''; for ($i=0; $i<$k; $i++) { $excerpt .= $blah[$i] . ' '; } $excerpt .= ($use_dotdotdot) ? '&hellip;' : ''; /** * Filter the retrieved comment excerpt. * * @since 1.5.0 * * @param string $excerpt The comment excerpt text. */ return apply_filters( 'get_comment_excerpt', $excerpt ); } /** * Display the excerpt of the current comment. * * @since 1.2.0 * * @param int $comment_ID ID of the comment for which to print the excerpt. * Default current comment. */ function comment_excerpt( $comment_ID = 0 ) { $comment_excerpt = get_comment_excerpt($comment_ID); /** * Filter the comment excerpt for display. * * @since 1.2.0 * * @param string $comment_excerpt The comment excerpt text. */ echo apply_filters( 'comment_excerpt', $comment_excerpt ); } /** * Retrieve the comment id of the current comment. * * @since 1.5.0 * * @return int The comment ID. */ function get_comment_ID() { global $comment; /** * Filter the returned comment ID. * * @since 1.5.0 * * @param int $comment_ID The current comment ID. */ return apply_filters( 'get_comment_ID', $comment->comment_ID ); } /** * Display the comment id of the current comment. * * @since 0.71 */ function comment_ID() { echo get_comment_ID(); } /** * Retrieve the link to a given comment. * * @since 1.5.0 * * @see get_page_of_comment() * * @param mixed $comment Comment to retrieve. Default current comment. * @param array $args Optional. An array of arguments to override the defaults. * @return string The permalink to the given comment. */ function get_comment_link( $comment = null, $args = array() ) { global $wp_rewrite, $in_comment_loop; $comment = get_comment($comment); // Backwards compat if ( ! 
is_array( $args ) ) { $args = array( 'page' => $args ); } $defaults = array( 'type' => 'all', 'page' => '', 'per_page' => '', 'max_depth' => '' ); $args = wp_parse_args( $args, $defaults ); if ( '' === $args['per_page'] && get_option('page_comments') ) $args['per_page'] = get_option('comments_per_page'); if ( empty($args['per_page']) ) { $args['per_page'] = 0; $args['page'] = 0; } if ( $args['per_page'] ) { if ( '' == $args['page'] ) $args['page'] = ( !empty($in_comment_loop) ) ? get_query_var('cpage') : get_page_of_comment( $comment->comment_ID, $args ); if ( $wp_rewrite->using_permalinks() ) $link = user_trailingslashit( trailingslashit( get_permalink( $comment->comment_post_ID ) ) . 'comment-page-' . $args['page'], 'comment' ); else $link = add_query_arg( 'cpage', $args['page'], get_permalink( $comment->comment_post_ID ) ); } else { $link = get_permalink( $comment->comment_post_ID ); } $link = $link . '#comment-' . $comment->comment_ID; /** * Filter the returned single comment permalink. * * @since 2.8.0 * * @see get_page_of_comment() * * @param string $link The comment permalink with '#comment-$id' appended. * @param object $comment The current comment object. * @param array $args An array of arguments to override the defaults. */ return apply_filters( 'get_comment_link', $link, $comment, $args ); } /** * Retrieve the link to the current post comments. * * @since 1.5.0 * * @param int|WP_Post $post_id Optional. Post ID or WP_Post object. Default is global $post. * @return string The link to the comments. */ function get_comments_link( $post_id = 0 ) { $comments_link = get_permalink( $post_id ) . '#comments'; /** * Filter the returned post comments permalink. * * @since 3.6.0 * * @param string $comments_link Post comments permalink with '#comments' appended. * @param int|WP_Post $post_id Post ID or WP_Post object. */ return apply_filters( 'get_comments_link', $comments_link, $post_id ); } /** * Display the link to the current post comments. * * @since 0.71 * * @param string $deprecated Not Used. * @param bool $deprecated_2 Not Used. */ function comments_link( $deprecated = '', $deprecated_2 = '' ) { if ( !empty( $deprecated ) ) _deprecated_argument( __FUNCTION__, '0.72' ); if ( !empty( $deprecated_2 ) ) _deprecated_argument( __FUNCTION__, '1.3' ); echo esc_url( get_comments_link() ); } /** * Retrieve the amount of comments a post has. * * @since 1.5.0 * * @param int|WP_Post $post_id Optional. Post ID or WP_Post object. Default is global $post. * @return int The number of comments a post has. */ function get_comments_number( $post_id = 0 ) { $post = get_post( $post_id ); if ( ! $post ) { $count = 0; } else { $count = $post->comment_count; $post_id = $post->ID; } /** * Filter the returned comment count for a post. * * @since 1.5.0 * * @param int $count Number of comments a post has. * @param int $post_id Post ID. */ return apply_filters( 'get_comments_number', $count, $post_id ); } /** * Display the language string for the number of comments the current post has. * * @since 0.71 * * @param string $zero Optional. Text for no comments. Default false. * @param string $one Optional. Text for one comment. Default false. * @param string $more Optional. Text for more than one comment. Default false. * @param string $deprecated Not used. */ function comments_number( $zero = false, $one = false, $more = false, $deprecated = '' ) { if ( ! 
 empty( $deprecated ) ) { _deprecated_argument( __FUNCTION__, '1.3' ); } echo get_comments_number_text( $zero, $one, $more ); } /** * Display the language string for the number of comments the current post has. * * @since 4.0.0 * * @param string $zero Optional. Text for no comments. Default false. * @param string $one Optional. Text for one comment. Default false. * @param string $more Optional. Text for more than one comment. Default false. */ function get_comments_number_text( $zero = false, $one = false, $more = false ) { $number = get_comments_number(); if ( $number > 1 ) { $output = str_replace( '%', number_format_i18n( $number ), ( false === $more ) ? __( '% Comments' ) : $more ); } elseif ( $number == 0 ) { $output = ( false === $zero ) ? __( 'No Comments' ) : $zero; } else { // must be one $output = ( false === $one ) ? __( '1 Comment' ) : $one; } /** * Filter the comments count for display. * * @since 1.5.0 * * @see _n() * * @param string $output A translatable string formatted based on whether the count * is equal to 0, 1, or 1+. * @param int $number The number of post comments. */ return apply_filters( 'comments_number', $output, $number ); } /** * Retrieve the text of the current comment. * * @since 1.5.0 * * @see Walker_Comment::comment() * * @param int $comment_ID ID of the comment for which to get the text. Default current comment. * @param array $args Optional. An array of arguments. Default empty. * @return string The comment content. */ function get_comment_text( $comment_ID = 0, $args = array() ) { $comment = get_comment( $comment_ID ); /** * Filter the text of a comment. * * @since 1.5.0 * * @see Walker_Comment::comment() * * @param string $comment_content Text of the comment. * @param object $comment The comment object. * @param array $args An array of arguments. */ return apply_filters( 'get_comment_text', $comment->comment_content, $comment, $args ); } /** * Display the text of the current comment. * * @since 0.71 * * @see Walker_Comment::comment() * * @param int $comment_ID ID of the comment for which to print the text. Default 0. * @param array $args Optional. An array of arguments. Default empty array. */ function comment_text( $comment_ID = 0, $args = array() ) { $comment = get_comment( $comment_ID ); $comment_text = get_comment_text( $comment_ID , $args ); /** * Filter the text of a comment to be displayed. * * @since 1.2.0 * * @see Walker_Comment::comment() * * @param string $comment_text Text of the current comment. * @param object $comment The comment object. * @param array $args An array of arguments. */ echo apply_filters( 'comment_text', $comment_text, $comment, $args ); } /** * Retrieve the comment time of the current comment. * * @since 1.5.0 * * @param string $d Optional. The format of the time. Default user's settings. * @param bool $gmt Optional. Whether to use the GMT date. Default false. * @param bool $translate Optional. Whether to translate the time (for use in feeds). * Default true. * @return string The formatted time. */ function get_comment_time( $d = '', $gmt = false, $translate = true ) { global $comment; $comment_date = $gmt ? $comment->comment_date_gmt : $comment->comment_date; if ( '' == $d ) $date = mysql2date(get_option('time_format'), $comment_date, $translate); else $date = mysql2date($d, $comment_date, $translate); /** * Filter the returned comment time. * * @since 1.5.0 * * @param string|int $date The comment time, formatted as a date string or Unix timestamp. * @param string $d Date format.
* @param bool $gmt Whether the GMT date is in use. * @param bool $translate Whether the time is translated. * @param object $comment The comment object. */ return apply_filters( 'get_comment_time', $date, $d, $gmt, $translate, $comment ); } /** * Display the comment time of the current comment. * * @since 0.71 * * @param string $d Optional. The format of the time. Default user's settings. */ function comment_time( $d = '' ) { echo get_comment_time($d); } /** * Retrieve the comment type of the current comment. * * @since 1.5.0 * * @param int $comment_ID ID of the comment for which to get the type. Default current comment. * @return string The comment type. */ function get_comment_type( $comment_ID = 0 ) { $comment = get_comment( $comment_ID ); if ( '' == $comment->comment_type ) $comment->comment_type = 'comment'; /** * Filter the returned comment type. * * @since 1.5.0 * * @param string $comment_type The type of comment, such as 'comment', 'pingback', or 'trackback'. */ return apply_filters( 'get_comment_type', $comment->comment_type ); } /** * Display the comment type of the current comment. * * @since 0.71 * * @param string $commenttxt Optional. String to display for comment type. Default false. * @param string $trackbacktxt Optional. String to display for trackback type. Default false. * @param string $pingbacktxt Optional. String to display for pingback type. Default false. */ function comment_type( $commenttxt = false, $trackbacktxt = false, $pingbacktxt = false ) { if ( false === $commenttxt ) $commenttxt = _x( 'Comment', 'noun' ); if ( false === $trackbacktxt ) $trackbacktxt = __( 'Trackback' ); if ( false === $pingbacktxt ) $pingbacktxt = __( 'Pingback' ); $type = get_comment_type(); switch( $type ) { case 'trackback' : echo $trackbacktxt; break; case 'pingback' : echo $pingbacktxt; break; default : echo $commenttxt; } } /** * Retrieve The current post's trackback URL. * * There is a check to see if permalink's have been enabled and if so, will * retrieve the pretty path. If permalinks weren't enabled, the ID of the * current post is used and appended to the correct page to go to. * * @since 1.5.0 * * @return string The trackback URL after being filtered. */ function get_trackback_url() { if ( '' != get_option('permalink_structure') ) $tb_url = trailingslashit(get_permalink()) . user_trailingslashit('trackback', 'single_trackback'); else $tb_url = get_option('siteurl') . '/wp-trackback.php?p=' . get_the_ID(); /** * Filter the returned trackback URL. * * @since 2.2.0 * * @param string $tb_url The trackback URL. */ return apply_filters( 'trackback_url', $tb_url ); } /** * Display the current post's trackback URL. * * @since 0.71 * * @param bool $deprecated_echo Not used. * @return void|string Should only be used to echo the trackback URL, use get_trackback_url() * for the result instead. */ function trackback_url( $deprecated_echo = true ) { if ( $deprecated_echo !== true ) _deprecated_argument( __FUNCTION__, '2.5', __('Use <code>get_trackback_url()</code> instead if you do not want the value echoed.') ); if ( $deprecated_echo ) echo get_trackback_url(); else return get_trackback_url(); } /** * Generate and display the RDF for the trackback information of current post. * * Deprecated in 3.0.0, and restored in 3.0.1. * * @since 0.71 * * @param int $deprecated Not used (Was $timezone = 0). */ function trackback_rdf( $deprecated = '' ) { if ( ! 
empty( $deprecated ) ) { _deprecated_argument( __FUNCTION__, '2.5' ); } if ( isset( $_SERVER['HTTP_USER_AGENT'] ) && false !== stripos( $_SERVER['HTTP_USER_AGENT'], 'W3C_Validator' ) ) { return; } echo '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:trackback="http://madskills.com/public/xml/rss/module/trackback/"> <rdf:Description rdf:about="'; the_permalink(); echo '"'."\n"; echo ' dc:identifier="'; the_permalink(); echo '"'."\n"; echo ' dc:title="'.str_replace('--', '&#x2d;&#x2d;', wptexturize(strip_tags(get_the_title()))).'"'."\n"; echo ' trackback:ping="'.get_trackback_url().'"'." />\n"; echo '</rdf:RDF>'; } /** * Whether the current post is open for comments. * * @since 1.5.0 * * @param int|WP_Post $post_id Post ID or WP_Post object. Default current post. * @return bool True if the comments are open. */ function comments_open( $post_id = null ) { $_post = get_post($post_id); $open = ( 'open' == $_post->comment_status ); /** * Filter whether the current post is open for comments. * * @since 2.5.0 * * @param bool $open Whether the current post is open for comments. * @param int|WP_Post $post_id The post ID or WP_Post object. */ return apply_filters( 'comments_open', $open, $post_id ); } /** * Whether the current post is open for pings. * * @since 1.5.0 * * @param int|WP_Post $post_id Post ID or WP_Post object. Default current post. * @return bool True if pings are accepted */ function pings_open( $post_id = null ) { $_post = get_post($post_id); $open = ( 'open' == $_post->ping_status ); /** * Filter whether the current post is open for pings. * * @since 2.5.0 * * @param bool $open Whether the current post is open for pings. * @param int|WP_Post $post_id The post ID or WP_Post object. */ return apply_filters( 'pings_open', $open, $post_id ); } /** * Display form token for unfiltered comments. * * Will only display nonce token if the current user has permissions for * unfiltered html. Won't display the token for other users. * * The function was backported to 2.0.10 and was added to versions 2.1.3 and * above. Does not exist in versions prior to 2.0.10 in the 2.0 branch and in * the 2.1 branch, prior to 2.1.3. Technically added in 2.2.0. * * Backported to 2.0.10. * * @since 2.1.3 */ function wp_comment_form_unfiltered_html_nonce() { $post = get_post(); $post_id = $post ? $post->ID : 0; if ( current_user_can( 'unfiltered_html' ) ) { wp_nonce_field( 'unfiltered-html-comment_' . $post_id, '_wp_unfiltered_html_comment_disabled', false ); echo "<script>(function(){if(window===window.parent){document.getElementById('_wp_unfiltered_html_comment_disabled').name='_wp_unfiltered_html_comment';}})();</script>\n"; } } /** * Load the comment template specified in $file. * * Will not display the comments template if not on single post or page, or if * the post does not have comments. * * Uses the WordPress database object to query for the comments. The comments * are passed through the 'comments_array' filter hook with the list of comments * and the post ID respectively. * * The $file path is passed through a filter hook called, 'comments_template' * which includes the TEMPLATEPATH and $file combined. Tries the $filtered path * first and if it fails it will require the default comment template from the * default theme. If either does not exist, then the WordPress process will be * halted. It is advised for that reason, that the default theme is not deleted. 
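 *
 * Usage sketch (argument values are illustrative): a theme's single.php or page.php
 * usually closes the post markup with a call such as comments_template( '', true );
 * which loads comments.php from the active theme and separates comments from pings.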
 * * @todo Document globals * @uses $withcomments Will not try to get the comments if the post has none. * * @since 1.5.0 * * @param string $file Optional. The file to load. Default '/comments.php'. * @param bool $separate_comments Optional. Whether to separate the comments by comment type. * Default false. * @return null Returns null if no comments appear. */ function comments_template( $file = '/comments.php', $separate_comments = false ) { global $wp_query, $withcomments, $post, $wpdb, $id, $comment, $user_login, $user_ID, $user_identity, $overridden_cpage; if ( !(is_single() || is_page() || $withcomments) || empty($post) ) return; if ( empty($file) ) $file = '/comments.php'; $req = get_option('require_name_email'); /* * Comment author information fetched from the comment cookies. * Uses wp_get_current_commenter(). */ $commenter = wp_get_current_commenter(); /* * The name of the current comment author escaped for use in attributes. * Escaped by sanitize_comment_cookies(). */ $comment_author = $commenter['comment_author']; /* * The email address of the current comment author escaped for use in attributes. * Escaped by sanitize_comment_cookies(). */ $comment_author_email = $commenter['comment_author_email']; /* * The url of the current comment author escaped for use in attributes. */ $comment_author_url = esc_url($commenter['comment_author_url']); /** @todo Use API instead of SELECTs. */ if ( $user_ID) { $comments = $wpdb->get_results($wpdb->prepare("SELECT * FROM $wpdb->comments WHERE comment_post_ID = %d AND (comment_approved = '1' OR ( user_id = %d AND comment_approved = '0' ) ) ORDER BY comment_date_gmt", $post->ID, $user_ID)); } else if ( empty($comment_author) ) { $comments = get_comments( array('post_id' => $post->ID, 'status' => 'approve', 'order' => 'ASC') ); } else { $comments = $wpdb->get_results($wpdb->prepare("SELECT * FROM $wpdb->comments WHERE comment_post_ID = %d AND ( comment_approved = '1' OR ( comment_author = %s AND comment_author_email = %s AND comment_approved = '0' ) ) ORDER BY comment_date_gmt", $post->ID, wp_specialchars_decode($comment_author,ENT_QUOTES), $comment_author_email)); } /** * Filter the comments array. * * @since 2.1.0 * * @param array $comments Array of comments supplied to the comments template. * @param int $post_ID Post ID. */ $wp_query->comments = apply_filters( 'comments_array', $comments, $post->ID ); $comments = &$wp_query->comments; $wp_query->comment_count = count($wp_query->comments); update_comment_cache($wp_query->comments); if ( $separate_comments ) { $wp_query->comments_by_type = separate_comments($comments); $comments_by_type = &$wp_query->comments_by_type; } $overridden_cpage = false; if ( '' == get_query_var('cpage') && get_option('page_comments') ) { set_query_var( 'cpage', 'newest' == get_option('default_comments_page') ? get_comment_pages_count() : 1 ); $overridden_cpage = true; } if ( !defined('COMMENTS_TEMPLATE') ) define('COMMENTS_TEMPLATE', true); $theme_template = STYLESHEETPATH . $file; /** * Filter the path to the theme template file used for the comments template. * * @since 1.5.1 * * @param string $theme_template The path to the theme template file. */ $include = apply_filters( 'comments_template', $theme_template ); if ( file_exists( $include ) ) require( $include ); elseif ( file_exists( TEMPLATEPATH . $file ) ) require( TEMPLATEPATH . $file ); else // Backward compat code will be removed in a future release require( ABSPATH . WPINC . '/theme-compat/comments.php'); } /** * Display the JS popup script to show a comment.
* * If the $file parameter is empty, then the home page is assumed. The defaults * for the window are 400px by 400px. * * For the comment link popup to work, this function has to be called or the * normal comment link will be assumed. * * @global string $wpcommentspopupfile The URL to use for the popup window. * @global int $wpcommentsjavascript Whether to use JavaScript. Set when function is called. * * @since 0.71 * * @param int $width Optional. The width of the popup window. Default 400. * @param int $height Optional. The height of the popup window. Default 400. * @param string $file Optional. Sets the location of the popup window. */ function comments_popup_script( $width = 400, $height = 400, $file = '' ) { global $wpcommentspopupfile, $wpcommentsjavascript; if (empty ($file)) { $wpcommentspopupfile = ''; // Use the index. } else { $wpcommentspopupfile = $file; } $wpcommentsjavascript = 1; $javascript = "<script type='text/javascript'>\nfunction wpopen (macagna) {\n window.open(macagna, '_blank', 'width=$width,height=$height,scrollbars=yes,status=yes');\n}\n</script>\n"; echo $javascript; } /** * Displays the link to the comments popup window for the current post ID. * * Is not meant to be displayed on single posts and pages. Should be used * on the lists of posts * * @global string $wpcommentspopupfile The URL to use for the popup window. * @global int $wpcommentsjavascript Whether to use JavaScript. Set when function is called. * * @since 0.71 * * @param string $zero Optional. String to display when no comments. Default false. * @param string $one Optional. String to display when only one comment is available. * Default false. * @param string $more Optional. String to display when there are more than one comment. * Default false. * @param string $css_class Optional. CSS class to use for comments. Default empty. * @param string $none Optional. String to display when comments have been turned off. * Default false. * @return null Returns null on single posts and pages. */ function comments_popup_link( $zero = false, $one = false, $more = false, $css_class = '', $none = false ) { global $wpcommentspopupfile, $wpcommentsjavascript; $id = get_the_ID(); if ( false === $zero ) $zero = __( 'No Comments' ); if ( false === $one ) $one = __( '1 Comment' ); if ( false === $more ) $more = __( '% Comments' ); if ( false === $none ) $none = __( 'Comments Off' ); $number = get_comments_number( $id ); if ( 0 == $number && !comments_open() && !pings_open() ) { echo '<span' . ((!empty($css_class)) ? ' class="' . esc_attr( $css_class ) . '"' : '') . '>' . $none . '</span>'; return; } if ( post_password_required() ) { echo __('Enter your password to view comments.'); return; } echo '<a href="'; if ( $wpcommentsjavascript ) { if ( empty( $wpcommentspopupfile ) ) $home = home_url(); else $home = get_option('siteurl'); echo $home . '/' . $wpcommentspopupfile . '?comments_popup=' . $id; echo '" onclick="wpopen(this.href); return false"'; } else { // if comments_popup_script() is not in the template, display simple comment link if ( 0 == $number ) echo get_permalink() . '#respond'; else comments_link(); echo '"'; } if ( !empty( $css_class ) ) { echo ' class="'.$css_class.'" '; } $title = the_title_attribute( array('echo' => 0 ) ); $attributes = ''; /** * Filter the comments popup link attributes for display. * * @since 2.5.0 * * @param string $attributes The comments popup link attributes. Default empty. */ echo apply_filters( 'comments_popup_link_attributes', $attributes ); echo ' title="' . 
esc_attr( sprintf( __('Comment on %s'), $title ) ) . '">'; comments_number( $zero, $one, $more ); echo '</a>'; } /** * Retrieve HTML content for reply to comment link. * * @since 2.7.0 * * @param array $args { * Optional. Override default arguments. * * @type string $add_below The first part of the selector used to identify the comment to respond below. * The resulting value is passed as the first parameter to addComment.moveForm(), * concatenated as $add_below-$comment->comment_ID. Default 'comment'. * @type string $respond_id The selector identifying the responding comment. Passed as the third parameter * to addComment.moveForm(), and appended to the link URL as a hash value. * Default 'respond'. * @type string $reply_text The text of the Reply link. Default 'Reply'. * @type string $login_text The text of the link to reply if logged out. Default 'Log in to Reply'. * @type int $depth' The depth of the new comment. Must be greater than 0 and less than the value * of the 'thread_comments_depth' option set in Settings > Discussion. Default 0. * @type string $before The text or HTML to add before the reply link. Default empty. * @type string $after The text or HTML to add after the reply link. Default empty. * } * @param int $comment Comment being replied to. Default current comment. * @param int|WP_Post $post Post ID or WP_Post object the comment is going to be displayed on. * Default current post. * @return mixed Link to show comment form, if successful. False, if comments are closed. */ function get_comment_reply_link( $args = array(), $comment = null, $post = null ) { $defaults = array( 'add_below' => 'comment', 'respond_id' => 'respond', 'reply_text' => __('Reply'), 'login_text' => __('Log in to Reply'), 'depth' => 0, 'before' => '', 'after' => '' ); $args = wp_parse_args( $args, $defaults ); if ( 0 == $args['depth'] || $args['max_depth'] <= $args['depth'] ) { return; } $add_below = $args['add_below']; $respond_id = $args['respond_id']; $reply_text = $args['reply_text']; $comment = get_comment( $comment ); if ( empty( $post ) ) { $post = $comment->comment_post_ID; } $post = get_post( $post ); if ( ! comments_open( $post->ID ) ) { return false; } if ( get_option( 'comment_registration' ) && ! is_user_logged_in() ) { $link = '<a rel="nofollow" class="comment-reply-login" href="' . esc_url( wp_login_url( get_permalink() ) ) . '">' . $args['login_text'] . '</a>'; } else { $link = "<a class='comment-reply-link' href='" . esc_url( add_query_arg( 'replytocom', $comment->comment_ID ) ) . "#" . $respond_id . "' onclick='return addComment.moveForm(\"$add_below-$comment->comment_ID\", \"$comment->comment_ID\", \"$respond_id\", \"$post->ID\")'>$reply_text</a>"; } /** * Filter the comment reply link. * * @since 2.7.0 * * @param string $link The HTML markup for the comment reply link. * @param array $args An array of arguments overriding the defaults. * @param object $comment The object of the comment being replied. * @param WP_Post $post The WP_Post object. */ return apply_filters( 'comment_reply_link', $args['before'] . $link . $args['after'], $args, $comment, $post ); } /** * Displays the HTML content for reply to comment link. * * @since 2.7.0 * * @see get_comment_reply_link() * * @param array $args Optional. Override default options. * @param int $comment Comment being replied to. Default current comment. * @param int|WP_Post $post Post ID or WP_Post object the comment is going to be displayed on. * Default current post. * @return mixed Link to show comment form, if successful. 
False, if comments are closed. */ function comment_reply_link($args = array(), $comment = null, $post = null) { echo get_comment_reply_link($args, $comment, $post); } /** * Retrieve HTML content for reply to post link. * * @since 2.7.0 * * @param array $args { * Optional. Override default arguments. * * @type string $add_below The first part of the selector used to identify the comment to respond below. * The resulting value is passed as the first parameter to addComment.moveForm(), * concatenated as $add_below-$comment->comment_ID. Default is 'post'. * @type string $respond_id The selector identifying the responding comment. Passed as the third parameter * to addComment.moveForm(), and appended to the link URL as a hash value. * Default 'respond'. * @type string $reply_text Text of the Reply link. Default is 'Leave a Comment'. * @type string $login_text Text of the link to reply if logged out. Default is 'Log in to leave a Comment'. * @type string $before Text or HTML to add before the reply link. Default empty. * @type string $after Text or HTML to add after the reply link. Default empty. * } * @param int|WP_Post $post Optional. Post ID or WP_Post object the comment is going to be displayed on. * Default current post. * @return string|bool|null Link to show comment form, if successful. False, if comments are closed. */ function get_post_reply_link($args = array(), $post = null) { $defaults = array( 'add_below' => 'post', 'respond_id' => 'respond', 'reply_text' => __('Leave a Comment'), 'login_text' => __('Log in to leave a Comment'), 'before' => '', 'after' => '', ); $args = wp_parse_args($args, $defaults); $add_below = $args['add_below']; $respond_id = $args['respond_id']; $reply_text = $args['reply_text']; $post = get_post($post); if ( ! comments_open( $post->ID ) ) { return false; } if ( get_option('comment_registration') && ! is_user_logged_in() ) { $link = '<a rel="nofollow" href="' . wp_login_url( get_permalink() ) . '">' . $args['login_text'] . '</a>'; } else { $link = "<a rel='nofollow' class='comment-reply-link' href='" . get_permalink($post->ID) . "#$respond_id' onclick='return addComment.moveForm(\"$add_below-$post->ID\", \"0\", \"$respond_id\", \"$post->ID\")'>$reply_text</a>"; } $formatted_link = $args['before'] . $link . $args['after']; /** * Filter the formatted post comments link HTML. * * @since 2.7.0 * * @param string $formatted The HTML-formatted post comments link. * @param int|WP_Post $post The post ID or WP_Post object. */ return apply_filters( 'post_comments_link', $formatted_link, $post ); } /** * Displays the HTML content for reply to post link. * * @since 2.7.0 * * @see get_post_reply_link() * * @param array $args Optional. Override default options, * @param int|WP_Post $post Post ID or WP_Post object the comment is going to be displayed on. * Default current post. * @return string|bool|null Link to show comment form, if successful. False, if comments are closed. */ function post_reply_link($args = array(), $post = null) { echo get_post_reply_link($args, $post); } /** * Retrieve HTML content for cancel comment reply link. * * @since 2.7.0 * * @param string $text Optional. Text to display for cancel reply link. Default empty. */ function get_cancel_comment_reply_link( $text = '' ) { if ( empty($text) ) $text = __('Click here to cancel reply.'); $style = isset($_GET['replytocom']) ? '' : ' style="display:none;"'; $link = esc_html( remove_query_arg('replytocom') ) . '#respond'; $formatted_link = '<a rel="nofollow" id="cancel-comment-reply-link" href="' . $link . 
'"' . $style . '>' . $text . '</a>'; /** * Filter the cancel comment reply link HTML. * * @since 2.7.0 * * @param string $formatted_link The HTML-formatted cancel comment reply link. * @param string $link Cancel comment reply link URL. * @param string $text Cancel comment reply link text. */ return apply_filters( 'cancel_comment_reply_link', $formatted_link, $link, $text ); } /** * Display HTML content for cancel comment reply link. * * @since 2.7.0 * * @param string $text Optional. Text to display for cancel reply link. Default empty. */ function cancel_comment_reply_link( $text = '' ) { echo get_cancel_comment_reply_link($text); } /** * Retrieve hidden input HTML for replying to comments. * * @since 3.0.0 * * @param int $id Optional. Post ID. Default current post ID. * @return string Hidden input HTML for replying to comments */ function get_comment_id_fields( $id = 0 ) { if ( empty( $id ) ) $id = get_the_ID(); $replytoid = isset($_GET['replytocom']) ? (int) $_GET['replytocom'] : 0; $result = "<input type='hidden' name='comment_post_ID' value='$id' id='comment_post_ID' />\n"; $result .= "<input type='hidden' name='comment_parent' id='comment_parent' value='$replytoid' />\n"; /** * Filter the returned comment id fields. * * @since 3.0.0 * * @param string $result The HTML-formatted hidden id field comment elements. * @param int $id The post ID. * @param int $replytoid The id of the comment being replied to. */ return apply_filters( 'comment_id_fields', $result, $id, $replytoid ); } /** * Output hidden input HTML for replying to comments. * * @since 2.7.0 * * @param int $id Optional. Post ID. Default current post ID. */ function comment_id_fields( $id = 0 ) { echo get_comment_id_fields( $id ); } /** * Display text based on comment reply status. * * Only affects users with Javascript disabled. * * @since 2.7.0 * * @param string $noreplytext Optional. Text to display when not replying to a comment. * Default false. * @param string $replytext Optional. Text to display when replying to a comment. * Default false. Accepts "%s" for the author of the comment * being replied to. * @param string $linktoparent Optional. Boolean to control making the author's name a link * to their comment. Default true. */ function comment_form_title( $noreplytext = false, $replytext = false, $linktoparent = true ) { global $comment; if ( false === $noreplytext ) $noreplytext = __( 'Leave a Reply' ); if ( false === $replytext ) $replytext = __( 'Leave a Reply to %s' ); $replytoid = isset($_GET['replytocom']) ? (int) $_GET['replytocom'] : 0; if ( 0 == $replytoid ) echo $noreplytext; else { $comment = get_comment($replytoid); $author = ( $linktoparent ) ? '<a href="#comment-' . get_comment_ID() . '">' . get_comment_author() . '</a>' : get_comment_author(); printf( $replytext, $author ); } } /** * HTML comment list class. * * @uses Walker * @since 2.7.0 */ class Walker_Comment extends Walker { /** * What the class handles. * * @see Walker::$tree_type * * @since 2.7.0 * @var string */ public $tree_type = 'comment'; /** * DB fields to use. * * @see Walker::$db_fields * * @since 2.7.0 * @var array */ public $db_fields = array ('parent' => 'comment_parent', 'id' => 'comment_ID'); /** * Start the list before the elements are added. * * @see Walker::start_lvl() * * @since 2.7.0 * * @param string $output Passed by reference. Used to append additional content. * @param int $depth Depth of comment. * @param array $args Uses 'style' argument for type of HTML list. 
*/ public function start_lvl( &$output, $depth = 0, $args = array() ) { $GLOBALS['comment_depth'] = $depth + 1; switch ( $args['style'] ) { case 'div': break; case 'ol': $output .= '<ol class="children">' . "\n"; break; case 'ul': default: $output .= '<ul class="children">' . "\n"; break; } } /** * End the list of items after the elements are added. * * @see Walker::end_lvl() * * @since 2.7.0 * * @param string $output Passed by reference. Used to append additional content. * @param int $depth Depth of comment. * @param array $args Will only append content if style argument value is 'ol' or 'ul'. */ public function end_lvl( &$output, $depth = 0, $args = array() ) { $GLOBALS['comment_depth'] = $depth + 1; switch ( $args['style'] ) { case 'div': break; case 'ol': $output .= "</ol><!-- .children -->\n"; break; case 'ul': default: $output .= "</ul><!-- .children -->\n"; break; } } /** * Traverse elements to create list from elements. * * This function is designed to enhance Walker::display_element() to * display children of higher nesting levels than selected inline on * the highest depth level displayed. This prevents them being orphaned * at the end of the comment list. * * Example: max_depth = 2, with 5 levels of nested content. * 1 * 1.1 * 1.1.1 * 1.1.1.1 * 1.1.1.1.1 * 1.1.2 * 1.1.2.1 * 2 * 2.2 * * @see Walker::display_element() * @see wp_list_comments() * * @since 2.7.0 * * @param object $element Data object. * @param array $children_elements List of elements to continue traversing. * @param int $max_depth Max depth to traverse. * @param int $depth Depth of current element. * @param array $args An array of arguments. * @param string $output Passed by reference. Used to append additional content. * @return null Null on failure with no changes to parameters. */ public function display_element( $element, &$children_elements, $max_depth, $depth, $args, &$output ) { if ( !$element ) return; $id_field = $this->db_fields['id']; $id = $element->$id_field; parent::display_element( $element, $children_elements, $max_depth, $depth, $args, $output ); // If we're at the max depth, and the current element still has children, loop over those and display them at this level // This is to prevent them being orphaned to the end of the list. if ( $max_depth <= $depth + 1 && isset( $children_elements[$id]) ) { foreach ( $children_elements[ $id ] as $child ) $this->display_element( $child, $children_elements, $max_depth, $depth, $args, $output ); unset( $children_elements[ $id ] ); } } /** * Start the element output. * * @since 2.7.0 * * @see Walker::start_el() * @see wp_list_comments() * * @param string $output Passed by reference. Used to append additional content. * @param object $comment Comment data object. * @param int $depth Depth of comment in reference to parents. * @param array $args An array of arguments. 
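 *
 * For example (illustrative): when $args['callback'] is set, that callback is run inside
 * an output buffer and its markup is appended to $output; otherwise the comment is
 * rendered by ping(), html5_comment() or comment() depending on its type, the
 * 'short_ping' flag and the 'format' argument.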
*/ public function start_el( &$output, $comment, $depth = 0, $args = array(), $id = 0 ) { $depth++; $GLOBALS['comment_depth'] = $depth; $GLOBALS['comment'] = $comment; if ( !empty( $args['callback'] ) ) { ob_start(); call_user_func( $args['callback'], $comment, $args, $depth ); $output .= ob_get_clean(); return; } if ( ( 'pingback' == $comment->comment_type || 'trackback' == $comment->comment_type ) && $args['short_ping'] ) { ob_start(); $this->ping( $comment, $depth, $args ); $output .= ob_get_clean(); } elseif ( 'html5' === $args['format'] ) { ob_start(); $this->html5_comment( $comment, $depth, $args ); $output .= ob_get_clean(); } else { ob_start(); $this->comment( $comment, $depth, $args ); $output .= ob_get_clean(); } } /** * Ends the element output, if needed. * * @since 2.7.0 * * @see Walker::end_el() * @see wp_list_comments() * * @param string $output Passed by reference. Used to append additional content. * @param object $comment The comment object. Default current comment. * @param int $depth Depth of comment. * @param array $args An array of arguments. */ public function end_el( &$output, $comment, $depth = 0, $args = array() ) { if ( !empty( $args['end-callback'] ) ) { ob_start(); call_user_func( $args['end-callback'], $comment, $args, $depth ); $output .= ob_get_clean(); return; } if ( 'div' == $args['style'] ) $output .= "</div><!-- #comment-## -->\n"; else $output .= "</li><!-- #comment-## -->\n"; } /** * Output a pingback comment. * * @access protected * @since 3.6.0 * * @see wp_list_comments() * * @param object $comment The comment object. * @param int $depth Depth of comment. * @param array $args An array of arguments. */ protected function ping( $comment, $depth, $args ) { $tag = ( 'div' == $args['style'] ) ? 'div' : 'li'; ?> <<?php echo $tag; ?> id="comment-<?php comment_ID(); ?>" <?php comment_class(); ?>> <div class="comment-body"> <?php _e( 'Pingback:' ); ?> <?php comment_author_link(); ?> <?php edit_comment_link( __( 'Edit' ), '<span class="edit-link">', '</span>' ); ?> </div> <?php } /** * Output a single comment. * * @access protected * @since 3.6.0 * * @see wp_list_comments() * * @param object $comment Comment to display. * @param int $depth Depth of comment. * @param array $args An array of arguments. */ protected function comment( $comment, $depth, $args ) { if ( 'div' == $args['style'] ) { $tag = 'div'; $add_below = 'comment'; } else { $tag = 'li'; $add_below = 'div-comment'; } ?> <<?php echo $tag; ?> <?php comment_class( $this->has_children ? 'parent' : '' ); ?> id="comment-<?php comment_ID(); ?>"> <?php if ( 'div' != $args['style'] ) : ?> <div id="div-comment-<?php comment_ID(); ?>" class="comment-body"> <?php endif; ?> <div class="comment-author vcard"> <?php if ( 0 != $args['avatar_size'] ) echo get_avatar( $comment, $args['avatar_size'] ); ?> <?php printf( __( '<cite class="fn">%s</cite> <span class="says">says:</span>' ), get_comment_author_link() ); ?> </div> <?php if ( '0' == $comment->comment_approved ) : ?> <em class="comment-awaiting-moderation"><?php _e( 'Your comment is awaiting moderation.' 
) ?></em> <br /> <?php endif; ?> <div class="comment-meta commentmetadata"><a href="<?php echo esc_url( get_comment_link( $comment->comment_ID, $args ) ); ?>"> <?php /* translators: 1: date, 2: time */ printf( __( '%1$s at %2$s' ), get_comment_date(), get_comment_time() ); ?></a><?php edit_comment_link( __( '(Edit)' ), '&nbsp;&nbsp;', '' ); ?> </div> <?php comment_text( get_comment_id(), array_merge( $args, array( 'add_below' => $add_below, 'depth' => $depth, 'max_depth' => $args['max_depth'] ) ) ); ?> <div class="reply"> <?php comment_reply_link( array_merge( $args, array( 'add_below' => $add_below, 'depth' => $depth, 'max_depth' => $args['max_depth'] ) ) ); ?> </div> <?php if ( 'div' != $args['style'] ) : ?> </div> <?php endif; ?> <?php } /** * Output a comment in the HTML5 format. * * @access protected * @since 3.6.0 * * @see wp_list_comments() * * @param object $comment Comment to display. * @param int $depth Depth of comment. * @param array $args An array of arguments. */ protected function html5_comment( $comment, $depth, $args ) { $tag = ( 'div' === $args['style'] ) ? 'div' : 'li'; ?> <<?php echo $tag; ?> id="comment-<?php comment_ID(); ?>" <?php comment_class( $this->has_children ? 'parent' : '' ); ?>> <article id="div-comment-<?php comment_ID(); ?>" class="comment-body"> <footer class="comment-meta"> <div class="comment-author vcard"> <?php if ( 0 != $args['avatar_size'] ) echo get_avatar( $comment, $args['avatar_size'] ); ?> <?php printf( __( '%s <span class="says">says:</span>' ), sprintf( '<b class="fn">%s</b>', get_comment_author_link() ) ); ?> </div><!-- .comment-author --> <div class="comment-metadata"> <a href="<?php echo esc_url( get_comment_link( $comment->comment_ID, $args ) ); ?>"> <time datetime="<?php comment_time( 'c' ); ?>"> <?php printf( _x( '%1$s at %2$s', '1: date, 2: time' ), get_comment_date(), get_comment_time() ); ?> </time> </a> <?php edit_comment_link( __( 'Edit' ), '<span class="edit-link">', '</span>' ); ?> </div><!-- .comment-metadata --> <?php if ( '0' == $comment->comment_approved ) : ?> <p class="comment-awaiting-moderation"><?php _e( 'Your comment is awaiting moderation.' ); ?></p> <?php endif; ?> </footer><!-- .comment-meta --> <div class="comment-content"> <?php comment_text(); ?> </div><!-- .comment-content --> <div class="reply"> <?php comment_reply_link( array_merge( $args, array( 'add_below' => 'div-comment', 'depth' => $depth, 'max_depth' => $args['max_depth'] ) ) ); ?> </div><!-- .reply --> </article><!-- .comment-body --> <?php } } /** * List comments. * * Used in the comments.php template to list comments for a particular post. * * @since 2.7.0 * * @see WP_Query->comments * * @param string|array $args { * Optional. Formatting options. * * @type object $walker Instance of a Walker class to list comments. Default null. * @type int $max_depth The maximum comments depth. Default empty. * @type string $style The style of list ordering. Default 'ul'. Accepts 'ul', 'ol'. * @type string $callback Callback function to use. Default null. * @type string $end-callback Callback function to use at the end. Default null. * @type string $type Type of comments to list. * Default 'all'. Accepts 'all', 'comment', 'pingback', 'trackback', 'pings'. * @type int $page Page ID to list comments for. Default empty. * @type int $per_page Number of comments to list per page. Default empty. * @type int $avatar_size Height and width dimensions of the avatar size. Default 32. * @type string $reverse_top_level Ordering of the listed comments. Default null. 
Accepts 'desc', 'asc'. * @type bool $reverse_children Whether to reverse child comments in the list. Default null. * @type string $format How to format the comments list. * Default 'html5' if the theme supports it. Accepts 'html5', 'xhtml'. * @type bool $short_ping Whether to output short pings. Default false. * @type bool $echo Whether to echo the output or return it. Default true. * } * @param array $comments Optional. Array of comment objects. */ function wp_list_comments( $args = array(), $comments = null ) { global $wp_query, $comment_alt, $comment_depth, $comment_thread_alt, $overridden_cpage, $in_comment_loop; $in_comment_loop = true; $comment_alt = $comment_thread_alt = 0; $comment_depth = 1; $defaults = array( 'walker' => null, 'max_depth' => '', 'style' => 'ul', 'callback' => null, 'end-callback' => null, 'type' => 'all', 'page' => '', 'per_page' => '', 'avatar_size' => 32, 'reverse_top_level' => null, 'reverse_children' => '', 'format' => current_theme_supports( 'html5', 'comment-list' ) ? 'html5' : 'xhtml', 'short_ping' => false, 'echo' => true, ); $r = wp_parse_args( $args, $defaults ); /** * Filter the arguments used in retrieving the comment list. * * @since 4.0.0 * * @see wp_list_comments() * * @param array $r An array of arguments for displaying comments. */ $r = apply_filters( 'wp_list_comments_args', $r ); // Figure out what comments we'll be looping through ($_comments) if ( null !== $comments ) { $comments = (array) $comments; if ( empty($comments) ) return; if ( 'all' != $r['type'] ) { $comments_by_type = separate_comments($comments); if ( empty($comments_by_type[$r['type']]) ) return; $_comments = $comments_by_type[$r['type']]; } else { $_comments = $comments; } } else { if ( empty($wp_query->comments) ) return; if ( 'all' != $r['type'] ) { if ( empty($wp_query->comments_by_type) ) $wp_query->comments_by_type = separate_comments($wp_query->comments); if ( empty($wp_query->comments_by_type[$r['type']]) ) return; $_comments = $wp_query->comments_by_type[$r['type']]; } else { $_comments = $wp_query->comments; } } if ( '' === $r['per_page'] && get_option('page_comments') ) $r['per_page'] = get_query_var('comments_per_page'); if ( empty($r['per_page']) ) { $r['per_page'] = 0; $r['page'] = 0; } if ( '' === $r['max_depth'] ) { if ( get_option('thread_comments') ) $r['max_depth'] = get_option('thread_comments_depth'); else $r['max_depth'] = -1; } if ( '' === $r['page'] ) { if ( empty($overridden_cpage) ) { $r['page'] = get_query_var('cpage'); } else { $threaded = ( -1 != $r['max_depth'] ); $r['page'] = ( 'newest' == get_option('default_comments_page') ) ? get_comment_pages_count($_comments, $r['per_page'], $threaded) : 1; set_query_var( 'cpage', $r['page'] ); } } // Validation check $r['page'] = intval($r['page']); if ( 0 == $r['page'] && 0 != $r['per_page'] ) $r['page'] = 1; if ( null === $r['reverse_top_level'] ) $r['reverse_top_level'] = ( 'desc' == get_option('comment_order') ); if ( empty( $r['walker'] ) ) { $walker = new Walker_Comment; } else { $walker = $r['walker']; } $output = $walker->paged_walk( $_comments, $r['max_depth'], $r['page'], $r['per_page'], $r ); $wp_query->max_num_comment_pages = $walker->max_pages; $in_comment_loop = false; if ( $r['echo'] ) { echo $output; } else { return $output; } } /** * Output a complete commenting form for use within a template. 
* * Most strings and form fields may be controlled through the $args array passed * into the function, while you may also choose to use the comment_form_default_fields * filter to modify the array of default fields if you'd just like to add a new * one or remove a single field. All fields are also individually passed through * a filter of the form comment_form_field_$name where $name is the key used * in the array of fields. * * @since 3.0.0 * * @param array $args { * Optional. Default arguments and form fields to override. * * @type array $fields { * Default comment fields, filterable by default via the 'comment_form_default_fields' hook. * * @type string $author Comment author field HTML. * @type string $email Comment author email field HTML. * @type string $url Comment author URL field HTML. * } * @type string $comment_field The comment textarea field HTML. * @type string $must_log_in HTML element for a 'must be logged in to comment' message. * @type string $logged_in_as HTML element for a 'logged in as <user>' message. * @type string $comment_notes_before HTML element for a message displayed before the comment form. * Default 'Your email address will not be published.'. * @type string $comment_notes_after HTML element for a message displayed after the comment form. * Default 'You may use these HTML tags and attributes ...'. * @type string $id_form The comment form element id attribute. Default 'commentform'. * @type string $id_submit The comment submit element id attribute. Default 'submit'. * @type string $name_submit The comment submit element name attribute. Default 'submit'. * @type string $title_reply The translatable 'reply' button label. Default 'Leave a Reply'. * @type string $title_reply_to The translatable 'reply-to' button label. Default 'Leave a Reply to %s', * where %s is the author of the comment being replied to. * @type string $cancel_reply_link The translatable 'cancel reply' button label. Default 'Cancel reply'. * @type string $label_submit The translatable 'submit' button label. Default 'Post a comment'. * @type string $format The comment form format. Default 'xhtml'. Accepts 'xhtml', 'html5'. * } * @param int|WP_Post $post_id Post ID or WP_Post object to generate the form for. Default current post. */ function comment_form( $args = array(), $post_id = null ) { if ( null === $post_id ) $post_id = get_the_ID(); $commenter = wp_get_current_commenter(); $user = wp_get_current_user(); $user_identity = $user->exists() ? $user->display_name : ''; $args = wp_parse_args( $args ); if ( ! isset( $args['format'] ) ) $args['format'] = current_theme_supports( 'html5', 'comment-form' ) ? 'html5' : 'xhtml'; $req = get_option( 'require_name_email' ); $aria_req = ( $req ? " aria-required='true'" : '' ); $html5 = 'html5' === $args['format']; $fields = array( 'author' => '<p class="comment-form-author">' . '<label for="author">' . __( 'Name' ) . ( $req ? ' <span class="required">*</span>' : '' ) . '</label> ' . '<input id="author" name="author" type="text" value="' . esc_attr( $commenter['comment_author'] ) . '" size="30"' . $aria_req . ' /></p>', 'email' => '<p class="comment-form-email"><label for="email">' . __( 'Email' ) . ( $req ? ' <span class="required">*</span>' : '' ) . '</label> ' . '<input id="email" name="email" ' . ( $html5 ? 'type="email"' : 'type="text"' ) . ' value="' . esc_attr( $commenter['comment_author_email'] ) . '" size="30"' . $aria_req . ' /></p>', 'url' => '<p class="comment-form-url"><label for="url">' . __( 'Website' ) . '</label> ' . 
'<input id="url" name="url" ' . ( $html5 ? 'type="url"' : 'type="text"' ) . ' value="' . esc_attr( $commenter['comment_author_url'] ) . '" size="30" /></p>', ); $required_text = sprintf( ' ' . __('Required fields are marked %s'), '<span class="required">*</span>' ); /** * Filter the default comment form fields. * * @since 3.0.0 * * @param array $fields The default comment fields. */ $fields = apply_filters( 'comment_form_default_fields', $fields ); $defaults = array( 'fields' => $fields, 'comment_field' => '<p class="comment-form-comment"><label for="comment">' . _x( 'Comment', 'noun' ) . '</label> <textarea id="comment" name="comment" cols="45" rows="8" aria-required="true"></textarea></p>', /** This filter is documented in wp-includes/link-template.php */ 'must_log_in' => '<p class="must-log-in">' . sprintf( __( 'You must be <a href="%s">logged in</a> to post a comment.' ), wp_login_url( apply_filters( 'the_permalink', get_permalink( $post_id ) ) ) ) . '</p>', /** This filter is documented in wp-includes/link-template.php */ 'logged_in_as' => '<p class="logged-in-as">' . sprintf( __( 'Logged in as <a href="%1$s">%2$s</a>. <a href="%3$s" title="Log out of this account">Log out?</a>' ), get_edit_user_link(), $user_identity, wp_logout_url( apply_filters( 'the_permalink', get_permalink( $post_id ) ) ) ) . '</p>', 'comment_notes_before' => '<p class="comment-notes">' . __( 'Your email address will not be published.' ) . ( $req ? $required_text : '' ) . '</p>', 'comment_notes_after' => '<p class="form-allowed-tags">' . sprintf( __( 'You may use these <abbr title="HyperText Markup Language">HTML</abbr> tags and attributes: %s' ), ' <code>' . allowed_tags() . '</code>' ) . '</p>', 'id_form' => 'commentform', 'id_submit' => 'submit', 'name_submit' => 'submit', 'title_reply' => __( 'Leave a Reply' ), 'title_reply_to' => __( 'Leave a Reply to %s' ), 'cancel_reply_link' => __( 'Cancel reply' ), 'label_submit' => __( 'Post Comment' ), 'format' => 'xhtml', ); /** * Filter the comment form default arguments. * * Use 'comment_form_default_fields' to filter the comment fields. * * @since 3.0.0 * * @param array $defaults The default comment form arguments. */ $args = wp_parse_args( $args, apply_filters( 'comment_form_defaults', $defaults ) ); ?> <?php if ( comments_open( $post_id ) ) : ?> <?php /** * Fires before the comment form. * * @since 3.0.0 */ do_action( 'comment_form_before' ); ?> <div id="respond" class="comment-respond"> <h3 id="reply-title" class="comment-reply-title"><?php comment_form_title( $args['title_reply'], $args['title_reply_to'] ); ?> <small><?php cancel_comment_reply_link( $args['cancel_reply_link'] ); ?></small></h3> <?php if ( get_option( 'comment_registration' ) && !is_user_logged_in() ) : ?> <?php echo $args['must_log_in']; ?> <?php /** * Fires after the HTML-formatted 'must log in after' message in the comment form. * * @since 3.0.0 */ do_action( 'comment_form_must_log_in_after' ); ?> <?php else : ?> <form action="<?php echo site_url( '/wp-comments-post.php' ); ?>" method="post" id="<?php echo esc_attr( $args['id_form'] ); ?>" class="comment-form"<?php echo $html5 ? ' novalidate' : ''; ?>> <?php /** * Fires at the top of the comment form, inside the <form> tag. * * @since 3.0.0 */ do_action( 'comment_form_top' ); ?> <?php if ( is_user_logged_in() ) : ?> <?php /** * Filter the 'logged in' message for the comment form for display. * * @since 3.0.0 * * @param string $args_logged_in The logged-in-as HTML-formatted message. 
* @param array $commenter An array containing the comment author's * username, email, and URL. * @param string $user_identity If the commenter is a registered user, * the display name, blank otherwise. */ echo apply_filters( 'comment_form_logged_in', $args['logged_in_as'], $commenter, $user_identity ); ?> <?php /** * Fires after the is_user_logged_in() check in the comment form. * * @since 3.0.0 * * @param array $commenter An array containing the comment author's * username, email, and URL. * @param string $user_identity If the commenter is a registered user, * the display name, blank otherwise. */ do_action( 'comment_form_logged_in_after', $commenter, $user_identity ); ?> <?php else : ?> <?php echo $args['comment_notes_before']; ?> <?php /** * Fires before the comment fields in the comment form. * * @since 3.0.0 */ do_action( 'comment_form_before_fields' ); foreach ( (array) $args['fields'] as $name => $field ) { /** * Filter a comment form field for display. * * The dynamic portion of the filter hook, $name, refers to the name * of the comment form field. Such as 'author', 'email', or 'url'. * * @since 3.0.0 * * @param string $field The HTML-formatted output of the comment form field. */ echo apply_filters( "comment_form_field_{$name}", $field ) . "\n"; } /** * Fires after the comment fields in the comment form. * * @since 3.0.0 */ do_action( 'comment_form_after_fields' ); ?> <?php endif; ?> <?php /** * Filter the content of the comment textarea field for display. * * @since 3.0.0 * * @param string $args_comment_field The content of the comment textarea field. */ echo apply_filters( 'comment_form_field_comment', $args['comment_field'] ); ?> <?php echo $args['comment_notes_after']; ?> <p class="form-submit"> <input name="<?php echo esc_attr( $args['name_submit'] ); ?>" type="submit" id="<?php echo esc_attr( $args['id_submit'] ); ?>" value="<?php echo esc_attr( $args['label_submit'] ); ?>" /> <?php comment_id_fields( $post_id ); ?> </p> <?php /** * Fires at the bottom of the comment form, inside the closing </form> tag. * * @since 1.5.0 * * @param int $post_id The post ID. */ do_action( 'comment_form', $post_id ); ?> </form> <?php endif; ?> </div><!-- #respond --> <?php /** * Fires after the comment form. * * @since 3.0.0 */ do_action( 'comment_form_after' ); else : /** * Fires after the comment form if comments are closed. * * @since 3.0.0 */ do_action( 'comment_form_comments_closed' ); endif; }
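/*
 * Illustrative usage sketch (not part of this file; argument values such as the avatar
 * size and reply title are assumptions): a minimal comments.php template built from the
 * tags defined above might do no more than the following, inside a PHP block.
 *
 *     if ( have_comments() ) {
 *         // Print the localized comment count heading, e.g. "3 Comments".
 *         comments_number();
 *         // Walk the comment tree as an ordered list with small avatars and short pings.
 *         wp_list_comments( array( 'style' => 'ol', 'avatar_size' => 48, 'short_ping' => true ) );
 *         // Pagination links when page_comments is enabled.
 *         paginate_comments_links();
 *     }
 *     // Render the reply form with an assumed custom title.
 *     comment_form( array( 'title_reply' => __( 'Leave a comment' ) ) );
 */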
avinante-israel/zeus
wp-includes/comment-template.php
PHP
gpl-2.0
77,417
/* * INET 802.1Q VLAN * Ethernet-type device handling. * * Authors: Ben Greear <greearb@candelatech.com> * Please send support related email to: netdev@vger.kernel.org * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html * * Fixes: * Fix for packet capture - Nick Eggleston <nick@dccinc.com>; * Add HW acceleration hooks - David S. Miller <davem@redhat.com>; * Correct all the locking - David S. Miller <davem@redhat.com>; * Use hash table for VLAN groups - David S. Miller <davem@redhat.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/rculist.h> #include <net/p8022.h> #include <net/arp.h> #include <linux/rtnetlink.h> #include <linux/notifier.h> #include <net/rtnetlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <asm/uaccess.h> #include <linux/if_vlan.h> #include "vlan.h" #include "vlanproc.h" #define DRV_VERSION "1.8" /* Global VLAN variables */ int vlan_net_id __read_mostly; const char vlan_fullname[] = "802.1Q VLAN Support"; const char vlan_version[] = DRV_VERSION; /* End of global variables definitions. */ static int vlan_group_prealloc_vid(struct vlan_group *vg, __be16 vlan_proto, u16 vlan_id) { struct net_device **array; unsigned int pidx, vidx; unsigned int size; ASSERT_RTNL(); pidx = vlan_proto_idx(vlan_proto); vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN; array = vg->vlan_devices_arrays[pidx][vidx]; if (array != NULL) return 0; size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN; array = kzalloc(size, GFP_KERNEL); if (array == NULL) return -ENOBUFS; vg->vlan_devices_arrays[pidx][vidx] = array; return 0; } void unregister_vlan_dev(struct net_device *dev, struct list_head *head) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; struct vlan_info *vlan_info; struct vlan_group *grp; u16 vlan_id = vlan->vlan_id; ASSERT_RTNL(); vlan_info = rtnl_dereference(real_dev->vlan_info); BUG_ON(!vlan_info); grp = &vlan_info->grp; grp->nr_vlan_devs--; if (vlan->flags & VLAN_FLAG_MVRP) vlan_mvrp_request_leave(dev); if (vlan->flags & VLAN_FLAG_GVRP) vlan_gvrp_request_leave(dev); vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL); /* Because unregister_netdevice_queue() makes sure at least one rcu * grace period is respected before device freeing, * we dont need to call synchronize_net() here. */ unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(real_dev, dev); if (grp->nr_vlan_devs == 0) { vlan_mvrp_uninit_applicant(real_dev); vlan_gvrp_uninit_applicant(real_dev); } /* Take it out of our own structures, but be sure to interlock with * HW accelerating devices or SW vlan input packet processing if * VLAN is not 0 (leave it there for 802.1p). 
*/ if (vlan_id) vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); } int vlan_check_real_dev(struct net_device *real_dev, __be16 protocol, u16 vlan_id) { const char *name = real_dev->name; if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { pr_info("VLANs not supported on %s\n", name); return -EOPNOTSUPP; } if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) return -EEXIST; return 0; } int register_vlan_dev(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; u16 vlan_id = vlan->vlan_id; struct vlan_info *vlan_info; struct vlan_group *grp; int err; err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id); if (err) return err; vlan_info = rtnl_dereference(real_dev->vlan_info); /* vlan_info should be there now. vlan_vid_add took care of it */ BUG_ON(!vlan_info); grp = &vlan_info->grp; if (grp->nr_vlan_devs == 0) { err = vlan_gvrp_init_applicant(real_dev); if (err < 0) goto out_vid_del; err = vlan_mvrp_init_applicant(real_dev); if (err < 0) goto out_uninit_gvrp; } err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id); if (err < 0) goto out_uninit_mvrp; err = netdev_upper_dev_link(real_dev, dev); if (err) goto out_uninit_mvrp; err = register_netdevice(dev); if (err < 0) goto out_upper_dev_unlink; /* Account for reference in struct vlan_dev_priv */ dev_hold(real_dev); netif_stacked_transfer_operstate(real_dev, dev); linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */ /* So, got the sucker initialized, now lets place * it into our local structure. */ vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev); grp->nr_vlan_devs++; return 0; out_upper_dev_unlink: netdev_upper_dev_unlink(real_dev, dev); out_uninit_mvrp: if (grp->nr_vlan_devs == 0) vlan_mvrp_uninit_applicant(real_dev); out_uninit_gvrp: if (grp->nr_vlan_devs == 0) vlan_gvrp_uninit_applicant(real_dev); out_vid_del: vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); return err; } /* Attach a VLAN device to a mac address (ie Ethernet Card). * Returns 0 if the device was created or a negative error code otherwise. */ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) { struct net_device *new_dev; struct net *net = dev_net(real_dev); struct vlan_net *vn = net_generic(net, vlan_net_id); char name[IFNAMSIZ]; int err; if (vlan_id >= VLAN_VID_MASK) return -ERANGE; err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id); if (err < 0) return err; /* Gotta set up the fields for the device. */ switch (vn->name_type) { case VLAN_NAME_TYPE_RAW_PLUS_VID: /* name will look like: eth1.0005 */ snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id); break; case VLAN_NAME_TYPE_PLUS_VID_NO_PAD: /* Put our vlan.VID in the name. * Name will look like: vlan5 */ snprintf(name, IFNAMSIZ, "vlan%i", vlan_id); break; case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD: /* Put our vlan.VID in the name. * Name will look like: eth0.5 */ snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id); break; case VLAN_NAME_TYPE_PLUS_VID: /* Put our vlan.VID in the name. * Name will look like: vlan0005 */ default: snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id); } new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup); if (new_dev == NULL) return -ENOBUFS; dev_net_set(new_dev, net); /* need 4 bytes for extra VLAN header info, * hope the underlying device can handle it. 
*/ new_dev->mtu = real_dev->mtu; new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT); vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q); vlan_dev_priv(new_dev)->vlan_id = vlan_id; vlan_dev_priv(new_dev)->real_dev = real_dev; vlan_dev_priv(new_dev)->dent = NULL; vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR; new_dev->rtnl_link_ops = &vlan_link_ops; err = register_vlan_dev(new_dev); if (err < 0) goto out_free_newdev; return 0; out_free_newdev: free_netdev(new_dev); return err; } static void vlan_sync_address(struct net_device *dev, struct net_device *vlandev) { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); /* May be called without an actual change */ if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) return; /* vlan address was different from the old address and is equal to * the new address */ if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_del(dev, vlandev->dev_addr); /* vlan address was equal to the old address and is different from * the new address */ if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_add(dev, vlandev->dev_addr); memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); } static void vlan_transfer_features(struct net_device *dev, struct net_device *vlandev) { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); vlandev->gso_max_size = dev->gso_max_size; if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) vlandev->hard_header_len = dev->hard_header_len; else vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; #if IS_ENABLED(CONFIG_FCOE) vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; #endif netdev_update_features(vlandev); } static void __vlan_device_event(struct net_device *dev, unsigned long event) { switch (event) { case NETDEV_CHANGENAME: vlan_proc_rem_dev(dev); if (vlan_proc_add_dev(dev) < 0) pr_warn("failed to change proc name for %s\n", dev->name); break; case NETDEV_REGISTER: if (vlan_proc_add_dev(dev) < 0) pr_warn("failed to add proc entry for %s\n", dev->name); break; case NETDEV_UNREGISTER: vlan_proc_rem_dev(dev); break; } } static int vlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct vlan_group *grp; struct vlan_info *vlan_info; int i, flgs; struct net_device *vlandev; struct vlan_dev_priv *vlan; bool last = false; LIST_HEAD(list); if (is_vlan_dev(dev)) __vlan_device_event(dev, event); if ((event == NETDEV_UP) && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name); vlan_vid_add(dev, htons(ETH_P_8021Q), 0); } vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) goto out; grp = &vlan_info->grp; /* It is OK that we do not hold the group lock right now, * as we run under the RTNL lock. 
*/ switch (event) { case NETDEV_CHANGE: /* Propagate real device state to vlan devices */ vlan_group_for_each_dev(grp, i, vlandev) netif_stacked_transfer_operstate(dev, vlandev); break; case NETDEV_CHANGEADDR: /* Adjust unicast filters on underlying device */ vlan_group_for_each_dev(grp, i, vlandev) { flgs = vlandev->flags; if (!(flgs & IFF_UP)) continue; vlan_sync_address(dev, vlandev); } break; case NETDEV_CHANGEMTU: vlan_group_for_each_dev(grp, i, vlandev) { if (vlandev->mtu <= dev->mtu) continue; dev_set_mtu(vlandev, dev->mtu); } break; case NETDEV_FEAT_CHANGE: /* Propagate device features to underlying device */ vlan_group_for_each_dev(grp, i, vlandev) vlan_transfer_features(dev, vlandev); break; case NETDEV_DOWN: if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) vlan_vid_del(dev, htons(ETH_P_8021Q), 0); /* Put all VLANs for this dev in the down state too. */ vlan_group_for_each_dev(grp, i, vlandev) { flgs = vlandev->flags; if (!(flgs & IFF_UP)) continue; vlan = vlan_dev_priv(vlandev); if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) dev_change_flags(vlandev, flgs & ~IFF_UP); netif_stacked_transfer_operstate(dev, vlandev); } break; case NETDEV_UP: /* Put all VLANs for this dev in the up state too. */ vlan_group_for_each_dev(grp, i, vlandev) { flgs = vlandev->flags; if (flgs & IFF_UP) continue; vlan = vlan_dev_priv(vlandev); if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) dev_change_flags(vlandev, flgs | IFF_UP); netif_stacked_transfer_operstate(dev, vlandev); } break; case NETDEV_UNREGISTER: /* twiddle thumbs on netns device moves */ if (dev->reg_state != NETREG_UNREGISTERING) break; vlan_group_for_each_dev(grp, i, vlandev) { /* removal of last vid destroys vlan_info, abort * afterwards */ if (vlan_info->nr_vids == 1) last = true; unregister_vlan_dev(vlandev, &list); if (last) break; } unregister_netdevice_many(&list); break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlaying device to change its type. */ if (vlan_uses_dev(dev)) return NOTIFY_BAD; break; case NETDEV_NOTIFY_PEERS: case NETDEV_BONDING_FAILOVER: /* Propagate to vlan devices */ vlan_group_for_each_dev(grp, i, vlandev) call_netdevice_notifiers(event, vlandev); break; } out: return NOTIFY_DONE; } static struct notifier_block vlan_notifier_block __read_mostly = { .notifier_call = vlan_device_event, }; /* * VLAN IOCTL handler. * o execute requested action or pass command to the device driver * arg is really a struct vlan_ioctl_args __user *. */ static int vlan_ioctl_handler(struct net *net, void __user *arg) { int err; struct vlan_ioctl_args args; struct net_device *dev = NULL; if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args))) return -EFAULT; /* Null terminate this sucker, just in case. 
*/ args.device1[23] = 0; args.u.device2[23] = 0; rtnl_lock(); switch (args.cmd) { case SET_VLAN_INGRESS_PRIORITY_CMD: case SET_VLAN_EGRESS_PRIORITY_CMD: case SET_VLAN_FLAG_CMD: case ADD_VLAN_CMD: case DEL_VLAN_CMD: case GET_VLAN_REALDEV_NAME_CMD: case GET_VLAN_VID_CMD: err = -ENODEV; dev = __dev_get_by_name(net, args.device1); if (!dev) goto out; err = -EINVAL; if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev)) goto out; } switch (args.cmd) { case SET_VLAN_INGRESS_PRIORITY_CMD: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; vlan_dev_set_ingress_priority(dev, args.u.skb_priority, args.vlan_qos); err = 0; break; case SET_VLAN_EGRESS_PRIORITY_CMD: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; err = vlan_dev_set_egress_priority(dev, args.u.skb_priority, args.vlan_qos); break; case SET_VLAN_FLAG_CMD: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; err = vlan_dev_change_flags(dev, args.vlan_qos ? args.u.flag : 0, args.u.flag); break; case SET_VLAN_NAME_TYPE_CMD: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; if ((args.u.name_type >= 0) && (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { struct vlan_net *vn; vn = net_generic(net, vlan_net_id); vn->name_type = args.u.name_type; err = 0; } else { err = -EINVAL; } break; case ADD_VLAN_CMD: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; err = register_vlan_device(dev, args.u.VID); break; case DEL_VLAN_CMD: err = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; unregister_vlan_dev(dev, NULL); err = 0; break; case GET_VLAN_REALDEV_NAME_CMD: err = 0; vlan_dev_get_realdev_name(dev, args.u.device2); if (copy_to_user(arg, &args, sizeof(struct vlan_ioctl_args))) err = -EFAULT; break; case GET_VLAN_VID_CMD: err = 0; args.u.VID = vlan_dev_vlan_id(dev); if (copy_to_user(arg, &args, sizeof(struct vlan_ioctl_args))) err = -EFAULT; break; default: err = -EOPNOTSUPP; break; } out: rtnl_unlock(); return err; } static int __net_init vlan_init_net(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); int err; vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD; err = vlan_proc_init(net); return err; } static void __net_exit vlan_exit_net(struct net *net) { vlan_proc_cleanup(net); } static struct pernet_operations vlan_net_ops = { .init = vlan_init_net, .exit = vlan_exit_net, .id = &vlan_net_id, .size = sizeof(struct vlan_net), }; static int __init vlan_proto_init(void) { int err; pr_info("%s v%s\n", vlan_fullname, vlan_version); err = register_pernet_subsys(&vlan_net_ops); if (err < 0) goto err0; err = register_netdevice_notifier(&vlan_notifier_block); if (err < 0) goto err2; err = vlan_gvrp_init(); if (err < 0) goto err3; err = vlan_mvrp_init(); if (err < 0) goto err4; err = vlan_netlink_init(); if (err < 0) goto err5; vlan_ioctl_set(vlan_ioctl_handler); return 0; err5: vlan_mvrp_uninit(); err4: vlan_gvrp_uninit(); err3: unregister_netdevice_notifier(&vlan_notifier_block); err2: unregister_pernet_subsys(&vlan_net_ops); err0: return err; } static void __exit vlan_cleanup_module(void) { vlan_ioctl_set(NULL); vlan_netlink_fini(); unregister_netdevice_notifier(&vlan_notifier_block); unregister_pernet_subsys(&vlan_net_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ vlan_mvrp_uninit(); vlan_gvrp_uninit(); } module_init(vlan_proto_init); module_exit(vlan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
Nyks45/Veno-M
net/8021q/vlan.c
C
gpl-2.0
16,649
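The vlan_ioctl_handler() in the entry above serves the classic SIOCSIFVLAN interface that tools such as vconfig drive from user space. As a rough, hedged illustration only (not part of the file above): the sketch below shows a caller adding an 802.1Q VLAN through that handler. The interface name "eth0" and VID 100 are placeholders, and CAP_NET_ADMIN is required for the call to succeed.

```c
/* Hedged sketch: exercising the SIOCSIFVLAN ioctl served by
 * vlan_ioctl_handler(). "eth0" and VID 100 are arbitrary examples. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if_vlan.h>   /* struct vlan_ioctl_args, ADD_VLAN_CMD */
#include <linux/sockios.h>   /* SIOCSIFVLAN */

int main(void)
{
	struct vlan_ioctl_args args;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Create eth0.100; the exact name depends on the configured name_type. */
	memset(&args, 0, sizeof(args));
	args.cmd = ADD_VLAN_CMD;
	strncpy(args.device1, "eth0", sizeof(args.device1) - 1);
	args.u.VID = 100;
	if (ioctl(fd, SIOCSIFVLAN, &args) < 0)
		perror("ADD_VLAN_CMD");

	close(fd);
	return 0;
}
```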
/*
 * Copyright (C) ST-Ericsson SA 2010
 *
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/platform_data/dma-ste-dma40.h>
#include <linux/platform_data/asoc-ux500-msp.h>

#include "ste-dma40-db8500.h"
#include "board-mop500.h"

static struct stedma40_chan_cfg msp0_dma_rx = {
	.high_priority = true,
	.dir = DMA_DEV_TO_MEM,
	.dev_type = DB8500_DMA_DEV31_MSP0_SLIM0_CH0,
};

static struct stedma40_chan_cfg msp0_dma_tx = {
	.high_priority = true,
	.dir = DMA_MEM_TO_DEV,
	.dev_type = DB8500_DMA_DEV31_MSP0_SLIM0_CH0,
};

struct msp_i2s_platform_data msp0_platform_data = {
	.id = 0,
	.msp_i2s_dma_rx = &msp0_dma_rx,
	.msp_i2s_dma_tx = &msp0_dma_tx,
};

static struct stedma40_chan_cfg msp1_dma_rx = {
	.high_priority = true,
	.dir = DMA_DEV_TO_MEM,
	.dev_type = DB8500_DMA_DEV30_MSP3,
};

static struct stedma40_chan_cfg msp1_dma_tx = {
	.high_priority = true,
	.dir = DMA_MEM_TO_DEV,
	.dev_type = DB8500_DMA_DEV30_MSP1,
};

struct msp_i2s_platform_data msp1_platform_data = {
	.id = 1,
	.msp_i2s_dma_rx = NULL,
	.msp_i2s_dma_tx = &msp1_dma_tx,
};

static struct stedma40_chan_cfg msp2_dma_rx = {
	.high_priority = true,
	.dir = DMA_DEV_TO_MEM,
	.dev_type = DB8500_DMA_DEV14_MSP2,
};

static struct stedma40_chan_cfg msp2_dma_tx = {
	.high_priority = true,
	.dir = DMA_MEM_TO_DEV,
	.dev_type = DB8500_DMA_DEV14_MSP2,
	.use_fixed_channel = true,
	.phy_channel = 1,
};

struct msp_i2s_platform_data msp2_platform_data = {
	.id = 2,
	.msp_i2s_dma_rx = &msp2_dma_rx,
	.msp_i2s_dma_tx = &msp2_dma_tx,
};

struct msp_i2s_platform_data msp3_platform_data = {
	.id = 3,
	.msp_i2s_dma_rx = &msp1_dma_rx,
	.msp_i2s_dma_tx = NULL,
};
bigzz/linux-stable
arch/arm/mach-ux500/board-mop500-audio.c
C
gpl-2.0
1,742
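The entry above only defines DMA channel configs and MSP I2S platform data; pairing them with actual devices happens elsewhere in the board code. Purely as a hedged illustration (the device name "ux500-msp-i2s" and the registration site are assumptions, not taken from this file), platform data of this shape is typically attached to a platform device roughly as follows:

```c
/* Hedged sketch only: where an msp_i2s_platform_data pointer normally ends
 * up. Device name and registration path are illustrative assumptions. */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/platform_data/asoc-ux500-msp.h>

extern struct msp_i2s_platform_data msp0_platform_data;

static struct platform_device example_msp0_device = {
	.name = "ux500-msp-i2s",          /* assumed driver name */
	.id   = 0,
	.dev  = {
		.platform_data = &msp0_platform_data,
	},
};

static int __init example_board_audio_init(void)
{
	/* Real board code registers MSP devices through its own helpers;
	 * this only shows where the platform_data pointer is hooked in. */
	return platform_device_register(&example_msp0_device);
}
```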
/* * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip * * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ /* * I2C driver for National Semiconductor LP3944 Funlight Chip * http://www.national.com/pf/LP/LP3944.html * * This helper chip can drive up to 8 leds, with two programmable DIM modes; * it could even be used as a gpio expander but this driver assumes it is used * as a led controller. * * The DIM modes are used to set _blink_ patterns for leds, the pattern is * specified supplying two parameters: * - period: from 0s to 1.6s * - duty cycle: percentage of the period the led is on, from 0 to 100 * * LP3944 can be found on Motorola A910 smartphone, where it drives the rgb * leds, the camera flash light and the displays backlights. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/leds-lp3944.h> /* Read Only Registers */ #define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */ #define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */ #define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */ #define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */ #define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */ #define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */ #define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */ #define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */ /* These registers are not used to control leds in LP3944, they can store * arbitrary values which the chip will ignore. */ #define LP3944_REG_REGISTER8 0x08 #define LP3944_REG_REGISTER9 0x09 #define LP3944_DIM0 0 #define LP3944_DIM1 1 /* period in ms */ #define LP3944_PERIOD_MIN 0 #define LP3944_PERIOD_MAX 1600 /* duty cycle is a percentage */ #define LP3944_DUTY_CYCLE_MIN 0 #define LP3944_DUTY_CYCLE_MAX 100 #define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev) /* Saved data */ struct lp3944_led_data { u8 id; enum lp3944_type type; enum lp3944_status status; struct led_classdev ldev; struct i2c_client *client; struct work_struct work; }; struct lp3944_data { struct mutex lock; struct i2c_client *client; struct lp3944_led_data leds[LP3944_LEDS_MAX]; }; static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value) { int tmp; tmp = i2c_smbus_read_byte_data(client, reg); if (tmp < 0) return tmp; *value = tmp; return 0; } static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } /** * Set the period for DIM status * * @client: the i2c client * @dim: either LP3944_DIM0 or LP3944_DIM1 * @period: period of a blink, that is a on/off cycle, expressed in ms. 
*/ static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period) { u8 psc_reg; u8 psc_value; int err; if (dim == LP3944_DIM0) psc_reg = LP3944_REG_PSC0; else if (dim == LP3944_DIM1) psc_reg = LP3944_REG_PSC1; else return -EINVAL; /* Convert period to Prescaler value */ if (period > LP3944_PERIOD_MAX) return -EINVAL; psc_value = (period * 255) / LP3944_PERIOD_MAX; err = lp3944_reg_write(client, psc_reg, psc_value); return err; } /** * Set the duty cycle for DIM status * * @client: the i2c client * @dim: either LP3944_DIM0 or LP3944_DIM1 * @duty_cycle: percentage of a period during which a led is ON */ static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim, u8 duty_cycle) { u8 pwm_reg; u8 pwm_value; int err; if (dim == LP3944_DIM0) pwm_reg = LP3944_REG_PWM0; else if (dim == LP3944_DIM1) pwm_reg = LP3944_REG_PWM1; else return -EINVAL; /* Convert duty cycle to PWM value */ if (duty_cycle > LP3944_DUTY_CYCLE_MAX) return -EINVAL; pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX; err = lp3944_reg_write(client, pwm_reg, pwm_value); return err; } /** * Set the led status * * @led: a lp3944_led_data structure * @status: one of LP3944_LED_STATUS_OFF * LP3944_LED_STATUS_ON * LP3944_LED_STATUS_DIM0 * LP3944_LED_STATUS_DIM1 */ static int lp3944_led_set(struct lp3944_led_data *led, u8 status) { struct lp3944_data *data = i2c_get_clientdata(led->client); u8 id = led->id; u8 reg; u8 val = 0; int err; dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n", __func__, led->ldev.name, status); switch (id) { case LP3944_LED0: case LP3944_LED1: case LP3944_LED2: case LP3944_LED3: reg = LP3944_REG_LS0; break; case LP3944_LED4: case LP3944_LED5: case LP3944_LED6: case LP3944_LED7: id -= LP3944_LED4; reg = LP3944_REG_LS1; break; default: return -EINVAL; } if (status > LP3944_LED_STATUS_DIM1) return -EINVAL; /* invert only 0 and 1, leave unchanged the other values, * remember we are abusing status to set blink patterns */ if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2) status = 1 - status; mutex_lock(&data->lock); lp3944_reg_read(led->client, reg, &val); val &= ~(LP3944_LED_STATUS_MASK << (id << 1)); val |= (status << (id << 1)); dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n", __func__, led->ldev.name, reg, id, status, val); /* set led status */ err = lp3944_reg_write(led->client, reg, val); mutex_unlock(&data->lock); return err; } static int lp3944_led_set_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct lp3944_led_data *led = ldev_to_led(led_cdev); u16 period; u8 duty_cycle; int err; /* units are in ms */ if (*delay_on + *delay_off > LP3944_PERIOD_MAX) return -EINVAL; if (*delay_on == 0 && *delay_off == 0) { /* Special case: the leds subsystem requires a default user * friendly blink pattern for the LED. Let's blink the led * slowly (1Hz). */ *delay_on = 500; *delay_off = 500; } period = (*delay_on) + (*delay_off); /* duty_cycle is the percentage of period during which the led is ON */ duty_cycle = 100 * (*delay_on) / period; /* invert duty cycle for inverted leds, this has the same effect of * swapping delay_on and delay_off */ if (led->type == LP3944_LED_TYPE_LED_INVERTED) duty_cycle = 100 - duty_cycle; /* NOTE: using always the first DIM mode, this means that all leds * will have the same blinking pattern. * * We could find a way later to have two leds blinking in hardware * with different patterns at the same time, falling back to software * control for the other ones. 
*/ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period); if (err) return err; err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle); if (err) return err; dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n", __func__); led->status = LP3944_LED_STATUS_DIM0; schedule_work(&led->work); return 0; } static void lp3944_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { struct lp3944_led_data *led = ldev_to_led(led_cdev); dev_dbg(&led->client->dev, "%s: %s, %d\n", __func__, led_cdev->name, brightness); led->status = brightness; schedule_work(&led->work); } static void lp3944_led_work(struct work_struct *work) { struct lp3944_led_data *led; led = container_of(work, struct lp3944_led_data, work); lp3944_led_set(led, led->status); } static int lp3944_configure(struct i2c_client *client, struct lp3944_data *data, struct lp3944_platform_data *pdata) { int i, err = 0; for (i = 0; i < pdata->leds_size; i++) { struct lp3944_led *pled = &pdata->leds[i]; struct lp3944_led_data *led = &data->leds[i]; led->client = client; led->id = i; switch (pled->type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led->type = pled->type; led->status = pled->status; led->ldev.name = pled->name; led->ldev.max_brightness = 1; led->ldev.brightness_set = lp3944_led_set_brightness; led->ldev.blink_set = lp3944_led_set_blink; led->ldev.flags = LED_CORE_SUSPENDRESUME; INIT_WORK(&led->work, lp3944_led_work); err = led_classdev_register(&client->dev, &led->ldev); if (err < 0) { dev_err(&client->dev, "couldn't register LED %s\n", led->ldev.name); goto exit; } /* to expose the default value to userspace */ led->ldev.brightness = led->status; /* Set the default led status */ err = lp3944_led_set(led, led->status); if (err < 0) { dev_err(&client->dev, "%s couldn't set STATUS %d\n", led->ldev.name, led->status); goto exit; } break; case LP3944_LED_TYPE_NONE: default: break; } } return 0; exit: if (i > 0) for (i = i - 1; i >= 0; i--) switch (pdata->leds[i].type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: default: break; } return err; } static int lp3944_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data; struct lp3944_data *data; int err; if (lp3944_pdata == NULL) { dev_err(&client->dev, "no platform data\n"); return -EINVAL; } /* Let's see whether this adapter can support what we need. 
*/ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "insufficient functionality!\n"); return -ENODEV; } data = devm_kzalloc(&client->dev, sizeof(struct lp3944_data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = client; i2c_set_clientdata(client, data); mutex_init(&data->lock); err = lp3944_configure(client, data, lp3944_pdata); if (err < 0) return err; dev_info(&client->dev, "lp3944 enabled\n"); return 0; } static int lp3944_remove(struct i2c_client *client) { struct lp3944_platform_data *pdata = client->dev.platform_data; struct lp3944_data *data = i2c_get_clientdata(client); int i; for (i = 0; i < pdata->leds_size; i++) switch (data->leds[i].type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: default: break; } return 0; } /* lp3944 i2c driver struct */ static const struct i2c_device_id lp3944_id[] = { {"lp3944", 0}, {} }; MODULE_DEVICE_TABLE(i2c, lp3944_id); static struct i2c_driver lp3944_driver = { .driver = { .name = "lp3944", }, .probe = lp3944_probe, .remove = lp3944_remove, .id_table = lp3944_id, }; module_i2c_driver(lp3944_driver); MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); MODULE_DESCRIPTION("LP3944 Fun Light Chip"); MODULE_LICENSE("GPL");
droidcore/KrakenTC
drivers/leds/leds-lp3944.c
C
gpl-2.0
11,146
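The two conversion helpers in the driver above, lp3944_dim_set_period() and lp3944_dim_set_dutycycle(), reduce to simple linear scaling into an 8-bit register. A minimal stand-alone sketch of just that arithmetic, using the same constants as the driver (period 0–1600 ms, duty cycle 0–100 %), is shown below; it is a user-space illustration, not driver code.

```c
/* Hedged sketch: the register-value scaling used by leds-lp3944,
 * reproduced stand-alone so the mapping is easy to check. */
#include <stdio.h>
#include <stdint.h>

#define LP3944_PERIOD_MAX     1600  /* ms */
#define LP3944_DUTY_CYCLE_MAX 100   /* percent */

static uint8_t period_to_psc(unsigned int period_ms)
{
	/* PSC register: 0 maps to 0 ms, 255 maps to 1.6 s. */
	return (uint8_t)((period_ms * 255) / LP3944_PERIOD_MAX);
}

static uint8_t duty_to_pwm(unsigned int duty_pct)
{
	/* PWM register: percentage of the period the LED is on. */
	return (uint8_t)((duty_pct * 255) / LP3944_DUTY_CYCLE_MAX);
}

int main(void)
{
	/* Example: 1 s blink with the LED on 25 % of the time. */
	printf("PSC=0x%02x PWM=0x%02x\n", period_to_psc(1000), duty_to_pwm(25));
	return 0;
}
```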
/* * Set up the VMAs to tell the VM about the vDSO. * Copyright 2007 Andi Kleen, SUSE Labs. * Subject to the GPL, v.2 */ #include <linux/mm.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/random.h> #include <linux/elf.h> #include <asm/vsyscall.h> #include <asm/vgtod.h> #include <asm/proto.h> #include <asm/vdso.h> unsigned int __read_mostly vdso_enabled = 1; extern char vdso_start[], vdso_end[]; extern unsigned short vdso_sync_cpuid; static struct page **vdso_pages; static unsigned vdso_size; static int __init init_vdso_vars(void) { int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; int i; vdso_size = npages << PAGE_SHIFT; vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); if (!vdso_pages) goto oom; for (i = 0; i < npages; i++) { struct page *p; p = alloc_page(GFP_KERNEL); if (!p) goto oom; vdso_pages[i] = p; copy_page(page_address(p), vdso_start + i*PAGE_SIZE); } return 0; oom: printk("Cannot allocate vdso\n"); vdso_enabled = 0; return -ENOMEM; } subsys_initcall(init_vdso_vars); struct linux_binprm; /* Put the vdso above the (randomized) stack with another randomized offset. This way there is no hole in the middle of address space. To save memory make sure it is still in the same PTE as the stack top. This doesn't give that many random bits */ static unsigned long vdso_addr(unsigned long start, unsigned len) { unsigned long addr, end; unsigned offset; end = (start + PMD_SIZE - 1) & PMD_MASK; if (end >= TASK_SIZE_MAX) end = TASK_SIZE_MAX; end -= len; /* This loses some more bits than a modulo, but is cheaper */ offset = get_random_int() & (PTRS_PER_PTE - 1); addr = start + (offset << PAGE_SHIFT); if (addr >= end) addr = end; return addr; } /* Setup a VMA at program startup for the vsyscall page. Not called for compat tasks */ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; int ret; if (!vdso_enabled) return 0; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack, vdso_size); addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } current->mm->context.vdso = (void *)addr; ret = install_special_mapping(mm, addr, vdso_size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, vdso_pages); if (ret) { current->mm->context.vdso = NULL; goto up_fail; } up_fail: up_write(&mm->mmap_sem); return ret; } static __init int vdso_setup(char *s) { vdso_enabled = simple_strtoul(s, NULL, 0); return 0; } __setup("vdso=", vdso_setup);
fat-tire/omap
arch/x86/vdso/vma.c
C
gpl-2.0
2,753
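vdso_addr() in the file above places the vDSO just past the (randomized) stack top, within the same 2 MB region so it can share a page-table page with the stack. Below is a hedged, stand-alone sketch of that address arithmetic with the usual x86-64 constants filled in as assumptions (4 KB pages, 2 MB PMDs, 512 PTEs per table); rand() stands in for the kernel's get_random_int().

```c
/* Hedged sketch: the placement math from vdso_addr(), with x86-64
 * constants assumed so it can run in user space for inspection. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PMD_SIZE      (1UL << 21)            /* 2 MB */
#define PMD_MASK      (~(PMD_SIZE - 1))
#define PTRS_PER_PTE  512
#define TASK_SIZE_MAX 0x00007ffffffff000UL   /* illustrative */

static unsigned long vdso_addr_demo(unsigned long start, unsigned long len)
{
	unsigned long end, addr, offset;

	/* Round the stack top up to the next 2 MB boundary... */
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	/* ...then pick a random page-aligned slot between start and end. */
	offset = (unsigned long)rand() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;
	return addr;
}

int main(void)
{
	unsigned long stack_top = 0x00007ffd12340000UL;  /* example value */

	printf("vdso at %#lx\n", vdso_addr_demo(stack_top, 2 * PAGE_SIZE));
	return 0;
}
```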
/* * cimax2.c * * CIMax2(R) SP2 driver in conjunction with NetUp Dual DVB-S2 CI card * * Copyright (C) 2009 NetUP Inc. * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru> * Copyright (C) 2009 Abylay Ospan <aospan@netup.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "cx23885.h" #include "cimax2.h" #include "dvb_ca_en50221.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 /**** Bit definitions for MC417_RWD and MC417_OEN registers *** bits 31-16 +-----------+ | Reserved | +-----------+ bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8 +-------+-------+-------+-------+-------+-------+-------+-------+ | WR# | RD# | | ACK# | ADHI | ADLO | CS1# | CS0# | +-------+-------+-------+-------+-------+-------+-------+-------+ bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0 +-------+-------+-------+-------+-------+-------+-------+-------+ | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0| +-------+-------+-------+-------+-------+-------+-------+-------+ ***/ /* MC417 */ #define NETUP_DATA 0x000000ff #define NETUP_WR 0x00008000 #define NETUP_RD 0x00004000 #define NETUP_ACK 0x00001000 #define NETUP_ADHI 0x00000800 #define NETUP_ADLO 0x00000400 #define NETUP_CS1 0x00000200 #define NETUP_CS0 0x00000100 #define NETUP_EN_ALL 0x00001000 #define NETUP_CTRL_OFF (NETUP_CS1 | NETUP_CS0 | NETUP_WR | NETUP_RD) #define NETUP_CI_CTL 0x04 #define NETUP_CI_RD 1 #define NETUP_IRQ_DETAM 0x1 #define NETUP_IRQ_IRQAM 0x4 static unsigned int ci_dbg; module_param(ci_dbg, int, 0644); MODULE_PARM_DESC(ci_dbg, "Enable CI debugging"); static unsigned int ci_irq_enable; module_param(ci_irq_enable, int, 0644); MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM"); #define ci_dbg_print(args...) \ do { \ if (ci_dbg) \ printk(KERN_DEBUG args); \ } while (0) #define ci_irq_flags() (ci_irq_enable ? 
NETUP_IRQ_IRQAM : 0) /* stores all private variables for communication with CI */ struct netup_ci_state { struct dvb_ca_en50221 ca; struct mutex ca_mutex; struct i2c_adapter *i2c_adap; u8 ci_i2c_addr; int status; struct work_struct work; void *priv; u8 current_irq_mode; int current_ci_flag; unsigned long next_status_checked_time; }; static int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg, u8 *buf, int len) { int ret; struct i2c_msg msg[] = { { .addr = addr, .flags = 0, .buf = &reg, .len = 1 }, { .addr = addr, .flags = I2C_M_RD, .buf = buf, .len = len } }; ret = i2c_transfer(i2c_adap, msg, 2); if (ret != 2) { ci_dbg_print("%s: i2c read error, Reg = 0x%02x, Status = %d\n", __func__, reg, ret); return -1; } ci_dbg_print("%s: i2c read Addr=0x%04x, Reg = 0x%02x, data = %02x\n", __func__, addr, reg, buf[0]); return 0; } static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg, u8 *buf, int len) { int ret; u8 buffer[MAX_XFER_SIZE]; struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = &buffer[0], .len = len + 1 }; if (1 + len > sizeof(buffer)) { printk(KERN_WARNING "%s: i2c wr reg=%04x: len=%d is too big!\n", KBUILD_MODNAME, reg, len); return -EINVAL; } buffer[0] = reg; memcpy(&buffer[1], buf, len); ret = i2c_transfer(i2c_adap, &msg, 1); if (ret != 1) { ci_dbg_print("%s: i2c write error, Reg=[0x%02x], Status=%d\n", __func__, reg, ret); return -1; } return 0; } static int netup_ci_get_mem(struct cx23885_dev *dev) { int mem; unsigned long timeout = jiffies + msecs_to_jiffies(1); for (;;) { mem = cx_read(MC417_RWD); if ((mem & NETUP_ACK) == 0) break; if (time_after(jiffies, timeout)) break; udelay(1); } cx_set(MC417_RWD, NETUP_CTRL_OFF); return mem & 0xff; } static int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 flag, u8 read, int addr, u8 data) { struct netup_ci_state *state = en50221->data; struct cx23885_tsport *port = state->priv; struct cx23885_dev *dev = port->dev; u8 store; int mem; int ret; if (0 != slot) return -EINVAL; if (state->current_ci_flag != flag) { ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &store, 1); if (ret != 0) return ret; store &= ~0x0c; store |= flag; ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &store, 1); if (ret != 0) return ret; } state->current_ci_flag = flag; mutex_lock(&dev->gpio_lock); /* write addr */ cx_write(MC417_OEN, NETUP_EN_ALL); cx_write(MC417_RWD, NETUP_CTRL_OFF | NETUP_ADLO | (0xff & addr)); cx_clear(MC417_RWD, NETUP_ADLO); cx_write(MC417_RWD, NETUP_CTRL_OFF | NETUP_ADHI | (0xff & (addr >> 8))); cx_clear(MC417_RWD, NETUP_ADHI); if (read) { /* data in */ cx_write(MC417_OEN, NETUP_EN_ALL | NETUP_DATA); } else /* data out */ cx_write(MC417_RWD, NETUP_CTRL_OFF | data); /* choose chip */ cx_clear(MC417_RWD, (state->ci_i2c_addr == 0x40) ? NETUP_CS0 : NETUP_CS1); /* read/write */ cx_clear(MC417_RWD, (read) ? NETUP_RD : NETUP_WR); mem = netup_ci_get_mem(dev); mutex_unlock(&dev->gpio_lock); if (!read) if (mem < 0) return -EREMOTEIO; ci_dbg_print("%s: %s: chipaddr=[0x%x] addr=[0x%02x], %s=%x\n", __func__, (read) ? "read" : "write", state->ci_i2c_addr, addr, (flag == NETUP_CI_CTL) ? "ctl" : "mem", (read) ? 
mem : data); if (read) return mem; return 0; } int netup_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221, int slot, int addr) { return netup_ci_op_cam(en50221, slot, 0, NETUP_CI_RD, addr, 0); } int netup_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221, int slot, int addr, u8 data) { return netup_ci_op_cam(en50221, slot, 0, 0, addr, data); } int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr) { return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL, NETUP_CI_RD, addr, 0); } int netup_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr, u8 data) { return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL, 0, addr, data); } int netup_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot) { struct netup_ci_state *state = en50221->data; u8 buf = 0x80; int ret; if (0 != slot) return -EINVAL; udelay(500); ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf, 1); if (ret != 0) return ret; udelay(500); buf = 0x00; ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf, 1); msleep(1000); dvb_ca_en50221_camready_irq(&state->ca, 0); return 0; } int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot) { /* not implemented */ return 0; } static int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode) { struct netup_ci_state *state = en50221->data; int ret; if (irq_mode == state->current_irq_mode) return 0; ci_dbg_print("%s: chipaddr=[0x%x] setting ci IRQ to [0x%x] \n", __func__, state->ci_i2c_addr, irq_mode); ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0x1b, &irq_mode, 1); if (ret != 0) return ret; state->current_irq_mode = irq_mode; return 0; } int netup_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot) { struct netup_ci_state *state = en50221->data; u8 buf; if (0 != slot) return -EINVAL; netup_read_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf, 1); buf |= 0x60; return netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf, 1); } /* work handler */ static void netup_read_ci_status(struct work_struct *work) { struct netup_ci_state *state = container_of(work, struct netup_ci_state, work); u8 buf[33]; int ret; /* CAM module IRQ processing. fast operation */ dvb_ca_en50221_frda_irq(&state->ca, 0); /* CAM module INSERT/REMOVE processing. 
slow operation because of i2c * transfers */ if (time_after(jiffies, state->next_status_checked_time) || !state->status) { ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &buf[0], 33); state->next_status_checked_time = jiffies + msecs_to_jiffies(1000); if (ret != 0) return; ci_dbg_print("%s: Slot Status Addr=[0x%04x], " "Reg=[0x%02x], data=%02x, " "TS config = %02x\n", __func__, state->ci_i2c_addr, 0, buf[0], buf[0]); if (buf[0] & 1) state->status = DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; else state->status = 0; } } /* CI irq handler */ int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status) { struct cx23885_tsport *port = NULL; struct netup_ci_state *state = NULL; ci_dbg_print("%s:\n", __func__); if (0 == (pci_status & (PCI_MSK_GPIO0 | PCI_MSK_GPIO1))) return 0; if (pci_status & PCI_MSK_GPIO0) { port = &dev->ts1; state = port->port_priv; schedule_work(&state->work); ci_dbg_print("%s: Wakeup CI0\n", __func__); } if (pci_status & PCI_MSK_GPIO1) { port = &dev->ts2; state = port->port_priv; schedule_work(&state->work); ci_dbg_print("%s: Wakeup CI1\n", __func__); } return 1; } int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open) { struct netup_ci_state *state = en50221->data; if (0 != slot) return -EINVAL; netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | ci_irq_flags()) : NETUP_IRQ_DETAM); return state->status; } int netup_ci_init(struct cx23885_tsport *port) { struct netup_ci_state *state; u8 cimax_init[34] = { 0x00, /* module A control*/ 0x00, /* auto select mask high A */ 0x00, /* auto select mask low A */ 0x00, /* auto select pattern high A */ 0x00, /* auto select pattern low A */ 0x44, /* memory access time A */ 0x00, /* invert input A */ 0x00, /* RFU */ 0x00, /* RFU */ 0x00, /* module B control*/ 0x00, /* auto select mask high B */ 0x00, /* auto select mask low B */ 0x00, /* auto select pattern high B */ 0x00, /* auto select pattern low B */ 0x44, /* memory access time B */ 0x00, /* invert input B */ 0x00, /* RFU */ 0x00, /* RFU */ 0x00, /* auto select mask high Ext */ 0x00, /* auto select mask low Ext */ 0x00, /* auto select pattern high Ext */ 0x00, /* auto select pattern low Ext */ 0x00, /* RFU */ 0x02, /* destination - module A */ 0x01, /* power on (use it like store place) */ 0x00, /* RFU */ 0x00, /* int status read only */ ci_irq_flags() | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */ 0x05, /* EXTINT=active-high, INT=push-pull */ 0x00, /* USCG1 */ 0x04, /* ack active low */ 0x00, /* LOCK = 0 */ 0x33, /* serial mode, rising in, rising out, MSB first*/ 0x31, /* synchronization */ }; int ret; ci_dbg_print("%s\n", __func__); state = kzalloc(sizeof(struct netup_ci_state), GFP_KERNEL); if (!state) { ci_dbg_print("%s: Unable create CI structure!\n", __func__); ret = -ENOMEM; goto err; } port->port_priv = state; switch (port->nr) { case 1: state->ci_i2c_addr = 0x40; break; case 2: state->ci_i2c_addr = 0x41; break; } state->i2c_adap = &port->dev->i2c_bus[0].i2c_adap; state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = netup_ci_read_attribute_mem; state->ca.write_attribute_mem = netup_ci_write_attribute_mem; state->ca.read_cam_control = netup_ci_read_cam_ctl; state->ca.write_cam_control = netup_ci_write_cam_ctl; state->ca.slot_reset = netup_ci_slot_reset; state->ca.slot_shutdown = netup_ci_slot_shutdown; state->ca.slot_ts_enable = netup_ci_slot_ts_ctl; state->ca.poll_slot_status = netup_poll_ci_slot_status; state->ca.data = state; state->priv = port; state->current_irq_mode = ci_irq_flags() | 
NETUP_IRQ_DETAM; ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0, &cimax_init[0], 34); /* lock registers */ ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0x1f, &cimax_init[0x18], 1); /* power on slots */ ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr, 0x18, &cimax_init[0x18], 1); if (0 != ret) goto err; ret = dvb_ca_en50221_init(&port->frontends.adapter, &state->ca, /* flags */ 0, /* n_slots */ 1); if (0 != ret) goto err; INIT_WORK(&state->work, netup_read_ci_status); schedule_work(&state->work); ci_dbg_print("%s: CI initialized!\n", __func__); return 0; err: ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret); kfree(state); return ret; } void netup_ci_exit(struct cx23885_tsport *port) { struct netup_ci_state *state; if (NULL == port) return; state = (struct netup_ci_state *)port->port_priv; if (NULL == state) return; if (NULL == state->ca.data) return; dvb_ca_en50221_release(&state->ca); kfree(state); }
jeboo/kernel-msm
drivers/media/pci/cx23885/cimax2.c
C
gpl-2.0
13,287
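netup_read_i2c() in the driver above uses the standard two-message pattern: write the register index, then read the data back in one combined transfer. For reference only, the same pattern expressed from user space through /dev/i2c-N looks roughly like the sketch below; the bus number, chip address and register are placeholders, and the real driver of course performs this in-kernel through the cx23885 i2c adapter.

```c
/* Hedged sketch: the write-register-then-read pattern of netup_read_i2c(),
 * expressed as a user-space I2C_RDWR transfer. Bus, chip address and
 * register values are placeholders, not taken from the driver. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int read_reg(int fd, unsigned char addr, unsigned char reg,
		    unsigned char *buf, unsigned short len)
{
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .len = 1,   .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = len, .buf = buf  },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
}

int main(void)
{
	unsigned char status;
	int fd = open("/dev/i2c-0", O_RDWR);   /* placeholder bus */

	if (fd < 0)
		return 1;
	if (read_reg(fd, 0x40, 0x00, &status, 1) == 0)
		printf("reg 0x00 = 0x%02x\n", status);
	close(fd);
	return 0;
}
```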
/********************************************************************* * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information *********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/ratelimit.h> #include <linux/string.h> #include <linux/interrupt.h> #include <net/dst.h> #ifdef CONFIG_XFRM #include <linux/xfrm.h> #include <net/xfrm.h> #endif /* CONFIG_XFRM */ #include <linux/atomic.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "octeon-ethernet.h" #include "ethernet-tx.h" #include "ethernet-util.h" #include <asm/octeon/cvmx-wqe.h> #include <asm/octeon/cvmx-fau.h> #include <asm/octeon/cvmx-pip.h> #include <asm/octeon/cvmx-pko.h> #include <asm/octeon/cvmx-helper.h> #include <asm/octeon/cvmx-gmxx-defs.h> #define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb)) /* * You can define GET_SKBUFF_QOS() to override how the skbuff output * function determines which output queue is used. The default * implementation always uses the base queue for the port. If, for * example, you wanted to use the skb->priority field, define * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority) */ #ifndef GET_SKBUFF_QOS #define GET_SKBUFF_QOS(skb) 0 #endif static void cvm_oct_tx_do_cleanup(unsigned long arg); static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0); /* Maximum number of SKBs to try to free per xmit packet. */ #define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2) static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau) { int32_t undo; undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; if (undo > 0) cvmx_fau_atomic_add32(fau, -undo); skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? 
MAX_SKB_TO_FREE : -skb_to_free; return skb_to_free; } static void cvm_oct_kick_tx_poll_watchdog(void) { union cvmx_ciu_timx ciu_timx; ciu_timx.u64 = 0; ciu_timx.s.one_shot = 1; ciu_timx.s.len = cvm_oct_tx_poll_interval; cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64); } void cvm_oct_free_tx_skbs(struct net_device *dev) { int32_t skb_to_free; int qos, queues_per_port; int total_freed = 0; int total_remaining = 0; unsigned long flags; struct octeon_ethernet *priv = netdev_priv(dev); queues_per_port = cvmx_pko_get_num_queues(priv->port); /* Drain any pending packets in the free list */ for (qos = 0; qos < queues_per_port; qos++) { if (skb_queue_len(&priv->tx_free_list[qos]) == 0) continue; skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE); skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); total_freed += skb_to_free; if (skb_to_free > 0) { struct sk_buff *to_free_list = NULL; spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); while (skb_to_free > 0) { struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); t->next = to_free_list; to_free_list = t; skb_to_free--; } spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); /* Do the actual freeing outside of the lock. */ while (to_free_list) { struct sk_buff *t = to_free_list; to_free_list = to_free_list->next; dev_kfree_skb_any(t); } } total_remaining += skb_queue_len(&priv->tx_free_list[qos]); } if (total_freed >= 0 && netif_queue_stopped(dev)) netif_wake_queue(dev); if (total_remaining) cvm_oct_kick_tx_poll_watchdog(); } /** * cvm_oct_xmit - transmit a packet * @skb: Packet to send * @dev: Device info structure * * Returns Always returns NETDEV_TX_OK */ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) { cvmx_pko_command_word0_t pko_command; union cvmx_buf_ptr hw_buffer; uint64_t old_scratch; uint64_t old_scratch2; int qos; int i; enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type; struct octeon_ethernet *priv = netdev_priv(dev); struct sk_buff *to_free_list; int32_t skb_to_free; int32_t buffers_to_free; u32 total_to_clean; unsigned long flags; #if REUSE_SKBUFFS_WITHOUT_FREE unsigned char *fpa_head; #endif /* * Prefetch the private data structure. It is larger than the * one cache line. */ prefetch(priv); /* * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to * completely remove "qos" in the event neither interface * supports multiple queues per port. */ if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) || (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) { qos = GET_SKBUFF_QOS(skb); if (qos <= 0) qos = 0; else if (qos >= cvmx_pko_get_num_queues(priv->port)) qos = 0; } else qos = 0; if (USE_ASYNC_IOBDMA) { /* Save scratch in case userspace is using it */ CVMX_SYNCIOBDMA; old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); /* * Fetch and increment the number of packets to be * freed. */ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8, FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, priv->fau + qos * 4, MAX_SKB_TO_FREE); } /* * We have space for 6 segment pointers, If there will be more * than that, we must linearize. 
*/ if (unlikely(skb_shinfo(skb)->nr_frags > 5)) { if (unlikely(__skb_linearize(skb))) { queue_type = QUEUE_DROP; if (USE_ASYNC_IOBDMA) { /* Get the number of skbuffs in use by the hardware */ CVMX_SYNCIOBDMA; skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); } else { /* Get the number of skbuffs in use by the hardware */ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, MAX_SKB_TO_FREE); } skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4); spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); goto skip_xmit; } } /* * The CN3XXX series of parts has an errata (GMX-401) which * causes the GMX block to hang if a collision occurs towards * the end of a <68 byte packet. As a workaround for this, we * pad packets to be 68 bytes whenever we are in half duplex * mode. We don't handle the case of having a small packet but * no room to add the padding. The kernel should always give * us at least a cache line */ if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) { union cvmx_gmxx_prtx_cfg gmx_prt_cfg; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); if (interface < 2) { /* We only need to pad packet in half duplex mode */ gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); if (gmx_prt_cfg.s.duplex == 0) { int add_bytes = 64 - skb->len; if ((skb_tail_pointer(skb) + add_bytes) <= skb_end_pointer(skb)) memset(__skb_put(skb, add_bytes), 0, add_bytes); } } } /* Build the PKO command */ pko_command.u64 = 0; pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ pko_command.s.segs = 1; pko_command.s.total_bytes = skb->len; pko_command.s.size0 = CVMX_FAU_OP_SIZE_32; pko_command.s.subone0 = 1; pko_command.s.dontfree = 1; /* Build the PKO buffer pointer */ hw_buffer.u64 = 0; if (skb_shinfo(skb)->nr_frags == 0) { hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); hw_buffer.s.pool = 0; hw_buffer.s.size = skb->len; } else { hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); hw_buffer.s.pool = 0; hw_buffer.s.size = skb_headlen(skb); CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset)); hw_buffer.s.size = fs->size; CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; } hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; pko_command.s.gather = 1; goto dont_put_skbuff_in_hw; } /* * See if we can put this skb in the FPA pool. Any strange * behavior from the Linux networking stack will most likely * be caused by a bug in the following code. If some field is * in use by the network stack and gets carried over when a * buffer is reused, bad things may happen. If in doubt and * you dont need the absolute best performance, disable the * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has * shown a 25% increase in performance under some loads. 
*/ #if REUSE_SKBUFFS_WITHOUT_FREE fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f); if (unlikely(skb->data < fpa_head)) { /* * printk("TX buffer beginning can't meet FPA * alignment constraints\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) { /* printk("TX buffer isn't large enough for the FPA\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_shared(skb))) { /* printk("TX buffer sharing data with someone else\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_cloned(skb))) { /* printk("TX buffer has been cloned\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_header_cloned(skb))) { /* printk("TX buffer header has been cloned\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb->destructor)) { /* printk("TX buffer has a destructor\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_shinfo(skb)->nr_frags)) { /* printk("TX buffer has fragments\n"); */ goto dont_put_skbuff_in_hw; } if (unlikely (skb->truesize != sizeof(*skb) + skb_end_offset(skb))) { /* printk("TX buffer truesize has been changed\n"); */ goto dont_put_skbuff_in_hw; } /* * We can use this buffer in the FPA. We don't need the FAU * update anymore */ pko_command.s.dontfree = 0; hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7); *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; /* * The skbuff will be reused without ever being freed. We must * cleanup a bunch of core things. */ dst_release(skb_dst(skb)); skb_dst_set(skb, NULL); #ifdef CONFIG_XFRM secpath_put(skb->sp); skb->sp = NULL; #endif nf_reset(skb); #ifdef CONFIG_NET_SCHED skb->tc_index = 0; #ifdef CONFIG_NET_CLS_ACT skb->tc_verd = 0; #endif /* CONFIG_NET_CLS_ACT */ #endif /* CONFIG_NET_SCHED */ #endif /* REUSE_SKBUFFS_WITHOUT_FREE */ dont_put_skbuff_in_hw: /* Check if we can use the hardware checksumming */ if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) && ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)) && ((ip_hdr(skb)->protocol == IPPROTO_TCP) || (ip_hdr(skb)->protocol == IPPROTO_UDP))) { /* Use hardware checksum calc */ pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; } if (USE_ASYNC_IOBDMA) { /* Get the number of skbuffs in use by the hardware */ CVMX_SYNCIOBDMA; skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); } else { /* Get the number of skbuffs in use by the hardware */ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, MAX_SKB_TO_FREE); buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); } skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); /* * If we're sending faster than the receive can free them then * don't do the HW free. */ if ((buffers_to_free < -100) && !pko_command.s.dontfree) pko_command.s.dontfree = 1; if (pko_command.s.dontfree) { queue_type = QUEUE_CORE; pko_command.s.reg0 = priv->fau+qos*4; } else { queue_type = QUEUE_HW; } if (USE_ASYNC_IOBDMA) cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1); spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); /* Drop this packet if we have too many already queued to the HW */ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { if (dev->tx_queue_len != 0) { /* Drop the lock when notifying the core. 
*/ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); netif_stop_queue(dev); spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); } else { /* If not using normal queueing. */ queue_type = QUEUE_DROP; goto skip_xmit; } } cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_NONE); /* Send the packet to the output queue */ if (unlikely(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_NONE))) { printk_ratelimited("%s: Failed to send the packet\n", dev->name); queue_type = QUEUE_DROP; } skip_xmit: to_free_list = NULL; switch (queue_type) { case QUEUE_DROP: skb->next = to_free_list; to_free_list = skb; priv->stats.tx_dropped++; break; case QUEUE_HW: cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); break; case QUEUE_CORE: __skb_queue_tail(&priv->tx_free_list[qos], skb); break; default: BUG(); } while (skb_to_free > 0) { struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); t->next = to_free_list; to_free_list = t; skb_to_free--; } spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); /* Do the actual freeing outside of the lock. */ while (to_free_list) { struct sk_buff *t = to_free_list; to_free_list = to_free_list->next; dev_kfree_skb_any(t); } if (USE_ASYNC_IOBDMA) { CVMX_SYNCIOBDMA; total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH); /* Restore the scratch area */ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); } else { total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1); } if (total_to_clean & 0x3ff) { /* * Schedule the cleanup tasklet every 1024 packets for * the pathological case of high traffic on one port * delaying clean up of packets on a different port * that is blocked waiting for the cleanup. */ tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); } cvm_oct_kick_tx_poll_watchdog(); return NETDEV_TX_OK; } /** * cvm_oct_xmit_pow - transmit a packet to the POW * @skb: Packet to send * @dev: Device info structure * Returns Always returns zero */ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); void *packet_buffer; void *copy_location; /* Get a work queue entry */ cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL); if (unlikely(work == NULL)) { printk_ratelimited("%s: Failed to allocate a work " "queue entry\n", dev->name); priv->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } /* Get a packet buffer */ packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL); if (unlikely(packet_buffer == NULL)) { printk_ratelimited("%s: Failed to allocate a packet buffer\n", dev->name); cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); priv->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } /* * Calculate where we need to copy the data to. We need to * leave 8 bytes for a next pointer (unused). We also need to * include any configure skip. Then we need to align the IP * packet src and dest into the same 64bit word. The below * calculation may add a little extra, but that doesn't * hurt. */ copy_location = packet_buffer + sizeof(uint64_t); copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6; /* * We have to copy the packet since whoever processes this * packet will free it to a hardware pool. We can't use the * trick of counting outstanding packets like in * cvm_oct_xmit. */ memcpy(copy_location, skb->data, skb->len); /* * Fill in some of the work queue fields. 
We may need to add * more if the software at the other end needs them. */ work->hw_chksum = skb->csum; work->len = skb->len; work->ipprt = priv->port; work->qos = priv->port & 0x7; work->grp = pow_send_group; work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE; work->tag = pow_send_group; /* FIXME */ /* Default to zero. Sets of zero later are commented out */ work->word2.u64 = 0; work->word2.s.bufs = 1; work->packet_ptr.u64 = 0; work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location); work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL; work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE; work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7; if (skb->protocol == htons(ETH_P_IP)) { work->word2.s.ip_offset = 14; #if 0 work->word2.s.vlan_valid = 0; /* FIXME */ work->word2.s.vlan_cfi = 0; /* FIXME */ work->word2.s.vlan_id = 0; /* FIXME */ work->word2.s.dec_ipcomp = 0; /* FIXME */ #endif work->word2.s.tcp_or_udp = (ip_hdr(skb)->protocol == IPPROTO_TCP) || (ip_hdr(skb)->protocol == IPPROTO_UDP); #if 0 /* FIXME */ work->word2.s.dec_ipsec = 0; /* We only support IPv4 right now */ work->word2.s.is_v6 = 0; /* Hardware would set to zero */ work->word2.s.software = 0; /* No error, packet is internal */ work->word2.s.L4_error = 0; #endif work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)); #if 0 /* Assume Linux is sending a good packet */ work->word2.s.IP_exc = 0; #endif work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST); work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST); #if 0 /* This is an IP packet */ work->word2.s.not_IP = 0; /* No error, packet is internal */ work->word2.s.rcv_error = 0; /* No error, packet is internal */ work->word2.s.err_code = 0; #endif /* * When copying the data, include 4 bytes of the * ethernet header to align the same way hardware * does. */ memcpy(work->packet_data, skb->data + 10, sizeof(work->packet_data)); } else { #if 0 work->word2.snoip.vlan_valid = 0; /* FIXME */ work->word2.snoip.vlan_cfi = 0; /* FIXME */ work->word2.snoip.vlan_id = 0; /* FIXME */ work->word2.snoip.software = 0; /* Hardware would set to zero */ #endif work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP); work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP); work->word2.snoip.is_bcast = (skb->pkt_type == PACKET_BROADCAST); work->word2.snoip.is_mcast = (skb->pkt_type == PACKET_MULTICAST); work->word2.snoip.not_IP = 1; /* IP was done up above */ #if 0 /* No error, packet is internal */ work->word2.snoip.rcv_error = 0; /* No error, packet is internal */ work->word2.snoip.err_code = 0; #endif memcpy(work->packet_data, skb->data, sizeof(work->packet_data)); } /* Submit the packet to the POW */ cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos, work->grp); priv->stats.tx_packets++; priv->stats.tx_bytes += skb->len; dev_kfree_skb(skb); return 0; } /** * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX. 
* @dev: Device being shutdown * */ void cvm_oct_tx_shutdown_dev(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); unsigned long flags; int qos; for (qos = 0; qos < 16; qos++) { spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); while (skb_queue_len(&priv->tx_free_list[qos])) dev_kfree_skb_any(__skb_dequeue (&priv->tx_free_list[qos])); spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); } } static void cvm_oct_tx_do_cleanup(unsigned long arg) { int port; for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { if (cvm_oct_device[port]) { struct net_device *dev = cvm_oct_device[port]; cvm_oct_free_tx_skbs(dev); } } } static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id) { /* Disable the interrupt. */ cvmx_write_csr(CVMX_CIU_TIMX(1), 0); /* Do the work in the tasklet. */ tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); return IRQ_HANDLED; } void cvm_oct_tx_initialize(void) { int i; /* Disable the interrupt. */ cvmx_write_csr(CVMX_CIU_TIMX(1), 0); /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */ i = request_irq(OCTEON_IRQ_TIMER1, cvm_oct_tx_cleanup_watchdog, 0, "Ethernet", cvm_oct_device); if (i) panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1); } void cvm_oct_tx_shutdown(void) { /* Free the interrupt handler */ free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device); }
varunchitre15/thunderzap_sprout
drivers/staging/octeon/ethernet-tx.c
C
gpl-2.0
21,555
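The REUSE_SKBUFFS_WITHOUT_FREE path in the record above only hands an skb to the FPA when the packet data sits past a suitably aligned fpa_head and the buffer is large enough; hw_buffer.s.back then records the distance from fpa_head to skb->data in 128-byte blocks. The following stand-alone sketch, using made-up addresses and no driver dependencies, reproduces just that arithmetic:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustration only (not driver code): recompute the FPA "back" field
 * from hypothetical skb->head / skb->data addresses.  fpa_head is
 * skb->head rounded down to a 128-byte boundary plus 256 bytes, and
 * "back" counts 128-byte blocks from fpa_head up to skb->data.
 */
int main(void)
{
	uintptr_t head = 0x100040;		/* hypothetical skb->head */
	uintptr_t data = head + 0x180;		/* hypothetical skb->data */
	uintptr_t fpa_head = head + 256 - (head & 0x7f);
	unsigned back = (unsigned)((data >> 7) - (fpa_head >> 7));

	printf("fpa_head = %#lx (128-byte aligned: %s), back = %u blocks\n",
	       (unsigned long)fpa_head, (fpa_head & 0x7f) ? "no" : "yes", back);
	return 0;
}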
/* * net/sched/sch_netem.c Network emulator * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License. * * Many of the algorithms and ideas for this came from * NIST Net which is not copyrighted. * * Authors: Stephen Hemminger <shemminger@osdl.org> * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro> */ #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include <linux/reciprocal_div.h> #include <net/netlink.h> #include <net/pkt_sched.h> #define VERSION "1.3" /* Network Emulation Queuing algorithm. ==================================== Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based Network Emulation Tool [2] Luigi Rizzo, DummyNet for FreeBSD ---------------------------------------------------------------- This started out as a simple way to delay outgoing packets to test TCP but has grown to include most of the functionality of a full blown network emulator like NISTnet. It can delay packets and add random jitter (and correlation). The random distribution can be loaded from a table as well to provide normal, Pareto, or experimental curves. Packet loss, duplication, and reordering can also be emulated. This qdisc does not do classification that can be handled in layering other disciplines. It does not need to do bandwidth control either since that can be handled by using token bucket or other rate control. Correlated Loss Generator models Added generation of correlated loss according to the "Gilbert-Elliot" model, a 4-state markov model. References: [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general and intuitive loss model for packet networks and its implementation in the Netem module in the Linux kernel", available in [1] Authors: Stefano Salsano <stefano.salsano at uniroma2.it Fabio Ludovici <fabio.ludovici at yahoo.it> */ struct netem_sched_data { /* internal t(ime)fifo qdisc uses sch->q and sch->limit */ /* optional qdisc for classful handling (NULL at netem init) */ struct Qdisc *qdisc; struct qdisc_watchdog watchdog; psched_tdiff_t latency; psched_tdiff_t jitter; u32 loss; u32 limit; u32 counter; u32 gap; u32 duplicate; u32 reorder; u32 corrupt; u32 rate; s32 packet_overhead; u32 cell_size; u32 cell_size_reciprocal; s32 cell_overhead; struct crndstate { u32 last; u32 rho; } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; struct disttable { u32 size; s16 table[0]; } *delay_dist; enum { CLG_RANDOM, CLG_4_STATES, CLG_GILB_ELL, } loss_model; /* Correlated Loss Generation models */ struct clgstate { /* state of the Markov chain */ u8 state; /* 4-states and Gilbert-Elliot models */ u32 a1; /* p13 for 4-states or p for GE */ u32 a2; /* p31 for 4-states or r for GE */ u32 a3; /* p32 for 4-states or h for GE */ u32 a4; /* p14 for 4-states or 1-k for GE */ u32 a5; /* p23 used only in 4-states */ } clg; }; /* Time stamp put into socket buffer control block * Only valid when skbs are in our internal t(ime)fifo queue. 
*/ struct netem_skb_cb { psched_time_t time_to_send; }; static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) { qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb)); return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; } /* init_crandom - initialize correlated random number generator * Use entropy source for initial seed. */ static void init_crandom(struct crndstate *state, unsigned long rho) { state->rho = rho; state->last = net_random(); } /* get_crandom - correlated random number generator * Next number depends on last value. * rho is scaled to avoid floating point. */ static u32 get_crandom(struct crndstate *state) { u64 value, rho; unsigned long answer; if (state->rho == 0) /* no correlation */ return net_random(); value = net_random(); rho = (u64)state->rho + 1; answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; state->last = answer; return answer; } /* loss_4state - 4-state model loss generator * Generates losses according to the 4-state Markov chain adopted in * the GI (General and Intuitive) loss model. */ static bool loss_4state(struct netem_sched_data *q) { struct clgstate *clg = &q->clg; u32 rnd = net_random(); /* * Makes a comparison between rnd and the transition * probabilities outgoing from the current state, then decides the * next state and if the next packet has to be transmitted or lost. * The four states correspond to: * 1 => successfully transmitted packets within a gap period * 4 => isolated losses within a gap period * 3 => lost packets within a burst period * 2 => successfully transmitted packets within a burst period */ switch (clg->state) { case 1: if (rnd < clg->a4) { clg->state = 4; return true; } else if (clg->a4 < rnd && rnd < clg->a1) { clg->state = 3; return true; } else if (clg->a1 < rnd) clg->state = 1; break; case 2: if (rnd < clg->a5) { clg->state = 3; return true; } else clg->state = 2; break; case 3: if (rnd < clg->a3) clg->state = 2; else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { clg->state = 1; return true; } else if (clg->a2 + clg->a3 < rnd) { clg->state = 3; return true; } break; case 4: clg->state = 1; break; } return false; } /* loss_gilb_ell - Gilbert-Elliot model loss generator * Generates losses according to the Gilbert-Elliot loss model or * its special cases (Gilbert or Simple Gilbert) * * Makes a comparison between random number and the transition * probabilities outgoing from the current state, then decides the * next state. A second random number is extracted and the comparison * with the loss probability of the current state decides if the next * packet will be transmitted or lost. 
*/ static bool loss_gilb_ell(struct netem_sched_data *q) { struct clgstate *clg = &q->clg; switch (clg->state) { case 1: if (net_random() < clg->a1) clg->state = 2; if (net_random() < clg->a4) return true; case 2: if (net_random() < clg->a2) clg->state = 1; if (clg->a3 > net_random()) return true; } return false; } static bool loss_event(struct netem_sched_data *q) { switch (q->loss_model) { case CLG_RANDOM: /* Random packet drop 0 => none, ~0 => all */ return q->loss && q->loss >= get_crandom(&q->loss_cor); case CLG_4_STATES: /* 4state loss model algorithm (used also for GI model) * Extracts a value from the markov 4 state loss generator, * if it is 1 drops a packet and if needed writes the event in * the kernel logs */ return loss_4state(q); case CLG_GILB_ELL: /* Gilbert-Elliot loss model algorithm * Extracts a value from the Gilbert-Elliot loss generator, * if it is 1 drops a packet and if needed writes the event in * the kernel logs */ return loss_gilb_ell(q); } return false; /* not reached */ } /* tabledist - return a pseudo-randomly distributed value with mean mu and * std deviation sigma. Uses table lookup to approximate the desired * distribution, and a uniformly-distributed pseudo-random source. */ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, struct crndstate *state, const struct disttable *dist) { psched_tdiff_t x; long t; u32 rnd; if (sigma == 0) return mu; rnd = get_crandom(state); /* default uniform distribution */ if (dist == NULL) return (rnd % (2*sigma)) - sigma + mu; t = dist->table[rnd % dist->size]; x = (sigma % NETEM_DIST_SCALE) * t; if (x >= 0) x += NETEM_DIST_SCALE/2; else x -= NETEM_DIST_SCALE/2; return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; } static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) { u64 ticks; len += q->packet_overhead; if (q->cell_size) { u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); if (len > cells * q->cell_size) /* extra cell needed for remainder */ cells++; len = cells * (q->cell_size + q->cell_overhead); } ticks = (u64)len * NSEC_PER_SEC; do_div(ticks, q->rate); return PSCHED_NS2TICKS(ticks); } static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) { struct sk_buff_head *list = &sch->q; psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; struct sk_buff *skb; if (likely(skb_queue_len(list) < sch->limit)) { skb = skb_peek_tail(list); /* Optimize for add at tail */ if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send)) return qdisc_enqueue_tail(nskb, sch); skb_queue_reverse_walk(list, skb) { if (tnext >= netem_skb_cb(skb)->time_to_send) break; } __skb_queue_after(list, skb, nskb); sch->qstats.backlog += qdisc_pkt_len(nskb); return NET_XMIT_SUCCESS; } return qdisc_reshape_fail(nskb, sch); } /* * Insert one skb into qdisc. * Note: parent depends on return value to account for queue length. * NET_XMIT_DROP: queue length didn't change. * NET_XMIT_SUCCESS: one skb was queued. */ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); /* We don't fill cb now as skb_unshare() may invalidate it */ struct netem_skb_cb *cb; struct sk_buff *skb2; int ret; int count = 1; /* Random duplication */ if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) ++count; /* Drop packet? 
*/ if (loss_event(q)) --count; if (count == 0) { sch->qstats.drops++; kfree_skb(skb); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } skb_orphan(skb); /* * If we need to duplicate packet, then re-insert at top of the * qdisc tree, since parent queuer expects that only one * skb will be queued. */ if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { struct Qdisc *rootq = qdisc_root(sch); u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ q->duplicate = 0; qdisc_enqueue_root(skb2, rootq); q->duplicate = dupsave; } /* * Randomized packet corruption. * Make copy if needed since we are modifying * If packet is going to be hardware checksummed, then * do it now in software before we mangle it. */ if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))) return qdisc_drop(skb, sch); skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); } cb = netem_skb_cb(skb); if (q->gap == 0 || /* not doing reordering */ q->counter < q->gap - 1 || /* inside last reordering gap */ q->reorder < get_crandom(&q->reorder_cor)) { psched_time_t now; psched_tdiff_t delay; delay = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist); now = psched_get_time(); if (q->rate) { struct sk_buff_head *list = &sch->q; delay += packet_len_2_sched_time(skb->len, q); if (!skb_queue_empty(list)) { /* * Last packet in queue is reference point (now). * First packet in queue is already in flight, * calculate this time bonus and substract * from delay. */ delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; now = netem_skb_cb(skb_peek_tail(list))->time_to_send; } } cb->time_to_send = now + delay; ++q->counter; ret = tfifo_enqueue(skb, sch); } else { /* * Do re-ordering by putting one out of N packets at the front * of the queue. */ cb->time_to_send = psched_get_time(); q->counter = 0; __skb_queue_head(&sch->q, skb); sch->qstats.backlog += qdisc_pkt_len(skb); sch->qstats.requeues++; ret = NET_XMIT_SUCCESS; } if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { sch->qstats.drops++; return ret; } } return NET_XMIT_SUCCESS; } static unsigned int netem_drop(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); unsigned int len; len = qdisc_queue_drop(sch); if (!len && q->qdisc && q->qdisc->ops->drop) len = q->qdisc->ops->drop(q->qdisc); if (len) sch->qstats.drops++; return len; } static struct sk_buff *netem_dequeue(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; if (qdisc_is_throttled(sch)) return NULL; tfifo_dequeue: skb = qdisc_peek_head(sch); if (skb) { const struct netem_skb_cb *cb = netem_skb_cb(skb); /* if more time remaining? */ if (cb->time_to_send <= psched_get_time()) { __skb_unlink(skb, &sch->q); sch->qstats.backlog -= qdisc_pkt_len(skb); #ifdef CONFIG_NET_CLS_ACT /* * If it's at ingress let's pretend the delay is * from the network (tstamp will be updated). 
*/ if (G_TC_FROM(skb->tc_verd) & AT_INGRESS) skb->tstamp.tv64 = 0; #endif if (q->qdisc) { int err = qdisc_enqueue(skb, q->qdisc); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { sch->qstats.drops++; qdisc_tree_decrease_qlen(sch, 1); } } goto tfifo_dequeue; } deliver: qdisc_unthrottled(sch); qdisc_bstats_update(sch, skb); return skb; } if (q->qdisc) { skb = q->qdisc->ops->dequeue(q->qdisc); if (skb) goto deliver; } qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send); } if (q->qdisc) { skb = q->qdisc->ops->dequeue(q->qdisc); if (skb) goto deliver; } return NULL; } static void netem_reset(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); qdisc_reset_queue(sch); if (q->qdisc) qdisc_reset(q->qdisc); qdisc_watchdog_cancel(&q->watchdog); } static void dist_free(struct disttable *d) { if (d) { if (is_vmalloc_addr(d)) vfree(d); else kfree(d); } } /* * Distribution data is a variable size payload containing * signed 16 bit values. */ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) { struct netem_sched_data *q = qdisc_priv(sch); size_t n = nla_len(attr)/sizeof(__s16); const __s16 *data = nla_data(attr); spinlock_t *root_lock; struct disttable *d; int i; size_t s; if (n > NETEM_DIST_MAX) return -EINVAL; s = sizeof(struct disttable) + n * sizeof(s16); d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN); if (!d) d = vmalloc(s); if (!d) return -ENOMEM; d->size = n; for (i = 0; i < n; i++) d->table[i] = data[i]; root_lock = qdisc_root_sleeping_lock(sch); spin_lock_bh(root_lock); swap(q->delay_dist, d); spin_unlock_bh(root_lock); dist_free(d); return 0; } static void get_correlation(struct Qdisc *sch, const struct nlattr *attr) { struct netem_sched_data *q = qdisc_priv(sch); const struct tc_netem_corr *c = nla_data(attr); init_crandom(&q->delay_cor, c->delay_corr); init_crandom(&q->loss_cor, c->loss_corr); init_crandom(&q->dup_cor, c->dup_corr); } static void get_reorder(struct Qdisc *sch, const struct nlattr *attr) { struct netem_sched_data *q = qdisc_priv(sch); const struct tc_netem_reorder *r = nla_data(attr); q->reorder = r->probability; init_crandom(&q->reorder_cor, r->correlation); } static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr) { struct netem_sched_data *q = qdisc_priv(sch); const struct tc_netem_corrupt *r = nla_data(attr); q->corrupt = r->probability; init_crandom(&q->corrupt_cor, r->correlation); } static void get_rate(struct Qdisc *sch, const struct nlattr *attr) { struct netem_sched_data *q = qdisc_priv(sch); const struct tc_netem_rate *r = nla_data(attr); q->rate = r->rate; q->packet_overhead = r->packet_overhead; q->cell_size = r->cell_size; if (q->cell_size) q->cell_size_reciprocal = reciprocal_value(q->cell_size); q->cell_overhead = r->cell_overhead; } static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr) { struct netem_sched_data *q = qdisc_priv(sch); const struct nlattr *la; int rem; nla_for_each_nested(la, attr, rem) { u16 type = nla_type(la); switch(type) { case NETEM_LOSS_GI: { const struct tc_netem_gimodel *gi = nla_data(la); if (nla_len(la) < sizeof(struct tc_netem_gimodel)) { pr_info("netem: incorrect gi model size\n"); return -EINVAL; } q->loss_model = CLG_4_STATES; q->clg.state = 1; q->clg.a1 = gi->p13; q->clg.a2 = gi->p31; q->clg.a3 = gi->p32; q->clg.a4 = gi->p14; q->clg.a5 = gi->p23; break; } case NETEM_LOSS_GE: { const struct tc_netem_gemodel *ge = nla_data(la); if (nla_len(la) < sizeof(struct tc_netem_gemodel)) { pr_info("netem: incorrect ge model size\n"); return -EINVAL; } 
q->loss_model = CLG_GILB_ELL; q->clg.state = 1; q->clg.a1 = ge->p; q->clg.a2 = ge->r; q->clg.a3 = ge->h; q->clg.a4 = ge->k1; break; } default: pr_info("netem: unknown loss type %u\n", type); return -EINVAL; } } return 0; } static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) }, [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, }; static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, const struct nla_policy *policy, int len) { int nested_len = nla_len(nla) - NLA_ALIGN(len); if (nested_len < 0) { pr_info("netem: invalid attributes len %d\n", nested_len); return -EINVAL; } if (nested_len >= nla_attr_size(0)) return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), nested_len, policy); memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); return 0; } /* Parse netlink message to set options */ static int netem_change(struct Qdisc *sch, struct nlattr *opt) { struct netem_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_NETEM_MAX + 1]; struct tc_netem_qopt *qopt; int ret; if (opt == NULL) return -EINVAL; qopt = nla_data(opt); ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); if (ret < 0) return ret; sch->limit = qopt->limit; q->latency = qopt->latency; q->jitter = qopt->jitter; q->limit = qopt->limit; q->gap = qopt->gap; q->counter = 0; q->loss = qopt->loss; q->duplicate = qopt->duplicate; /* for compatibility with earlier versions. * if gap is set, need to assume 100% probability */ if (q->gap) q->reorder = ~0; if (tb[TCA_NETEM_CORR]) get_correlation(sch, tb[TCA_NETEM_CORR]); if (tb[TCA_NETEM_DELAY_DIST]) { ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); if (ret) return ret; } if (tb[TCA_NETEM_REORDER]) get_reorder(sch, tb[TCA_NETEM_REORDER]); if (tb[TCA_NETEM_CORRUPT]) get_corrupt(sch, tb[TCA_NETEM_CORRUPT]); if (tb[TCA_NETEM_RATE]) get_rate(sch, tb[TCA_NETEM_RATE]); q->loss_model = CLG_RANDOM; if (tb[TCA_NETEM_LOSS]) ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); return ret; } static int netem_init(struct Qdisc *sch, struct nlattr *opt) { struct netem_sched_data *q = qdisc_priv(sch); int ret; if (!opt) return -EINVAL; qdisc_watchdog_init(&q->watchdog, sch); q->loss_model = CLG_RANDOM; ret = netem_change(sch, opt); if (ret) pr_info("netem: change failed\n"); return ret; } static void netem_destroy(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); qdisc_watchdog_cancel(&q->watchdog); if (q->qdisc) qdisc_destroy(q->qdisc); dist_free(q->delay_dist); } static int dump_loss_model(const struct netem_sched_data *q, struct sk_buff *skb) { struct nlattr *nest; nest = nla_nest_start(skb, TCA_NETEM_LOSS); if (nest == NULL) goto nla_put_failure; switch (q->loss_model) { case CLG_RANDOM: /* legacy loss model */ nla_nest_cancel(skb, nest); return 0; /* no data */ case CLG_4_STATES: { struct tc_netem_gimodel gi = { .p13 = q->clg.a1, .p31 = q->clg.a2, .p32 = q->clg.a3, .p14 = q->clg.a4, .p23 = q->clg.a5, }; NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi); break; } case CLG_GILB_ELL: { struct tc_netem_gemodel ge = { .p = q->clg.a1, .r = q->clg.a2, .h = q->clg.a3, .k1 = q->clg.a4, }; NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge); break; } } nla_nest_end(skb, nest); return 0; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static int netem_dump(struct Qdisc *sch, struct sk_buff 
*skb) { const struct netem_sched_data *q = qdisc_priv(sch); struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb); struct tc_netem_qopt qopt; struct tc_netem_corr cor; struct tc_netem_reorder reorder; struct tc_netem_corrupt corrupt; struct tc_netem_rate rate; qopt.latency = q->latency; qopt.jitter = q->jitter; qopt.limit = q->limit; qopt.loss = q->loss; qopt.gap = q->gap; qopt.duplicate = q->duplicate; NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); cor.delay_corr = q->delay_cor.rho; cor.loss_corr = q->loss_cor.rho; cor.dup_corr = q->dup_cor.rho; NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); reorder.probability = q->reorder; reorder.correlation = q->reorder_cor.rho; NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); corrupt.probability = q->corrupt; corrupt.correlation = q->corrupt_cor.rho; NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); rate.rate = q->rate; rate.packet_overhead = q->packet_overhead; rate.cell_size = q->cell_size; rate.cell_overhead = q->cell_overhead; NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate); if (dump_loss_model(q, skb) != 0) goto nla_put_failure; return nla_nest_end(skb, nla); nla_put_failure: nlmsg_trim(skb, nla); return -1; } static int netem_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct netem_sched_data *q = qdisc_priv(sch); if (cl != 1 || !q->qdisc) /* only one class */ return -ENOENT; tcm->tcm_handle |= TC_H_MIN(1); tcm->tcm_info = q->qdisc->handle; return 0; } static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct netem_sched_data *q = qdisc_priv(sch); sch_tree_lock(sch); *old = q->qdisc; q->qdisc = new; if (*old) { qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); qdisc_reset(*old); } sch_tree_unlock(sch); return 0; } static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg) { struct netem_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long netem_get(struct Qdisc *sch, u32 classid) { return 1; } static void netem_put(struct Qdisc *sch, unsigned long arg) { } static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { if (walker->count >= walker->skip) if (walker->fn(sch, 1, walker) < 0) { walker->stop = 1; return; } walker->count++; } } static const struct Qdisc_class_ops netem_class_ops = { .graft = netem_graft, .leaf = netem_leaf, .get = netem_get, .put = netem_put, .walk = netem_walk, .dump = netem_dump_class, }; static struct Qdisc_ops netem_qdisc_ops __read_mostly = { .id = "netem", .cl_ops = &netem_class_ops, .priv_size = sizeof(struct netem_sched_data), .enqueue = netem_enqueue, .dequeue = netem_dequeue, .peek = qdisc_peek_dequeued, .drop = netem_drop, .init = netem_init, .reset = netem_reset, .destroy = netem_destroy, .change = netem_change, .dump = netem_dump, .owner = THIS_MODULE, }; static int __init netem_module_init(void) { pr_info("netem: version " VERSION "\n"); return register_qdisc(&netem_qdisc_ops); } static void __exit netem_module_exit(void) { unregister_qdisc(&netem_qdisc_ops); } module_init(netem_module_init) module_exit(netem_module_exit) MODULE_LICENSE("GPL");
G2Mini-DevTeam/android_kernel_lge_msm8226
net/sched/sch_netem.c
C
gpl-2.0
23,954
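Most of the randomized decisions in the netem record above (delay jitter, random loss, duplication, corruption, reordering) draw from get_crandom(), which blends a fresh uniform value with the previous output so consecutive decisions can be correlated. A stand-alone sketch of that generator, with rand() standing in for the kernel's net_random() and an illustrative rho value, is:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Illustration only (not kernel code): netem's correlated random
 * generator.  rho == 0 means every value is independent; larger rho
 * weights the previous output more heavily.  rand() is a stand-in for
 * net_random() and is not a good entropy source.
 */
struct crndstate {
	uint32_t last;
	uint32_t rho;
};

static uint32_t get_crandom(struct crndstate *state)
{
	uint64_t value, rho;

	if (state->rho == 0)	/* no correlation */
		return (uint32_t)rand();

	value = (uint32_t)rand();
	rho = (uint64_t)state->rho + 1;
	state->last = (uint32_t)((value * ((1ULL << 32) - rho) +
				  (uint64_t)state->last * rho) >> 32);
	return state->last;
}

int main(void)
{
	struct crndstate st = { .last = 0, .rho = 0xc0000000u }; /* rho/2^32 ~ 0.75 */
	int i;

	for (i = 0; i < 5; i++)
		printf("%u\n", get_crandom(&st));
	return 0;
}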
/* * sh7372 processor support - INTC hardware block * * Copyright (C) 2010 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/sh_intc.h> #include <mach/intc.h> #include <mach/irqs.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> enum { UNUSED_INTCA = 0, /* interrupt sources INTCA */ DIRC, CRYPT_STD, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1, AP_ARM_IRQPMU, AP_ARM_COMMTX, AP_ARM_COMMRX, MFI_MFIM, MFI_MFIS, BBIF1, BBIF2, USBHSDMAC0_USHDMI, _3DG_SGX540, CMT1_CMT10, CMT1_CMT11, CMT1_CMT12, CMT1_CMT13, CMT2, CMT3, KEYSC_KEY, SCIFA0, SCIFA1, SCIFA2, SCIFA3, MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, IRREM, IRDA, TPU0, TTI20, DDM, SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3, RWDT0, DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3, DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR, DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3, DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR, DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3, DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, HDMI, SPU2_SPU0, SPU2_SPU1, FSI, FMSI, MIPI_HSI, IPMMU_IPMMUD, CEC_1, CEC_2, AP_ARM_CTIIRQ, AP_ARM_DMAEXTERRIRQ, AP_ARM_DMAIRQ, AP_ARM_DMASIRQ, MFIS2, CPORTR2S, CMT14, CMT15, MMC_MMC_ERR, MMC_MMC_NOR, IIC4_ALI4, IIC4_TACKI4, IIC4_WAITI4, IIC4_DTEI4, IIC3_ALI3, IIC3_TACKI3, IIC3_WAITI3, IIC3_DTEI3, USB0_USB0I1, USB0_USB0I0, USB1_USB1I1, USB1_USB1I0, USBHSDMAC1_USHDMI, /* interrupt groups INTCA */ DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1, SDHI0, SDHI1, SDHI2 }; static struct intc_vect intca_vectors[] __initdata = { INTC_VECT(DIRC, 0x0560), INTC_VECT(CRYPT_STD, 0x0700), INTC_VECT(IIC1_ALI1, 0x0780), INTC_VECT(IIC1_TACKI1, 0x07a0), INTC_VECT(IIC1_WAITI1, 0x07c0), INTC_VECT(IIC1_DTEI1, 0x07e0), INTC_VECT(AP_ARM_IRQPMU, 0x0800), INTC_VECT(AP_ARM_COMMTX, 0x0840), INTC_VECT(AP_ARM_COMMRX, 0x0860), INTC_VECT(MFI_MFIM, 0x0900), INTC_VECT(MFI_MFIS, 0x0920), INTC_VECT(BBIF1, 0x0940), INTC_VECT(BBIF2, 0x0960), INTC_VECT(USBHSDMAC0_USHDMI, 0x0a00), INTC_VECT(_3DG_SGX540, 0x0a60), INTC_VECT(CMT1_CMT10, 0x0b00), INTC_VECT(CMT1_CMT11, 0x0b20), INTC_VECT(CMT1_CMT12, 0x0b40), INTC_VECT(CMT1_CMT13, 0x0b60), INTC_VECT(CMT2, 0x0b80), INTC_VECT(CMT3, 0x0ba0), INTC_VECT(KEYSC_KEY, 0x0be0), INTC_VECT(SCIFA0, 0x0c00), INTC_VECT(SCIFA1, 0x0c20), INTC_VECT(SCIFA2, 0x0c40), INTC_VECT(SCIFA3, 0x0c60), INTC_VECT(MSIOF2, 0x0c80), INTC_VECT(MSIOF1, 0x0d00), INTC_VECT(SCIFA4, 0x0d20), INTC_VECT(SCIFA5, 0x0d40), INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), 
INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(IRREM, 0x0f60), INTC_VECT(IRDA, 0x0480), INTC_VECT(TPU0, 0x04a0), INTC_VECT(TTI20, 0x1100), INTC_VECT(DDM, 0x1140), INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220), INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260), INTC_VECT(RWDT0, 0x1280), INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020), INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060), INTC_VECT(DMAC1_2_DEI4, 0x2080), INTC_VECT(DMAC1_2_DEI5, 0x20a0), INTC_VECT(DMAC1_2_DADERR, 0x20c0), INTC_VECT(DMAC2_1_DEI0, 0x2100), INTC_VECT(DMAC2_1_DEI1, 0x2120), INTC_VECT(DMAC2_1_DEI2, 0x2140), INTC_VECT(DMAC2_1_DEI3, 0x2160), INTC_VECT(DMAC2_2_DEI4, 0x2180), INTC_VECT(DMAC2_2_DEI5, 0x21a0), INTC_VECT(DMAC2_2_DADERR, 0x21c0), INTC_VECT(DMAC3_1_DEI0, 0x2200), INTC_VECT(DMAC3_1_DEI1, 0x2220), INTC_VECT(DMAC3_1_DEI2, 0x2240), INTC_VECT(DMAC3_1_DEI3, 0x2260), INTC_VECT(DMAC3_2_DEI4, 0x2280), INTC_VECT(DMAC3_2_DEI5, 0x22a0), INTC_VECT(DMAC3_2_DADERR, 0x22c0), INTC_VECT(SHWYSTAT_RT, 0x1300), INTC_VECT(SHWYSTAT_HS, 0x1320), INTC_VECT(SHWYSTAT_COM, 0x1340), INTC_VECT(HDMI, 0x17e0), INTC_VECT(SPU2_SPU0, 0x1800), INTC_VECT(SPU2_SPU1, 0x1820), INTC_VECT(FSI, 0x1840), INTC_VECT(FMSI, 0x1860), INTC_VECT(MIPI_HSI, 0x18e0), INTC_VECT(IPMMU_IPMMUD, 0x1920), INTC_VECT(CEC_1, 0x1940), INTC_VECT(CEC_2, 0x1960), INTC_VECT(AP_ARM_CTIIRQ, 0x1980), INTC_VECT(AP_ARM_DMAEXTERRIRQ, 0x19a0), INTC_VECT(AP_ARM_DMAIRQ, 0x19c0), INTC_VECT(AP_ARM_DMASIRQ, 0x19e0), INTC_VECT(MFIS2, 0x1a00), INTC_VECT(CPORTR2S, 0x1a20), INTC_VECT(CMT14, 0x1a40), INTC_VECT(CMT15, 0x1a60), INTC_VECT(MMC_MMC_ERR, 0x1ac0), INTC_VECT(MMC_MMC_NOR, 0x1ae0), INTC_VECT(IIC4_ALI4, 0x1b00), INTC_VECT(IIC4_TACKI4, 0x1b20), INTC_VECT(IIC4_WAITI4, 0x1b40), INTC_VECT(IIC4_DTEI4, 0x1b60), INTC_VECT(IIC3_ALI3, 0x1b80), INTC_VECT(IIC3_TACKI3, 0x1ba0), INTC_VECT(IIC3_WAITI3, 0x1bc0), INTC_VECT(IIC3_DTEI3, 0x1be0), INTC_VECT(USB0_USB0I1, 0x1c80), INTC_VECT(USB0_USB0I0, 0x1ca0), INTC_VECT(USB1_USB1I1, 0x1cc0), INTC_VECT(USB1_USB1I0, 0x1ce0), INTC_VECT(USBHSDMAC1_USHDMI, 0x1d00), }; static struct intc_group intca_groups[] __initdata = { INTC_GROUP(DMAC1_1, DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3), INTC_GROUP(DMAC1_2, DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR), INTC_GROUP(DMAC2_1, DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3), INTC_GROUP(DMAC2_2, DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR), INTC_GROUP(DMAC3_1, DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3), INTC_GROUP(DMAC3_2, DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR), INTC_GROUP(AP_ARM1, AP_ARM_IRQPMU, AP_ARM_COMMTX, AP_ARM_COMMRX), INTC_GROUP(AP_ARM2, AP_ARM_CTIIRQ, AP_ARM_DMAEXTERRIRQ, AP_ARM_DMAIRQ, AP_ARM_DMASIRQ), INTC_GROUP(SPU2, SPU2_SPU0, SPU2_SPU1), INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3), INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2), INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3), INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), }; static struct intc_mask_reg intca_mask_registers[] __initdata = { { 
0xe6940080, 0xe69400c0, 8, /* IMR0A / IMCR0A */ { DMAC2_1_DEI3, DMAC2_1_DEI2, DMAC2_1_DEI1, DMAC2_1_DEI0, AP_ARM_IRQPMU, 0, AP_ARM_COMMTX, AP_ARM_COMMRX } }, { 0xe6940084, 0xe69400c4, 8, /* IMR1A / IMCR1A */ { 0, CRYPT_STD, DIRC, 0, DMAC1_1_DEI3, DMAC1_1_DEI2, DMAC1_1_DEI1, DMAC1_1_DEI0 } }, { 0xe6940088, 0xe69400c8, 8, /* IMR2A / IMCR2A */ { 0, 0, 0, 0, BBIF1, BBIF2, MFI_MFIS, MFI_MFIM } }, { 0xe694008c, 0xe69400cc, 8, /* IMR3A / IMCR3A */ { DMAC3_1_DEI3, DMAC3_1_DEI2, DMAC3_1_DEI1, DMAC3_1_DEI0, DMAC3_2_DADERR, DMAC3_2_DEI5, DMAC3_2_DEI4, IRDA } }, { 0xe6940090, 0xe69400d0, 8, /* IMR4A / IMCR4A */ { DDM, 0, 0, 0, 0, 0, 0, 0 } }, { 0xe6940094, 0xe69400d4, 8, /* IMR5A / IMCR5A */ { KEYSC_KEY, DMAC1_2_DADERR, DMAC1_2_DEI5, DMAC1_2_DEI4, SCIFA3, SCIFA2, SCIFA1, SCIFA0 } }, { 0xe6940098, 0xe69400d8, 8, /* IMR6A / IMCR6A */ { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ { 0, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, TTI20, USBHSDMAC0_USHDMI, 0, 0 } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, CMT2, 0, 0, _3DG_SGX540 } }, { 0xe69400a8, 0xe69400e8, 8, /* IMR10A / IMCR10A */ { 0, DMAC2_2_DADERR, DMAC2_2_DEI5, DMAC2_2_DEI4, 0, 0, 0, 0 } }, { 0xe69400ac, 0xe69400ec, 8, /* IMR11A / IMCR11A */ { IIC1_DTEI1, IIC1_WAITI1, IIC1_TACKI1, IIC1_ALI1, 0, 0, IRREM, 0 } }, { 0xe69400b0, 0xe69400f0, 8, /* IMR12A / IMCR12A */ { 0, 0, TPU0, 0, 0, 0, 0, 0 } }, { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0, 0, CMT3, 0, RWDT0 } }, { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */ { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0, 0, 0, 0, 0 } }, { 0xe6950090, 0xe69500d0, 8, /* IMR4A3 / IMCR4A3 */ { 0, 0, 0, 0, 0, 0, 0, HDMI } }, { 0xe6950094, 0xe69500d4, 8, /* IMR5A3 / IMCR5A3 */ { SPU2_SPU0, SPU2_SPU1, FSI, FMSI, 0, 0, 0, MIPI_HSI } }, { 0xe6950098, 0xe69500d8, 8, /* IMR6A3 / IMCR6A3 */ { 0, IPMMU_IPMMUD, CEC_1, CEC_2, AP_ARM_CTIIRQ, AP_ARM_DMAEXTERRIRQ, AP_ARM_DMAIRQ, AP_ARM_DMASIRQ } }, { 0xe695009c, 0xe69500dc, 8, /* IMR7A3 / IMCR7A3 */ { MFIS2, CPORTR2S, CMT14, CMT15, 0, 0, MMC_MMC_ERR, MMC_MMC_NOR } }, { 0xe69500a0, 0xe69500e0, 8, /* IMR8A3 / IMCR8A3 */ { IIC4_ALI4, IIC4_TACKI4, IIC4_WAITI4, IIC4_DTEI4, IIC3_ALI3, IIC3_TACKI3, IIC3_WAITI3, IIC3_DTEI3 } }, { 0xe69500a4, 0xe69500e4, 8, /* IMR9A3 / IMCR9A3 */ { 0, 0, 0, 0, USB0_USB0I1, USB0_USB0I0, USB1_USB1I1, USB1_USB1I0 } }, { 0xe69500a8, 0xe69500e8, 8, /* IMR10A3 / IMCR10A3 */ { USBHSDMAC1_USHDMI, 0, 0, 0, 0, 0, 0, 0 } }, }; static struct intc_prio_reg intca_prio_registers[] __initdata = { { 0xe6940000, 0, 16, 4, /* IPRAA */ { DMAC3_1, DMAC3_2, CMT2, 0 } }, { 0xe6940004, 0, 16, 4, /* IPRBA */ { IRDA, 0, BBIF1, BBIF2 } }, { 0xe6940008, 0, 16, 4, /* IPRCA */ { 0, CRYPT_STD, CMT1_CMT11, AP_ARM1 } }, { 0xe694000c, 0, 16, 4, /* IPRDA */ { 0, 0, CMT1_CMT12, 0 } }, { 0xe6940010, 0, 16, 4, /* IPREA */ { DMAC1_1, MFI_MFIS, MFI_MFIM, 0 } }, { 0xe6940014, 0, 16, 4, /* IPRFA */ { KEYSC_KEY, DMAC1_2, _3DG_SGX540, CMT1_CMT10 } }, { 0xe6940018, 0, 16, 4, /* IPRGA */ { SCIFA0, SCIFA1, SCIFA2, SCIFA3 } }, { 0xe694001c, 0, 16, 4, /* IPRGH */ { MSIOF2, USBHSDMAC0_USHDMI, FLCTL, SDHI0 } }, { 0xe6940020, 0, 16, 4, /* IPRIA */ { MSIOF1, SCIFA4, 0/* MSU */, IIC1 } }, { 0xe6940024, 0, 16, 4, /* IPRJA */ { DMAC2_1, DMAC2_2, 0/* MSUG 
*/, TTI20 } }, { 0xe6940028, 0, 16, 4, /* IPRKA */ { 0, CMT1_CMT13, IRREM, SDHI1 } }, { 0xe694002c, 0, 16, 4, /* IPRLA */ { TPU0, 0, 0, 0 } }, { 0xe6940030, 0, 16, 4, /* IPRMA */ { 0, CMT3, 0, RWDT0 } }, { 0xe6940034, 0, 16, 4, /* IPRNA */ { SCIFB, SCIFA5, 0, DDM } }, { 0xe6940038, 0, 16, 4, /* IPROA */ { 0, 0, DIRC, SDHI2 } }, { 0xe6950000, 0, 16, 4, /* IPRAA3 */ { SHWYSTAT, 0, 0, 0 } }, { 0xe6950024, 0, 16, 4, /* IPRJA3 */ { 0, 0, 0, HDMI } }, { 0xe6950028, 0, 16, 4, /* IPRKA3 */ { SPU2, 0, FSI, FMSI } }, { 0xe695002c, 0, 16, 4, /* IPRLA3 */ { 0, 0, 0, MIPI_HSI } }, { 0xe6950030, 0, 16, 4, /* IPRMA3 */ { IPMMU_IPMMUD, 0, CEC_1, CEC_2 } }, { 0xe6950034, 0, 16, 4, /* IPRNA3 */ { AP_ARM2, 0, 0, 0 } }, { 0xe6950038, 0, 16, 4, /* IPROA3 */ { MFIS2, CPORTR2S, CMT14, CMT15 } }, { 0xe695003c, 0, 16, 4, /* IPRPA3 */ { 0, 0, MMC_MMC_ERR, MMC_MMC_NOR } }, { 0xe6950040, 0, 16, 4, /* IPRQA3 */ { IIC4_ALI4, IIC4_TACKI4, IIC4_WAITI4, IIC4_DTEI4 } }, { 0xe6950044, 0, 16, 4, /* IPRRA3 */ { IIC3_ALI3, IIC3_TACKI3, IIC3_WAITI3, IIC3_DTEI3 } }, { 0xe6950048, 0, 16, 4, /* IPRSA3 */ { 0/*ERI*/, 0/*RXI*/, 0/*TXI*/, 0/*TEI*/} }, { 0xe695004c, 0, 16, 4, /* IPRTA3 */ { USB0_USB0I1, USB0_USB0I0, USB1_USB1I1, USB1_USB1I0 } }, { 0xe6950050, 0, 16, 4, /* IPRUA3 */ { USBHSDMAC1_USHDMI, 0, 0, 0 } }, }; static DECLARE_INTC_DESC(intca_desc, "sh7372-intca", intca_vectors, intca_groups, intca_mask_registers, intca_prio_registers, NULL); INTC_IRQ_PINS_16(intca_irq_pins_lo, 0xe6900000, INTC_VECT, "sh7372-intca-irq-lo"); INTC_IRQ_PINS_16H(intca_irq_pins_hi, 0xe6900000, INTC_VECT, "sh7372-intca-irq-hi"); enum { UNUSED_INTCS = 0, ENABLED_INTCS, /* interrupt sources INTCS */ /* IRQ0S - IRQ31S */ VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3, RTDMAC_1_DEI0, RTDMAC_1_DEI1, RTDMAC_1_DEI2, RTDMAC_1_DEI3, CEU, BEU_BEU0, BEU_BEU1, BEU_BEU2, /* MFI */ /* BBIF2 */ VPU, TSIF1, /* 3DG */ _2DDMAC, IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2, IPMMU_IPMMUR, IPMMU_IPMMUR2, RTDMAC_2_DEI4, RTDMAC_2_DEI5, RTDMAC_2_DADERR, /* KEYSC */ /* TTI20 */ MSIOF, IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0, TMU_TUNI0, TMU_TUNI1, TMU_TUNI2, CMT0, TSIF0, /* CMT2 */ LMB, CTI, /* RWDT0 */ ICB, JPU_JPEG, LCDC, LCRC, RTDMAC2_1_DEI0, RTDMAC2_1_DEI1, RTDMAC2_1_DEI2, RTDMAC2_1_DEI3, RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR, ISP, LCDC1, CSIRX, DSITX_DSITX0, DSITX_DSITX1, /* SPU2 */ /* FSI */ /* FMSI */ /* HDMI */ TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, CMT4, DSITX1_DSITX1_0, DSITX1_DSITX1_1, MFIS2_INTCS, /* Priority always enabled using ENABLED_INTCS */ CPORTS2R, /* CEC */ JPU6E, /* interrupt groups INTCS */ RTDMAC_1, RTDMAC_2, VEU, BEU, IIC0, IPMMU, IIC2, RTDMAC2_1, RTDMAC2_2, TMU1, DSITX, }; static struct intc_vect intcs_vectors[] = { /* IRQ0S - IRQ31S */ INTCS_VECT(VEU_VEU0, 0x700), INTCS_VECT(VEU_VEU1, 0x720), INTCS_VECT(VEU_VEU2, 0x740), INTCS_VECT(VEU_VEU3, 0x760), INTCS_VECT(RTDMAC_1_DEI0, 0x800), INTCS_VECT(RTDMAC_1_DEI1, 0x820), INTCS_VECT(RTDMAC_1_DEI2, 0x840), INTCS_VECT(RTDMAC_1_DEI3, 0x860), INTCS_VECT(CEU, 0x880), INTCS_VECT(BEU_BEU0, 0x8a0), INTCS_VECT(BEU_BEU1, 0x8c0), INTCS_VECT(BEU_BEU2, 0x8e0), /* MFI */ /* BBIF2 */ INTCS_VECT(VPU, 0x980), INTCS_VECT(TSIF1, 0x9a0), /* 3DG */ INTCS_VECT(_2DDMAC, 0xa00), INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0), INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0), INTCS_VECT(IPMMU_IPMMUR, 0xb00), INTCS_VECT(IPMMU_IPMMUR2, 0xb20), INTCS_VECT(RTDMAC_2_DEI4, 0xb80), INTCS_VECT(RTDMAC_2_DEI5, 0xba0), INTCS_VECT(RTDMAC_2_DADERR, 0xbc0), /* KEYSC */ /* TTI20 */ INTCS_VECT(MSIOF, 0x0d20), 
INTCS_VECT(IIC0_ALI0, 0xe00), INTCS_VECT(IIC0_TACKI0, 0xe20), INTCS_VECT(IIC0_WAITI0, 0xe40), INTCS_VECT(IIC0_DTEI0, 0xe60), INTCS_VECT(TMU_TUNI0, 0xe80), INTCS_VECT(TMU_TUNI1, 0xea0), INTCS_VECT(TMU_TUNI2, 0xec0), INTCS_VECT(CMT0, 0xf00), INTCS_VECT(TSIF0, 0xf20), /* CMT2 */ INTCS_VECT(LMB, 0xf60), INTCS_VECT(CTI, 0x400), /* RWDT0 */ INTCS_VECT(ICB, 0x480), INTCS_VECT(JPU_JPEG, 0x560), INTCS_VECT(LCDC, 0x580), INTCS_VECT(LCRC, 0x5a0), INTCS_VECT(RTDMAC2_1_DEI0, 0x1300), INTCS_VECT(RTDMAC2_1_DEI1, 0x1320), INTCS_VECT(RTDMAC2_1_DEI2, 0x1340), INTCS_VECT(RTDMAC2_1_DEI3, 0x1360), INTCS_VECT(RTDMAC2_2_DEI4, 0x1380), INTCS_VECT(RTDMAC2_2_DEI5, 0x13a0), INTCS_VECT(RTDMAC2_2_DADERR, 0x13c0), INTCS_VECT(ISP, 0x1720), INTCS_VECT(LCDC1, 0x1780), INTCS_VECT(CSIRX, 0x17a0), INTCS_VECT(DSITX_DSITX0, 0x17c0), INTCS_VECT(DSITX_DSITX1, 0x17e0), /* SPU2 */ /* FSI */ /* FMSI */ /* HDMI */ INTCS_VECT(TMU1_TUNI0, 0x1900), INTCS_VECT(TMU1_TUNI1, 0x1920), INTCS_VECT(TMU1_TUNI2, 0x1940), INTCS_VECT(CMT4, 0x1980), INTCS_VECT(DSITX1_DSITX1_0, 0x19a0), INTCS_VECT(DSITX1_DSITX1_1, 0x19c0), INTCS_VECT(MFIS2_INTCS, 0x1a00), INTCS_VECT(CPORTS2R, 0x1a20), /* CEC */ INTCS_VECT(JPU6E, 0x1a80), }; static struct intc_group intcs_groups[] __initdata = { INTC_GROUP(RTDMAC_1, RTDMAC_1_DEI0, RTDMAC_1_DEI1, RTDMAC_1_DEI2, RTDMAC_1_DEI3), INTC_GROUP(RTDMAC_2, RTDMAC_2_DEI4, RTDMAC_2_DEI5, RTDMAC_2_DADERR), INTC_GROUP(VEU, VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3), INTC_GROUP(BEU, BEU_BEU0, BEU_BEU1, BEU_BEU2), INTC_GROUP(IIC0, IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0), INTC_GROUP(IPMMU, IPMMU_IPMMUR, IPMMU_IPMMUR2), INTC_GROUP(IIC2, IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2), INTC_GROUP(RTDMAC2_1, RTDMAC2_1_DEI0, RTDMAC2_1_DEI1, RTDMAC2_1_DEI2, RTDMAC2_1_DEI3), INTC_GROUP(RTDMAC2_2, RTDMAC2_2_DEI4, RTDMAC2_2_DEI5, RTDMAC2_2_DADERR), INTC_GROUP(TMU1, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0), INTC_GROUP(DSITX, DSITX_DSITX0, DSITX_DSITX1), }; static struct intc_mask_reg intcs_mask_registers[] = { { 0xffd20184, 0xffd201c4, 8, /* IMR1SA / IMCR1SA */ { BEU_BEU2, BEU_BEU1, BEU_BEU0, CEU, VEU_VEU3, VEU_VEU2, VEU_VEU1, VEU_VEU0 } }, { 0xffd20188, 0xffd201c8, 8, /* IMR2SA / IMCR2SA */ { 0, 0, 0, VPU, 0, 0, 0, 0 } }, { 0xffd2018c, 0xffd201cc, 8, /* IMR3SA / IMCR3SA */ { 0, 0, 0, _2DDMAC, 0, 0, 0, ICB } }, { 0xffd20190, 0xffd201d0, 8, /* IMR4SA / IMCR4SA */ { 0, 0, 0, CTI, JPU_JPEG, 0, LCRC, LCDC } }, { 0xffd20194, 0xffd201d4, 8, /* IMR5SA / IMCR5SA */ { 0, RTDMAC_2_DADERR, RTDMAC_2_DEI5, RTDMAC_2_DEI4, RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } }, { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */ { 0, 0, MSIOF, 0, 0, 0, 0, 0 } }, { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */ { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0, 0, 0, 0, 0 } }, { 0xffd201a4, 0xffd201e4, 8, /* IMR9SA / IMCR9SA */ { 0, 0, 0, CMT0, IIC2_DTEI2, IIC2_WAITI2, IIC2_TACKI2, IIC2_ALI2 } }, { 0xffd201a8, 0xffd201e8, 8, /* IMR10SA / IMCR10SA */ { 0, 0, IPMMU_IPMMUR2, IPMMU_IPMMUR, 0, 0, 0, 0 } }, { 0xffd201ac, 0xffd201ec, 8, /* IMR11SA / IMCR11SA */ { IIC0_DTEI0, IIC0_WAITI0, IIC0_TACKI0, IIC0_ALI0, 0, TSIF1, LMB, TSIF0 } }, { 0xffd50180, 0xffd501c0, 8, /* IMR0SA3 / IMCR0SA3 */ { 0, RTDMAC2_2_DADERR, RTDMAC2_2_DEI5, RTDMAC2_2_DEI4, RTDMAC2_1_DEI3, RTDMAC2_1_DEI2, RTDMAC2_1_DEI1, RTDMAC2_1_DEI0 } }, { 0xffd50190, 0xffd501d0, 8, /* IMR4SA3 / IMCR4SA3 */ { 0, ISP, 0, 0, LCDC1, CSIRX, DSITX_DSITX0, DSITX_DSITX1 } }, { 0xffd50198, 0xffd501d8, 8, /* IMR6SA3 / IMCR6SA3 */ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, CMT4, DSITX1_DSITX1_0, DSITX1_DSITX1_1, 0 } }, { 
0xffd5019c, 0xffd501dc, 8, /* IMR7SA3 / IMCR7SA3 */ { MFIS2_INTCS, CPORTS2R, 0, 0, JPU6E, 0, 0, 0 } }, }; /* Priority is needed for INTCA to receive the INTCS interrupt */ static struct intc_prio_reg intcs_prio_registers[] = { { 0xffd20000, 0, 16, 4, /* IPRAS */ { CTI, 0, _2DDMAC, ICB } }, { 0xffd20004, 0, 16, 4, /* IPRBS */ { JPU_JPEG, LCDC, 0, LCRC } }, { 0xffd20010, 0, 16, 4, /* IPRES */ { RTDMAC_1, CEU, 0, VPU } }, { 0xffd20014, 0, 16, 4, /* IPRFS */ { 0, RTDMAC_2, 0, CMT0 } }, { 0xffd20018, 0, 16, 4, /* IPRGS */ { TMU_TUNI0, TMU_TUNI1, TMU_TUNI2, TSIF1 } }, { 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } }, { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } }, { 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } }, { 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } }, { 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } }, { 0xffd50000, 0, 16, 4, /* IPRAS3 */ { RTDMAC2_1, 0, 0, 0 } }, { 0xffd50004, 0, 16, 4, /* IPRBS3 */ { RTDMAC2_2, 0, 0, 0 } }, { 0xffd50020, 0, 16, 4, /* IPRIS3 */ { 0, ISP, 0, 0 } }, { 0xffd50024, 0, 16, 4, /* IPRJS3 */ { LCDC1, CSIRX, DSITX, 0 } }, { 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, 0 } }, { 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, DSITX1_DSITX1_0, DSITX1_DSITX1_1, 0 } }, { 0xffd50038, 0, 16, 4, /* IPROS3 */ { ENABLED_INTCS, CPORTS2R, 0, 0 } }, { 0xffd5003c, 0, 16, 4, /* IPRPS3 */ { JPU6E, 0, 0, 0 } }, }; static struct resource intcs_resources[] __initdata = { [0] = { .start = 0xffd20000, .end = 0xffd201ff, .flags = IORESOURCE_MEM, }, [1] = { .start = 0xffd50000, .end = 0xffd501ff, .flags = IORESOURCE_MEM, } }; static struct intc_desc intcs_desc __initdata = { .name = "sh7372-intcs", .force_enable = ENABLED_INTCS, .skip_syscore_suspend = true, .resource = intcs_resources, .num_resources = ARRAY_SIZE(intcs_resources), .hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers, intcs_prio_registers, NULL, NULL), }; static void intcs_demux(unsigned int irq, struct irq_desc *desc) { void __iomem *reg = (void *)irq_get_handler_data(irq); unsigned int evtcodeas = ioread32(reg); generic_handle_irq(intcs_evt2irq(evtcodeas)); } static void __iomem *intcs_ffd2; static void __iomem *intcs_ffd5; void __init sh7372_init_irq(void) { void __iomem *intevtsa; int n; intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE); intevtsa = intcs_ffd2 + 0x100; intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE); register_intc_controller(&intca_desc); register_intc_controller(&intca_irq_pins_lo_desc); register_intc_controller(&intca_irq_pins_hi_desc); register_intc_controller(&intcs_desc); /* setup dummy cascade chip for INTCS */ n = evt2irq(0xf80); irq_alloc_desc_at(n, numa_node_id()); irq_set_chip_and_handler_name(n, &dummy_irq_chip, handle_level_irq, "level"); set_irq_flags(n, IRQF_VALID); /* yuck */ /* demux using INTEVTSA */ irq_set_handler_data(n, (void *)intevtsa); irq_set_chained_handler(n, intcs_demux); /* unmask INTCS in INTAMASK */ iowrite16(0, intcs_ffd2 + 0x104); } static unsigned short ffd2[0x200]; static unsigned short ffd5[0x100]; void sh7372_intcs_suspend(void) { int k; for (k = 0x00; k <= 0x30; k += 4) ffd2[k] = __raw_readw(intcs_ffd2 + k); for (k = 0x80; k <= 0xb0; k += 4) ffd2[k] = __raw_readb(intcs_ffd2 + k); for (k = 0x180; k <= 0x188; k += 4) ffd2[k] = __raw_readb(intcs_ffd2 + k); for (k = 0x00; k <= 0x3c; k += 4) ffd5[k] = __raw_readw(intcs_ffd5 + k); for (k = 0x80; k <= 0x9c; k += 4) ffd5[k] = __raw_readb(intcs_ffd5 + k); } void sh7372_intcs_resume(void) { int k; for (k = 0x00; k <= 0x30; k += 4) __raw_writew(ffd2[k], 
intcs_ffd2 + k); for (k = 0x80; k <= 0xb0; k += 4) __raw_writeb(ffd2[k], intcs_ffd2 + k); for (k = 0x180; k <= 0x188; k += 4) __raw_writeb(ffd2[k], intcs_ffd2 + k); for (k = 0x00; k <= 0x3c; k += 4) __raw_writew(ffd5[k], intcs_ffd5 + k); for (k = 0x80; k <= 0x9c; k += 4) __raw_writeb(ffd5[k], intcs_ffd5 + k); } #define E694_BASE IOMEM(0xe6940000) #define E695_BASE IOMEM(0xe6950000) static unsigned short e694[0x200]; static unsigned short e695[0x200]; void sh7372_intca_suspend(void) { int k; for (k = 0x00; k <= 0x38; k += 4) e694[k] = __raw_readw(E694_BASE + k); for (k = 0x80; k <= 0xb4; k += 4) e694[k] = __raw_readb(E694_BASE + k); for (k = 0x180; k <= 0x1b4; k += 4) e694[k] = __raw_readb(E694_BASE + k); for (k = 0x00; k <= 0x50; k += 4) e695[k] = __raw_readw(E695_BASE + k); for (k = 0x80; k <= 0xa8; k += 4) e695[k] = __raw_readb(E695_BASE + k); for (k = 0x180; k <= 0x1a8; k += 4) e695[k] = __raw_readb(E695_BASE + k); } void sh7372_intca_resume(void) { int k; for (k = 0x00; k <= 0x38; k += 4) __raw_writew(e694[k], E694_BASE + k); for (k = 0x80; k <= 0xb4; k += 4) __raw_writeb(e694[k], E694_BASE + k); for (k = 0x180; k <= 0x1b4; k += 4) __raw_writeb(e694[k], E694_BASE + k); for (k = 0x00; k <= 0x50; k += 4) __raw_writew(e695[k], E695_BASE + k); for (k = 0x80; k <= 0xa8; k += 4) __raw_writeb(e695[k], E695_BASE + k); for (k = 0x180; k <= 0x1a8; k += 4) __raw_writeb(e695[k], E695_BASE + k); }
zf2-laser-dev/android_kernel_asus_msm8939
arch/arm/mach-shmobile/intc-sh7372.c
C
gpl-2.0
23,866
<?php /** * Twenty Fourteen Featured Content * * This module allows you to define a subset of posts to be displayed * in the theme's Featured Content area. * * For maximum compatibility with different methods of posting users * will designate a featured post tag to associate posts with. Since * this tag now has special meaning beyond that of a normal tags, users * will have the ability to hide it from the front-end of their site. */ class Featured_Content { /** * The maximum number of posts a Featured Content area can contain. * * We define a default value here but themes can override * this by defining a "max_posts" entry in the second parameter * passed in the call to add_theme_support( 'featured-content' ). * * @see Featured_Content::init() * * @since Twenty Fourteen 1.0 * * @static * @access public * @var int */ public static $max_posts = 15; /** * Instantiate. * * All custom functionality will be hooked into the "init" action. * * @static * @access public * @since Twenty Fourteen 1.0 */ public static function setup() { add_action( 'init', array( __CLASS__, 'init' ), 30 ); } /** * Conditionally hook into WordPress. * * Theme must declare that they support this module by adding * add_theme_support( 'featured-content' ); during after_setup_theme. * * If no theme support is found there is no need to hook into WordPress. * We'll just return early instead. * * @static * @access public * @since Twenty Fourteen 1.0 */ public static function init() { $theme_support = get_theme_support( 'featured-content' ); // Return early if theme does not support Featured Content. if ( ! $theme_support ) { return; } /* * An array of named arguments must be passed as the second parameter * of add_theme_support(). */ if ( ! isset( $theme_support[0] ) ) { return; } // Return early if "featured_content_filter" has not been defined. if ( ! isset( $theme_support[0]['featured_content_filter'] ) ) { return; } $filter = $theme_support[0]['featured_content_filter']; // Theme can override the number of max posts. if ( isset( $theme_support[0]['max_posts'] ) ) { self::$max_posts = absint( $theme_support[0]['max_posts'] ); } add_filter( $filter, array( __CLASS__, 'get_featured_posts' ) ); add_action( 'customize_register', array( __CLASS__, 'customize_register' ), 9 ); add_action( 'admin_init', array( __CLASS__, 'register_setting' ) ); add_action( 'switch_theme', array( __CLASS__, 'delete_transient' ) ); add_action( 'save_post', array( __CLASS__, 'delete_transient' ) ); add_action( 'delete_post_tag', array( __CLASS__, 'delete_post_tag' ) ); add_action( 'customize_controls_enqueue_scripts', array( __CLASS__, 'enqueue_scripts' ) ); add_action( 'pre_get_posts', array( __CLASS__, 'pre_get_posts' ) ); add_action( 'wp_loaded', array( __CLASS__, 'wp_loaded' ) ); } /** * Hide "featured" tag from the front-end. * * Has to run on wp_loaded so that the preview filters of the Customizer * have a chance to alter the value. * * @static * @access public * @since Twenty Fourteen 1.0 */ public static function wp_loaded() { if ( self::get_setting( 'hide-tag' ) ) { add_filter( 'get_terms', array( __CLASS__, 'hide_featured_term' ), 10, 3 ); add_filter( 'get_the_terms', array( __CLASS__, 'hide_the_featured_term' ), 10, 3 ); } } /** * Get featured posts. * * @static * @access public * @since Twenty Fourteen 1.0 * * @return array Array of featured posts. */ public static function get_featured_posts() { $post_ids = self::get_featured_post_ids(); // No need to query if there is are no featured posts. 
if ( empty( $post_ids ) ) { return array(); } $featured_posts = get_posts( array( 'include' => $post_ids, 'posts_per_page' => count( $post_ids ), ) ); return $featured_posts; } /** * Get featured post IDs * * This function will return the an array containing the * post IDs of all featured posts. * * Sets the "featured_content_ids" transient. * * @static * @access public * @since Twenty Fourteen 1.0 * * @return array Array of post IDs. */ public static function get_featured_post_ids() { // Get array of cached results if they exist. $featured_ids = get_transient( 'featured_content_ids' ); if ( false === $featured_ids ) { $settings = self::get_setting(); $term = get_term_by( 'name', $settings['tag-name'], 'post_tag' ); if ( $term ) { // Query for featured posts. $featured_ids = get_posts( array( 'fields' => 'ids', 'numberposts' => self::$max_posts, 'suppress_filters' => false, 'tax_query' => array( array( 'field' => 'term_id', 'taxonomy' => 'post_tag', 'terms' => $term->term_id, ), ), ) ); } // Get sticky posts if no Featured Content exists. if ( ! $featured_ids ) { $featured_ids = self::get_sticky_posts(); } set_transient( 'featured_content_ids', $featured_ids ); } // Ensure correct format before return. return array_map( 'absint', $featured_ids ); } /** * Return an array with IDs of posts maked as sticky. * * @static * @access public * @since Twenty Fourteen 1.0 * * @return array Array of sticky posts. */ public static function get_sticky_posts() { return array_slice( get_option( 'sticky_posts', array() ), 0, self::$max_posts ); } /** * Delete featured content ids transient. * * Hooks in the "save_post" action. * * @see Featured_Content::validate_settings(). * * @static * @access public * @since Twenty Fourteen 1.0 */ public static function delete_transient() { delete_transient( 'featured_content_ids' ); } /** * Exclude featured posts from the home page blog query. * * Filter the home page posts, and remove any featured post ID's from it. * Hooked onto the 'pre_get_posts' action, this changes the parameters of * the query before it gets any posts. * * @static * @access public * @since Twenty Fourteen 1.0 * * @param WP_Query $query WP_Query object. * @return WP_Query Possibly-modified WP_Query. */ public static function pre_get_posts( $query ) { // Bail if not home or not main query. if ( ! $query->is_home() || ! $query->is_main_query() ) { return; } // Bail if the blog page is not the front page. if ( 'posts' !== get_option( 'show_on_front' ) ) { return; } $featured = self::get_featured_post_ids(); // Bail if no featured posts. if ( ! $featured ) { return; } // We need to respect post ids already in the blacklist. $post__not_in = $query->get( 'post__not_in' ); if ( ! empty( $post__not_in ) ) { $featured = array_merge( (array) $post__not_in, $featured ); $featured = array_unique( $featured ); } $query->set( 'post__not_in', $featured ); } /** * Reset tag option when the saved tag is deleted. * * It's important to mention that the transient needs to be deleted, * too. While it may not be obvious by looking at the function alone, * the transient is deleted by Featured_Content::validate_settings(). * * Hooks in the "delete_post_tag" action. * * @see Featured_Content::validate_settings(). * * @static * @access public * @since Twenty Fourteen 1.0 * * @param int $tag_id The term_id of the tag that has been deleted. 
*/ public static function delete_post_tag( $tag_id ) { $settings = self::get_setting(); if ( empty( $settings['tag-id'] ) || $tag_id != $settings['tag-id'] ) { return; } $settings['tag-id'] = 0; $settings = self::validate_settings( $settings ); update_option( 'featured-content', $settings ); } /** * Hide featured tag from displaying when global terms are queried from the front-end. * * Hooks into the "get_terms" filter. * * @static * @access public * @since Twenty Fourteen 1.0 * * @param array $terms List of term objects. This is the return value of get_terms(). * @param array $taxonomies An array of taxonomy slugs. * @return array A filtered array of terms. * * @uses Featured_Content::get_setting() */ public static function hide_featured_term( $terms, $taxonomies, $args ) { // This filter is only appropriate on the front-end. if ( is_admin() ) { return $terms; } // We only want to hide the featured tag. if ( ! in_array( 'post_tag', $taxonomies ) ) { return $terms; } // Bail if no terms were returned. if ( empty( $terms ) ) { return $terms; } // Bail if term objects are unavailable. if ( 'all' != $args['fields'] ) { return $terms; } $settings = self::get_setting(); foreach ( $terms as $order => $term ) { if ( ( $settings['tag-id'] === $term->term_id || $settings['tag-name'] === $term->name ) && 'post_tag' === $term->taxonomy ) { unset( $terms[ $order ] ); } } return $terms; } /** * Hide featured tag from display when terms associated with a post object * are queried from the front-end. * * Hooks into the "get_the_terms" filter. * * @static * @access public * @since Twenty Fourteen 1.0 * * @param array $terms A list of term objects. This is the return value of get_the_terms(). * @param int $id The ID field for the post object that terms are associated with. * @param array $taxonomy An array of taxonomy slugs. * @return array Filtered array of terms. * * @uses Featured_Content::get_setting() */ public static function hide_the_featured_term( $terms, $id, $taxonomy ) { // This filter is only appropriate on the front-end. if ( is_admin() ) { return $terms; } // Make sure we are in the correct taxonomy. if ( 'post_tag' != $taxonomy ) { return $terms; } // No terms? Return early! if ( empty( $terms ) ) { return $terms; } $settings = self::get_setting(); foreach ( $terms as $order => $term ) { if ( ( $settings['tag-id'] === $term->term_id || $settings['tag-name'] === $term->name ) && 'post_tag' === $term->taxonomy ) { unset( $terms[ $term->term_id ] ); } } return $terms; } /** * Register custom setting on the Settings -> Reading screen. * * @static * @access public * @since Twenty Fourteen 1.0 */ public static function register_setting() { register_setting( 'featured-content', 'featured-content', array( __CLASS__, 'validate_settings' ) ); } /** * Add settings to the Customizer. * * @static * @access public * @since Twenty Fourteen 1.0 * * @param WP_Customize_Manager $wp_customize Customizer object. */ public static function customize_register( $wp_customize ) { $wp_customize->add_section( 'featured_content', array( 'title' => __( 'Featured Content', 'twentyfourteen' ), 'description' => sprintf( __( 'Use a <a href="%1$s">tag</a> to feature your posts. 
If no posts match the tag, <a href="%2$s">sticky posts</a> will be displayed instead.', 'twentyfourteen' ), esc_url( add_query_arg( 'tag', _x( 'featured', 'featured content default tag slug', 'twentyfourteen' ), admin_url( 'edit.php' ) ) ), admin_url( 'edit.php?show_sticky=1' ) ), 'priority' => 130, 'theme_supports' => 'featured-content', ) ); // Add Featured Content settings. $wp_customize->add_setting( 'featured-content[tag-name]', array( 'default' => _x( 'featured', 'featured content default tag slug', 'twentyfourteen' ), 'type' => 'option', 'sanitize_js_callback' => array( __CLASS__, 'delete_transient' ), ) ); $wp_customize->add_setting( 'featured-content[hide-tag]', array( 'default' => true, 'type' => 'option', 'sanitize_js_callback' => array( __CLASS__, 'delete_transient' ), ) ); // Add Featured Content controls. $wp_customize->add_control( 'featured-content[tag-name]', array( 'label' => __( 'Tag Name', 'twentyfourteen' ), 'section' => 'featured_content', 'priority' => 20, ) ); $wp_customize->add_control( 'featured-content[hide-tag]', array( 'label' => __( 'Don&rsquo;t display tag on front end.', 'twentyfourteen' ), 'section' => 'featured_content', 'type' => 'checkbox', 'priority' => 30, ) ); } /** * Enqueue the tag suggestion script. * * @static * @access public * @since Twenty Fourteen 1.0 */ public static function enqueue_scripts() { wp_enqueue_script( 'featured-content-suggest', get_template_directory_uri() . '/js/featured-content-admin.js', array( 'jquery', 'suggest' ), '20131022', true ); } /** * Get featured content settings. * * Get all settings recognized by this module. This function * will return all settings whether or not they have been stored * in the database yet. This ensures that all keys are available * at all times. * * In the event that you only require one setting, you may pass * its name as the first parameter to the function and only that * value will be returned. * * @static * @access public * @since Twenty Fourteen 1.0 * * @param string $key The key of a recognized setting. * @return mixed Array of all settings by default. A single value if passed as first parameter. */ public static function get_setting( $key = 'all' ) { $saved = (array) get_option( 'featured-content' ); $defaults = array( 'hide-tag' => 1, 'tag-id' => 0, 'tag-name' => _x( 'featured', 'featured content default tag slug', 'twentyfourteen' ), ); $options = wp_parse_args( $saved, $defaults ); $options = array_intersect_key( $options, $defaults ); if ( 'all' != $key ) { return isset( $options[ $key ] ) ? $options[ $key ] : false; } return $options; } /** * Validate featured content settings. * * Make sure that all user supplied content is in an expected * format before saving to the database. This function will also * delete the transient set in Featured_Content::get_featured_content(). * * @static * @access public * @since Twenty Fourteen 1.0 * * @param array $input Array of settings input. * @return array Validated settings output. */ public static function validate_settings( $input ) { $output = array(); if ( empty( $input['tag-name'] ) ) { $output['tag-id'] = 0; } else { $term = get_term_by( 'name', $input['tag-name'], 'post_tag' ); if ( $term ) { $output['tag-id'] = $term->term_id; } else { $new_tag = wp_create_tag( $input['tag-name'] ); if ( ! is_wp_error( $new_tag ) && isset( $new_tag['term_id'] ) ) { $output['tag-id'] = $new_tag['term_id']; } } $output['tag-name'] = $input['tag-name']; } $output['hide-tag'] = isset( $input['hide-tag'] ) && $input['hide-tag'] ? 
1 : 0; // Delete the featured post ids transient. self::delete_transient(); return $output; } } // Featured_Content Featured_Content::setup();
gpy6529/uivsux
wp-content/themes/twentyfourteen/inc/featured-content.php
PHP
gpl-2.0
15,187
/* * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/err.h> #include <linux/msm_ssbi.h> #include <linux/mfd/core.h> #include <linux/mfd/pm8xxx/pm8018.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/mfd/pm8xxx/regulator.h> #include <linux/leds-pm8xxx.h> /* PMIC PM8018 SSBI Addresses */ #define REG_HWREV 0x002 /* PMIC4 revision */ #define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */ #define REG_MPP_BASE 0x050 #define REG_IRQ_BASE 0x1BB #define REG_RTC_BASE 0x11D #define REG_TEMP_ALARM_CTRL 0x01B #define REG_TEMP_ALARM_PWM 0x09B #define PM8018_VERSION_MASK 0xFFF0 #define PM8018_VERSION_VALUE 0x08F0 #define PM8018_REVISION_MASK 0x000F #define REG_PM8018_PON_CNTRL_3 0x01D #define SINGLE_IRQ_RESOURCE(_name, _irq) \ { \ .name = _name, \ .start = _irq, \ .end = _irq, \ .flags = IORESOURCE_IRQ, \ } struct pm8018 { struct device *dev; struct pm_irq_chip *irq_chip; struct mfd_cell *mfd_regulators; struct pm8xxx_regulator_core_platform_data *regulator_cdata; u32 rev_registers; u8 restart_reason; }; static int pm8018_readb(const struct device *dev, u16 addr, u8 *val) { const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev); const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data; return msm_ssbi_read(pmic->dev->parent, addr, val, 1); } static int pm8018_writeb(const struct device *dev, u16 addr, u8 val) { const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev); const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data; return msm_ssbi_write(pmic->dev->parent, addr, &val, 1); } static int pm8018_read_buf(const struct device *dev, u16 addr, u8 *buf, int cnt) { const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev); const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data; return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt); } static int pm8018_write_buf(const struct device *dev, u16 addr, u8 *buf, int cnt) { const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev); const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data; return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt); } static int pm8018_read_irq_stat(const struct device *dev, int irq) { const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev); const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data; return pm8xxx_get_irq_stat(pmic->irq_chip, irq); } static enum pm8xxx_version pm8018_get_version(const struct device *dev) { const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev); const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data; enum pm8xxx_version version = -ENODEV; if ((pmic->rev_registers & PM8018_VERSION_MASK) == PM8018_VERSION_VALUE) version = PM8XXX_VERSION_8018; return version; } static int pm8018_get_revision(const struct device *dev) { const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev); const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data; return pmic->rev_registers & 
PM8018_REVISION_MASK; } static u8 pm8018_restart_reason(const struct device *dev) { const struct pm8xxx_drvdata *pm8018_drvdata = dev_get_drvdata(dev); const struct pm8018 *pmic = pm8018_drvdata->pm_chip_data; return pmic->restart_reason; } static struct pm8xxx_drvdata pm8018_drvdata = { .pmic_readb = pm8018_readb, .pmic_writeb = pm8018_writeb, .pmic_read_buf = pm8018_read_buf, .pmic_write_buf = pm8018_write_buf, .pmic_read_irq_stat = pm8018_read_irq_stat, .pmic_get_version = pm8018_get_version, .pmic_get_revision = pm8018_get_revision, .pmic_restart_reason = pm8018_restart_reason, }; static const struct resource gpio_cell_resources[] __devinitconst = { [0] = { .start = PM8018_IRQ_BLOCK_BIT(PM8018_GPIO_BLOCK_START, 0), .end = PM8018_IRQ_BLOCK_BIT(PM8018_GPIO_BLOCK_START, 0) + PM8018_NR_GPIOS - 1, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell gpio_cell __devinitdata = { .name = PM8XXX_GPIO_DEV_NAME, .id = -1, .resources = gpio_cell_resources, .num_resources = ARRAY_SIZE(gpio_cell_resources), }; static const struct resource adc_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE(NULL, PM8018_ADC_EOC_USR_IRQ), SINGLE_IRQ_RESOURCE(NULL, PM8018_ADC_BATT_TEMP_WARM_IRQ), SINGLE_IRQ_RESOURCE(NULL, PM8018_ADC_BATT_TEMP_COLD_IRQ), }; static struct mfd_cell adc_cell __devinitdata = { .name = PM8XXX_ADC_DEV_NAME, .id = -1, .resources = adc_cell_resources, .num_resources = ARRAY_SIZE(adc_cell_resources), }; static const struct resource mpp_cell_resources[] __devinitconst = { { .start = PM8018_IRQ_BLOCK_BIT(PM8018_MPP_BLOCK_START, 0), .end = PM8018_IRQ_BLOCK_BIT(PM8018_MPP_BLOCK_START, 0) + PM8018_NR_MPPS - 1, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell mpp_cell __devinitdata = { .name = PM8XXX_MPP_DEV_NAME, .id = -1, .resources = mpp_cell_resources, .num_resources = ARRAY_SIZE(mpp_cell_resources), }; static const struct resource rtc_cell_resources[] __devinitconst = { [0] = SINGLE_IRQ_RESOURCE(NULL, PM8018_RTC_ALARM_IRQ), [1] = { .name = "pmic_rtc_base", .start = REG_RTC_BASE, .end = REG_RTC_BASE, .flags = IORESOURCE_IO, }, }; static struct mfd_cell rtc_cell __devinitdata = { .name = PM8XXX_RTC_DEV_NAME, .id = -1, .resources = rtc_cell_resources, .num_resources = ARRAY_SIZE(rtc_cell_resources), }; static const struct resource resources_pwrkey[] __devinitconst = { SINGLE_IRQ_RESOURCE(NULL, PM8018_PWRKEY_REL_IRQ), SINGLE_IRQ_RESOURCE(NULL, PM8018_PWRKEY_PRESS_IRQ), }; static struct mfd_cell pwrkey_cell __devinitdata = { .name = PM8XXX_PWRKEY_DEV_NAME, .id = -1, .num_resources = ARRAY_SIZE(resources_pwrkey), .resources = resources_pwrkey, }; static struct mfd_cell misc_cell __devinitdata = { .name = PM8XXX_MISC_DEV_NAME, .id = -1, }; static struct mfd_cell debugfs_cell __devinitdata = { .name = "pm8xxx-debug", .id = -1, .platform_data = "pm8018-dbg", .pdata_size = sizeof("pm8018-dbg"), }; static struct mfd_cell pwm_cell __devinitdata = { .name = PM8XXX_PWM_DEV_NAME, .id = -1, }; static struct mfd_cell leds_cell __devinitdata = { .name = PM8XXX_LEDS_DEV_NAME, .id = -1, }; static const struct resource thermal_alarm_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE("pm8018_tempstat_irq", PM8018_TEMPSTAT_IRQ), SINGLE_IRQ_RESOURCE("pm8018_overtemp_irq", PM8018_OVERTEMP_IRQ), }; static struct pm8xxx_tm_core_data thermal_alarm_cdata = { .adc_channel = CHANNEL_DIE_TEMP, .adc_type = PM8XXX_TM_ADC_PM8XXX_ADC, .reg_addr_temp_alarm_ctrl = REG_TEMP_ALARM_CTRL, .reg_addr_temp_alarm_pwm = REG_TEMP_ALARM_PWM, .tm_name = "pm8018_tz", .irq_name_temp_stat = "pm8018_tempstat_irq", 
.irq_name_over_temp = "pm8018_overtemp_irq", }; static struct mfd_cell thermal_alarm_cell __devinitdata = { .name = PM8XXX_TM_DEV_NAME, .id = -1, .resources = thermal_alarm_cell_resources, .num_resources = ARRAY_SIZE(thermal_alarm_cell_resources), .platform_data = &thermal_alarm_cdata, .pdata_size = sizeof(struct pm8xxx_tm_core_data), }; static struct pm8xxx_vreg regulator_data[] = { /* name pc_name ctrl test hpm_min */ PLDO("8018_l2", "8018_l2_pc", 0x0B0, 0x0B1, LDO_50), PLDO("8018_l3", "8018_l3_pc", 0x0B2, 0x0B3, LDO_50), PLDO("8018_l4", "8018_l4_pc", 0x0B4, 0x0B5, LDO_300), PLDO("8018_l5", "8018_l5_pc", 0x0B6, 0x0B7, LDO_150), PLDO("8018_l6", "8018_l6_pc", 0x0B8, 0x0B9, LDO_150), PLDO("8018_l7", "8018_l7_pc", 0x0BA, 0x0BB, LDO_300), NLDO("8018_l8", "8018_l8_pc", 0x0BC, 0x0BD, LDO_150), NLDO1200("8018_l9", 0x0BE, 0x0BF, LDO_1200), NLDO1200("8018_l10", 0x0C0, 0x0C1, LDO_1200), NLDO1200("8018_l11", 0x0C2, 0x0C3, LDO_1200), NLDO1200("8018_l12", 0x0C4, 0x0C5, LDO_1200), PLDO("8018_l13", "8018_l13_pc", 0x0C8, 0x0C9, LDO_50), PLDO("8018_l14", "8018_l14_pc", 0x0CA, 0x0CB, LDO_50), /* name pc_name ctrl test2 clk sleep hpm_min */ SMPS("8018_s1", "8018_s1_pc", 0x1D0, 0x1D5, 0x009, 0x1D2, SMPS_1500), SMPS("8018_s2", "8018_s2_pc", 0x1D8, 0x1DD, 0x00A, 0x1DA, SMPS_1500), SMPS("8018_s3", "8018_s3_pc", 0x1E0, 0x1E5, 0x00B, 0x1E2, SMPS_1500), SMPS("8018_s4", "8018_s4_pc", 0x1E8, 0x1ED, 0x00C, 0x1EA, SMPS_1500), SMPS("8018_s5", "8018_s5_pc", 0x1F0, 0x1F5, 0x00D, 0x1F2, SMPS_1500), /* name pc_name ctrl test */ VS("8018_lvs1", "8018_lvs1_pc", 0x060, 0x061), }; #define MAX_NAME_COMPARISON_LEN 32 static int __devinit match_regulator( struct pm8xxx_regulator_core_platform_data *core_data, const char *name) { int found = 0; int i; for (i = 0; i < ARRAY_SIZE(regulator_data); i++) { if (regulator_data[i].rdesc.name && strncmp(regulator_data[i].rdesc.name, name, MAX_NAME_COMPARISON_LEN) == 0) { core_data->is_pin_controlled = false; core_data->vreg = &regulator_data[i]; found = 1; break; } else if (regulator_data[i].rdesc_pc.name && strncmp(regulator_data[i].rdesc_pc.name, name, MAX_NAME_COMPARISON_LEN) == 0) { core_data->is_pin_controlled = true; core_data->vreg = &regulator_data[i]; found = 1; break; } } if (!found) pr_err("could not find a match for regulator: %s\n", name); return found; } static int __devinit pm8018_add_regulators(const struct pm8018_platform_data *pdata, struct pm8018 *pmic, int irq_base) { int ret = 0; struct mfd_cell *mfd_regulators; struct pm8xxx_regulator_core_platform_data *cdata; int i; /* Add one device for each regulator used by the board. 
*/ mfd_regulators = kzalloc(sizeof(struct mfd_cell) * (pdata->num_regulators), GFP_KERNEL); if (!mfd_regulators) { pr_err("Cannot allocate %d bytes for pm8018 regulator " "mfd cells\n", sizeof(struct mfd_cell) * (pdata->num_regulators)); return -ENOMEM; } cdata = kzalloc(sizeof(struct pm8xxx_regulator_core_platform_data) * pdata->num_regulators, GFP_KERNEL); if (!cdata) { pr_err("Cannot allocate %d bytes for pm8018 regulator " "core data\n", pdata->num_regulators * sizeof(struct pm8xxx_regulator_core_platform_data)); kfree(mfd_regulators); return -ENOMEM; } for (i = 0; i < ARRAY_SIZE(regulator_data); i++) mutex_init(&regulator_data[i].pc_lock); for (i = 0; i < pdata->num_regulators; i++) { if (!pdata->regulator_pdatas[i].init_data.constraints.name) { pr_err("name missing for regulator %d\n", i); ret = -EINVAL; goto bail; } if (!match_regulator(&cdata[i], pdata->regulator_pdatas[i].init_data.constraints.name)) { ret = -ENODEV; goto bail; } cdata[i].pdata = &(pdata->regulator_pdatas[i]); mfd_regulators[i].name = PM8XXX_REGULATOR_DEV_NAME; mfd_regulators[i].id = cdata[i].pdata->id; mfd_regulators[i].platform_data = &cdata[i]; mfd_regulators[i].pdata_size = sizeof(struct pm8xxx_regulator_core_platform_data); } ret = mfd_add_devices(pmic->dev, 0, mfd_regulators, pdata->num_regulators, NULL, irq_base); if (ret) goto bail; pmic->mfd_regulators = mfd_regulators; pmic->regulator_cdata = cdata; return ret; bail: for (i = 0; i < ARRAY_SIZE(regulator_data); i++) mutex_destroy(&regulator_data[i].pc_lock); kfree(mfd_regulators); kfree(cdata); return ret; } static int __devinit pm8018_add_subdevices(const struct pm8018_platform_data *pdata, struct pm8018 *pmic) { int ret = 0, irq_base = 0; struct pm_irq_chip *irq_chip; if (pdata->irq_pdata) { pdata->irq_pdata->irq_cdata.nirqs = PM8018_NR_IRQS; pdata->irq_pdata->irq_cdata.base_addr = REG_IRQ_BASE; irq_base = pdata->irq_pdata->irq_base; irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata); if (IS_ERR(irq_chip)) { pr_err("Failed to init interrupts ret=%ld\n", PTR_ERR(irq_chip)); return PTR_ERR(irq_chip); } pmic->irq_chip = irq_chip; } if (pdata->gpio_pdata) { pdata->gpio_pdata->gpio_cdata.ngpios = PM8018_NR_GPIOS; gpio_cell.platform_data = pdata->gpio_pdata; gpio_cell.pdata_size = sizeof(struct pm8xxx_gpio_platform_data); ret = mfd_add_devices(pmic->dev, 0, &gpio_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add gpio subdevice ret=%d\n", ret); goto bail; } } if (pdata->mpp_pdata) { pdata->mpp_pdata->core_data.nmpps = PM8018_NR_MPPS; pdata->mpp_pdata->core_data.base_addr = REG_MPP_BASE; mpp_cell.platform_data = pdata->mpp_pdata; mpp_cell.pdata_size = sizeof(struct pm8xxx_mpp_platform_data); ret = mfd_add_devices(pmic->dev, 0, &mpp_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add mpp subdevice ret=%d\n", ret); goto bail; } } if (pdata->rtc_pdata) { rtc_cell.platform_data = pdata->rtc_pdata; rtc_cell.pdata_size = sizeof(struct pm8xxx_rtc_platform_data); ret = mfd_add_devices(pmic->dev, 0, &rtc_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add rtc subdevice ret=%d\n", ret); goto bail; } } if (pdata->pwrkey_pdata) { pwrkey_cell.platform_data = pdata->pwrkey_pdata; pwrkey_cell.pdata_size = sizeof(struct pm8xxx_pwrkey_platform_data); ret = mfd_add_devices(pmic->dev, 0, &pwrkey_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add pwrkey subdevice ret=%d\n", ret); goto bail; } } if (pdata->misc_pdata) { misc_cell.platform_data = pdata->misc_pdata; misc_cell.pdata_size = sizeof(struct pm8xxx_misc_platform_data); ret = 
mfd_add_devices(pmic->dev, 0, &misc_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add misc subdevice ret=%d\n", ret); goto bail; } } if (pdata->adc_pdata) { adc_cell.platform_data = pdata->adc_pdata; adc_cell.pdata_size = sizeof(struct pm8xxx_adc_platform_data); ret = mfd_add_devices(pmic->dev, 0, &adc_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add adc subdevice ret=%d\n", ret); } } if (pdata->leds_pdata) { leds_cell.platform_data = pdata->leds_pdata; leds_cell.pdata_size = sizeof(struct pm8xxx_led_platform_data); ret = mfd_add_devices(pmic->dev, 0, &leds_cell, 1, NULL, 0); if (ret) { pr_err("Failed to add leds subdevice ret=%d\n", ret); goto bail; } } ret = mfd_add_devices(pmic->dev, 0, &debugfs_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add debugfs subdevice ret=%d\n", ret); goto bail; } ret = mfd_add_devices(pmic->dev, 0, &pwm_cell, 1, NULL, 0); if (ret) { pr_err("Failed to add pwm subdevice ret=%d\n", ret); goto bail; } if (pdata->num_regulators > 0 && pdata->regulator_pdatas) { ret = pm8018_add_regulators(pdata, pmic, irq_base); if (ret) { pr_err("Failed to add regulator subdevices ret=%d\n", ret); goto bail; } } ret = mfd_add_devices(pmic->dev, 0, &thermal_alarm_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add thermal alarm subdevice, ret=%d\n", ret); goto bail; } return 0; bail: if (pmic->irq_chip) { pm8xxx_irq_exit(pmic->irq_chip); pmic->irq_chip = NULL; } return ret; } static const char * const pm8018_rev_names[] = { [PM8XXX_REVISION_8018_TEST] = "test", [PM8XXX_REVISION_8018_1p0] = "1.0", [PM8XXX_REVISION_8018_2p0] = "2.0", [PM8XXX_REVISION_8018_2p1] = "2.1", }; static int __devinit pm8018_probe(struct platform_device *pdev) { const struct pm8018_platform_data *pdata = pdev->dev.platform_data; const char *revision_name = "unknown"; struct pm8018 *pmic; enum pm8xxx_version version; int revision; int rc; u8 val; if (!pdata) { pr_err("missing platform data\n"); return -EINVAL; } pmic = kzalloc(sizeof(struct pm8018), GFP_KERNEL); if (!pmic) { pr_err("Cannot alloc pm8018 struct\n"); return -ENOMEM; } /* Read PMIC chip revision */ rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val)); if (rc) { pr_err("Failed to read hw rev 1 reg %d:rc=%d\n", REG_HWREV, rc); goto err_read_rev; } pr_info("PMIC revision 1: %02X\n", val); pmic->rev_registers = val; /* Read PMIC chip revision 2 */ rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val)); if (rc) { pr_err("Failed to read hw rev 2 reg %d:rc=%d\n", REG_HWREV_2, rc); goto err_read_rev; } pr_info("PMIC revision 2: %02X\n", val); pmic->rev_registers |= val << BITS_PER_BYTE; pmic->dev = &pdev->dev; pm8018_drvdata.pm_chip_data = pmic; platform_set_drvdata(pdev, &pm8018_drvdata); /* Print out human readable version and revision names. 
*/ version = pm8xxx_get_version(pmic->dev); if (version == PM8XXX_VERSION_8018) { revision = pm8xxx_get_revision(pmic->dev); if (revision >= 0 && revision < ARRAY_SIZE(pm8018_rev_names)) revision_name = pm8018_rev_names[revision]; pr_info("PMIC version: PM8018 rev %s\n", revision_name); } else { WARN_ON(version != PM8XXX_VERSION_8018); } /* Log human readable restart reason */ rc = msm_ssbi_read(pdev->dev.parent, REG_PM8018_PON_CNTRL_3, &val, 1); if (rc) { pr_err("Cannot read restart reason rc=%d\n", rc); goto err_read_rev; } val &= PM8XXX_RESTART_REASON_MASK; pr_info("PMIC Restart Reason: %s\n", pm8xxx_restart_reason_str[val]); pmic->restart_reason = val; rc = pm8018_add_subdevices(pdata, pmic); if (rc) { pr_err("Cannot add subdevices rc=%d\n", rc); goto err; } /* gpio might not work if no irq device is found */ WARN_ON(pmic->irq_chip == NULL); return 0; err: mfd_remove_devices(pmic->dev); platform_set_drvdata(pdev, NULL); kfree(pmic->mfd_regulators); kfree(pmic->regulator_cdata); err_read_rev: kfree(pmic); return rc; } static int __devexit pm8018_remove(struct platform_device *pdev) { struct pm8xxx_drvdata *drvdata; struct pm8018 *pmic = NULL; int i; drvdata = platform_get_drvdata(pdev); if (drvdata) pmic = drvdata->pm_chip_data; if (pmic) { if (pmic->dev) mfd_remove_devices(pmic->dev); if (pmic->irq_chip) { pm8xxx_irq_exit(pmic->irq_chip); pmic->irq_chip = NULL; } if (pmic->mfd_regulators) { for (i = 0; i < ARRAY_SIZE(regulator_data); i++) mutex_destroy(&regulator_data[i].pc_lock); } kfree(pmic->mfd_regulators); kfree(pmic->regulator_cdata); kfree(pmic); } platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver pm8018_driver = { .probe = pm8018_probe, .remove = __devexit_p(pm8018_remove), .driver = { .name = PM8018_CORE_DEV_NAME, .owner = THIS_MODULE, }, }; static int __init pm8018_init(void) { return platform_driver_register(&pm8018_driver); } postcore_initcall(pm8018_init); static void __exit pm8018_exit(void) { platform_driver_unregister(&pm8018_driver); } module_exit(pm8018_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC 8018 core driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:" PM8018_CORE_DEV_NAME);
javelinanddart/android_kernel_htc_pyramid
drivers/mfd/pm8018-core.c
C
gpl-2.0
19,304
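pm8018_add_regulators() in the file above allocates two parallel arrays (the mfd_cell array and the regulator core data) and, once both allocations exist, routes every later failure through a single "bail" label that frees them together. The user-space sketch below illustrates only that unwind shape, under the assumption that it is the part worth studying; add_all(), match(), struct cell, struct cdata and the "known" table are invented names for the example and are not part of the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cell  { const char *name; };
struct cdata { int index; };

static const char *known[] = { "s1", "s2", "l1", "l2" };

/* Look a name up in the table of known entries; return 0 if absent. */
static int match(const char *name, struct cdata *out)
{
    size_t i;

    for (i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
        if (strcmp(known[i], name) == 0) {
            out->index = (int)i;
            return 1;
        }
    }
    return 0;
}

/*
 * Allocate two parallel arrays that must live or die together: any
 * failure after the allocations jumps to one "bail" label that frees
 * both, so there is a single error path to audit.
 */
static int add_all(const char *const *names, size_t count,
                   struct cell **cells_out, struct cdata **cdata_out)
{
    struct cell *cells;
    struct cdata *cdata;
    int ret = 0;
    size_t i;

    cells = calloc(count, sizeof(*cells));
    if (!cells)
        return -1;

    cdata = calloc(count, sizeof(*cdata));
    if (!cdata) {
        ret = -1;
        goto bail;          /* cdata is NULL here; free(NULL) is a no-op */
    }

    for (i = 0; i < count; i++) {
        if (!match(names[i], &cdata[i])) {
            fprintf(stderr, "no match for %s\n", names[i]);
            ret = -1;
            goto bail;      /* unwind both allocations */
        }
        cells[i].name = names[i];
    }

    *cells_out = cells;
    *cdata_out = cdata;
    return 0;

bail:
    free(cdata);
    free(cells);
    return ret;
}

int main(void)
{
    const char *const names[] = { "s1", "l2" };
    struct cell *cells;
    struct cdata *cdata;

    if (add_all(names, 2, &cells, &cdata) == 0) {
        printf("%s -> index %d\n", cells[1].name, cdata[1].index);
        free(cells);
        free(cdata);
    }
    return 0;
}

The single label works because each exit frees exactly what has been allocated so far, and because free(NULL) is a no-op — the same property the kernel relies on with kfree() in the original function.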
/* * snapshot.c Ceph snapshot context utility routines (part of libceph) * * Copyright (C) 2013 Inktank Storage, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <stddef.h> #include <linux/types.h> #include <linux/export.h> #include <linux/ceph/libceph.h> /* * Ceph snapshot contexts are reference counted objects, and the * returned structure holds a single reference. Acquire additional * references with ceph_get_snap_context(), and release them with * ceph_put_snap_context(). When the reference count reaches zero * the entire structure is freed. */ /* * Create a new ceph snapshot context large enough to hold the * indicated number of snapshot ids (which can be 0). Caller has * to fill in snapc->seq and snapc->snaps[0..snap_count-1]. * * Returns a null pointer if an error occurs. */ struct ceph_snap_context *ceph_create_snap_context(u32 snap_count, gfp_t gfp_flags) { struct ceph_snap_context *snapc; size_t size; size = sizeof (struct ceph_snap_context); size += snap_count * sizeof (snapc->snaps[0]); snapc = kzalloc(size, gfp_flags); if (!snapc) return NULL; atomic_set(&snapc->nref, 1); snapc->num_snaps = snap_count; return snapc; } EXPORT_SYMBOL(ceph_create_snap_context); struct ceph_snap_context *ceph_get_snap_context(struct ceph_snap_context *sc) { if (sc) atomic_inc(&sc->nref); return sc; } EXPORT_SYMBOL(ceph_get_snap_context); void ceph_put_snap_context(struct ceph_snap_context *sc) { if (!sc) return; if (atomic_dec_and_test(&sc->nref)) { /*printk(" deleting snap_context %p\n", sc);*/ kfree(sc); } } EXPORT_SYMBOL(ceph_put_snap_context);
MojieBuddhist/linux-1
net/ceph/snapshot.c
C
gpl-2.0
2,254
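ceph_create_snap_context() above hands back an object holding one reference, and ceph_get_snap_context()/ceph_put_snap_context() adjust an atomic count, freeing the structure when it reaches zero. Below is a stand-alone user-space sketch of that same get/put discipline using C11 atomics; struct ctx, ctx_create(), ctx_get() and ctx_put() are invented names for the illustration and are not part of libceph.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * A reference-counted context: created with one reference, extra
 * references taken with ctx_get(), dropped with ctx_put(); the object
 * is freed when the count reaches zero.
 */
struct ctx {
    atomic_int nref;
    unsigned int num_snaps;
    unsigned long long snaps[];   /* flexible array, like snapc->snaps */
};

static struct ctx *ctx_create(unsigned int num_snaps)
{
    struct ctx *c = calloc(1, sizeof(*c) + num_snaps * sizeof(c->snaps[0]));

    if (!c)
        return NULL;
    atomic_init(&c->nref, 1);
    c->num_snaps = num_snaps;
    return c;
}

static struct ctx *ctx_get(struct ctx *c)
{
    if (c)
        atomic_fetch_add(&c->nref, 1);
    return c;
}

static void ctx_put(struct ctx *c)
{
    if (!c)
        return;
    if (atomic_fetch_sub(&c->nref, 1) == 1)   /* last reference dropped */
        free(c);
}

int main(void)
{
    struct ctx *c = ctx_create(3);
    struct ctx *extra = ctx_get(c);   /* second handle to the same object */

    printf("snap slots: %u\n", extra->num_snaps);
    ctx_put(extra);                   /* drop second reference */
    ctx_put(c);                       /* drop last reference, frees */
    return 0;
}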
/* * linux/fs/filesystems.c * * Copyright (C) 1991, 1992 Linus Torvalds * * table of configured filesystems */ #include <linux/syscalls.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/uaccess.h> /* * Handling of filesystem drivers list. * Rules: * Inclusion to/removals from/scanning of list are protected by spinlock. * During the unload module must call unregister_filesystem(). * We can access the fields of list element if: * 1) spinlock is held or * 2) we hold the reference to the module. * The latter can be guaranteed by call of try_module_get(); if it * returned 0 we must skip the element, otherwise we got the reference. * Once the reference is obtained we can drop the spinlock. */ static struct file_system_type *file_systems; static DEFINE_RWLOCK(file_systems_lock); /* WARNING: This can be used only if we _already_ own a reference */ void get_filesystem(struct file_system_type *fs) { __module_get(fs->owner); } void put_filesystem(struct file_system_type *fs) { module_put(fs->owner); } static struct file_system_type **find_filesystem(const char *name, unsigned len) { struct file_system_type **p; for (p=&file_systems; *p; p=&(*p)->next) if (strlen((*p)->name) == len && strncmp((*p)->name, name, len) == 0) break; return p; } /** * register_filesystem - register a new filesystem * @fs: the file system structure * * Adds the file system passed to the list of file systems the kernel * is aware of for mount and other syscalls. Returns 0 on success, * or a negative errno code on an error. * * The &struct file_system_type that is passed is linked into the kernel * structures and must not be freed until the file system has been * unregistered. */ int register_filesystem(struct file_system_type * fs) { int res = 0; struct file_system_type ** p; BUG_ON(strchr(fs->name, '.')); if (fs->next) return -EBUSY; write_lock(&file_systems_lock); p = find_filesystem(fs->name, strlen(fs->name)); if (*p) res = -EBUSY; else *p = fs; write_unlock(&file_systems_lock); return res; } EXPORT_SYMBOL(register_filesystem); /** * unregister_filesystem - unregister a file system * @fs: filesystem to unregister * * Remove a file system that was previously successfully registered * with the kernel. An error is returned if the file system is not found. * Zero is returned on a success. * * Once this function has returned the &struct file_system_type structure * may be freed or reused. 
*/ int unregister_filesystem(struct file_system_type * fs) { struct file_system_type ** tmp; write_lock(&file_systems_lock); tmp = &file_systems; while (*tmp) { if (fs == *tmp) { *tmp = fs->next; fs->next = NULL; write_unlock(&file_systems_lock); synchronize_rcu(); return 0; } tmp = &(*tmp)->next; } write_unlock(&file_systems_lock); return -EINVAL; } EXPORT_SYMBOL(unregister_filesystem); static int fs_index(const char __user * __name) { struct file_system_type * tmp; char * name; int err, index; name = getname(__name); err = PTR_ERR(name); if (IS_ERR(name)) return err; err = -EINVAL; read_lock(&file_systems_lock); for (tmp=file_systems, index=0 ; tmp ; tmp=tmp->next, index++) { if (strcmp(tmp->name,name) == 0) { err = index; break; } } read_unlock(&file_systems_lock); putname(name); return err; } static int fs_name(unsigned int index, char __user * buf) { struct file_system_type * tmp; int len, res; read_lock(&file_systems_lock); for (tmp = file_systems; tmp; tmp = tmp->next, index--) if (index <= 0 && try_module_get(tmp->owner)) break; read_unlock(&file_systems_lock); if (!tmp) return -EINVAL; /* OK, we got the reference, so we can safely block */ len = strlen(tmp->name) + 1; res = copy_to_user(buf, tmp->name, len) ? -EFAULT : 0; put_filesystem(tmp); return res; } static int fs_maxindex(void) { struct file_system_type * tmp; int index; read_lock(&file_systems_lock); for (tmp = file_systems, index = 0 ; tmp ; tmp = tmp->next, index++) ; read_unlock(&file_systems_lock); return index; } /* * Whee.. Weird sysv syscall. */ SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2) { int retval = -EINVAL; switch (option) { case 1: retval = fs_index((const char __user *) arg1); break; case 2: retval = fs_name(arg1, (char __user *) arg2); break; case 3: retval = fs_maxindex(); break; } return retval; } int __init get_filesystem_list(char *buf) { int len = 0; struct file_system_type * tmp; read_lock(&file_systems_lock); tmp = file_systems; while (tmp && len < PAGE_SIZE - 80) { len += sprintf(buf+len, "%s\t%s\n", (tmp->fs_flags & FS_REQUIRES_DEV) ? "" : "nodev", tmp->name); tmp = tmp->next; } read_unlock(&file_systems_lock); return len; } #ifdef CONFIG_PROC_FS static int filesystems_proc_show(struct seq_file *m, void *v) { struct file_system_type * tmp; read_lock(&file_systems_lock); tmp = file_systems; while (tmp) { seq_printf(m, "%s\t%s\n", (tmp->fs_flags & FS_REQUIRES_DEV) ? "" : "nodev", tmp->name); tmp = tmp->next; } read_unlock(&file_systems_lock); return 0; } static int filesystems_proc_open(struct inode *inode, struct file *file) { return single_open(file, filesystems_proc_show, NULL); } static const struct file_operations filesystems_proc_fops = { .open = filesystems_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_filesystems_init(void) { proc_create("filesystems", 0, NULL, &filesystems_proc_fops); return 0; } module_init(proc_filesystems_init); #endif static struct file_system_type *__get_fs_type(const char *name, int len) { struct file_system_type *fs; read_lock(&file_systems_lock); fs = *(find_filesystem(name, len)); if (fs && !try_module_get(fs->owner)) fs = NULL; read_unlock(&file_systems_lock); return fs; } struct file_system_type *get_fs_type(const char *name) { struct file_system_type *fs; const char *dot = strchr(name, '.'); int len = dot ? 
dot - name : strlen(name); fs = __get_fs_type(name, len); if (!fs && (request_module("%.*s", len, name) == 0)) fs = __get_fs_type(name, len); if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { put_filesystem(fs); fs = NULL; } return fs; } EXPORT_SYMBOL(get_fs_type);
Phreya/phreya_kernel_bacon_cm
fs/filesystems.c
C
gpl-2.0
6,486
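find_filesystem() above returns a pointer to the "next" link itself rather than to a node, so register_filesystem() and unregister_filesystem() can splice at that position without keeping a separate "previous" pointer. The following is a minimal user-space rendering of that pointer-to-pointer idiom; find_fs(), register_fs() and unregister_fs() are made-up names for the sketch, and it deliberately leaves out the rwlock the kernel takes around the list.

#include <stdio.h>
#include <string.h>

struct fs_type {
    const char *name;
    struct fs_type *next;
};

/*
 * Walk the list through a pointer-to-pointer so the caller can both
 * test for a match (*p != NULL) and splice a node in or out at that
 * exact position.
 */
static struct fs_type **find_fs(struct fs_type **head, const char *name)
{
    struct fs_type **p;

    for (p = head; *p; p = &(*p)->next)
        if (strcmp((*p)->name, name) == 0)
            break;
    return p;
}

static int register_fs(struct fs_type **head, struct fs_type *fs)
{
    struct fs_type **p = find_fs(head, fs->name);

    if (*p)
        return -1;      /* already registered */
    fs->next = NULL;
    *p = fs;            /* append at the tail slot the walk stopped on */
    return 0;
}

static int unregister_fs(struct fs_type **head, struct fs_type *fs)
{
    struct fs_type **p = find_fs(head, fs->name);

    if (*p != fs)
        return -1;      /* not on the list */
    *p = fs->next;      /* unlink in place */
    fs->next = NULL;
    return 0;
}

int main(void)
{
    struct fs_type *head = NULL;
    struct fs_type ext4 = { "ext4", NULL }, xfs = { "xfs", NULL };

    register_fs(&head, &ext4);
    register_fs(&head, &xfs);
    unregister_fs(&head, &ext4);
    printf("first entry: %s\n", head ? head->name : "(none)");
    return 0;
}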
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net> */ #include <linux/export.h> #include <linux/ssb/ssb.h> #include <linux/ssb/ssb_driver_chipcommon.h> #include <linux/ssb/ssb_driver_extif.h> #include <asm/mach-bcm47xx/bcm47xx.h> #include <asm/mach-bcm47xx/gpio.h> #if (BCM47XX_CHIPCO_GPIO_LINES > BCM47XX_EXTIF_GPIO_LINES) static DECLARE_BITMAP(gpio_in_use, BCM47XX_CHIPCO_GPIO_LINES); #else static DECLARE_BITMAP(gpio_in_use, BCM47XX_EXTIF_GPIO_LINES); #endif int gpio_request(unsigned gpio, const char *tag) { switch (bcm47xx_bus_type) { #ifdef CONFIG_BCM47XX_SSB case BCM47XX_BUS_TYPE_SSB: if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) && ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES)) return -EINVAL; if (ssb_extif_available(&bcm47xx_bus.ssb.extif) && ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES)) return -EINVAL; if (test_and_set_bit(gpio, gpio_in_use)) return -EBUSY; return 0; #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: if (gpio >= BCM47XX_CHIPCO_GPIO_LINES) return -EINVAL; if (test_and_set_bit(gpio, gpio_in_use)) return -EBUSY; return 0; #endif } return -EINVAL; } EXPORT_SYMBOL(gpio_request); void gpio_free(unsigned gpio) { switch (bcm47xx_bus_type) { #ifdef CONFIG_BCM47XX_SSB case BCM47XX_BUS_TYPE_SSB: if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) && ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES)) return; if (ssb_extif_available(&bcm47xx_bus.ssb.extif) && ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES)) return; clear_bit(gpio, gpio_in_use); return; #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: if (gpio >= BCM47XX_CHIPCO_GPIO_LINES) return; clear_bit(gpio, gpio_in_use); return; #endif } } EXPORT_SYMBOL(gpio_free); int gpio_to_irq(unsigned gpio) { switch (bcm47xx_bus_type) { #ifdef CONFIG_BCM47XX_SSB case BCM47XX_BUS_TYPE_SSB: if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco)) return ssb_mips_irq(bcm47xx_bus.ssb.chipco.dev) + 2; else if (ssb_extif_available(&bcm47xx_bus.ssb.extif)) return ssb_mips_irq(bcm47xx_bus.ssb.extif.dev) + 2; else return -EINVAL; #endif #ifdef CONFIG_BCM47XX_BCMA case BCM47XX_BUS_TYPE_BCMA: return bcma_core_mips_irq(bcm47xx_bus.bcma.bus.drv_cc.core) + 2; #endif } return -EINVAL; } EXPORT_SYMBOL_GPL(gpio_to_irq);
arnet95/linux
arch/mips/bcm47xx/gpio.c
C
gpl-2.0
2,516
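gpio_request()/gpio_free() above track allocated lines in a bitmap: range-check the line, test-and-set its bit, and return -EBUSY if it was already claimed. The sketch below shows the same bookkeeping in ordinary user-space C; it is single-threaded, so the kernel's atomic test_and_set_bit()/clear_bit() pair is replaced by plain bit arithmetic, and NR_LINES, line_request() and line_free() are invented names.

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define NR_LINES      32
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long in_use[(NR_LINES + BITS_PER_LONG - 1) / BITS_PER_LONG];

/*
 * Claim one line by setting its bit: -EINVAL if out of range,
 * -EBUSY if the bit was already set, 0 on success.
 */
static int line_request(unsigned int line)
{
    unsigned long mask;

    if (line >= NR_LINES)
        return -EINVAL;
    mask = 1UL << (line % BITS_PER_LONG);
    if (in_use[line / BITS_PER_LONG] & mask)
        return -EBUSY;
    in_use[line / BITS_PER_LONG] |= mask;
    return 0;
}

static void line_free(unsigned int line)
{
    if (line >= NR_LINES)
        return;
    in_use[line / BITS_PER_LONG] &= ~(1UL << (line % BITS_PER_LONG));
}

int main(void)
{
    printf("request 5: %d\n", line_request(5));   /* 0 */
    printf("request 5: %d\n", line_request(5));   /* -EBUSY */
    line_free(5);
    printf("request 5: %d\n", line_request(5));   /* 0 again */
    return 0;
}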
/* * Transparent proxy support for Linux/iptables * * Copyright (c) 2006-2010 BalaBit IT Ltd. * Author: Balazs Scheidler, Krisztian Kovacs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <net/checksum.h> #include <net/udp.h> #include <net/inet_sock.h> #include <linux/inetdevice.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/ipv4/nf_defrag_ipv4.h> #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) #define XT_TPROXY_HAVE_IPV6 1 #include <net/if_inet6.h> #include <net/addrconf.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> #endif #include <net/netfilter/nf_tproxy_core.h> #include <linux/netfilter/xt_TPROXY.h> static bool tproxy_sk_is_transparent(struct sock *sk) { if (sk->sk_state != TCP_TIME_WAIT) { if (inet_sk(sk)->transparent) return true; sock_put(sk); } else { if (inet_twsk(sk)->tw_transparent) return true; inet_twsk_put(inet_twsk(sk)); } return false; } static inline __be32 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) { struct in_device *indev; __be32 laddr; if (user_laddr) return user_laddr; laddr = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); for_primary_ifa(indev) { laddr = ifa->ifa_local; break; } endfor_ifa(indev); rcu_read_unlock(); return laddr ? laddr : daddr; } /** * tproxy_handle_time_wait4() - handle IPv4 TCP TIME_WAIT reopen redirections * @skb: The skb being processed. * @laddr: IPv4 address to redirect to or zero. * @lport: TCP port to redirect to or zero. * @sk: The TIME_WAIT TCP socket found by the lookup. * * We have to handle SYN packets arriving to TIME_WAIT sockets * differently: instead of reopening the connection we should rather * redirect the new connection to the proxy if there's a listener * socket present. * * tproxy_handle_time_wait4() consumes the socket reference passed in. * * Returns the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. */ static struct sock * tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport, struct sock *sk) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr _hdr, *hp; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) { inet_twsk_put(inet_twsk(sk)); return NULL; } if (hp->syn && !hp->rst && !hp->ack && !hp->fin) { /* SYN to a TIME_WAIT socket, we'd rather redirect it * to a listener socket if there's one */ struct sock *sk2; sk2 = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, laddr ? laddr : iph->daddr, hp->source, lport ? 
lport : hp->dest, skb->dev, NFT_LOOKUP_LISTENER); if (sk2) { inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row); inet_twsk_put(inet_twsk(sk)); sk = sk2; } } return sk; } static unsigned int tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, u_int32_t mark_mask, u_int32_t mark_value) { const struct iphdr *iph = ip_hdr(skb); struct udphdr _hdr, *hp; struct sock *sk; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) return NF_DROP; /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, iph->daddr, hp->source, hp->dest, skb->dev, NFT_LOOKUP_ESTABLISHED); laddr = tproxy_laddr4(skb, laddr, iph->daddr); if (!lport) lport = hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = tproxy_handle_time_wait4(skb, laddr, lport, sk); else if (!sk) /* no, there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, iph->saddr, laddr, hp->source, lport, skb->dev, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~mark_mask) ^ mark_value; pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->daddr, ntohs(hp->dest), &laddr, ntohs(lport), skb->mark); nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } pr_debug("no socket, dropping: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->saddr, ntohs(hp->source), &iph->daddr, ntohs(hp->dest), skb->mark); return NF_DROP; } static unsigned int tproxy_tg4_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info *tgi = par->targinfo; return tproxy_tg4(skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value); } static unsigned int tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); } #ifdef XT_TPROXY_HAVE_IPV6 static inline const struct in6_addr * tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, const struct in6_addr *daddr) { struct inet6_dev *indev; struct inet6_ifaddr *ifa; struct in6_addr *laddr; if (!ipv6_addr_any(user_laddr)) return user_laddr; laddr = NULL; rcu_read_lock(); indev = __in6_dev_get(skb->dev); if (indev) list_for_each_entry(ifa, &indev->addr_list, if_list) { if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) continue; laddr = &ifa->addr; break; } rcu_read_unlock(); return laddr ? laddr : daddr; } /** * tproxy_handle_time_wait6() - handle IPv6 TCP TIME_WAIT reopen redirections * @skb: The skb being processed. * @tproto: Transport protocol. * @thoff: Transport protocol header offset. * @par: Iptables target parameters. * @sk: The TIME_WAIT TCP socket found by the lookup. * * We have to handle SYN packets arriving to TIME_WAIT sockets * differently: instead of reopening the connection we should rather * redirect the new connection to the proxy if there's a listener * socket present. 
* * tproxy_handle_time_wait6() consumes the socket reference passed in. * * Returns the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. */ static struct sock * tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, const struct xt_action_param *par, struct sock *sk) { const struct ipv6hdr *iph = ipv6_hdr(skb); struct tcphdr _hdr, *hp; const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { inet_twsk_put(inet_twsk(sk)); return NULL; } if (hp->syn && !hp->rst && !hp->ack && !hp->fin) { /* SYN to a TIME_WAIT socket, we'd rather redirect it * to a listener socket if there's one */ struct sock *sk2; sk2 = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr), hp->source, tgi->lport ? tgi->lport : hp->dest, skb->dev, NFT_LOOKUP_LISTENER); if (sk2) { inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row); inet_twsk_put(inet_twsk(sk)); sk = sk2; } } return sk; } static unsigned int tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct ipv6hdr *iph = ipv6_hdr(skb); const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; struct udphdr _hdr, *hp; struct sock *sk; const struct in6_addr *laddr; __be16 lport; int thoff; int tproto; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); if (tproto < 0) { pr_debug("unable to find transport header in IPv6 packet, dropping\n"); return NF_DROP; } hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { pr_debug("unable to grab transport header contents in IPv6 packet, dropping\n"); return NF_DROP; } /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, &iph->daddr, hp->source, hp->dest, par->in, NFT_LOOKUP_ESTABLISHED); laddr = tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr); lport = tgi->lport ? 
tgi->lport : hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = tproxy_handle_time_wait6(skb, tproto, thoff, par, sk); else if (!sk) /* no there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, &iph->saddr, laddr, hp->source, lport, par->in, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), laddr, ntohs(lport), skb->mark); nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } pr_debug("no socket, dropping: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), &iph->daddr, ntohs(hp->dest), skb->mark); return NF_DROP; } static int tproxy_tg6_check(const struct xt_tgchk_param *par) { const struct ip6t_ip6 *i = par->entryinfo; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->flags & IP6T_INV_PROTO)) return 0; pr_info("Can be used only in combination with " "either -p tcp or -p udp\n"); return -EINVAL; } #endif static int tproxy_tg4_check(const struct xt_tgchk_param *par) { const struct ipt_ip *i = par->entryinfo; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->invflags & IPT_INV_PROTO)) return 0; pr_info("Can be used only in combination with " "either -p tcp or -p udp\n"); return -EINVAL; } static struct xt_target tproxy_tg_reg[] __read_mostly = { { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v0, .revision = 0, .targetsize = sizeof(struct xt_tproxy_target_info), .checkentry = tproxy_tg4_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg4_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #ifdef XT_TPROXY_HAVE_IPV6 { .name = "TPROXY", .family = NFPROTO_IPV6, .table = "mangle", .target = tproxy_tg6_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg6_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #endif }; static int __init tproxy_tg_init(void) { nf_defrag_ipv4_enable(); #ifdef XT_TPROXY_HAVE_IPV6 nf_defrag_ipv6_enable(); #endif return xt_register_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } static void __exit tproxy_tg_exit(void) { xt_unregister_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } module_init(tproxy_tg_init); module_exit(tproxy_tg_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Balazs Scheidler, Krisztian Kovacs"); MODULE_DESCRIPTION("Netfilter transparent proxy (TPROXY) target module."); MODULE_ALIAS("ipt_TPROXY"); MODULE_ALIAS("ip6t_TPROXY");
N3uTr0nRom/N3uTr0n_kernel
net/netfilter/xt_TPROXY.c
C
gpl-2.0
12,321
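Both tproxy_tg4() and tproxy_tg6_v1() above refuse to act on a packet whose transport header cannot be pulled in full: skb_header_pointer() returning NULL means NF_DROP. The fragment below is a hedged user-space approximation of that bounds-checked copy; header_pointer() and struct udp_hdr here are illustrative stand-ins, not the kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct udp_hdr {
    uint16_t source;
    uint16_t dest;
    uint16_t len;
    uint16_t check;
};

/*
 * Copy a header out of a packet buffer only if the packet is long
 * enough to contain it, returning NULL otherwise.
 */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
                                  size_t offset, size_t hdr_len, void *buf)
{
    if (offset > pkt_len || pkt_len - offset < hdr_len)
        return NULL;
    memcpy(buf, pkt + offset, hdr_len);
    return buf;
}

int main(void)
{
    uint8_t pkt[64] = { 0 };
    struct udp_hdr hdr;

    if (header_pointer(pkt, sizeof(pkt), 20, sizeof(hdr), &hdr))
        printf("copied a %zu-byte header at offset 20\n", sizeof(hdr));

    /* A truncated packet fails the bounds check and would be dropped. */
    if (!header_pointer(pkt, 22, 20, sizeof(hdr), &hdr))
        puts("truncated packet rejected");
    return 0;
}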
/* * linux/arch/arm/plat-omap/debug-devices.c * * Copyright (C) 2005 Nokia Corporation * Modified from mach-omap2/board-h4.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/smc91x.h> #include <mach/hardware.h> #include <plat/board.h> /* Many OMAP development platforms reuse the same "debug board"; these * platforms include H2, H3, H4, and Perseus2. */ static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { [0] = { .flags = IORESOURCE_MEM, }, [1] = { .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = -1, .dev = { .platform_data = &smc91x_info, }, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static struct resource led_resources[] = { [0] = { .flags = IORESOURCE_MEM, }, }; static struct platform_device led_device = { .name = "omap_dbg_led", .id = -1, .num_resources = ARRAY_SIZE(led_resources), .resource = led_resources, }; static struct platform_device *debug_devices[] __initdata = { &smc91x_device, &led_device, /* ps2 kbd + mouse ports */ /* 4 extra uarts */ /* 6 input dip switches */ /* 8 output pins */ }; int __init debug_card_init(u32 addr, unsigned gpio) { int status; smc91x_resources[0].start = addr + 0x300; smc91x_resources[0].end = addr + 0x30f; smc91x_resources[1].start = gpio_to_irq(gpio); smc91x_resources[1].end = gpio_to_irq(gpio); status = gpio_request(gpio, "SMC91x irq"); if (status < 0) { printk(KERN_ERR "GPIO%d unavailable for smc91x IRQ\n", gpio); return status; } gpio_direction_input(gpio); led_resources[0].start = addr; led_resources[0].end = addr + SZ_4K - 1; return platform_add_devices(debug_devices, ARRAY_SIZE(debug_devices)); }
NamelessRom/android_kernel_samsung_jf
arch/arm/plat-omap/debug-devices.c
C
gpl-2.0
2,192
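debug_card_init() above patches a statically defined resource table (the SMC91x I/O window and IRQ, the LED register block) with values known only at run time, then registers the devices. A toy version of that fill-in-then-register step follows; struct resource is redefined locally and the registration call is replaced by a stub printout, so none of these names come from the platform code itself.

#include <stdio.h>

struct resource { unsigned long start, end; };

/* Static table whose window and IRQ are filled in from the board's base. */
static struct resource eth_mem = { 0, 0 };
static struct resource eth_irq = { 0, 0 };
static struct resource led_mem = { 0, 0 };

static int card_init(unsigned long addr, unsigned long irq)
{
    eth_mem.start = addr + 0x300;
    eth_mem.end   = addr + 0x30f;
    eth_irq.start = eth_irq.end = irq;
    led_mem.start = addr;
    led_mem.end   = addr + 0x0fff;

    /* Stand-in for platform_add_devices(): just report what was wired up. */
    printf("eth window %#lx-%#lx, irq %lu\n",
           eth_mem.start, eth_mem.end, eth_irq.start);
    printf("led window %#lx-%#lx\n", led_mem.start, led_mem.end);
    return 0;
}

int main(void)
{
    return card_init(0x08000000UL, 31);
}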
/* * i8259 interrupt controller driver. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/init.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/i8259.h> #include <asm/prom.h> static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */ static unsigned char cached_8259[2] = { 0xff, 0xff }; #define cached_A1 (cached_8259[0]) #define cached_21 (cached_8259[1]) static DEFINE_RAW_SPINLOCK(i8259_lock); static struct irq_domain *i8259_host; /* * Acknowledge the IRQ using either the PCI host bridge's interrupt * acknowledge feature or poll. How i8259_init() is called determines * which is called. It should be noted that polling is broken on some * IBM and Motorola PReP boxes so we must use the int-ack feature on them. */ unsigned int i8259_irq(void) { int irq; int lock = 0; /* Either int-ack or poll for the IRQ */ if (pci_intack) irq = readb(pci_intack); else { raw_spin_lock(&i8259_lock); lock = 1; /* Perform an interrupt acknowledge cycle on controller 1. */ outb(0x0C, 0x20); /* prepare for poll */ irq = inb(0x20) & 7; if (irq == 2 ) { /* * Interrupt is cascaded so perform interrupt * acknowledge on controller 2. */ outb(0x0C, 0xA0); /* prepare for poll */ irq = (inb(0xA0) & 7) + 8; } } if (irq == 7) { /* * This may be a spurious interrupt. * * Read the interrupt status register (ISR). If the most * significant bit is not set then there is no valid * interrupt. */ if (!pci_intack) outb(0x0B, 0x20); /* ISR register */ if(~inb(0x20) & 0x80) irq = NO_IRQ; } else if (irq == 0xff) irq = NO_IRQ; if (lock) raw_spin_unlock(&i8259_lock); return irq; } static void i8259_mask_and_ack_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&i8259_lock, flags); if (d->irq > 7) { cached_A1 |= 1 << (d->irq-8); inb(0xA1); /* DUMMY */ outb(cached_A1, 0xA1); outb(0x20, 0xA0); /* Non-specific EOI */ outb(0x20, 0x20); /* Non-specific EOI to cascade */ } else { cached_21 |= 1 << d->irq; inb(0x21); /* DUMMY */ outb(cached_21, 0x21); outb(0x20, 0x20); /* Non-specific EOI */ } raw_spin_unlock_irqrestore(&i8259_lock, flags); } static void i8259_set_irq_mask(int irq_nr) { outb(cached_A1,0xA1); outb(cached_21,0x21); } static void i8259_mask_irq(struct irq_data *d) { unsigned long flags; pr_debug("i8259_mask_irq(%d)\n", d->irq); raw_spin_lock_irqsave(&i8259_lock, flags); if (d->irq < 8) cached_21 |= 1 << d->irq; else cached_A1 |= 1 << (d->irq-8); i8259_set_irq_mask(d->irq); raw_spin_unlock_irqrestore(&i8259_lock, flags); } static void i8259_unmask_irq(struct irq_data *d) { unsigned long flags; pr_debug("i8259_unmask_irq(%d)\n", d->irq); raw_spin_lock_irqsave(&i8259_lock, flags); if (d->irq < 8) cached_21 &= ~(1 << d->irq); else cached_A1 &= ~(1 << (d->irq-8)); i8259_set_irq_mask(d->irq); raw_spin_unlock_irqrestore(&i8259_lock, flags); } static struct irq_chip i8259_pic = { .name = "i8259", .irq_mask = i8259_mask_irq, .irq_disable = i8259_mask_irq, .irq_unmask = i8259_unmask_irq, .irq_mask_ack = i8259_mask_and_ack_irq, }; static struct resource pic1_iores = { .name = "8259 (master)", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY, }; static struct resource pic2_iores = { .name = "8259 (slave)", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY, }; static struct 
resource pic_edgectrl_iores = { .name = "8259 edge control", .start = 0x4d0, .end = 0x4d1, .flags = IORESOURCE_BUSY, }; static int i8259_host_match(struct irq_domain *h, struct device_node *node) { return h->of_node == NULL || h->of_node == node; } static int i8259_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); /* We block the internal cascade */ if (hw == 2) irq_set_status_flags(virq, IRQ_NOREQUEST); /* We use the level handler only for now, we might want to * be more cautious here but that works for now */ irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq); return 0; } static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { static unsigned char map_isa_senses[4] = { IRQ_TYPE_LEVEL_LOW, IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_EDGE_RISING, }; *out_hwirq = intspec[0]; if (intsize > 1 && intspec[1] < 4) *out_flags = map_isa_senses[intspec[1]]; else *out_flags = IRQ_TYPE_NONE; return 0; } static struct irq_domain_ops i8259_host_ops = { .match = i8259_host_match, .map = i8259_host_map, .xlate = i8259_host_xlate, }; struct irq_domain *i8259_get_host(void) { return i8259_host; } /** * i8259_init - Initialize the legacy controller * @node: device node of the legacy PIC (can be NULL, but then, it will match * all interrupts, so beware) * @intack_addr: PCI interrupt acknowledge (real) address which will return * the active irq from the 8259 */ void i8259_init(struct device_node *node, unsigned long intack_addr) { unsigned long flags; /* initialize the controller */ raw_spin_lock_irqsave(&i8259_lock, flags); /* Mask all first */ outb(0xff, 0xA1); outb(0xff, 0x21); /* init master interrupt controller */ outb(0x11, 0x20); /* Start init sequence */ outb(0x00, 0x21); /* Vector base */ outb(0x04, 0x21); /* edge tiggered, Cascade (slave) on IRQ2 */ outb(0x01, 0x21); /* Select 8086 mode */ /* init slave interrupt controller */ outb(0x11, 0xA0); /* Start init sequence */ outb(0x08, 0xA1); /* Vector base */ outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */ outb(0x01, 0xA1); /* Select 8086 mode */ /* That thing is slow */ udelay(100); /* always read ISR */ outb(0x0B, 0x20); outb(0x0B, 0xA0); /* Unmask the internal cascade */ cached_21 &= ~(1 << 2); /* Set interrupt masks */ outb(cached_A1, 0xA1); outb(cached_21, 0x21); raw_spin_unlock_irqrestore(&i8259_lock, flags); /* create a legacy host */ i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL); if (i8259_host == NULL) { printk(KERN_ERR "i8259: failed to allocate irq host !\n"); return; } /* reserve our resources */ /* XXX should we continue doing that ? it seems to cause problems * with further requesting of PCI IO resources for that range... * need to look into it. */ request_resource(&ioport_resource, &pic1_iores); request_resource(&ioport_resource, &pic2_iores); request_resource(&ioport_resource, &pic_edgectrl_iores); if (intack_addr != 0) pci_intack = ioremap(intack_addr, 1); printk(KERN_INFO "i8259 legacy interrupt controller initialized\n"); }
kinsamanka/linux
arch/powerpc/sysdev/i8259.c
C
gpl-2.0
7,105
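The i8259 driver above never reads the mask registers back; it keeps cached_21/cached_A1 shadow bytes, flips bits there, and writes both copies out in one place. The fragment below imitates that bookkeeping in user space, with outb() replaced by a stub printf and cached_master/cached_slave as invented names; IRQ 2 is the cascade to the slave controller, exactly as in i8259_init().

#include <stdint.h>
#include <stdio.h>

/* Shadow copies of the two mask registers, all lines masked at start. */
static uint8_t cached_master = 0xff;   /* IRQs 0-7  */
static uint8_t cached_slave  = 0xff;   /* IRQs 8-15 */

static void outb_stub(uint8_t val, uint16_t port)
{
    printf("outb 0x%02x -> port 0x%02x\n", val, port);
}

/* Write both shadow bytes out, mirroring i8259_set_irq_mask(). */
static void write_masks(void)
{
    outb_stub(cached_slave, 0xA1);
    outb_stub(cached_master, 0x21);
}

static void irq_mask(unsigned int irq)
{
    if (irq < 8)
        cached_master |= (uint8_t)(1u << irq);
    else if (irq < 16)
        cached_slave |= (uint8_t)(1u << (irq - 8));
    write_masks();
}

static void irq_unmask(unsigned int irq)
{
    if (irq < 8)
        cached_master &= (uint8_t)~(1u << irq);
    else if (irq < 16)
        cached_slave &= (uint8_t)~(1u << (irq - 8));
    write_masks();
}

int main(void)
{
    irq_unmask(2);    /* internal cascade to the slave controller */
    irq_unmask(12);   /* a slave line */
    irq_mask(12);
    return 0;
}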
/* * Scatterlist Cryptographic API. * * Procfs information. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/init.h> //#include <linux/crypto.h> #include "rtl_crypto.h" #include <linux/rwsem.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "internal.h" extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; static void *c_start(struct seq_file *m, loff_t *pos) { struct list_head *v; loff_t n = *pos; down_read(&crypto_alg_sem); list_for_each(v, &crypto_alg_list) if (!n--) return list_entry(v, struct crypto_alg, cra_list); return NULL; } static void *c_next(struct seq_file *m, void *p, loff_t *pos) { struct list_head *v = p; (*pos)++; v = v->next; return (v == &crypto_alg_list) ? NULL : list_entry(v, struct crypto_alg, cra_list); } static void c_stop(struct seq_file *m, void *p) { up_read(&crypto_alg_sem); } static int c_show(struct seq_file *m, void *p) { struct crypto_alg *alg = (struct crypto_alg *)p; seq_printf(m, "name : %s\n", alg->cra_name); seq_printf(m, "module : %s\n", (alg->cra_module ? alg->cra_module->name : "kernel")); switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_CIPHER: seq_printf(m, "type : cipher\n"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", alg->cra_cipher.cia_min_keysize); seq_printf(m, "max keysize : %u\n", alg->cra_cipher.cia_max_keysize); break; case CRYPTO_ALG_TYPE_DIGEST: seq_printf(m, "type : digest\n"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "digestsize : %u\n", alg->cra_digest.dia_digestsize); break; case CRYPTO_ALG_TYPE_COMPRESS: seq_printf(m, "type : compression\n"); break; default: seq_printf(m, "type : unknown\n"); break; } seq_putc(m, '\n'); return 0; } static struct seq_operations crypto_seq_ops = { .start = c_start, .next = c_next, .stop = c_stop, .show = c_show }; static int crypto_info_open(struct inode *inode, struct file *file) { return seq_open(file, &crypto_seq_ops); } static struct file_operations proc_crypto_ops = { .open = crypto_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; void __init crypto_init_proc(void) { struct proc_dir_entry *proc; proc = create_proc_entry("crypto", 0, NULL); if (proc) proc->proc_fops = &proc_crypto_ops; }
santod/android_GE_kernel_htc_m7vzw
drivers/staging/rtl8192u/ieee80211/proc.c
C
gpl-2.0
2,766
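The /proc/crypto code above walks crypto_alg_list through the four seq_file callbacks: c_start() positions a cursor, c_next() advances it, c_show() prints one algorithm, and c_stop() drops the semaphore. Below is a self-contained user-space imitation of that start/next/stop/show protocol over a tiny hard-coded list; there is no lock to take, and struct alg plus alg_list are invented for the sketch rather than taken from the crypto API.

#include <stdio.h>

struct alg {
    const char *name;
    unsigned int blocksize;
    struct alg *next;
};

static struct alg alg_sha1 = { "sha1", 64, NULL };
static struct alg alg_des  = { "des",   8, &alg_sha1 };
static struct alg alg_aes  = { "aes",  16, &alg_des };
static struct alg *alg_list = &alg_aes;

/* Position the cursor on record number 'pos', or return NULL past the end. */
static struct alg *c_start(long pos)
{
    struct alg *a = alg_list;

    while (a && pos-- > 0)
        a = a->next;
    return a;
}

/* Advance the cursor and bump the position counter. */
static struct alg *c_next(struct alg *a, long *pos)
{
    ++*pos;
    return a->next;
}

static void c_stop(void)
{
    /* nothing to unlock in this sketch */
}

/* Print one record, in the key : value style of /proc/crypto. */
static void c_show(const struct alg *a)
{
    printf("name      : %s\n", a->name);
    printf("blocksize : %u\n\n", a->blocksize);
}

int main(void)
{
    long pos = 0;
    struct alg *a;

    for (a = c_start(pos); a; a = c_next(a, &pos))
        c_show(a);
    c_stop();
    return 0;
}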