Dataset schema (one row per source file):

  field          type     range / classes
  -------------  -------  ------------------
  code           string   lengths 3 – 1.05M
  repo_name      string   lengths 4 – 116
  path           string   lengths 3 – 942
  language       string   30 classes
  license        string   15 classes
  size           int32    3 – 1.05M
  line_mean      float64  0.5 – 100
  line_max       int64    1 – 1k
  alpha_frac     float64  0.25 – 1
  autogenerated  bool     1 class
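The numeric columns are simple per-file statistics, but their exact definitions are not stated in this dump. The C sketch below shows one plausible reading (line_mean as mean line length in characters, line_max as the longest line, alpha_frac as the fraction of alphabetic characters); treat the formulas as assumptions rather than the dataset's actual recipe.

#include <ctype.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical reimplementation of the per-file statistics. */
static void file_stats(const char *code,
                       double *line_mean, size_t *line_max, double *alpha_frac)
{
    size_t total = strlen(code);
    size_t lines = 0, cur = 0, max = 0, alpha = 0;

    for (size_t i = 0; i < total; i++) {
        if (code[i] == '\n') {
            if (cur > max) max = cur;
            lines++;
            cur = 0;
        } else {
            cur++;
        }
        if (isalpha((unsigned char)code[i]))
            alpha++;
    }
    if (cur > 0) {            /* count a final line with no trailing '\n' */
        if (cur > max) max = cur;
        lines++;
    }

    *line_mean  = lines ? (double)total / (double)lines : 0.0;
    *line_max   = max;
    *alpha_frac = total ? (double)alpha / (double)total : 0.0;
}

int main(void)
{
    double mean, alpha;
    size_t max;
    file_stats("int main(void)\n{\n    return 0;\n}\n", &mean, &max, &alpha);
    printf("line_mean=%.2f line_max=%zu alpha_frac=%.3f\n", mean, max, alpha);
    return 0;
}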
/* $Id$ */
/** @file
 * IPRT - RTPathStripExt
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "internal/iprt.h"
#include <iprt/path.h>
#include <iprt/string.h>


/**
 * Strips the extension from a path.
 *
 * @param   pszPath     Path which extension should be stripped.
 */
RTDECL(void) RTPathStripExt(char *pszPath)
{
    char *pszDot = NULL;
    for (;; pszPath++)
    {
        switch (*pszPath)
        {
            /* handle separators. */
#if defined(RT_OS_WINDOWS) || defined(RT_OS_OS2)
            case ':':
            case '\\':
#endif
            case '/':
                pszDot = NULL;
                break;

            case '.':
                pszDot = pszPath;
                break;

            /* the end */
            case '\0':
                if (pszDot)
                    *pszDot = '\0';
                return;
        }
    }
    /* will never get here */
}
repo_name:     dezelin/vbox-haiku
path:          src/VBox/Runtime/common/path/RTPathStripExt.cpp
language:      C++
license:       gpl-2.0
size:          2,098
line_mean:     28.971429
line_max:      80
alpha_frac:    0.539561
autogenerated: false
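A minimal caller sketch for the routine above. The last '.' wins, so only the final extension is removed, and a '.' followed by a later path separator is forgotten. Building this requires the IPRT headers and library from a VirtualBox source tree, which is assumed here.

#include <stdio.h>
#include <iprt/path.h>

int main(void)
{
    /* The function edits the buffer in place, so it must be writable. */
    char szPath[] = "/home/user/archive.tar.gz";
    RTPathStripExt(szPath);
    printf("%s\n", szPath);   /* prints "/home/user/archive.tar" */
    return 0;
}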
/****************************************************************************
*
*    Copyright (C) 2005 - 2013 by Vivante Corp.
*
*    This program is free software; you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation; either version 2 of the license, or
*    (at your option) any later version.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with this program; if not write to the Free Software
*    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*****************************************************************************/

#ifndef __gc_hal_kernel_hardware_h_
#define __gc_hal_kernel_hardware_h_

#if gcdENABLE_VG
#include "gc_hal_kernel_hardware_vg.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* gckHARDWARE object. */
struct _gckHARDWARE
{
    /* Object. */
    gcsOBJECT                   object;

    /* Pointer to gctKERNEL object. */
    gckKERNEL                   kernel;

    /* Pointer to gctOS object. */
    gckOS                       os;

    /* Core */
    gceCORE                     core;

    /* Chip characteristics. */
    gcsHAL_QUERY_CHIP_IDENTITY  identity;
    gctBOOL                     allowFastClear;
    gctBOOL                     allowCompression;
    gctUINT32                   powerBaseAddress;
    gctBOOL                     extraEventStates;

    /* Big endian */
    gctBOOL                     bigEndian;

    /* Chip status */
    gctPOINTER                  powerMutex;
    gctUINT32                   powerProcess;
    gctUINT32                   powerThread;
    gceCHIPPOWERSTATE           chipPowerState;
    gctUINT32                   lastWaitLink;
    gctBOOL                     clockState;
    gctBOOL                     powerState;
    gctBOOL                     interruptState;
    gctPOINTER                  globalSemaphore;
    gctISRMANAGERFUNC           startIsr;
    gctISRMANAGERFUNC           stopIsr;
    gctPOINTER                  isrContext;
    gctUINT32                   mmuVersion;

    /* Type */
    gceHARDWARE_TYPE            type;

#if gcdPOWEROFF_TIMEOUT
    gctUINT32                   powerOffTime;
    gctUINT32                   powerOffTimeout;
    gctPOINTER                  powerOffTimer;
#endif

#if gcdENABLE_FSCALE_VAL_ADJUST
    gctUINT32                   powerOnFscaleVal;
#endif
    gctPOINTER                  pageTableDirty;

#if gcdLINK_QUEUE_SIZE
    struct _gckLINKQUEUE        linkQueue;
#endif

    gctBOOL                     powerManagement;
    gctBOOL                     gpuProfiler;
};

gceSTATUS
gckHARDWARE_GetBaseAddress(
    IN gckHARDWARE Hardware,
    OUT gctUINT32_PTR BaseAddress
    );

gceSTATUS
gckHARDWARE_NeedBaseAddress(
    IN gckHARDWARE Hardware,
    IN gctUINT32 State,
    OUT gctBOOL_PTR NeedBase
    );

gceSTATUS
gckHARDWARE_GetFrameInfo(
    IN gckHARDWARE Hardware,
    OUT gcsHAL_FRAME_INFO * FrameInfo
    );

#ifdef __cplusplus
}
#endif

#endif /* __gc_hal_kernel_hardware_h_ */
repo_name:     yuenar/huawei_Honor3_kernel
path:          drivers/misc/gpu_drv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h
language:      C
license:       gpl-2.0
size:          3,321
line_mean:     26.446281
line_max:      78
alpha_frac:    0.560072
autogenerated: false
/* Functions dealing with attribute handling, used by most front ends. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2010 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "flags.h" #include "toplev.h" #include "output.h" #include "rtl.h" #include "ggc.h" #include "tm_p.h" #include "cpplib.h" #include "target.h" #include "langhooks.h" #include "hashtab.h" #include "c-common.h" static void init_attributes (void); /* Table of the tables of attributes (common, language, format, machine) searched. */ static const struct attribute_spec *attribute_tables[4]; /* Hashtable mapping names (represented as substrings) to attribute specs. */ static htab_t attribute_hash; /* Substring representation. */ struct substring { const char *str; int length; }; static bool attributes_initialized = false; /* Default empty table of attributes. */ static const struct attribute_spec empty_attribute_table[] = { { NULL, 0, 0, false, false, false, NULL } }; /* Return base name of the attribute. Ie '__attr__' is turned into 'attr'. To avoid need for copying, we simply return length of the string. */ static void extract_attribute_substring (struct substring *str) { if (str->length > 4 && str->str[0] == '_' && str->str[1] == '_' && str->str[str->length - 1] == '_' && str->str[str->length - 2] == '_') { str->length -= 4; str->str += 2; } } /* Simple hash function to avoid need to scan whole string. */ static inline hashval_t substring_hash (const char *str, int l) { return str[0] + str[l - 1] * 256 + l * 65536; } /* Used for attribute_hash. */ static hashval_t hash_attr (const void *p) { const struct attribute_spec *const spec = (const struct attribute_spec *) p; const int l = strlen (spec->name); return substring_hash (spec->name, l); } /* Used for attribute_hash. */ static int eq_attr (const void *p, const void *q) { const struct attribute_spec *const spec = (const struct attribute_spec *) p; const struct substring *const str = (const struct substring *) q; return (!strncmp (spec->name, str->str, str->length) && !spec->name[str->length]); } /* Initialize attribute tables, and make some sanity checks if --enable-checking. */ static void init_attributes (void) { size_t i; int k; attribute_tables[0] = lang_hooks.common_attribute_table; attribute_tables[1] = lang_hooks.attribute_table; attribute_tables[2] = lang_hooks.format_attribute_table; attribute_tables[3] = targetm.attribute_table; /* Translate NULL pointers to pointers to the empty table. */ for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) if (attribute_tables[i] == NULL) attribute_tables[i] = empty_attribute_table; #ifdef ENABLE_CHECKING /* Make some sanity checks on the attribute tables. 
*/ for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) { int j; for (j = 0; attribute_tables[i][j].name != NULL; j++) { /* The name must not begin and end with __. */ const char *name = attribute_tables[i][j].name; int len = strlen (name); gcc_assert (!(name[0] == '_' && name[1] == '_' && name[len - 1] == '_' && name[len - 2] == '_')); /* The minimum and maximum lengths must be consistent. */ gcc_assert (attribute_tables[i][j].min_length >= 0); gcc_assert (attribute_tables[i][j].max_length == -1 || (attribute_tables[i][j].max_length >= attribute_tables[i][j].min_length)); /* An attribute cannot require both a DECL and a TYPE. */ gcc_assert (!attribute_tables[i][j].decl_required || !attribute_tables[i][j].type_required); /* If an attribute requires a function type, in particular it requires a type. */ gcc_assert (!attribute_tables[i][j].function_type_required || attribute_tables[i][j].type_required); } } /* Check that each name occurs just once in each table. */ for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) { int j, k; for (j = 0; attribute_tables[i][j].name != NULL; j++) for (k = j + 1; attribute_tables[i][k].name != NULL; k++) gcc_assert (strcmp (attribute_tables[i][j].name, attribute_tables[i][k].name)); } /* Check that no name occurs in more than one table. */ for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) { size_t j, k, l; for (j = i + 1; j < ARRAY_SIZE (attribute_tables); j++) for (k = 0; attribute_tables[i][k].name != NULL; k++) for (l = 0; attribute_tables[j][l].name != NULL; l++) gcc_assert (strcmp (attribute_tables[i][k].name, attribute_tables[j][l].name)); } #endif attribute_hash = htab_create (200, hash_attr, eq_attr, NULL); for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) for (k = 0; attribute_tables[i][k].name != NULL; k++) { struct substring str; const void **slot; str.str = attribute_tables[i][k].name; str.length = strlen (attribute_tables[i][k].name); slot = (const void **)htab_find_slot_with_hash (attribute_hash, &str, substring_hash (str.str, str.length), INSERT); gcc_assert (!*slot); *slot = &attribute_tables[i][k]; } attributes_initialized = true; } /* Return the spec for the attribute named NAME. */ const struct attribute_spec * lookup_attribute_spec (tree name) { struct substring attr; attr.str = IDENTIFIER_POINTER (name); attr.length = IDENTIFIER_LENGTH (name); extract_attribute_substring (&attr); return (const struct attribute_spec *) htab_find_with_hash (attribute_hash, &attr, substring_hash (attr.str, attr.length)); } /* Process the attributes listed in ATTRIBUTES and install them in *NODE, which is either a DECL (including a TYPE_DECL) or a TYPE. If a DECL, it should be modified in place; if a TYPE, a copy should be created unless ATTR_FLAG_TYPE_IN_PLACE is set in FLAGS. FLAGS gives further information, in the form of a bitwise OR of flags in enum attribute_flags from tree.h. Depending on these flags, some attributes may be returned to be applied at a later stage (for example, to apply a decl attribute to the declaration rather than to its type). */ tree decl_attributes (tree *node, tree attributes, int flags) { tree a; tree returned_attrs = NULL_TREE; if (TREE_TYPE (*node) == error_mark_node) return NULL_TREE; if (!attributes_initialized) init_attributes (); /* If this is a function and the user used #pragma GCC optimize, add the options to the attribute((optimize(...))) list. 
*/ if (TREE_CODE (*node) == FUNCTION_DECL && current_optimize_pragma) { tree cur_attr = lookup_attribute ("optimize", attributes); tree opts = copy_list (current_optimize_pragma); if (! cur_attr) attributes = tree_cons (get_identifier ("optimize"), opts, attributes); else TREE_VALUE (cur_attr) = chainon (opts, TREE_VALUE (cur_attr)); } if (TREE_CODE (*node) == FUNCTION_DECL && optimization_current_node != optimization_default_node && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (*node)) DECL_FUNCTION_SPECIFIC_OPTIMIZATION (*node) = optimization_current_node; /* If this is a function and the user used #pragma GCC target, add the options to the attribute((target(...))) list. */ if (TREE_CODE (*node) == FUNCTION_DECL && current_target_pragma && targetm.target_option.valid_attribute_p (*node, NULL_TREE, current_target_pragma, 0)) { tree cur_attr = lookup_attribute ("target", attributes); tree opts = copy_list (current_target_pragma); if (! cur_attr) attributes = tree_cons (get_identifier ("target"), opts, attributes); else TREE_VALUE (cur_attr) = chainon (opts, TREE_VALUE (cur_attr)); } targetm.insert_attributes (*node, &attributes); for (a = attributes; a; a = TREE_CHAIN (a)) { tree name = TREE_PURPOSE (a); tree args = TREE_VALUE (a); tree *anode = node; const struct attribute_spec *spec = lookup_attribute_spec (name); bool no_add_attrs = 0; int fn_ptr_quals = 0; tree fn_ptr_tmp = NULL_TREE; if (spec == NULL) { warning (OPT_Wattributes, "%qs attribute directive ignored", IDENTIFIER_POINTER (name)); continue; } else if (list_length (args) < spec->min_length || (spec->max_length >= 0 && list_length (args) > spec->max_length)) { error ("wrong number of arguments specified for %qs attribute", IDENTIFIER_POINTER (name)); continue; } gcc_assert (is_attribute_p (spec->name, name)); if (spec->decl_required && !DECL_P (*anode)) { if (flags & ((int) ATTR_FLAG_DECL_NEXT | (int) ATTR_FLAG_FUNCTION_NEXT | (int) ATTR_FLAG_ARRAY_NEXT)) { /* Pass on this attribute to be tried again. */ returned_attrs = tree_cons (name, args, returned_attrs); continue; } else { warning (OPT_Wattributes, "%qs attribute does not apply to types", IDENTIFIER_POINTER (name)); continue; } } /* If we require a type, but were passed a decl, set up to make a new type and update the one in the decl. ATTR_FLAG_TYPE_IN_PLACE would have applied if we'd been passed a type, but we cannot modify the decl's type in place here. */ if (spec->type_required && DECL_P (*anode)) { anode = &TREE_TYPE (*anode); /* Allow ATTR_FLAG_TYPE_IN_PLACE for the type's naming decl. */ if (!(TREE_CODE (*anode) == TYPE_DECL && *anode == TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (*anode))))) flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE; } if (spec->function_type_required && TREE_CODE (*anode) != FUNCTION_TYPE && TREE_CODE (*anode) != METHOD_TYPE) { if (TREE_CODE (*anode) == POINTER_TYPE && (TREE_CODE (TREE_TYPE (*anode)) == FUNCTION_TYPE || TREE_CODE (TREE_TYPE (*anode)) == METHOD_TYPE)) { /* OK, this is a bit convoluted. We can't just make a copy of the pointer type and modify its TREE_TYPE, because if we change the attributes of the target type the pointer type needs to have a different TYPE_MAIN_VARIANT. So we pull out the target type now, frob it as appropriate, and rebuild the pointer type later. This would all be simpler if attributes were part of the declarator, grumble grumble. 
*/ fn_ptr_tmp = TREE_TYPE (*anode); fn_ptr_quals = TYPE_QUALS (*anode); anode = &fn_ptr_tmp; flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE; } else if (flags & (int) ATTR_FLAG_FUNCTION_NEXT) { /* Pass on this attribute to be tried again. */ returned_attrs = tree_cons (name, args, returned_attrs); continue; } if (TREE_CODE (*anode) != FUNCTION_TYPE && TREE_CODE (*anode) != METHOD_TYPE) { warning (OPT_Wattributes, "%qs attribute only applies to function types", IDENTIFIER_POINTER (name)); continue; } } if (TYPE_P (*anode) && (flags & (int) ATTR_FLAG_TYPE_IN_PLACE) && TYPE_SIZE (*anode) != NULL_TREE) { warning (OPT_Wattributes, "type attributes ignored after type is already defined"); continue; } if (spec->handler != NULL) returned_attrs = chainon ((*spec->handler) (anode, name, args, flags, &no_add_attrs), returned_attrs); /* Layout the decl in case anything changed. */ if (spec->type_required && DECL_P (*node) && (TREE_CODE (*node) == VAR_DECL || TREE_CODE (*node) == PARM_DECL || TREE_CODE (*node) == RESULT_DECL)) relayout_decl (*node); if (!no_add_attrs) { tree old_attrs; tree a; if (DECL_P (*anode)) old_attrs = DECL_ATTRIBUTES (*anode); else old_attrs = TYPE_ATTRIBUTES (*anode); for (a = lookup_attribute (spec->name, old_attrs); a != NULL_TREE; a = lookup_attribute (spec->name, TREE_CHAIN (a))) { if (simple_cst_equal (TREE_VALUE (a), args) == 1) break; } if (a == NULL_TREE) { /* This attribute isn't already in the list. */ if (DECL_P (*anode)) DECL_ATTRIBUTES (*anode) = tree_cons (name, args, old_attrs); else if (flags & (int) ATTR_FLAG_TYPE_IN_PLACE) { TYPE_ATTRIBUTES (*anode) = tree_cons (name, args, old_attrs); /* If this is the main variant, also push the attributes out to the other variants. */ if (*anode == TYPE_MAIN_VARIANT (*anode)) { tree variant; for (variant = *anode; variant; variant = TYPE_NEXT_VARIANT (variant)) { if (TYPE_ATTRIBUTES (variant) == old_attrs) TYPE_ATTRIBUTES (variant) = TYPE_ATTRIBUTES (*anode); else if (!lookup_attribute (spec->name, TYPE_ATTRIBUTES (variant))) TYPE_ATTRIBUTES (variant) = tree_cons (name, args, TYPE_ATTRIBUTES (variant)); } } } else *anode = build_type_attribute_variant (*anode, tree_cons (name, args, old_attrs)); } } if (fn_ptr_tmp) { /* Rebuild the function pointer type and put it in the appropriate place. */ fn_ptr_tmp = build_pointer_type (fn_ptr_tmp); if (fn_ptr_quals) fn_ptr_tmp = build_qualified_type (fn_ptr_tmp, fn_ptr_quals); if (DECL_P (*node)) TREE_TYPE (*node) = fn_ptr_tmp; else { gcc_assert (TREE_CODE (*node) == POINTER_TYPE); *node = fn_ptr_tmp; } } } return returned_attrs; }
repo_name:     embecosm/avr32-gcc
path:          gcc/attribs.c
language:      C
license:       gpl-2.0
size:          14,144
line_mean:     29.814815
line_max:      86
alpha_frac:    0.628394
autogenerated: false
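For orientation, this is the kind of source-level input the attribute machinery above services: each attribute name is resolved through lookup_attribute_spec() and installed by decl_attributes(), and the underscore-wrapped spelling is normalized by extract_attribute_substring(). A small illustrative fragment, not taken from GCC itself:

/* __noreturn__ is looked up as "noreturn" after the surrounding
   underscores are stripped. */
void fatal_error(const char *msg) __attribute__((__noreturn__));

/* "format" takes arguments, which decl_attributes() checks against the
   spec's min_length/max_length before invoking the handler. */
int log_msg(const char *fmt, ...) __attribute__((format(printf, 1, 2)));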
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen 1.8.11"/> <title>Wayverb: Member List</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="dynsections.js"></script> <link href="navtree.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="resize.js"></script> <script type="text/javascript" src="navtreedata.js"></script> <script type="text/javascript" src="navtree.js"></script> <script type="text/javascript"> $(document).ready(initResizable); $(window).load(resizeHeight); </script> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/searchdata.js"></script> <script type="text/javascript" src="search/search.js"></script> <script type="text/javascript"> $(document).ready(function() { init_search(); }); </script> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td id="projectalign" style="padding-left: 0.5em;"> <div id="projectname">Wayverb </div> </td> </tr> </tbody> </table> </div> <!-- end header part --> <!-- Generated by Doxygen 1.8.11 --> <script type="text/javascript"> var searchBox = new SearchBox("searchBox", "search",false,'Search'); </script> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="pages.html"><span>Related&#160;Pages</span></a></li> <li class="current"><a href="annotated.html"><span>Classes</span></a></li> <li><a href="files.html"><span>Files</span></a></li> <li> <div id="MSearchBox" class="MSearchBoxInactive"> <span class="left"> <img id="MSearchSelect" src="search/mag_sel.png" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" alt=""/> <input type="text" id="MSearchField" value="Search" accesskey="S" onfocus="searchBox.OnSearchFieldFocus(true)" onblur="searchBox.OnSearchFieldFocus(false)" onkeyup="searchBox.OnSearchFieldChange(event)"/> </span><span class="right"> <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> </span> </div> </li> </ul> </div> <div id="navrow2" class="tabs2"> <ul class="tablist"> <li><a href="annotated.html"><span>Class&#160;List</span></a></li> <li><a href="classes.html"><span>Class&#160;Index</span></a></li> <li><a href="hierarchy.html"><span>Class&#160;Hierarchy</span></a></li> <li><a href="functions.html"><span>Class&#160;Members</span></a></li> </ul> </div> </div><!-- top --> <div id="side-nav" class="ui-resizable side-nav-resizable"> <div id="nav-tree"> <div id="nav-tree-contents"> <div id="nav-sync" class="sync"></div> </div> </div> <div id="splitbar" style="-moz-user-select:none;" class="ui-resizable-handle"> </div> </div> <script type="text/javascript"> $(document).ready(function(){initNavTree('classwayverb_1_1raytracer_1_1reflector.html','');}); </script> <div id="doc-content"> <!-- window showing the filter options --> <div id="MSearchSelectWindow" onmouseover="return 
searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" onkeydown="return searchBox.OnSearchSelectKey(event)"> </div> <!-- iframe showing the search results (closed by default) --> <div id="MSearchResultsWindow"> <iframe src="javascript:void(0)" frameborder="0" name="MSearchResults" id="MSearchResults"> </iframe> </div> <div class="header"> <div class="headertitle"> <div class="title">wayverb::raytracer::reflector Member List</div> </div> </div><!--header--> <div class="contents"> <p>This is the complete list of members for <a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a>, including all inherited members.</p> <table class="directory"> <tr class="even"><td class="entry"><a class="el" href="classwayverb_1_1raytracer_1_1reflector.html#ace324d0ac4e24e5ee497f1858e804bfc">get_per_ray_size</a>()</td><td class="entry"><a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a></td><td class="entry"><span class="mlabel">inline</span><span class="mlabel">static</span></td></tr> <tr bgcolor="#f0f0f0"><td class="entry"><b>get_rays</b>() (defined in <a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a>)</td><td class="entry"><a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a></td><td class="entry"></td></tr> <tr bgcolor="#f0f0f0" class="even"><td class="entry"><b>get_reflections</b>() (defined in <a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a>)</td><td class="entry"><a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a></td><td class="entry"></td></tr> <tr bgcolor="#f0f0f0"><td class="entry"><b>get_rng</b>() (defined in <a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a>)</td><td class="entry"><a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a></td><td class="entry"></td></tr> <tr bgcolor="#f0f0f0" class="even"><td class="entry"><b>reflector</b>(const core::compute_context &amp;cc, const glm::vec3 &amp;receiver, It b, It e) (defined in <a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a>)</td><td class="entry"><a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a></td><td class="entry"><span class="mlabel">inline</span></td></tr> <tr bgcolor="#f0f0f0"><td class="entry"><b>run_step</b>(const core::scene_buffers &amp;buffers) (defined in <a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a>)</td><td class="entry"><a class="el" href="classwayverb_1_1raytracer_1_1reflector.html">wayverb::raytracer::reflector</a></td><td class="entry"></td></tr> </table></div><!-- contents --> </div><!-- doc-content --> <!-- start footer part --> <div id="nav-path" class="navpath"><!-- id is needed for treeview function! --> <ul> <li class="footer">Generated by <a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.11 </li> </ul> </div> </body> </html>
repo_name:     reuk/waveguide
path:          docs_source/doxygen/html/classwayverb_1_1raytracer_1_1reflector-members.html
language:      HTML
license:       gpl-2.0
size:          7,165
line_mean:     53.280303
line_max:      451
alpha_frac:    0.666015
autogenerated: false
/* -*- c++ -*- ----------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   https://www.lammps.org/, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

#ifdef ATOM_CLASS
// clang-format off
AtomStyle(charge/kk,AtomVecChargeKokkos);
AtomStyle(charge/kk/device,AtomVecChargeKokkos);
AtomStyle(charge/kk/host,AtomVecChargeKokkos);
// clang-format on
#else

// clang-format off
#ifndef LMP_ATOM_VEC_CHARGE_KOKKOS_H
#define LMP_ATOM_VEC_CHARGE_KOKKOS_H

#include "atom_vec_kokkos.h"
#include "kokkos_type.h"

namespace LAMMPS_NS {

class AtomVecChargeKokkos : public AtomVecKokkos {
 public:
  AtomVecChargeKokkos(class LAMMPS *);
  void grow(int) override;
  void copy(int, int, int) override;
  int pack_border(int, int *, double *, int, int *) override;
  int pack_border_vel(int, int *, double *, int, int *) override;
  int pack_border_hybrid(int, int *, double *) override;
  void unpack_border(int, int, double *) override;
  void unpack_border_vel(int, int, double *) override;
  int unpack_border_hybrid(int, int, double *) override;
  int pack_exchange(int, double *) override;
  int unpack_exchange(double *) override;
  int size_restart() override;
  int pack_restart(int, double *) override;
  int unpack_restart(double *) override;
  void create_atom(int, double *) override;
  void data_atom(double *, imageint, const std::vector<std::string> &) override;
  int data_atom_hybrid(int, const std::vector<std::string> &, int) override;
  void pack_data(double **) override;
  int pack_data_hybrid(int, double *) override;
  void write_data(FILE *, int, double **) override;
  int write_data_hybrid(FILE *, double *) override;
  double memory_usage() override;
  void grow_pointers() override;
  int pack_border_kokkos(int n, DAT::tdual_int_2d k_sendlist,
                         DAT::tdual_xfloat_2d buf, int iswap,
                         int pbc_flag, int *pbc, ExecutionSpace space) override;
  void unpack_border_kokkos(const int &n, const int &nfirst,
                            const DAT::tdual_xfloat_2d &buf,
                            ExecutionSpace space) override;
  int pack_exchange_kokkos(const int &nsend, DAT::tdual_xfloat_2d &buf,
                           DAT::tdual_int_1d k_sendlist,
                           DAT::tdual_int_1d k_copylist,
                           ExecutionSpace space, int dim,
                           X_FLOAT lo, X_FLOAT hi) override;
  int unpack_exchange_kokkos(DAT::tdual_xfloat_2d &k_buf, int nrecv,
                             int nlocal, int dim, X_FLOAT lo, X_FLOAT hi,
                             ExecutionSpace space) override;
  void sync(ExecutionSpace space, unsigned int mask) override;
  void modified(ExecutionSpace space, unsigned int mask) override;
  void sync_overlapping_device(ExecutionSpace space, unsigned int mask) override;

 protected:
  tagint *tag;
  int *type, *mask;
  imageint *image;
  double **x, **v, **f;
  double *q;

  DAT::t_tagint_1d d_tag;
  HAT::t_tagint_1d h_tag;
  DAT::t_int_1d d_type, d_mask;
  HAT::t_int_1d h_type, h_mask;
  DAT::t_imageint_1d d_image;
  HAT::t_imageint_1d h_image;

  DAT::t_x_array d_x;
  DAT::t_v_array d_v;
  DAT::t_f_array d_f;

  DAT::t_float_1d d_q;
  HAT::t_float_1d h_q;

  DAT::tdual_int_1d k_count;
};

}

#endif
#endif

/* ERROR/WARNING messages:

E: Per-processor system is too big

The number of owned atoms plus ghost atoms on a single
processor must fit in 32-bit integer.

E: Invalid atom type in Atoms section of data file

Atom types must range from 1 to specified # of types.

*/
repo_name:     ndtrung81/lammps
path:          src/KOKKOS/atom_vec_charge_kokkos.h
language:      C
license:       gpl-2.0
size:          4,001
line_mean:     32.066116
line_max:      81
alpha_frac:    0.645589
autogenerated: false
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen 1.8.6"/> <title>CORE POS - IS4C: COREPOS\pos\lib\Scanning\VariableWeightReWrites\ZeroedPriceReWrite Class Reference</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="dynsections.js"></script> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/search.js"></script> <script type="text/javascript"> $(document).ready(function() { if ($('.searchresults').length > 0) { searchBox.DOMSearchField().focus(); } }); </script> <link rel="search" href="search-opensearch.php?v=opensearch.xml" type="application/opensearchdescription+xml" title="CORE POS - IS4C"/> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td style="padding-left: 0.5em;"> <div id="projectname">CORE POS - IS4C </div> <div id="projectbrief">The CORE POS front end</div> </td> </tr> </tbody> </table> </div> <!-- end header part --> <!-- Generated by Doxygen 1.8.6 --> <script type="text/javascript"> var searchBox = new SearchBox("searchBox", "search",false,'Search'); </script> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="pages.html"><span>Related&#160;Pages</span></a></li> <li class="current"><a href="annotated.html"><span>Classes</span></a></li> <li><a href="files.html"><span>Files</span></a></li> <li><a href="examples.html"><span>Examples</span></a></li> <li> <div id="MSearchBox" class="MSearchBoxInactive"> <div class="left"> <form id="FSearchBox" action="search.php" method="get"> <img id="MSearchSelect" src="search/mag.png" alt=""/> <input type="text" id="MSearchField" name="query" value="Search" size="20" accesskey="S" onfocus="searchBox.OnSearchFieldFocus(true)" onblur="searchBox.OnSearchFieldFocus(false)"/> </form> </div><div class="right"></div> </div> </li> </ul> </div> <div id="navrow2" class="tabs2"> <ul class="tablist"> <li><a href="annotated.html"><span>Class&#160;List</span></a></li> <li><a href="classes.html"><span>Class&#160;Index</span></a></li> <li><a href="hierarchy.html"><span>Class&#160;Hierarchy</span></a></li> <li><a href="functions.html"><span>Class&#160;Members</span></a></li> </ul> </div> <div id="nav-path" class="navpath"> <ul> <li class="navelem"><b>COREPOS</b></li><li class="navelem"><b>pos</b></li><li class="navelem"><b>lib</b></li><li class="navelem"><b>Scanning</b></li><li class="navelem"><b>VariableWeightReWrites</b></li><li class="navelem"><a class="el" href="class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_writes_1_1_zeroed_price_re_write.html">ZeroedPriceReWrite</a></li> </ul> </div> </div><!-- top --> <div class="header"> <div class="summary"> <a href="#pub-methods">Public Member Functions</a> &#124; <a href="class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_writes_1_1_zeroed_price_re_write-members.html">List of all members</a> </div> <div class="headertitle"> <div 
class="title">COREPOS\pos\lib\Scanning\VariableWeightReWrites\ZeroedPriceReWrite Class Reference</div> </div> </div><!--header--> <div class="contents"> <div class="dynheader"> Inheritance diagram for COREPOS\pos\lib\Scanning\VariableWeightReWrites\ZeroedPriceReWrite:</div> <div class="dyncontent"> <div class="center"> <img src="class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_writes_1_1_zeroed_price_re_write.png" usemap="#COREPOS\pos\lib\Scanning\VariableWeightReWrites\ZeroedPriceReWrite_map" alt=""/> <map id="COREPOS\pos\lib\Scanning\VariableWeightReWrites\ZeroedPriceReWrite_map" name="COREPOS\pos\lib\Scanning\VariableWeightReWrites\ZeroedPriceReWrite_map"> <area href="class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_write.html" alt="COREPOS\pos\lib\Scanning\VariableWeightReWrite" shape="rect" coords="0,0,431,24"/> </map> </div></div> <table class="memberdecls"> <tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-methods"></a> Public Member Functions</h2></td></tr> <tr class="memitem:aea2d310b645fe42d2841232db4bfe846"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="aea2d310b645fe42d2841232db4bfe846"></a> &#160;</td><td class="memItemRight" valign="bottom"><b>translate</b> ($upc, $includes_check_digit=false)</td></tr> <tr class="separator:aea2d310b645fe42d2841232db4bfe846"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="inherit_header pub_methods_class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_write"><td colspan="2" onclick="javascript:toggleInherit('pub_methods_class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_write')"><img src="closed.png" alt="-"/>&#160;Public Member Functions inherited from <a class="el" href="class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_write.html">COREPOS\pos\lib\Scanning\VariableWeightReWrite</a></td></tr> <tr class="memitem:a2ae5d24b441499a45974a88c844de632 inherit pub_methods_class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_write"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a2ae5d24b441499a45974a88c844de632"></a> &#160;</td><td class="memItemRight" valign="bottom"><b>translate</b> ($upc, $includes_check_digit=false)</td></tr> <tr class="separator:a2ae5d24b441499a45974a88c844de632 inherit pub_methods_class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_write"><td class="memSeparator" colspan="2">&#160;</td></tr> </table> <a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2> <div class="textblock"><p>Replaces the price portion of the UPC with zeros.</p> <p>Ex: 0021234500199 beomces 0021234500000</p> <p>Note: if using check digits, the check digit is also zeroed out rather than re-calculating the correct value for the zero-price UPC. </p> </div><hr/>The documentation for this class was generated from the following file:<ul> <li>pos/is4c-nf/lib/Scanning/VariableWeightReWrites/ZeroedPriceReWrite.php</li> </ul> </div><!-- contents --> <!-- start footer part --> <hr class="footer"/><address class="footer"><small> Generated on Fri Sep 2 2016 11:30:51 for CORE POS - IS4C by &#160;<a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/> </a> 1.8.6 </small></address> </body> </html>
repo_name:     CORE-POS/IS4C
path:          documentation/doxy/output/is4c-nf/html/class_c_o_r_e_p_o_s_1_1pos_1_1lib_1_1_scanning_1_1_variable_weight_re_writes_1_1_zeroed_price_re_write.html
language:      HTML
license:       gpl-2.0
size:          7,147
line_mean:     59.567797
line_max:      495
alpha_frac:    0.684623
autogenerated: false
/* Implement fma using soft-fp.
   Copyright (C) 2013-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   In addition to the permissions in the GNU Lesser General Public
   License, the Free Software Foundation gives you unlimited
   permission to link the compiled version of this file into
   combinations with other programs, and to distribute those
   combinations without any restriction coming from the use of this
   file.  (The Lesser General Public License restrictions do apply in
   other respects; for example, they cover modification of the file,
   and distribution when not linked into a combine executable.)

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <libc-internal.h>
#include <math.h>
#include "soft-fp.h"
#include "double.h"

double
__fma (double a, double b, double c)
{
  FP_DECL_EX;
  FP_DECL_D (A);
  FP_DECL_D (B);
  FP_DECL_D (C);
  FP_DECL_D (R);
  double r;

  FP_INIT_ROUNDMODE;
  FP_UNPACK_D (A, a);
  FP_UNPACK_D (B, b);
  FP_UNPACK_D (C, c);
  FP_FMA_D (R, A, B, C);
  /* R_e is not set in cases where it is not used in packing, but the
     compiler does not see that it is set in all cases where it is
     used, resulting in warnings that it may be used uninitialized.  */
  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (4, 7)
  DIAG_IGNORE_NEEDS_COMMENT (4.9, "-Wmaybe-uninitialized");
#else
  DIAG_IGNORE_NEEDS_COMMENT (4.9, "-Wuninitialized");
#endif
  FP_PACK_D (r, R);
  DIAG_POP_NEEDS_COMMENT;
  FP_HANDLE_EXCEPTIONS;

  return r;
}
#ifndef __fma
weak_alias (__fma, fma)
#endif
#ifdef NO_LONG_DOUBLE
strong_alias (__fma, __fmal)
weak_alias (__fmal, fmal)
#endif
repo_name:     Chen-tao/glibc_mirror
path:          soft-fp/fmadf4.c
language:      C
license:       gpl-2.0
size:          2,291
line_mean:     31.267606
line_max:      71
alpha_frac:    0.709734
autogenerated: false
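A small demonstration of why a fused multiply-add is worth a dedicated soft-fp routine: the product is not rounded before the addition, so terms that a plain a * b + c rounds away survive. This is a standalone sketch against the public math.h interface, not glibc-internal code; compile with -lm.

#include <math.h>
#include <stdio.h>

int main(void)
{
    double a = 1.0 + 0x1p-52;   /* 1 + ulp(1) */
    double b = 1.0 - 0x1p-52;   /* 1 - ulp(1) */

    /* a*b is exactly 1 - 2^-104, which rounds to 1.0 in double,
       so the naive expression reports zero. */
    printf("naive: %a\n", a * b - 1.0);      /* 0x0p+0    */

    /* fma() adds before rounding and keeps the low-order term. */
    printf("fma:   %a\n", fma(a, b, -1.0));  /* -0x1p-104 */
    return 0;
}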
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@magento.com so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magento.com for more information. * * @category Mage * @package Mage_CatalogRule * @copyright Copyright (c) 2006-2014 X.commerce, Inc. (http://www.magento.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ /** * Catalog rules resource model * * @category Mage * @package Mage_CatalogRule * @author Magento Core Team <core@magentocommerce.com> */ class Mage_CatalogRule_Model_Resource_Rule extends Mage_Rule_Model_Resource_Abstract { /** * Store number of seconds in a day */ const SECONDS_IN_DAY = 86400; /** * Number of products in range for insert */ const RANGE_PRODUCT_STEP = 1000000; /** * Store associated with rule entities information map * * @var array */ protected $_associatedEntitiesMap = array( 'website' => array( 'associations_table' => 'catalogrule/website', 'rule_id_field' => 'rule_id', 'entity_id_field' => 'website_id' ), 'customer_group' => array( 'associations_table' => 'catalogrule/customer_group', 'rule_id_field' => 'rule_id', 'entity_id_field' => 'customer_group_id' ) ); /** * Factory instance * * @var Mage_Core_Model_Factory */ protected $_factory; /** * App instance * * @var Mage_Core_Model_App */ protected $_app; /** * Constructor with parameters * Array of arguments with keys * - 'factory' Mage_Core_Model_Factory * * @param array $args */ public function __construct(array $args = array()) { $this->_factory = !empty($args['factory']) ? $args['factory'] : Mage::getSingleton('core/factory'); $this->_app = !empty($args['app']) ? $args['app'] : Mage::app(); parent::__construct(); } /** * Initialize main table and table id field */ protected function _construct() { $this->_init('catalogrule/rule', 'rule_id'); } /** * Add customer group ids and website ids to rule data after load * * @param Mage_Core_Model_Abstract $object * * @return Mage_CatalogRule_Model_Resource_Rule */ protected function _afterLoad(Mage_Core_Model_Abstract $object) { $object->setData('customer_group_ids', (array)$this->getCustomerGroupIds($object->getId())); $object->setData('website_ids', (array)$this->getWebsiteIds($object->getId())); return parent::_afterLoad($object); } /** * Bind catalog rule to customer group(s) and website(s). * Update products which are matched for rule. 
* * @param Mage_Core_Model_Abstract $object * * @return Mage_CatalogRule_Model_Resource_Rule */ protected function _afterSave(Mage_Core_Model_Abstract $object) { if ($object->hasWebsiteIds()) { $websiteIds = $object->getWebsiteIds(); if (!is_array($websiteIds)) { $websiteIds = explode(',', (string)$websiteIds); } $this->bindRuleToEntity($object->getId(), $websiteIds, 'website'); } if ($object->hasCustomerGroupIds()) { $customerGroupIds = $object->getCustomerGroupIds(); if (!is_array($customerGroupIds)) { $customerGroupIds = explode(',', (string)$customerGroupIds); } $this->bindRuleToEntity($object->getId(), $customerGroupIds, 'customer_group'); } parent::_afterSave($object); return $this; } /** * Deletes records in catalogrule/product_data by rule ID and product IDs * * @param int $ruleId * @param array $productIds */ public function cleanProductData($ruleId, array $productIds = array()) { /** @var $write Varien_Db_Adapter_Interface */ $write = $this->_getWriteAdapter(); $conditions = array('rule_id = ?' => $ruleId); if (count($productIds) > 0) { $conditions['product_id IN (?)'] = $productIds; } $write->delete($this->getTable('catalogrule/rule_product'), $conditions); } /** * Return whether the product fits the rule * * @param Mage_CatalogRule_Model_Rule $rule * @param Varien_Object $product * @param array $websiteIds * @return bool */ public function validateProduct(Mage_CatalogRule_Model_Rule $rule, Varien_Object $product, $websiteIds = array()) { /** @var $helper Mage_Catalog_Helper_Product_Flat */ $helper = $this->_factory->getHelper('catalog/product_flat'); if ($helper->isEnabled() && $helper->isBuiltAllStores()) { /** @var $store Mage_Core_Model_Store */ foreach ($this->_app->getStores(false) as $store) { if (count($websiteIds) == 0 || in_array($store->getWebsiteId(), $websiteIds)) { /** @var $selectByStore Varien_Db_Select */ $selectByStore = $rule->getProductFlatSelect($store->getId()); $selectByStore->where('p.entity_id = ?', $product->getId()); $selectByStore->limit(1); if ($this->_getReadAdapter()->fetchOne($selectByStore)) { return true; } } } return false; } else { return $rule->getConditions()->validate($product); } } /** * Inserts rule data into catalogrule/rule_product table * * @param Mage_CatalogRule_Model_Rule $rule * @param array $websiteIds * @param array $productIds */ public function insertRuleData(Mage_CatalogRule_Model_Rule $rule, array $websiteIds, array $productIds = array()) { /** @var $write Varien_Db_Adapter_Interface */ $write = $this->_getWriteAdapter(); $customerGroupIds = $rule->getCustomerGroupIds(); $fromTime = (int) strtotime($rule->getFromDate()); $toTime = (int) strtotime($rule->getToDate()); $toTime = $toTime ? ($toTime + self::SECONDS_IN_DAY - 1) : 0; /** @var Mage_Core_Model_Date $coreDate */ $coreDate = $this->_factory->getModel('core/date'); $timestamp = $coreDate->gmtTimestamp('Today'); if ($fromTime > $timestamp || ($toTime && $toTime < $timestamp) ) { return; } $sortOrder = (int) $rule->getSortOrder(); $actionOperator = $rule->getSimpleAction(); $actionAmount = (float) $rule->getDiscountAmount(); $subActionOperator = $rule->getSubIsEnable() ? 
$rule->getSubSimpleAction() : ''; $subActionAmount = (float) $rule->getSubDiscountAmount(); $actionStop = (int) $rule->getStopRulesProcessing(); /** @var $helper Mage_Catalog_Helper_Product_Flat */ $helper = $this->_factory->getHelper('catalog/product_flat'); if ($helper->isEnabled() && $helper->isBuiltAllStores()) { /** @var $store Mage_Core_Model_Store */ foreach ($this->_app->getStores(false) as $store) { if (in_array($store->getWebsiteId(), $websiteIds)) { /** @var $selectByStore Varien_Db_Select */ $selectByStore = $rule->getProductFlatSelect($store->getId()) ->joinLeft(array('cg' => $this->getTable('customer/customer_group')), $write->quoteInto('cg.customer_group_id IN (?)', $customerGroupIds), array('cg.customer_group_id')) ->reset(Varien_Db_Select::COLUMNS) ->columns(array( new Zend_Db_Expr($store->getWebsiteId()), 'cg.customer_group_id', 'p.entity_id', new Zend_Db_Expr($rule->getId()), new Zend_Db_Expr($fromTime), new Zend_Db_Expr($toTime), new Zend_Db_Expr("'" . $actionOperator . "'"), new Zend_Db_Expr($actionAmount), new Zend_Db_Expr($actionStop), new Zend_Db_Expr($sortOrder), new Zend_Db_Expr("'" . $subActionOperator . "'"), new Zend_Db_Expr($subActionAmount), )); if (count($productIds) > 0) { $selectByStore->where('p.entity_id IN (?)', array_keys($productIds)); } $selects = $write->selectsByRange('entity_id', $selectByStore, self::RANGE_PRODUCT_STEP); foreach ($selects as $select) { $write->query( $write->insertFromSelect( $select, $this->getTable('catalogrule/rule_product'), array( 'website_id', 'customer_group_id', 'product_id', 'rule_id', 'from_time', 'to_time', 'action_operator', 'action_amount', 'action_stop', 'sort_order', 'sub_simple_action', 'sub_discount_amount', ), Varien_Db_Adapter_Interface::INSERT_IGNORE ) ); } } } } else { if (count($productIds) == 0) { Varien_Profiler::start('__MATCH_PRODUCTS__'); $productIds = $rule->getMatchingProductIds(); Varien_Profiler::stop('__MATCH_PRODUCTS__'); } $rows = array(); foreach ($productIds as $productId => $validationByWebsite) { foreach ($websiteIds as $websiteId) { foreach ($customerGroupIds as $customerGroupId) { if (empty($validationByWebsite[$websiteId])) { continue; } $rows[] = array( 'rule_id' => $rule->getId(), 'from_time' => $fromTime, 'to_time' => $toTime, 'website_id' => $websiteId, 'customer_group_id' => $customerGroupId, 'product_id' => $productId, 'action_operator' => $actionOperator, 'action_amount' => $actionAmount, 'action_stop' => $actionStop, 'sort_order' => $sortOrder, 'sub_simple_action' => $subActionOperator, 'sub_discount_amount' => $subActionAmount, ); if (count($rows) == 1000) { $write->insertMultiple($this->getTable('catalogrule/rule_product'), $rows); $rows = array(); } } } } if (!empty($rows)) { $write->insertMultiple($this->getTable('catalogrule/rule_product'), $rows); } } } /** * Update products which are matched for rule * * @param Mage_CatalogRule_Model_Rule $rule * * @throws Exception * @return Mage_CatalogRule_Model_Resource_Rule */ public function updateRuleProductData(Mage_CatalogRule_Model_Rule $rule) { $ruleId = $rule->getId(); $write = $this->_getWriteAdapter(); $write->beginTransaction(); if ($rule->getProductsFilter()) { $this->cleanProductData($ruleId, $rule->getProductsFilter()); } else { $this->cleanProductData($ruleId); } if (!$rule->getIsActive()) { $write->commit(); return $this; } $websiteIds = $rule->getWebsiteIds(); if (!is_array($websiteIds)) { $websiteIds = explode(',', $websiteIds); } if (empty($websiteIds)) { return $this; } try { $this->insertRuleData($rule, $websiteIds); 
$write->commit(); } catch (Exception $e) { $write->rollback(); throw $e; } return $this; } /** * Get all product ids matched for rule * * @param int $ruleId * * @return array */ public function getRuleProductIds($ruleId) { $read = $this->_getReadAdapter(); $select = $read->select()->from($this->getTable('catalogrule/rule_product'), 'product_id') ->where('rule_id=?', $ruleId); return $read->fetchCol($select); } /** * Remove catalog rules product prices for specified date range and product * * @param int|string $fromDate * @param int|string $toDate * @param int|null $productId * * @return Mage_CatalogRule_Model_Resource_Rule */ public function removeCatalogPricesForDateRange($fromDate, $toDate, $productId = null) { $write = $this->_getWriteAdapter(); $conds = array(); $cond = $write->quoteInto('rule_date between ?', $this->formatDate($fromDate)); $cond = $write->quoteInto($cond.' and ?', $this->formatDate($toDate)); $conds[] = $cond; if (!is_null($productId)) { $conds[] = $write->quoteInto('product_id=?', $productId); } /** * Add information about affected products * It can be used in processes which related with product price (like catalog index) */ $select = $this->_getWriteAdapter()->select() ->from($this->getTable('catalogrule/rule_product_price'), 'product_id') ->where(implode(' AND ', $conds)) ->group('product_id'); $replace = $write->insertFromSelect( $select, $this->getTable('catalogrule/affected_product'), array('product_id'), true ); $write->query($replace); $write->delete($this->getTable('catalogrule/rule_product_price'), $conds); return $this; } /** * Delete old price rules data * * @param string $date * @param int|null $productId * * @return Mage_CatalogRule_Model_Resource_Rule */ public function deleteOldData($date, $productId = null) { $write = $this->_getWriteAdapter(); $conds = array(); $conds[] = $write->quoteInto('rule_date<?', $this->formatDate($date)); if (!is_null($productId)) { $conds[] = $write->quoteInto('product_id=?', $productId); } $write->delete($this->getTable('catalogrule/rule_product_price'), $conds); return $this; } /** * Get DB resource statement for processing query result * * @param int $fromDate * @param int $toDate * @param int|null $productId * @param int|null $websiteId * * @return Zend_Db_Statement_Interface */ protected function _getRuleProductsStmt($fromDate, $toDate, $productId = null, $websiteId = null) { $read = $this->_getReadAdapter(); /** * Sort order is important * It used for check stop price rule condition. * website_id customer_group_id product_id sort_order * 1 1 1 0 * 1 1 1 1 * 1 1 1 2 * if row with sort order 1 will have stop flag we should exclude * all next rows for same product id from price calculation */ $select = $read->select() ->from(array('rp' => $this->getTable('catalogrule/rule_product'))) ->where($read->quoteInto('rp.from_time = 0 or rp.from_time <= ?', $toDate) . ' OR ' . $read->quoteInto('rp.to_time = 0 or rp.to_time >= ?', $fromDate)) ->order(array('rp.website_id', 'rp.customer_group_id', 'rp.product_id', 'rp.sort_order', 'rp.rule_id')); if (!is_null($productId)) { $select->where('rp.product_id=?', $productId); } /** * Join default price and websites prices to result */ $priceAttr = Mage::getSingleton('eav/config')->getAttribute(Mage_Catalog_Model_Product::ENTITY, 'price'); $priceTable = $priceAttr->getBackend()->getTable(); $attributeId= $priceAttr->getId(); $joinCondition = '%1$s.entity_id=rp.product_id AND (%1$s.attribute_id=' . $attributeId . 
') and %1$s.store_id=%2$s'; $select->join( array('pp_default'=>$priceTable), sprintf($joinCondition, 'pp_default', Mage_Core_Model_App::ADMIN_STORE_ID), array('default_price'=>'pp_default.value') ); if ($websiteId !== null) { $website = Mage::app()->getWebsite($websiteId); $defaultGroup = $website->getDefaultGroup(); if ($defaultGroup instanceof Mage_Core_Model_Store_Group) { $storeId = $defaultGroup->getDefaultStoreId(); } else { $storeId = Mage_Core_Model_App::ADMIN_STORE_ID; } $select->joinInner( array('product_website' => $this->getTable('catalog/product_website')), 'product_website.product_id=rp.product_id ' . 'AND rp.website_id=product_website.website_id ' . 'AND product_website.website_id='.$websiteId, array() ); $tableAlias = 'pp'.$websiteId; $fieldAlias = 'website_'.$websiteId.'_price'; $select->joinLeft( array($tableAlias=>$priceTable), sprintf($joinCondition, $tableAlias, $storeId), array($fieldAlias=>$tableAlias.'.value') ); } else { foreach (Mage::app()->getWebsites() as $website) { $websiteId = $website->getId(); $defaultGroup = $website->getDefaultGroup(); if ($defaultGroup instanceof Mage_Core_Model_Store_Group) { $storeId = $defaultGroup->getDefaultStoreId(); } else { $storeId = Mage_Core_Model_App::ADMIN_STORE_ID; } $tableAlias = 'pp' . $websiteId; $fieldAlias = 'website_' . $websiteId . '_price'; $select->joinLeft( array($tableAlias => $priceTable), sprintf($joinCondition, $tableAlias, $storeId), array($fieldAlias => $tableAlias.'.value') ); } } return $read->query($select); } /** * Generate catalog price rules prices for specified date range * If from date is not defined - will be used previous day by UTC * If to date is not defined - will be used next day by UTC * * @param int|Mage_Catalog_Model_Product $product * * @throws Exception * @return Mage_CatalogRule_Model_Resource_Rule */ public function applyAllRules($product = null) { $this->_reindexCatalogRule($product); return $this; } /** * Generate catalog price rules prices for specified date range * If from date is not defined - will be used previous day by UTC * If to date is not defined - will be used next day by UTC * * @param int|string|null $fromDate * @param int|string|null $toDate * @param int $productId * * @deprecated after 1.7.0.2 use method applyAllRules * * @return Mage_CatalogRule_Model_Resource_Rule */ public function applyAllRulesForDateRange($fromDate = null, $toDate = null, $productId = null) { return $this->applyAllRules($productId); } /** * Run reindex * * @param int|Mage_Catalog_Model_Product $product */ protected function _reindexCatalogRule($product = null) { $indexerCode = 'catalogrule/action_index_refresh'; $value = null; if ($product) { $value = $product instanceof Mage_Catalog_Model_Product ? 
$product->getId() : $product; $indexerCode = 'catalogrule/action_index_refresh_row'; } /** @var $indexer Mage_CatalogRule_Model_Action_Index_Refresh */ $indexer = Mage::getModel( $indexerCode, array( 'connection' => $this->_getWriteAdapter(), 'factory' => Mage::getModel('core/factory'), 'resource' => $this, 'app' => Mage::app(), 'value' => $value ) ); $indexer->execute(); } /** * Calculate product price based on price rule data and previous information * * @param array $ruleData * @param null|array $productData * * @return float */ protected function _calcRuleProductPrice($ruleData, $productData = null) { if ($productData !== null && isset($productData['rule_price'])) { $productPrice = $productData['rule_price']; } else { $websiteId = $ruleData['website_id']; if (isset($ruleData['website_'.$websiteId.'_price'])) { $productPrice = $ruleData['website_'.$websiteId.'_price']; } else { $productPrice = $ruleData['default_price']; } } $productPrice = Mage::helper('catalogrule')->calcPriceRule( $ruleData['action_operator'], $ruleData['action_amount'], $productPrice); return Mage::app()->getStore()->roundPrice($productPrice); } /** * Save rule prices for products to DB * * @param array $arrData * * @return Mage_CatalogRule_Model_Resource_Rule */ protected function _saveRuleProductPrices($arrData) { if (empty($arrData)) { return $this; } $adapter = $this->_getWriteAdapter(); $productIds = array(); $adapter->beginTransaction(); try { foreach ($arrData as $key => $data) { $productIds['product_id'] = $data['product_id']; $arrData[$key]['rule_date'] = $this->formatDate($data['rule_date'], false); $arrData[$key]['latest_start_date'] = $this->formatDate($data['latest_start_date'], false); $arrData[$key]['earliest_end_date'] = $this->formatDate($data['earliest_end_date'], false); } $adapter->insertOnDuplicate($this->getTable('catalogrule/affected_product'), array_unique($productIds)); $adapter->insertOnDuplicate($this->getTable('catalogrule/rule_product_price'), $arrData); } catch (Exception $e) { $adapter->rollback(); throw $e; } $adapter->commit(); return $this; } /** * Get catalog rules product price for specific date, website and * customer group * * @param int|string $date * @param int $wId * @param int $gId * @param int $pId * * @return float|bool */ public function getRulePrice($date, $wId, $gId, $pId) { $data = $this->getRulePrices($date, $wId, $gId, array($pId)); if (isset($data[$pId])) { return $data[$pId]; } return false; } /** * Retrieve product prices by catalog rule for specific date, website and customer group * Collect data with product Id => price pairs * * @param int|string $date * @param int $websiteId * @param int $customerGroupId * @param array $productIds * * @return array */ public function getRulePrices($date, $websiteId, $customerGroupId, $productIds) { $adapter = $this->_getReadAdapter(); $select = $adapter->select() ->from($this->getTable('catalogrule/rule_product_price'), array('product_id', 'rule_price')) ->where('rule_date = ?', $this->formatDate($date, false)) ->where('website_id = ?', $websiteId) ->where('customer_group_id = ?', $customerGroupId) ->where('product_id IN(?)', $productIds); return $adapter->fetchPairs($select); } /** * Get active rule data based on few filters * * @param int|string $date * @param int $websiteId * @param int $customerGroupId * @param int $productId * @return array */ public function getRulesFromProduct($date, $websiteId, $customerGroupId, $productId) { $adapter = $this->_getReadAdapter(); if (is_string($date)) { $date = strtotime($date); } 
$select = $adapter->select() ->from($this->getTable('catalogrule/rule_product')) ->where('website_id = ?', $websiteId) ->where('customer_group_id = ?', $customerGroupId) ->where('product_id = ?', $productId) ->where('from_time = 0 or from_time < ?', $date) ->where('to_time = 0 or to_time > ?', $date); return $adapter->fetchAll($select); } /** * Retrieve product price data for all customer groups * * @param int|string $date * @param int $wId * @param int $pId * * @return array */ public function getRulesForProduct($date, $wId, $pId) { $read = $this->_getReadAdapter(); $select = $read->select() ->from($this->getTable('catalogrule/rule_product_price'), '*') ->where('rule_date=?', $this->formatDate($date, false)) ->where('website_id=?', $wId) ->where('product_id=?', $pId); return $read->fetchAll($select); } /** * Apply catalog rule to product * * @param Mage_CatalogRule_Model_Rule $rule * @param Mage_Catalog_Model_Product $product * @param array $websiteIds * * @throws Exception * @return Mage_CatalogRule_Model_Resource_Rule */ public function applyToProduct($rule, $product, $websiteIds) { if (!$rule->getIsActive()) { return $this; } $ruleId = $rule->getId(); $productId = $product->getId(); $write = $this->_getWriteAdapter(); $write->beginTransaction(); if ($this->_isProductMatchedRule($ruleId, $product)) { $this->cleanProductData($ruleId, array($productId)); } if ($this->validateProduct($rule, $product, $websiteIds)) { try { $this->insertRuleData($rule, $websiteIds, array( $productId => array_combine(array_values($websiteIds), array_values($websiteIds))) ); } catch (Exception $e) { $write->rollback(); throw $e; } } else { $write->delete($this->getTable('catalogrule/rule_product_price'), array( $write->quoteInto('product_id = ?', $productId), )); } $write->commit(); return $this; } /** * Get ids of matched rules for specific product * * @param int $productId * @return array */ public function getProductRuleIds($productId) { $read = $this->_getReadAdapter(); $select = $read->select()->from($this->getTable('catalogrule/rule_product'), 'rule_id'); $select->where('product_id = ?', $productId); return array_flip($read->fetchCol($select)); } /** * Is product has been matched the rule * * @param int $ruleId * @param Mage_Catalog_Model_Product $product * @return bool */ protected function _isProductMatchedRule($ruleId, $product) { $rules = $product->getMatchedRules(); return isset($rules[$ruleId]); } }
T0MM0R/magento
web/app/code/core/Mage/CatalogRule/Model/Resource/Rule.php
PHP
gpl-2.0
29,312
34.230769
117
0.518968
false
# -*- perl -*- # Cricket: a configuration, polling and data display wrapper for RRD files # # Copyright (C) 1998 Jeff R. Allen and WebTV Networks, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # define the ConfigRoot class package ConfigRoot; my $val; sub TIESCALAR { my $class = shift; my $me; $val = shift; bless \$me, $class; } sub FETCH { my $self = shift; if (!defined($val)) { return $Common::global::gCricketHome . "/cricket-config"; # check for relative path (both UNIX and DOS drive letter style) } elsif ($val !~ m#^/# && $val !~ m#^[a-z,A-Z]:/#) { return "$Common::global::gCricketHome/$val" unless ($^O eq 'MSWin32' && $Common::global::isGrapher); } return $val; } # this method will only be invoked if someone sets $gConfigRoot # after Common::global is loaded sub STORE { my $self = shift; $val = shift; return $self->FETCH(); } package Common::global; BEGIN { # Set defaults for things not picked up from cricket-config.pl $gCricketHome ||= $ENV{'HOME'}; tie $gConfigRoot, 'ConfigRoot', $gConfigRoot; if ($^O eq 'MSWin32') { $gCacheDir ||= "$ENV{'TEMP'}\\cricket-cache" if (defined($ENV{'TEMP'})); $gCacheDir ||= "c:\\temp\\cricket-cache"; } else { $gCacheDir ||= "$ENV{'TMPDIR'}/cricket-cache" if (defined($ENV{'TMPDIR'})); $gCacheDir ||= "/tmp/cricket-cache"; } $hasPersistantGlobals ||= 0; $hasPersistantGlobals = 1 if $ENV{'MOD_PERL'}; $hasPersistantGlobals = 1 if $CGI::SpeedyCGI::i_am_speedy; $gSkipMonitor ||= 0; $gUrlStyle ||= "classic"; if (!defined($isGrapher)) { $isGrapher = 0; } if (!defined($isCollector)) { $isCollector = 0; } if (!defined($gLongDSName)) { $gLongDSName = 0; } if (!defined($gLogFullPath)) { $gLogFullPath = 0; } if (!defined($gSubtreeTimes)) { $gSubtreeTimes = 0; } if (!defined($gMailer)) { $gMailer = '/bin/mailx'; } if (!defined($gCopyConfig)) { $gCopyConfig = 0; } } 1; # Local Variables: # mode: perl # indent-tabs-mode: nil # tab-width: 4 # perl-indent-level: 4 # End:
CricketRRD/cricket
lib/Common/global.pm
Perl
gpl-2.0
2,913
26.481132
74
0.607964
false
using System; using Server.Items; using Server.Mobiles; using System.Collections.Generic; using Server.Gumps; using Server.Commands; using System.Linq; namespace Server.Engines.Quests { public class TheQuestionsQuest : BaseQuest { public TheQuestionsQuest() : base() { AddObjective(new QuestionAndAnswerObjective(4, m_EntryTable)); } public override QuestChain ChainID { get { return QuestChain.CloakOfHumility; } } public override Type NextQuest { get { return typeof(CommunityServiceMuseumQuest); } } //The Question public override object Title { get { return 1076081; } } /*Greetings my friend! My name is Gareth, and I represent a group of citizens who wish to rejuvenate interest in our * kingdom's noble heritage. 'Tis our belief that one of Britannia's greatest triumphs was the institution of the Virtues, * neglected though they be now. To that end I have a set of tasks prepared for one who would follow a truly Humble path. * Art thou interested in joining our effort?*/ public override object Description { get { return 1075675; } } //I wish that thou wouldest reconsider. public override object Refuse { get { return 1075677; } } //Wonderful! First, let us see if thou art reading from the same roll of parchment as we are. *smiles* public override object Uncomplete { get { return 1075676; } } /*Very good! I can see that ye hath more than just a passing interest in our work. There are many trials before thee, but * I have every hope that ye shall have the diligence and fortitude to carry on to the very end. Before we begin, please * prepare thyself by thinking about the virtue of Humility. Ponder not only its symbols, but also its meanings. Once ye * believe that thou art ready, speak with me again.*/ public override object Complete { get { return 1075714; } } /*Ah... no, that is not quite right. Truly, Humility is something that takes time and experience to understand. 
I wish to * challenge thee to seek out more knowledge concerning this virtue, and tomorrow let us speak again about what thou hast * learned.<br>*/ public override object FailedMsg { get { return 1075713; } } public override bool RenderObjective(MondainQuestGump g, bool offer) { if (offer) g.AddHtmlLocalized(130, 45, 270, 16, 1049010, 0xFFFFFF, false, false); // Quest Offer else g.AddHtmlLocalized(130, 45, 270, 16, 1046026, 0xFFFFFF, false, false); // Quest Log g.AddHtmlObject(160, 70, 200, 40, Title, BaseQuestGump.DarkGreen, false, false); g.AddHtmlLocalized(98, 140, 312, 16, 1049073, 0x2710, false, false); // Objective: g.AddHtmlLocalized(98, 156, 312, 16, 1072208, 0x2710, false, false); // All of the following int offset = 172; string str; foreach (QuestionAndAnswerObjective obj in Objectives.OfType<QuestionAndAnswerObjective>()) { if (offer) str = String.Format("Answer {0} questions correctly.", obj.MaxProgress); else str = String.Format("Answer {0}/{1} questions answered correctly.", obj.CurProgress, obj.MaxProgress); g.AddHtmlObject(98, offset, 312, 16, str, BaseQuestGump.LightGreen, false, false); offset += 16; } return true; } public override void OnAccept() { base.OnAccept(); Owner.SendGump(new QAndAGump(Owner, this)); } public override void OnResign(bool chain) { base.OnResign(chain); m_CooldownTable[Owner] = DateTime.Now + TimeSpan.FromHours(24); } public override bool CanOffer() { DefragCooldown(); if (!m_CooldownTable.ContainsKey(Owner) || Owner.AccessLevel > AccessLevel.Player) return base.CanOffer(); return false; } private static void DefragCooldown() { List<Mobile> toRemove = new List<Mobile>(); foreach (KeyValuePair<Mobile, DateTime> kvp in m_CooldownTable) { if (kvp.Value < DateTime.Now) toRemove.Add(kvp.Key); } foreach (Mobile m in toRemove) { if (m_CooldownTable.ContainsKey(m)) m_CooldownTable.Remove(m); } } public override void Serialize(GenericWriter writer) { base.Serialize(writer); writer.Write((int)0); // version } public override void Deserialize(GenericReader reader) { base.Deserialize(reader); int version = reader.ReadInt(); } public static void Configure() { m_EntryTable[0] = new QuestionAndAnswerEntry(1075708, new object[] { 1075709 }, new object[] { 1075710, 1075711, 1075712 }); //<center>Finish this truism: Humility shows us...</center> m_EntryTable[1] = new QuestionAndAnswerEntry(1075678, new object[] { 1075679 }, new object[] { 1075680, 1075681, 1075682 }); //<center>What is the symbol of Humility?</center> m_EntryTable[2] = new QuestionAndAnswerEntry(1075683, new object[] { 1075685 }, new object[] { 1075684, 1075686, 1075687 }); //<center>What opposes Humility?</center> m_EntryTable[3] = new QuestionAndAnswerEntry(1075688, new object[] { 1075691 }, new object[] { 1075689, 1075690, 1075692 }); //<center>What is the color of Humility?</center> m_EntryTable[4] = new QuestionAndAnswerEntry(1075693, new object[] { 1075697 }, new object[] { 1075694, 1075695, 1075696 }); //<center>How doth one find Humility?</center> m_EntryTable[5] = new QuestionAndAnswerEntry(1075698, new object[] { 1075700 }, new object[] { 1075699, 1075601, 1075602 }); //<center>Which city embodies the need for Humility?</center> m_EntryTable[6] = new QuestionAndAnswerEntry(1075703, new object[] { 1075705 }, new object[] { 1075704, 1075706, 1075707 }); //<center>By name, which den of evil challenges one’s humility?</center> } private static QuestionAndAnswerEntry[] m_EntryTable = new QuestionAndAnswerEntry[7]; public static QuestionAndAnswerEntry[] EntryTable { get { return m_EntryTable; } 
} private static Dictionary<Mobile, DateTime> m_CooldownTable = new Dictionary<Mobile, DateTime>(); public static Dictionary<Mobile, DateTime> CooldownTable { get { return m_CooldownTable; } } } public class CommunityServiceMuseumQuest : BaseQuest { public CommunityServiceMuseumQuest() : base() { AddObjective(new CollectionsObtainObjective(typeof(ShepherdsCrookOfHumility), "Shepherd's Crook of Humility", 1)); } public override QuestChain ChainID { get { return QuestChain.CloakOfHumility; } } public override Type NextQuest { get { return typeof(CommunityServiceZooQuest); } } //Community Service - Museum public override object Title { get { return 1075716; } } /*'Tis time to help out the community of Britannia. Visit the Vesper Museum and donate to their collection, and eventually thou will * be able to receive a replica of the Shepherd's Crook of Humility. Once ye have it, return to me. Art thou willing to do this?*/ public override object Description { get { return 1075717; } } //I wish that thou wouldest reconsider. public override object Refuse { get { return 1075719; } } //Hello my friend. The museum sitteth in southern Vesper. If ye go downstairs, ye will discover a small donation chest. //That is the place where ye should leave thy donation. public override object Uncomplete { get { return 1075720; } } /*Terrific! The Museum is a worthy cause. Many will benefit from the inspiration and learning that thine donation hath supported.*/ public override object Complete { get { return 1075721; } } public override void Serialize(GenericWriter writer) { base.Serialize(writer); writer.Write((int)0); // version } public override void Deserialize(GenericReader reader) { base.Deserialize(reader); int version = reader.ReadInt(); } } public class CommunityServiceZooQuest : BaseQuest { public CommunityServiceZooQuest() : base() { AddObjective(new CollectionsObtainObjective(typeof(ForTheLifeOfBritanniaSash), "Life of Britannia Sash", 1)); } public override QuestChain ChainID { get { return QuestChain.CloakOfHumility; } } public override Type NextQuest { get { return typeof(CommunityServiceLibraryQuest); } } //Community Service – Zoo public override object Title { get { return 1075722; } } /*Now, go on and donate to the Moonglow Zoo. Givest thou enough to receive a 'For the Life of Britannia' sash. Once ye have it, * return it to me. Wilt thou continue?*/ public override object Description { get { return 1075723; } } //I wish that thou wouldest reconsider. public override object Refuse { get { return 1075725; } } //Hello again. The zoo lies a short ways south of Moonglow. Close to the entrance thou wilt discover a small donation chest. //That is where thou shouldest leave thy donation. public override object Uncomplete { get { return 1075726; } } /*Wonderful! The Zoo is a very special place from which people young and old canst benefit. 
Thanks to thee, it can continue to thrive.*/ public override object Complete { get { return 1075727; } } public override void Serialize(GenericWriter writer) { base.Serialize(writer); writer.Write((int)0); // version } public override void Deserialize(GenericReader reader) { base.Deserialize(reader); int version = reader.ReadInt(); } } public class CommunityServiceLibraryQuest : BaseQuest { public CommunityServiceLibraryQuest() : base() { AddObjective(new CollectionsObtainObjective(typeof(SpecialPrintingOfVirtue), "Special Printing of 'Virtue' Book", 1)); } public override QuestChain ChainID { get { return QuestChain.CloakOfHumility; } } public override Type NextQuest { get { return typeof(WhosMostHumbleQuest); } } //Community Service – Library public override object Title { get { return 1075728; } } /*I have one more charity for thee, my diligent friend. Go forth and donate to the Britain Library and do that which is necessary to receive * a special printing of ‘Virtue’, by Lord British. Once in hand, bring the book back with ye. Art thou ready?*/ public override object Description { get { return 1075729; } } //I wish that thou wouldest reconsider. public override object Refuse { get { return 1075731; } } //Art thou having trouble? The Library lieth north of Castle British's gates. I believe the representatives in charge of the //donations are easy enough to find. They await thy visit, amongst the many tomes of knowledge. public override object Uncomplete { get { return 1075732; } } /*Very good! The library is of great import to the people of Britannia. Thou hath done a worthy deed and this is thy last * required donation. I encourage thee to continue contributing to thine community, beyond the obligations of this endeavor.*/ public override object Complete { get { return 1075733; } } public override void Serialize(GenericWriter writer) { base.Serialize(writer); writer.Write((int)0); // version } public override void Deserialize(GenericReader reader) { base.Deserialize(reader); int version = reader.ReadInt(); } } public class WhosMostHumbleQuest : BaseQuest { private List<Item> m_QuestItems = new List<Item>(); private List<Mobile> m_GivenTo = new List<Mobile>(); private Dictionary<int, HumilityQuestMobileInfo> m_Infos = new Dictionary<int, HumilityQuestMobileInfo>(); public Dictionary<int, HumilityQuestMobileInfo> Infos { get { return m_Infos; } } public override bool CanRefuseReward { get { return true; } } public WhosMostHumbleQuest() : base() { AddObjective(new ObtainObjective(typeof(IronChain), "Iron Chain", 1)); AddReward(new BaseReward(typeof(GoldShield), "A Gold Shield")); } public override QuestChain ChainID { get { return QuestChain.CloakOfHumility; } } //Who's Most Humble public override object Title { get { return 1075734; } } /*Thou art challenged to find seven citizens spread out among the towns of Britannia: Skara Brae, Minoc, Britain, and * one of the towns upon an isle at sea. Each citizen wilt reveal some thought concerning Humility. But who doth best * exemplify the virtue? Here, thou needeth wear this plain grey cloak, for they wilt know ye by it. Wilt thou continue?*/ public override object Description { get { return 1075735; } } //'Tis a difficult quest, but well worth it. Wilt thou reconsider? public override object Refuse { get { return 1075737; } } /*There art no less than seven 'humble citizens' spread across the Britannia proper. I know that they can be found in the * towns of Minoc, Skara Brae and Britain. 
Another may be upon an island at sea, the name of which escapes me at the moment. * Thou needeth visit all seven to solve the puzzle. Be diligent, for they have a tendency to wander about.<BR><BR><br>Dost * thou wear the plain grey cloak?*/ public override object Uncomplete { get { return 1075738; } } /*Noble friend, thou hast performed tremendously! On behalf of the Rise of Britannia I wish to reward thee with this golden * shield, a symbol of accomplishment and pride for the many things that thou hast done for our people.<BR><BR><br>Dost thou accept?*/ public override object Complete { get { return 1075782; } } public override void OnAccept() { base.OnAccept(); Owner.SendGump(new QuestInfoGump(1075736)); // Excellent. When thou hast satisfied the needs of the most humble, thou wilt be given an item meant for me. Take this <B>brass ring</B> to start ye on the way. Item cloak = new GreyCloak(); Item ring = new BrassRing(); m_QuestItems.Add(cloak); m_QuestItems.Add(ring); Owner.Backpack.DropItem(cloak); Owner.Backpack.DropItem(ring); List<Type> itemTypes = new List<Type>(HumilityQuestMobileInfo.ItemTypes); List<Type> mobTypes = new List<Type>(HumilityQuestMobileInfo.MobileTypes); for (int i = 0; i < 25; i++) { int ran = Utility.RandomMinMax(1, itemTypes.Count - 2); Type t = itemTypes[ran]; itemTypes.Remove(t); itemTypes.Insert(Utility.RandomMinMax(1, itemTypes.Count - 2), t); } for (int i = 0; i < 25; i++) { int ran = Utility.RandomMinMax(0, mobTypes.Count - 2); if (ran > 0) { Type t = mobTypes[ran]; mobTypes.Remove(t); mobTypes.Insert(Utility.RandomMinMax(1, mobTypes.Count - 2), t); } } for (int i = 0; i < mobTypes.Count; i++) { int mobIndex = HumilityQuestMobileInfo.GetNPCIndex(mobTypes[i]); int need = i; int give = need + 1; Type needs = itemTypes[need]; Type gives = itemTypes[give]; m_Infos[mobIndex] = new HumilityQuestMobileInfo(needs, gives, HumilityQuestMobileInfo.GetLoc(needs), HumilityQuestMobileInfo.GetLoc(gives)); } } public override void OnResign(bool chain) { foreach (Item item in m_QuestItems) { if (item != null && !item.Deleted) item.Delete(); } base.OnResign(chain); } public override void GiveRewards() { foreach (Item item in m_QuestItems) { if(item != null && !item.Deleted) item.Delete(); } Owner.SendGump(new QuestInfoGump(1075783)); base.GiveRewards(); } public override void RefuseRewards() { foreach (Item item in m_QuestItems) { if (item is GreyCloak) ((GreyCloak)item).Owner = Owner; else if (item != null && !item.Deleted) item.Delete(); } Owner.SendGump(new QuestInfoGump(1075784)); base.RefuseRewards(); } public void AddQuestItem(Item item, Mobile from) { if (!m_QuestItems.Contains(item)) m_QuestItems.Add(item); OnGivenTo(from); } public void RemoveQuestItem(Item item) { if (m_QuestItems.Contains(item)) m_QuestItems.Remove(item); } public void OnGivenTo(Mobile m) { m_GivenTo.Add(m); } public bool HasGivenTo(Mobile m) { return m_GivenTo.Contains(m); } public override void Serialize(GenericWriter writer) { base.Serialize(writer); writer.Write((int)0); // version writer.Write(m_QuestItems.Count); foreach (Item item in m_QuestItems) writer.Write(item); writer.Write(m_Infos.Count); foreach(KeyValuePair<int, HumilityQuestMobileInfo> kvp in m_Infos) { writer.Write(kvp.Key); kvp.Value.Serialize(writer); } writer.Write(m_GivenTo.Count); foreach (Mobile m in m_GivenTo) writer.Write(m); } public override void Deserialize(GenericReader reader) { base.Deserialize(reader); int version = reader.ReadInt(); int count = reader.ReadInt(); for (int i = 0; i < count; i++) { Item item = 
reader.ReadItem(); if (item != null && !item.Deleted) m_QuestItems.Add(item); } count = reader.ReadInt(); for (int i = 0; i < count; i++) { int mobIndex = reader.ReadInt(); m_Infos[mobIndex] = new HumilityQuestMobileInfo(reader); } count = reader.ReadInt(); for (int i = 0; i < count; i++) { Mobile m = reader.ReadMobile(); if (m != null) m_GivenTo.Add(m); } } } }
HankTheDrunk/ServUO
Scripts/Quests/CloakOfHumility/CloakOfHumilityQuest.cs
C#
gpl-2.0
19,684
39.504115
217
0.602926
false
var Spinner = require('spin.js') exports.on = function () { // Fade out card this.$('.card-content').fadeTo(1, 0.4) // Create the spinner if it doesn't exist yet if (!this.spinner) { this.spinner = new Spinner() } // Start the spinner this.spinner.spin(this.el) } exports.off = function () { // Fade in card this.$('.card-content').fadeTo(1, 1) // Stop the spinner if (this.spinner) { this.spinner.stop() } }
timwis/vizwit
src/scripts/util/loader.js
JavaScript
gpl-2.0
446
17.583333
47
0.609865
false
/* * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. * */ #ifndef SHARE_VM_C1_C1_LIRGENERATOR_HPP #define SHARE_VM_C1_C1_LIRGENERATOR_HPP #include "c1/c1_Instruction.hpp" #include "c1/c1_LIR.hpp" #include "ci/ciMethodData.hpp" #include "utilities/sizes.hpp" // The classes responsible for code emission and register allocation class LIRGenerator; class LIREmitter; class Invoke; class SwitchRange; class LIRItem; define_array(LIRItemArray, LIRItem*) define_stack(LIRItemList, LIRItemArray) class SwitchRange: public CompilationResourceObj { private: int _low_key; int _high_key; BlockBegin* _sux; public: SwitchRange(int start_key, BlockBegin* sux): _low_key(start_key), _high_key(start_key), _sux(sux) {} void set_high_key(int key) { _high_key = key; } int high_key() const { return _high_key; } int low_key() const { return _low_key; } BlockBegin* sux() const { return _sux; } }; define_array(SwitchRangeArray, SwitchRange*) define_stack(SwitchRangeList, SwitchRangeArray) class ResolveNode; define_array(NodeArray, ResolveNode*); define_stack(NodeList, NodeArray); // Node objects form a directed graph of LIR_Opr // Edges between Nodes represent moves from one Node to its destinations class ResolveNode: public CompilationResourceObj { private: LIR_Opr _operand; // the source or destinaton NodeList _destinations; // for the operand bool _assigned; // Value assigned to this Node? bool _visited; // Node already visited? bool _start_node; // Start node already visited? public: ResolveNode(LIR_Opr operand) : _operand(operand) , _assigned(false) , _visited(false) , _start_node(false) {}; // accessors LIR_Opr operand() const { return _operand; } int no_of_destinations() const { return _destinations.length(); } ResolveNode* destination_at(int i) { return _destinations[i]; } bool assigned() const { return _assigned; } bool visited() const { return _visited; } bool start_node() const { return _start_node; } // modifiers void append(ResolveNode* dest) { _destinations.append(dest); } void set_assigned() { _assigned = true; } void set_visited() { _visited = true; } void set_start_node() { _start_node = true; } }; // This is shared state to be used by the PhiResolver so the operand // arrays don't have to be reallocated for reach resolution. 
class PhiResolverState: public CompilationResourceObj { friend class PhiResolver; private: NodeList _virtual_operands; // Nodes where the operand is a virtual register NodeList _other_operands; // Nodes where the operand is not a virtual register NodeList _vreg_table; // Mapping from virtual register to Node public: PhiResolverState() {} void reset(int max_vregs); }; // class used to move value of phi operand to phi function class PhiResolver: public CompilationResourceObj { private: LIRGenerator* _gen; PhiResolverState& _state; // temporary state cached by LIRGenerator ResolveNode* _loop; LIR_Opr _temp; // access to shared state arrays NodeList& virtual_operands() { return _state._virtual_operands; } NodeList& other_operands() { return _state._other_operands; } NodeList& vreg_table() { return _state._vreg_table; } ResolveNode* create_node(LIR_Opr opr, bool source); ResolveNode* source_node(LIR_Opr opr) { return create_node(opr, true); } ResolveNode* destination_node(LIR_Opr opr) { return create_node(opr, false); } void emit_move(LIR_Opr src, LIR_Opr dest); void move_to_temp(LIR_Opr src); void move_temp_to(LIR_Opr dest); void move(ResolveNode* src, ResolveNode* dest); LIRGenerator* gen() { return _gen; } public: PhiResolver(LIRGenerator* _lir_gen, int max_vregs); ~PhiResolver(); void move(LIR_Opr src, LIR_Opr dest); }; // only the classes below belong in the same file class LIRGenerator: public InstructionVisitor, public BlockClosure { private: Compilation* _compilation; ciMethod* _method; // method that we are compiling PhiResolverState _resolver_state; BlockBegin* _block; int _virtual_register_number; Values _instruction_for_operand; BitMap2D _vreg_flags; // flags which can be set on a per-vreg basis LIR_List* _lir; BarrierSet* _bs; LIRGenerator* gen() { return this; } void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN; #ifdef ASSERT LIR_List* lir(const char * file, int line) const { _lir->set_file_and_line(file, line); return _lir; } #endif LIR_List* lir() const { return _lir; } // a simple cache of constants used within a block GrowableArray<LIR_Const*> _constants; LIR_OprList _reg_for_constants; Values _unpinned_constants; friend class PhiResolver; // unified bailout support void bailout(const char* msg) const { compilation()->bailout(msg); } bool bailed_out() const { return compilation()->bailed_out(); } void block_do_prolog(BlockBegin* block); void block_do_epilog(BlockBegin* block); // register allocation LIR_Opr rlock(Value instr); // lock a free register LIR_Opr rlock_result(Value instr); LIR_Opr rlock_result(Value instr, BasicType type); LIR_Opr rlock_byte(BasicType type); LIR_Opr rlock_callee_saved(BasicType type); // get a constant into a register and get track of what register was used LIR_Opr load_constant(Constant* x); LIR_Opr load_constant(LIR_Const* constant); // Given an immediate value, return an operand usable in logical ops. 
LIR_Opr load_immediate(int x, BasicType type); void set_result(Value x, LIR_Opr opr) { assert(opr->is_valid(), "must set to valid value"); assert(x->operand()->is_illegal(), "operand should never change"); assert(!opr->is_register() || opr->is_virtual(), "should never set result to a physical register"); x->set_operand(opr); assert(opr == x->operand(), "must be"); if (opr->is_virtual()) { _instruction_for_operand.at_put_grow(opr->vreg_number(), x, NULL); } } void set_no_result(Value x) { assert(!x->has_uses(), "can't have use"); x->clear_operand(); } friend class LIRItem; LIR_Opr round_item(LIR_Opr opr); LIR_Opr force_to_spill(LIR_Opr value, BasicType t); PhiResolverState& resolver_state() { return _resolver_state; } void move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val); void move_to_phi(ValueStack* cur_state); // code emission void do_ArithmeticOp_Long (ArithmeticOp* x); void do_ArithmeticOp_Int (ArithmeticOp* x); void do_ArithmeticOp_FPU (ArithmeticOp* x); // platform dependent LIR_Opr getThreadPointer(); void do_RegisterFinalizer(Intrinsic* x); void do_isInstance(Intrinsic* x); void do_getClass(Intrinsic* x); void do_currentThread(Intrinsic* x); void do_MathIntrinsic(Intrinsic* x); void do_ArrayCopy(Intrinsic* x); void do_CompareAndSwap(Intrinsic* x, ValueType* type); void do_NIOCheckIndex(Intrinsic* x); void do_FPIntrinsics(Intrinsic* x); void do_Reference_get(Intrinsic* x); void do_update_CRC32(Intrinsic* x); void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store); LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info); LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info); // convenience functions LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info); LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info); // GC Barriers // generic interface void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info); void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); // specific implementations // pre barriers void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info); // post barriers void G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER void CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base); #endif static LIR_Opr result_register_for(ValueType* type, bool callee = false); ciObject* get_jobject_constant(Value value); LIRItemList* invoke_visit_arguments(Invoke* x); void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list); void trace_block_entry(BlockBegin* block); // volatile field operations are never patchable because a klass // must be loaded to know it's volatile which means that the offset // it always known as well. 
void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info); void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info); void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile); void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile); void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args); void increment_counter(address counter, BasicType type, int step = 1); void increment_counter(LIR_Address* addr, int step = 1); // is_strictfp is only needed for mul and div (and only generates different code on i486) void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL); // machine dependent. returns true if it emitted code for the multiply bool strength_reduce_multiply(LIR_Opr left, int constant, LIR_Opr result, LIR_Opr tmp); void store_stack_parameter (LIR_Opr opr, ByteSize offset_from_sp_in_bytes); void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve = false); // this loads the length and compares against the index void array_range_check (LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info); // For java.nio.Buffer.checkIndex void nio_range_check (LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info); void arithmetic_op_int (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp); void arithmetic_op_long (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL); void arithmetic_op_fpu (Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp = LIR_OprFact::illegalOpr); void shift_op (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr value, LIR_Opr count, LIR_Opr tmp); void logic_op (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr left, LIR_Opr right); void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info); void monitor_exit (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no); void new_instance (LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info); // machine dependent void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info); void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info); void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info); void arraycopy_helper(Intrinsic* x, int* flags, ciArrayKlass** expected_type); // returns a LIR_Address to address an array location. May also // emit some code as part of address calculation. If // needs_card_mark is true then compute the full address for use by // both the store and the card mark. 
LIR_Address* generate_address(LIR_Opr base, LIR_Opr index, int shift, int disp, BasicType type); LIR_Address* generate_address(LIR_Opr base, int disp, BasicType type) { return generate_address(base, LIR_OprFact::illegalOpr, 0, disp, type); } LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type, bool needs_card_mark); // the helper for generate_address void add_large_constant(LIR_Opr src, int c, LIR_Opr dest); // machine preferences and characteristics bool can_inline_as_constant(Value i) const; bool can_inline_as_constant(LIR_Const* c) const; bool can_store_as_constant(Value i, BasicType type) const; LIR_Opr safepoint_poll_register(); void profile_branch(If* if_instr, If::Condition cond); void increment_event_counter_impl(CodeEmitInfo* info, ciMethod *method, int frequency, int bci, bool backedge, bool notify); void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge); void increment_invocation_counter(CodeEmitInfo *info) { if (compilation()->count_invocations()) { increment_event_counter(info, InvocationEntryBci, false); } } void increment_backedge_counter(CodeEmitInfo* info, int bci) { if (compilation()->count_backedges()) { increment_event_counter(info, bci, true); } } CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false); CodeEmitInfo* state_for(Instruction* x); // allocates a virtual register for this instruction if // one isn't already allocated. Only for Phi and Local. LIR_Opr operand_for_instruction(Instruction *x); void set_block(BlockBegin* block) { _block = block; } void block_prolog(BlockBegin* block); void block_epilog(BlockBegin* block); void do_root (Instruction* instr); void walk (Instruction* instr); void bind_block_entry(BlockBegin* block); void start_block(BlockBegin* block); LIR_Opr new_register(BasicType type); LIR_Opr new_register(Value value) { return new_register(as_BasicType(value->type())); } LIR_Opr new_register(ValueType* type) { return new_register(as_BasicType(type)); } // returns a register suitable for doing pointer math LIR_Opr new_pointer_register() { #ifdef _LP64 return new_register(T_LONG); #else return new_register(T_INT); #endif } static LIR_Condition lir_cond(If::Condition cond) { LIR_Condition l; switch (cond) { case If::eql: l = lir_cond_equal; break; case If::neq: l = lir_cond_notEqual; break; case If::lss: l = lir_cond_less; break; case If::leq: l = lir_cond_lessEqual; break; case If::geq: l = lir_cond_greaterEqual; break; case If::gtr: l = lir_cond_greater; break; case If::aeq: l = lir_cond_aboveEqual; break; case If::beq: l = lir_cond_belowEqual; break; }; return l; } #ifdef __SOFTFP__ void do_soft_float_compare(If *x); #endif // __SOFTFP__ void init(); SwitchRangeArray* create_lookup_ranges(TableSwitch* x); SwitchRangeArray* create_lookup_ranges(LookupSwitch* x); void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux); void do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x); #ifdef TRACE_HAVE_INTRINSICS void do_ThreadIDIntrinsic(Intrinsic* x); void do_ClassIDIntrinsic(Intrinsic* x); #endif ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k, ciKlass* callee_signature_k); void profile_arguments(ProfileCall* x); void profile_parameters(Base* x); void profile_parameters_at_call(ProfileCall* x); public: Compilation* compilation() const { return _compilation; } FrameMap* frame_map() const { return 
_compilation->frame_map(); } ciMethod* method() const { return _method; } BlockBegin* block() const { return _block; } IRScope* scope() const { return block()->scope(); } int max_virtual_register_number() const { return _virtual_register_number; } void block_do(BlockBegin* block); // Flags that can be set on vregs enum VregFlag { must_start_in_memory = 0 // needs to be assigned a memory location at beginning, but may then be loaded in a register , callee_saved = 1 // must be in a callee saved register , byte_reg = 2 // must be in a byte register , num_vreg_flags }; LIRGenerator(Compilation* compilation, ciMethod* method) : _compilation(compilation) , _method(method) , _virtual_register_number(LIR_OprDesc::vreg_base) , _vreg_flags(NULL, 0, num_vreg_flags) { init(); } // for virtual registers, maps them back to Phi's or Local's Instruction* instruction_for_opr(LIR_Opr opr); Instruction* instruction_for_vreg(int reg_num); void set_vreg_flag (int vreg_num, VregFlag f); bool is_vreg_flag_set(int vreg_num, VregFlag f); void set_vreg_flag (LIR_Opr opr, VregFlag f) { set_vreg_flag(opr->vreg_number(), f); } bool is_vreg_flag_set(LIR_Opr opr, VregFlag f) { return is_vreg_flag_set(opr->vreg_number(), f); } // statics static LIR_Opr exceptionOopOpr(); static LIR_Opr exceptionPcOpr(); static LIR_Opr divInOpr(); static LIR_Opr divOutOpr(); static LIR_Opr remOutOpr(); static LIR_Opr shiftCountOpr(); LIR_Opr syncTempOpr(); LIR_Opr atomicLockOpr(); // returns a register suitable for saving the thread in a // call_runtime_leaf if one is needed. LIR_Opr getThreadTemp(); // visitor functionality virtual void do_Phi (Phi* x); virtual void do_Local (Local* x); virtual void do_Constant (Constant* x); virtual void do_LoadField (LoadField* x); virtual void do_StoreField (StoreField* x); virtual void do_ArrayLength (ArrayLength* x); virtual void do_LoadIndexed (LoadIndexed* x); virtual void do_StoreIndexed (StoreIndexed* x); virtual void do_NegateOp (NegateOp* x); virtual void do_ArithmeticOp (ArithmeticOp* x); virtual void do_ShiftOp (ShiftOp* x); virtual void do_LogicOp (LogicOp* x); virtual void do_CompareOp (CompareOp* x); virtual void do_IfOp (IfOp* x); virtual void do_Convert (Convert* x); virtual void do_NullCheck (NullCheck* x); virtual void do_TypeCast (TypeCast* x); virtual void do_Invoke (Invoke* x); virtual void do_NewInstance (NewInstance* x); virtual void do_NewTypeArray (NewTypeArray* x); virtual void do_NewObjectArray (NewObjectArray* x); virtual void do_NewMultiArray (NewMultiArray* x); virtual void do_CheckCast (CheckCast* x); virtual void do_InstanceOf (InstanceOf* x); virtual void do_MonitorEnter (MonitorEnter* x); virtual void do_MonitorExit (MonitorExit* x); virtual void do_Intrinsic (Intrinsic* x); virtual void do_BlockBegin (BlockBegin* x); virtual void do_Goto (Goto* x); virtual void do_If (If* x); virtual void do_IfInstanceOf (IfInstanceOf* x); virtual void do_TableSwitch (TableSwitch* x); virtual void do_LookupSwitch (LookupSwitch* x); virtual void do_Return (Return* x); virtual void do_Throw (Throw* x); virtual void do_Base (Base* x); virtual void do_OsrEntry (OsrEntry* x); virtual void do_ExceptionObject(ExceptionObject* x); virtual void do_RoundFP (RoundFP* x); virtual void do_UnsafeGetRaw (UnsafeGetRaw* x); virtual void do_UnsafePutRaw (UnsafePutRaw* x); virtual void do_UnsafeGetObject(UnsafeGetObject* x); virtual void do_UnsafePutObject(UnsafePutObject* x); virtual void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x); virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x); 
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x); virtual void do_ProfileCall (ProfileCall* x); virtual void do_ProfileReturnType (ProfileReturnType* x); virtual void do_ProfileInvoke (ProfileInvoke* x); virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); virtual void do_RangeCheckPredicate(RangeCheckPredicate* x); #ifdef ASSERT virtual void do_Assert (Assert* x); #endif #ifdef C1_LIRGENERATOR_MD_HPP #include C1_LIRGENERATOR_MD_HPP #endif }; class LIRItem: public CompilationResourceObj { private: Value _value; LIRGenerator* _gen; LIR_Opr _result; bool _destroys_register; LIR_Opr _new_result; LIRGenerator* gen() const { return _gen; } public: LIRItem(Value value, LIRGenerator* gen) { _destroys_register = false; _gen = gen; set_instruction(value); } LIRItem(LIRGenerator* gen) { _destroys_register = false; _gen = gen; _result = LIR_OprFact::illegalOpr; set_instruction(NULL); } void set_instruction(Value value) { _value = value; _result = LIR_OprFact::illegalOpr; if (_value != NULL) { _gen->walk(_value); _result = _value->operand(); } _new_result = LIR_OprFact::illegalOpr; } Value value() const { return _value; } ValueType* type() const { return value()->type(); } LIR_Opr result() { assert(!_destroys_register || (!_result->is_register() || _result->is_virtual()), "shouldn't use set_destroys_register with physical regsiters"); if (_destroys_register && _result->is_register()) { if (_new_result->is_illegal()) { _new_result = _gen->new_register(type()); gen()->lir()->move(_result, _new_result); } return _new_result; } else { return _result; } return _result; } void set_result(LIR_Opr opr); void load_item(); void load_byte_item(); void load_nonconstant(); // load any values which can't be expressed as part of a single store instruction void load_for_store(BasicType store_type); void load_item_force(LIR_Opr reg); void dont_load_item() { // do nothing } void set_destroys_register() { _destroys_register = true; } bool is_constant() const { return value()->as_Constant() != NULL; } bool is_stack() { return result()->is_stack(); } bool is_register() { return result()->is_register(); } ciObject* get_jobject_constant() const; jint get_jint_constant() const; jlong get_jlong_constant() const; jfloat get_jfloat_constant() const; jdouble get_jdouble_constant() const; jint get_address_constant() const; }; #endif // SHARE_VM_C1_C1_LIRGENERATOR_HPP
apurtell/jdk8u-hotspot
src/share/vm/c1/c1_LIRGenerator.hpp
C++
gpl-2.0
24,537
37.160187
198
0.670172
false
#ifdef DDR_PCTL #else #define APB_Wr(addr, data) *(volatile unsigned long *)(0xd0040000 + addr )=data #define APB_Rd(addr) *(volatile unsigned long *)(0xd0040000 + addr ) #define DDR_PCTL #define PCTL_SCFG_ADDR 0x000 #define PCTL_SCTL_ADDR 0x004 #define PCTL_STAT_ADDR 0x008 #define PCTL_MCMD_ADDR 0x040 #define PCTL_POWCTL_ADDR 0x044 #define PCTL_POWSTAT_ADDR 0x048 #define PCTL_MCFG_ADDR 0x080 #define PCTL_PPCFG_ADDR 0x084 #define PCTL_MSTAT_ADDR 0x088 #define PCTL_ODTCFG_ADDR 0x08c #define PCTL_DQSECFG_ADDR 0x090 #define PCTL_DTUPDES_ADDR 0x094 #define PCTL_DTUNA_ADDR 0x098 #define PCTL_DTUNE_ADDR 0x09c #define PCTL_DTUPRD0_ADDR 0x0a0 #define PCTL_DTUPRD1_ADDR 0x0a4 #define PCTL_DTUPRD2_ADDR 0x0a8 #define PCTL_DTUPRD3_ADDR 0x0ac #define PCTL_DTUAWDT_ADDR 0x0b0 #define PCTL_TOGCNT1U_ADDR 0x0c0 #define PCTL_TINIT_ADDR 0x0c4 #define PCTL_TRSTH_ADDR 0x0c8 #define PCTL_TOGCNT100N_ADDR 0x0cc #define PCTL_TREFI_ADDR 0x0d0 #define PCTL_TMRD_ADDR 0x0d4 #define PCTL_TRFC_ADDR 0x0d8 #define PCTL_TRP_ADDR 0x0dc #define PCTL_TRTW_ADDR 0x0e0 #define PCTL_TAL_ADDR 0x0e4 #define PCTL_TCL_ADDR 0x0e8 #define PCTL_TCWL_ADDR 0x0ec #define PCTL_TRAS_ADDR 0x0f0 #define PCTL_TRC_ADDR 0x0f4 #define PCTL_TRCD_ADDR 0x0f8 #define PCTL_TRRD_ADDR 0x0fc #define PCTL_TRTP_ADDR 0x100 #define PCTL_TWR_ADDR 0x104 #define PCTL_TWTR_ADDR 0x108 #define PCTL_TEXSR_ADDR 0x10c #define PCTL_TXP_ADDR 0x110 #define PCTL_TXPDLL_ADDR 0x114 #define PCTL_TZQCS_ADDR 0x118 #define PCTL_TZQCSI_ADDR 0x11c #define PCTL_TDQS_ADDR 0x120 #define PCTL_TCKSRE_ADDR 0x124 #define PCTL_TCKSRX_ADDR 0x128 #define PCTL_TCKE_ADDR 0x12c #define PCTL_TMOD_ADDR 0x130 #define PCTL_TRSTL_ADDR 0x134 #define PCTL_TZQCL_ADDR 0x138 #define PCTL_DWLCFG0_ADDR 0x170 #define PCTL_DWLCFG1_ADDR 0x174 #define PCTL_DWLCFG2_ADDR 0x178 #define PCTL_DWLCFG3_ADDR 0x17c #define PCTL_ECCCFG_ADDR 0x180 #define PCTL_ECCTST_ADDR 0x184 #define PCTL_ECCCLR_ADDR 0x188 #define PCTL_ECCLOG_ADDR 0x18c #define PCTL_ADDRMAP_ADDR 0x1c0 #define PCTL_IDDEC0_ADDR 0x1c4 #define PCTL_IDDEC1_ADDR 0x1c8 #define PCTL_DTUWACTL_ADDR 0x200 #define PCTL_DTURACTL_ADDR 0x204 #define PCTL_DTUCFG_ADDR 0x208 #define PCTL_DTUECTL_ADDR 0x20c #define PCTL_DTUWD0_ADDR 0x210 #define PCTL_DTUWD1_ADDR 0x214 #define PCTL_DTUWD2_ADDR 0x218 #define PCTL_DTUWD3_ADDR 0x21c #define PCTL_DTUWDM_ADDR 0x220 #define PCTL_DTURD0_ADDR 0x224 #define PCTL_DTURD1_ADDR 0x228 #define PCTL_DTURD2_ADDR 0x22c #define PCTL_DTURD3_ADDR 0x230 #define PCTL_DTULFSRWD_ADDR 0x234 #define PCTL_DTULFSRRD_ADDR 0x238 #define PCTL_DTUEAF_ADDR 0x23c #define PCTL_PHYCR_ADDR 0x240 #define PCTL_PHYSR_ADDR 0x244 #define PCTL_IOCR_ADDR 0x248 #define PCTL_RSLR0_ADDR 0x24c #define PCTL_RSLR1_ADDR 0x250 #define PCTL_RSLR2_ADDR 0x254 #define PCTL_RSLR3_ADDR 0x258 #define PCTL_RDGR0_ADDR 0x25c #define PCTL_RDGR1_ADDR 0x260 #define PCTL_RDGR2_ADDR 0x264 #define PCTL_RDGR3_ADDR 0x268 #define PCTL_ZQCR_ADDR 0x26c #define PCTL_ZQSR_ADDR 0x270 #define PCTL_DLLCR_ADDR 0x280 #define PCTL_DLLCR0_ADDR 0x284 #define PCTL_DLLCR1_ADDR 0x288 #define PCTL_DLLCR2_ADDR 0x28c #define PCTL_DLLCR3_ADDR 0x290 #define PCTL_DLLCR4_ADDR 0x294 #define PCTL_DLLCR5_ADDR 0x298 #define PCTL_DLLCR6_ADDR 0x29c #define PCTL_DLLCR7_ADDR 0x2a0 #define PCTL_DLLCR8_ADDR 0x2a4 #define PCTL_DLLCR9_ADDR 0x2a8 #define PCTL_DQTR0_ADDR 0x2c0 #define PCTL_DQTR1_ADDR 0x2c4 #define PCTL_DQTR2_ADDR 0x2c8 #define PCTL_DQTR3_ADDR 0x2cc #define PCTL_DQTR4_ADDR 0x2d0 #define PCTL_DQTR5_ADDR 0x2d4 #define PCTL_DQTR6_ADDR 0x2d8 #define PCTL_DQTR7_ADDR 0x2dc #define PCTL_DQTR8_ADDR 0x2e0 #define 
PCTL_DQSTR_ADDR 0x2e4 #define PCTL_DQSNTR_ADDR 0x2e8 #define PCTL_PHYPVTCFG_ADDR 0x300 #define PCTL_PHYPVTSTAT_ADDR 0x304 #define PCTL_PHYTUPDON_ADDR 0x308 #define PCTL_PHYTUPDDLY_ADDR 0x30c #define PCTL_PVTTUPDON_ADDR 0x310 #define PCTL_PVTTUPDDLY_ADDR 0x314 #define PCTL_PHYPVTUPDI_ADDR 0x318 #define PCTL_SCHCFG_ADDR 0x380 #define PCTL_IPVR_ADDR 0x3f8 #define PCTL_IPTR_ADDR 0x3fc #endif
bogdanov-d-a/Amlogic-reff16-uboot
arch/arm/cpu/aml_meson/m3/pctl.h
C
gpl-2.0
4,323
32.511628
79
0.728892
false
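A minimal usage sketch for the register map above, since pctl.h is only useful through the APB_Wr/APB_Rd accessors it defines. The macros and register offsets come from the header itself, but pctl_set_state, the command value, and the assumption that STAT echoes the controller state in its low three bits are illustrative guesses, not facts taken from this file.

#include "pctl.h"

/* Hypothetical bring-up helper: request a state change through SCTL and
 * spin until STAT reflects it. The 3-bit state field is an assumption
 * made for illustration; the real encoding lives in the DDR controller
 * databook, not in pctl.h. */
static void pctl_set_state(unsigned long cmd)
{
    APB_Wr(PCTL_SCTL_ADDR, cmd);                  /* write the command register */
    while ((APB_Rd(PCTL_STAT_ADDR) & 0x7) != cmd) /* assumed: state in bits [2:0] */
        ;                                         /* busy-wait for the transition */
}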
/** * @file: point.c * @brief: a point in 2D space * @date: 2015/09/12 */ #include "point.h" /** * @brief: fill the coordinates of a point pointed by buf * @param: int x x coordinate * @param: int y y coordinate * @param: struct point *buf points to the point to be set * @return: none */ inline void set_position(int x, int y, struct point *buf) { buf->x = x; buf->y = y; } /** * @brief: get x coordinate of a point * @param: struct point pt the point of the interest * @return: int the x coordinate */ inline int get_x_coord(struct point pt) { return pt.x; } /** * @brief: get y coordinate of a point * @param: struct point pt the point of the interest * @return: int the y coordinate */ inline int get_y_coord(struct point pt) { return pt.y; }
p2sun/ECE254-1
lab3/starter/mqueue/point.c
C
gpl-2.0
793
17.44186
60
0.631778
false
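To show the point API above in action, here is a small driver. The main() harness is an assumption added for illustration (it is not part of point.c), and it presumes point.h declares struct point with int members x and y, as the accessors imply.

#include <stdio.h>
#include "point.h"

int main(void)
{
    struct point pt;

    set_position(3, 4, &pt);    /* fill pt with the coordinates (3, 4) */
    printf("point = (%d, %d)\n",
           get_x_coord(pt),     /* reads back 3 */
           get_y_coord(pt));    /* reads back 4 */
    return 0;
}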
/** @file * * VBox frontends: Qt4 GUI ("VirtualBox"): * UIWizardImportAppPageExpert class declaration */ /* * Copyright (C) 2009-2013 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; * you can redistribute it and/or modify it under the terms of the GNU * General Public License (GPL) as published by the Free Software * Foundation, in version 2 as it comes in the "COPYING" file of the * VirtualBox OSE distribution. VirtualBox OSE is distributed in the * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. */ #ifndef __UIWizardImportAppPageExpert_h__ #define __UIWizardImportAppPageExpert_h__ /* Local includes: */ #include "UIWizardImportAppPageBasic1.h" #include "UIWizardImportAppPageBasic2.h" /* Forward declarations: */ class QGroupBox; /* Expert page of the Import Appliance wizard: */ class UIWizardImportAppPageExpert : public UIWizardPage, public UIWizardImportAppPage1, public UIWizardImportAppPage2 { Q_OBJECT; Q_PROPERTY(ImportAppliancePointer applianceWidget READ applianceWidget); public: /* Constructor: */ UIWizardImportAppPageExpert(const QString &strFileName); private slots: /* File-path change handler: */ void sltFilePathChangeHandler(); private: /* Translate stuff: */ void retranslateUi(); /* Prepare stuff: */ void initializePage(); /* Validation stuff: */ bool isComplete() const; bool validatePage(); /* Widgets: */ QGroupBox *m_pApplianceCnt; QGroupBox *m_pSettingsCnt; }; #endif /* __UIWizardImportAppPageExpert_h__ */
fintler/vbox
src/VBox/Frontends/VirtualBox/src/wizards/importappliance/UIWizardImportAppPageExpert.h
C
gpl-2.0
1,738
25.738462
76
0.693901
false
/*********************************************************************** Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. Copyright (c) 2009, Percona Inc. Portions of this file contain modifications contributed and copyrighted by Percona Inc.. Those modifications are gratefully acknowledged and are described briefly in the InnoDB documentation. The contributions by Percona Inc. are incorporated with their permission, and subject to the conditions contained in the file COPYING.Percona. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA ***********************************************************************/ /**************************************************//** @file os/os0file.c The interface to the operating system file i/o primitives Created 10/21/1995 Heikki Tuuri *******************************************************/ #include "os0file.h" #include "ut0mem.h" #include "srv0srv.h" #include "srv0start.h" #include "fil0fil.h" #include "buf0buf.h" #ifndef UNIV_HOTBACKUP # include "os0sync.h" # include "os0thread.h" #else /* !UNIV_HOTBACKUP */ # ifdef __WIN__ /* Add includes for the _stat() call to compile on Windows */ # include <sys/types.h> # include <sys/stat.h> # include <errno.h> # endif /* __WIN__ */ #endif /* !UNIV_HOTBACKUP */ /* This specifies the file permissions InnoDB uses when it creates files in Unix; the value of os_innodb_umask is initialized in ha_innodb.cc to my_umask */ #ifndef __WIN__ /** Umask for creating files */ UNIV_INTERN ulint os_innodb_umask = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; #else /** Umask for creating files */ UNIV_INTERN ulint os_innodb_umask = 0; #endif #ifdef UNIV_DO_FLUSH /* If the following is set to TRUE, we do not call os_file_flush in every os_file_write. We can set this TRUE when the doublewrite buffer is used. */ UNIV_INTERN ibool os_do_not_call_flush_at_each_write = FALSE; #else /* We do not call os_file_flush in every os_file_write. 
*/ #endif /* UNIV_DO_FLUSH */ #ifdef UNIV_HOTBACKUP # define os_aio_use_native_aio FALSE #else /* UNIV_HOTBACKUP */ /* We use these mutexes to protect lseek + file i/o operation, if the OS does not provide an atomic pread or pwrite, or similar */ #define OS_FILE_N_SEEK_MUTEXES 16 UNIV_INTERN os_mutex_t os_file_seek_mutexes[OS_FILE_N_SEEK_MUTEXES]; /* In simulated aio, merge at most this many consecutive i/os */ #define OS_AIO_MERGE_N_CONSECUTIVE 64 /** If this flag is TRUE, then we will use the native aio of the OS (provided we compiled Innobase with it in), otherwise we will use simulated aio we build below with threads */ UNIV_INTERN ibool os_aio_use_native_aio = FALSE; /** Flag: enable debug printout for asynchronous i/o */ UNIV_INTERN ibool os_aio_print_debug = FALSE; /** The asynchronous i/o array slot structure */ typedef struct os_aio_slot_struct os_aio_slot_t; /** The asynchronous i/o array slot structure */ struct os_aio_slot_struct{ ibool is_read; /*!< TRUE if a read operation */ ulint pos; /*!< index of the slot in the aio array */ ibool reserved; /*!< TRUE if this slot is reserved */ time_t reservation_time;/*!< time when reserved */ ulint len; /*!< length of the block to read or write */ byte* buf; /*!< buffer used in i/o */ ulint type; /*!< OS_FILE_READ or OS_FILE_WRITE */ ulint offset; /*!< 32 low bits of file offset in bytes */ ulint offset_high; /*!< 32 high bits of file offset */ os_file_t file; /*!< file where to read or write */ const char* name; /*!< file name or path */ ibool io_already_done;/*!< used only in simulated aio: TRUE if the physical i/o already made and only the slot message needs to be passed to the caller of os_aio_simulated_handle */ fil_node_t* message1; /*!< message which is given by the */ void* message2; /*!< the requester of an aio operation and which can be used to identify which pending aio operation was completed */ #ifdef WIN_ASYNC_IO os_event_t event; /*!< event object we need in the OVERLAPPED struct */ OVERLAPPED control; /*!< Windows control block for the aio request */ #endif }; /** The asynchronous i/o array structure */ typedef struct os_aio_array_struct os_aio_array_t; /** The asynchronous i/o array structure */ struct os_aio_array_struct{ os_mutex_t mutex; /*!< the mutex protecting the aio array */ os_event_t not_full; /*!< The event which is set to the signaled state when there is space in the aio outside the ibuf segment */ os_event_t is_empty; /*!< The event which is set to the signaled state when there are no pending i/os in this array */ ulint n_slots;/*!< Total number of slots in the aio array. This must be divisible by n_threads. */ ulint n_segments; /*!< Number of segments in the aio array of pending aio requests. A thread can wait separately for any one of the segments. */ ulint n_reserved; /*!< Number of reserved slots in the aio array outside the ibuf segment */ os_aio_slot_t* slots; /*!< Pointer to the slots in the array */ #ifdef __WIN__ os_native_event_t* native_events; /*!< Pointer to an array of OS native event handles where we copied the handles from slots, in the same order. This can be used in WaitForMultipleObjects; used only in Windows */ #endif }; /** Array of events used in simulated aio */ static os_event_t* os_aio_segment_wait_events = NULL; /** The aio arrays for non-ibuf i/o and ibuf i/o, as well as sync aio. These are NULL when the module has not yet been initialized. 
@{ */ static os_aio_array_t* os_aio_read_array = NULL; /*!< Reads */ static os_aio_array_t* os_aio_write_array = NULL; /*!< Writes */ static os_aio_array_t* os_aio_ibuf_array = NULL; /*!< Insert buffer */ static os_aio_array_t* os_aio_log_array = NULL; /*!< Redo log */ static os_aio_array_t* os_aio_sync_array = NULL; /*!< Synchronous I/O */ /* @} */ /** Number of asynchronous I/O segments. Set by os_aio_init(). */ static ulint os_aio_n_segments = ULINT_UNDEFINED; /** If the following is TRUE, read i/o handler threads try to wait until a batch of new read requests have been posted */ static ibool os_aio_recommend_sleep_for_read_threads = FALSE; #endif /* UNIV_HOTBACKUP */ UNIV_INTERN ulint os_n_file_reads = 0; UNIV_INTERN ulint os_bytes_read_since_printout = 0; UNIV_INTERN ulint os_n_file_writes = 0; UNIV_INTERN ulint os_n_fsyncs = 0; UNIV_INTERN ulint os_n_file_reads_old = 0; UNIV_INTERN ulint os_n_file_writes_old = 0; UNIV_INTERN ulint os_n_fsyncs_old = 0; UNIV_INTERN time_t os_last_printout; UNIV_INTERN ibool os_has_said_disk_full = FALSE; #ifndef UNIV_HOTBACKUP /** The mutex protecting the following counts of pending I/O operations */ static os_mutex_t os_file_count_mutex; #endif /* !UNIV_HOTBACKUP */ /** Number of pending os_file_pread() operations */ UNIV_INTERN ulint os_file_n_pending_preads = 0; /** Number of pending os_file_pwrite() operations */ UNIV_INTERN ulint os_file_n_pending_pwrites = 0; /** Number of pending write operations */ UNIV_INTERN ulint os_n_pending_writes = 0; /** Number of pending read operations */ UNIV_INTERN ulint os_n_pending_reads = 0; /***********************************************************************//** Gets the operating system version. Currently works only on Windows. @return OS_WIN95, OS_WIN31, OS_WINNT, OS_WIN2000 */ UNIV_INTERN ulint os_get_os_version(void) /*===================*/ { #ifdef __WIN__ OSVERSIONINFO os_info; os_info.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); ut_a(GetVersionEx(&os_info)); if (os_info.dwPlatformId == VER_PLATFORM_WIN32s) { return(OS_WIN31); } else if (os_info.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS) { return(OS_WIN95); } else if (os_info.dwPlatformId == VER_PLATFORM_WIN32_NT) { if (os_info.dwMajorVersion <= 4) { return(OS_WINNT); } else { return(OS_WIN2000); } } else { ut_error; return(0); } #else ut_error; return(0); #endif } /***********************************************************************//** Retrieves the last error number if an error occurs in a file io function. The number should be retrieved before any other OS calls (because they may overwrite the error number). If the number is not known to this program, the OS error number + 100 is returned. 
@return error number, or OS error number + 100 */ UNIV_INTERN ulint os_file_get_last_error( /*===================*/ ibool report_all_errors) /*!< in: TRUE if we want an error message printed of all errors */ { ulint err; #ifdef __WIN__ err = (ulint) GetLastError(); if (report_all_errors || (err != ERROR_DISK_FULL && err != ERROR_FILE_EXISTS)) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Operating system error number %lu" " in a file operation.\n", (ulong) err); if (err == ERROR_PATH_NOT_FOUND) { fprintf(stderr, "InnoDB: The error means the system" " cannot find the path specified.\n"); if (srv_is_being_started) { fprintf(stderr, "InnoDB: If you are installing InnoDB," " remember that you must create\n" "InnoDB: directories yourself, InnoDB" " does not create them.\n"); } } else if (err == ERROR_ACCESS_DENIED) { fprintf(stderr, "InnoDB: The error means mysqld does not have" " the access rights to\n" "InnoDB: the directory. It may also be" " you have created a subdirectory\n" "InnoDB: of the same name as a data file.\n"); } else if (err == ERROR_SHARING_VIOLATION || err == ERROR_LOCK_VIOLATION) { fprintf(stderr, "InnoDB: The error means that another program" " is using InnoDB's files.\n" "InnoDB: This might be a backup or antivirus" " software or another instance\n" "InnoDB: of MySQL." " Please close it to get rid of this error.\n"); } else if (err == ERROR_WORKING_SET_QUOTA || err == ERROR_NO_SYSTEM_RESOURCES) { fprintf(stderr, "InnoDB: The error means that there are no" " sufficient system resources or quota to" " complete the operation.\n"); } else if (err == ERROR_OPERATION_ABORTED) { fprintf(stderr, "InnoDB: The error means that the I/O" " operation has been aborted\n" "InnoDB: because of either a thread exit" " or an application request.\n" "InnoDB: Retry attempt is made.\n"); } else { fprintf(stderr, "InnoDB: Some operating system error numbers" " are described at\n" "InnoDB: " REFMAN "operating-system-error-codes.html\n"); } } fflush(stderr); if (err == ERROR_FILE_NOT_FOUND) { return(OS_FILE_NOT_FOUND); } else if (err == ERROR_DISK_FULL) { return(OS_FILE_DISK_FULL); } else if (err == ERROR_FILE_EXISTS) { return(OS_FILE_ALREADY_EXISTS); } else if (err == ERROR_SHARING_VIOLATION || err == ERROR_LOCK_VIOLATION) { return(OS_FILE_SHARING_VIOLATION); } else if (err == ERROR_WORKING_SET_QUOTA || err == ERROR_NO_SYSTEM_RESOURCES) { return(OS_FILE_INSUFFICIENT_RESOURCE); } else if (err == ERROR_OPERATION_ABORTED) { return(OS_FILE_OPERATION_ABORTED); } else { return(100 + err); } #else err = (ulint) errno; if (report_all_errors || (err != ENOSPC && err != EEXIST)) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Operating system error number %lu" " in a file operation.\n", (ulong) err); if (err == ENOENT) { fprintf(stderr, "InnoDB: The error means the system" " cannot find the path specified.\n"); if (srv_is_being_started) { fprintf(stderr, "InnoDB: If you are installing InnoDB," " remember that you must create\n" "InnoDB: directories yourself, InnoDB" " does not create them.\n"); } } else if (err == EACCES) { fprintf(stderr, "InnoDB: The error means mysqld does not have" " the access rights to\n" "InnoDB: the directory.\n"); } else { if (strerror((int)err) != NULL) { fprintf(stderr, "InnoDB: Error number %lu" " means '%s'.\n", err, strerror((int)err)); } fprintf(stderr, "InnoDB: Some operating system" " error numbers are described at\n" "InnoDB: " REFMAN "operating-system-error-codes.html\n"); } } fflush(stderr); if (err == ENOSPC) { return(OS_FILE_DISK_FULL); } else if (err 
== ENOENT) { return(OS_FILE_NOT_FOUND); } else if (err == EEXIST) { return(OS_FILE_ALREADY_EXISTS); } else if (err == EXDEV || err == ENOTDIR || err == EISDIR) { return(OS_FILE_PATH_ERROR); } else { return(100 + err); } #endif } /****************************************************************//** Does error handling when a file operation fails. Conditionally exits (calling exit(3)) based on should_exit value and the error type @return TRUE if we should retry the operation */ static ibool os_file_handle_error_cond_exit( /*===========================*/ const char* name, /*!< in: name of a file or NULL */ const char* operation, /*!< in: operation */ ibool should_exit) /*!< in: call exit(3) if unknown error and this parameter is TRUE */ { ulint err; err = os_file_get_last_error(FALSE); if (err == OS_FILE_DISK_FULL) { /* We only print a warning about disk full once */ if (os_has_said_disk_full) { return(FALSE); } if (name) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Encountered a problem with" " file %s\n", name); } ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Disk is full. Try to clean the disk" " to free space.\n"); os_has_said_disk_full = TRUE; fflush(stderr); return(FALSE); } else if (err == OS_FILE_AIO_RESOURCES_RESERVED) { return(TRUE); } else if (err == OS_FILE_ALREADY_EXISTS || err == OS_FILE_PATH_ERROR) { return(FALSE); } else if (err == OS_FILE_SHARING_VIOLATION) { os_thread_sleep(10000000); /* 10 sec */ return(TRUE); } else if (err == OS_FILE_INSUFFICIENT_RESOURCE) { os_thread_sleep(100000); /* 100 ms */ return(TRUE); } else if (err == OS_FILE_OPERATION_ABORTED) { os_thread_sleep(100000); /* 100 ms */ return(TRUE); } else { if (name) { fprintf(stderr, "InnoDB: File name %s\n", name); } fprintf(stderr, "InnoDB: File operation call: '%s'.\n", operation); if (should_exit) { fprintf(stderr, "InnoDB: Cannot continue operation.\n"); fflush(stderr); exit(1); } } return(FALSE); } /****************************************************************//** Does error handling when a file operation fails. @return TRUE if we should retry the operation */ static ibool os_file_handle_error( /*=================*/ const char* name, /*!< in: name of a file or NULL */ const char* operation)/*!< in: operation */ { /* exit in case of unknown error */ return(os_file_handle_error_cond_exit(name, operation, TRUE)); } /****************************************************************//** Does error handling when a file operation fails. @return TRUE if we should retry the operation */ static ibool os_file_handle_error_no_exit( /*=========================*/ const char* name, /*!< in: name of a file or NULL */ const char* operation)/*!< in: operation */ { /* don't exit in case of unknown error */ return(os_file_handle_error_cond_exit(name, operation, FALSE)); } #undef USE_FILE_LOCK #define USE_FILE_LOCK #if defined(UNIV_HOTBACKUP) || defined(__WIN__) || defined(__NETWARE__) /* InnoDB Hot Backup does not lock the data files. * On Windows, mandatory locking is used. */ # undef USE_FILE_LOCK #endif #ifdef USE_FILE_LOCK /****************************************************************//** Obtain an exclusive lock on a file. 
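The lock is advisory: it is taken with fcntl(F_SETLK) as a write lock
covering the whole file (l_start = l_len = 0), so it only guards against
other cooperating processes, such as a second mysqld using the same data
files.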
@return 0 on success */ static int os_file_lock( /*=========*/ int fd, /*!< in: file descriptor */ const char* name) /*!< in: file name */ { struct flock lk; lk.l_type = F_WRLCK; lk.l_whence = SEEK_SET; lk.l_start = lk.l_len = 0; if (fcntl(fd, F_SETLK, &lk) == -1) { fprintf(stderr, "InnoDB: Unable to lock %s, error: %d\n", name, errno); if (errno == EAGAIN || errno == EACCES) { fprintf(stderr, "InnoDB: Check that you do not already have" " another mysqld process\n" "InnoDB: using the same InnoDB data" " or log files.\n"); } return(-1); } return(0); } #endif /* USE_FILE_LOCK */ #ifndef UNIV_HOTBACKUP /****************************************************************//** Creates the seek mutexes used in positioned reads and writes. */ UNIV_INTERN void os_io_init_simple(void) /*===================*/ { ulint i; os_file_count_mutex = os_mutex_create(NULL); for (i = 0; i < OS_FILE_N_SEEK_MUTEXES; i++) { os_file_seek_mutexes[i] = os_mutex_create(NULL); } } /***********************************************************************//** Creates a temporary file. This function is like tmpfile(3), but the temporary file is created in the MySQL temporary directory. On Netware, this function is like tmpfile(3), because the C run-time library of Netware does not expose the delete-on-close flag. @return temporary file handle, or NULL on error */ UNIV_INTERN FILE* os_file_create_tmpfile(void) /*========================*/ { #ifdef __NETWARE__ FILE* file = tmpfile(); #else /* __NETWARE__ */ FILE* file = NULL; int fd = innobase_mysql_tmpfile(); if (fd >= 0) { file = fdopen(fd, "w+b"); } #endif /* __NETWARE__ */ if (!file) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: unable to create temporary file;" " errno: %d\n", errno); #ifndef __NETWARE__ if (fd >= 0) { close(fd); } #endif /* !__NETWARE__ */ } return(file); } #endif /* !UNIV_HOTBACKUP */ /***********************************************************************//** The os_file_opendir() function opens a directory stream corresponding to the directory named by the dirname argument. The directory stream is positioned at the first entry. In both Unix and Windows we automatically skip the '.' and '..' items at the start of the directory listing. @return directory stream, NULL if error */ UNIV_INTERN os_file_dir_t os_file_opendir( /*============*/ const char* dirname, /*!< in: directory name; it must not contain a trailing '\' or '/' */ ibool error_is_fatal) /*!< in: TRUE if we should treat an error as a fatal error; if we try to open symlinks then we do not wish a fatal error if it happens not to be a directory */ { os_file_dir_t dir; #ifdef __WIN__ LPWIN32_FIND_DATA lpFindFileData; char path[OS_FILE_MAX_PATH + 3]; ut_a(strlen(dirname) < OS_FILE_MAX_PATH); strcpy(path, dirname); strcpy(path + strlen(path), "\\*"); /* Note that in Windows opening the 'directory stream' also retrieves the first entry in the directory. Since it is '.', that is no problem, as we will skip over the '.' and '..' entries anyway. */ lpFindFileData = ut_malloc(sizeof(WIN32_FIND_DATA)); dir = FindFirstFile((LPCTSTR) path, lpFindFileData); ut_free(lpFindFileData); if (dir == INVALID_HANDLE_VALUE) { if (error_is_fatal) { os_file_handle_error(dirname, "opendir"); } return(NULL); } return(dir); #else dir = opendir(dirname); if (dir == NULL && error_is_fatal) { os_file_handle_error(dirname, "opendir"); } return(dir); #endif } /***********************************************************************//** Closes a directory stream. 
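This is a thin wrapper around FindClose() on Windows and closedir(3)
elsewhere; a failure is reported but never terminates the server.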
@return 0 if success, -1 if failure */
UNIV_INTERN
int
os_file_closedir(
/*=============*/
	os_file_dir_t	dir)	/*!< in: directory stream */
{
#ifdef __WIN__
	BOOL		ret;

	ret = FindClose(dir);

	if (!ret) {
		os_file_handle_error_no_exit(NULL, "closedir");

		return(-1);
	}

	return(0);
#else
	int	ret;

	ret = closedir(dir);

	if (ret) {
		os_file_handle_error_no_exit(NULL, "closedir");
	}

	return(ret);
#endif
}

/***********************************************************************//**
This function returns information about the next file in the directory. We
jump over the '.' and '..' entries in the directory.
@return 0 if ok, -1 if error, 1 if at the end of the directory */
UNIV_INTERN
int
os_file_readdir_next_file(
/*======================*/
	const char*	dirname,/*!< in: directory name or path */
	os_file_dir_t	dir,	/*!< in: directory stream */
	os_file_stat_t*	info)	/*!< in/out: buffer where the info is
				returned */
{
#ifdef __WIN__
	LPWIN32_FIND_DATA	lpFindFileData;
	BOOL			ret;

	lpFindFileData = ut_malloc(sizeof(WIN32_FIND_DATA));
next_file:
	ret = FindNextFile(dir, lpFindFileData);

	if (ret) {
		ut_a(strlen((char *) lpFindFileData->cFileName)
		     < OS_FILE_MAX_PATH);

		if (strcmp((char *) lpFindFileData->cFileName, ".") == 0
		    || strcmp((char *) lpFindFileData->cFileName, "..") == 0) {

			goto next_file;
		}

		strcpy(info->name, (char *) lpFindFileData->cFileName);

		info->size = (ib_int64_t)(lpFindFileData->nFileSizeLow)
			+ (((ib_int64_t)(lpFindFileData->nFileSizeHigh))
			   << 32);

		if (lpFindFileData->dwFileAttributes
		    & FILE_ATTRIBUTE_REPARSE_POINT) {
			/* TODO: test Windows symlinks */
			/* TODO: MySQL has apparently its own symlink
			implementation in Windows, dbname.sym can
			redirect a database directory:
			REFMAN "windows-symbolic-links.html" */
			info->type = OS_FILE_TYPE_LINK;
		} else if (lpFindFileData->dwFileAttributes
			   & FILE_ATTRIBUTE_DIRECTORY) {
			info->type = OS_FILE_TYPE_DIR;
		} else {
			/* It is probably safest to assume that all other
			file types are normal. Better to check them rather
			than blindly skip them. */

			info->type = OS_FILE_TYPE_FILE;
		}
	}

	ut_free(lpFindFileData);

	if (ret) {
		return(0);
	} else if (GetLastError() == ERROR_NO_MORE_FILES) {

		return(1);
	} else {
		os_file_handle_error_no_exit(dirname, "readdir_next_file");
		return(-1);
	}
#else
	struct dirent*	ent;
	char*		full_path;
	int		ret;
	struct stat	statinfo;
#ifdef HAVE_READDIR_R
	char		dirent_buf[sizeof(struct dirent)
				   + _POSIX_PATH_MAX + 100];
	/* In /mysys/my_lib.c, _POSIX_PATH_MAX + 1 is used as the max file
	name len; but in most standards, the length is NAME_MAX; we add 100
	to be even safer */
#endif

next_file:

#ifdef HAVE_READDIR_R
	ret = readdir_r(dir, (struct dirent*)dirent_buf, &ent);

	if (ret != 0
#ifdef UNIV_AIX
	    /* On AIX, only a non-NULL 'ent' (result) value together with a
	    non-zero 'ret' (return) value indicates a failed readdir_r()
	    call. A NULL 'ent' with a non-zero 'ret' would indicate that
	    the end of the directory has been reached. */
	    && ent != NULL
#endif
	    ) {
		fprintf(stderr,
			"InnoDB: cannot read directory %s, error %lu\n",
			dirname, (ulong)ret);

		return(-1);
	}

	if (ent == NULL) {
		/* End of directory */

		return(1);
	}

	ut_a(strlen(ent->d_name) < _POSIX_PATH_MAX + 100 - 1);
#else
	ent = readdir(dir);

	if (ent == NULL) {

		return(1);
	}
#endif
	ut_a(strlen(ent->d_name) < OS_FILE_MAX_PATH);

	if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0) {

		goto next_file;
	}

	strcpy(info->name, ent->d_name);

	full_path = ut_malloc(strlen(dirname) + strlen(ent->d_name) + 10);

	sprintf(full_path, "%s/%s", dirname, ent->d_name);

	ret = stat(full_path, &statinfo);

	if (ret) {

		if (errno == ENOENT) {
			/* readdir() returned a file that does not exist,
			it must have been deleted in the meantime. Do what
			would have happened if the file was deleted before
			readdir() - ignore and go to the next entry.
			If this is the last entry then info->name will still
			contain the name of the deleted file when this
			function returns, but this is not an issue since the
			caller shouldn't be looking at info when end of
			directory is returned. */

			ut_free(full_path);

			goto next_file;
		}

		os_file_handle_error_no_exit(full_path, "stat");

		ut_free(full_path);

		return(-1);
	}

	info->size = (ib_int64_t)statinfo.st_size;

	if (S_ISDIR(statinfo.st_mode)) {
		info->type = OS_FILE_TYPE_DIR;
	} else if (S_ISLNK(statinfo.st_mode)) {
		info->type = OS_FILE_TYPE_LINK;
	} else if (S_ISREG(statinfo.st_mode)) {
		info->type = OS_FILE_TYPE_FILE;
	} else {
		info->type = OS_FILE_TYPE_UNKNOWN;
	}

	ut_free(full_path);

	return(0);
#endif
}

/*****************************************************************//**
This function attempts to create a directory named pathname. The new
directory gets default permissions. On Unix the permissions are
(0770 & ~umask). If the directory exists already, nothing is done and
the call succeeds, unless the fail_if_exists argument is true.
@return TRUE if call succeeds, FALSE on error */
UNIV_INTERN
ibool
os_file_create_directory(
/*=====================*/
	const char*	pathname,	/*!< in: directory name as
					null-terminated string */
	ibool		fail_if_exists)	/*!< in: if TRUE, pre-existing
					directory is treated as an error. */
{
#ifdef __WIN__
	BOOL	rcode;

	rcode = CreateDirectory((LPCTSTR) pathname, NULL);
	if (!(rcode != 0
	      || (GetLastError() == ERROR_ALREADY_EXISTS
		  && !fail_if_exists))) {
		/* failure */
		os_file_handle_error(pathname, "CreateDirectory");

		return(FALSE);
	}

	return (TRUE);
#else
	int	rcode;

	rcode = mkdir(pathname, 0770);

	if (!(rcode == 0 || (errno == EEXIST && !fail_if_exists))) {
		/* failure */
		os_file_handle_error(pathname, "mkdir");

		return(FALSE);
	}

	return (TRUE);
#endif
}

/****************************************************************//**
A simple function to open or create a file.
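A minimal usage sketch (the path used here is hypothetical, for
illustration only):

	ibool		success;
	os_file_t	fh;

	fh = os_file_create_simple("/tmp/ib_example", OS_FILE_CREATE,
				   OS_FILE_READ_WRITE, &success);
	if (success) {
		os_file_close(fh);
	}
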
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN os_file_t os_file_create_simple( /*==================*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ ulint create_mode,/*!< in: OS_FILE_OPEN if an existing file is opened (if does not exist, error), or OS_FILE_CREATE if a new file is created (if exists, error), or OS_FILE_CREATE_PATH if new file (if exists, error) and subdirectories along its path are created (if needed)*/ ulint access_type,/*!< in: OS_FILE_READ_ONLY or OS_FILE_READ_WRITE */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ { #ifdef __WIN__ os_file_t file; DWORD create_flag; DWORD access; DWORD attributes = 0; ibool retry; try_again: ut_a(name); if (create_mode == OS_FILE_OPEN) { create_flag = OPEN_EXISTING; } else if (create_mode == OS_FILE_CREATE) { create_flag = CREATE_NEW; } else if (create_mode == OS_FILE_CREATE_PATH) { /* create subdirs along the path if needed */ *success = os_file_create_subdirs_if_needed(name); if (!*success) { ut_error; } create_flag = CREATE_NEW; create_mode = OS_FILE_CREATE; } else { create_flag = 0; ut_error; } if (access_type == OS_FILE_READ_ONLY) { access = GENERIC_READ; } else if (access_type == OS_FILE_READ_WRITE) { access = GENERIC_READ | GENERIC_WRITE; } else { access = 0; ut_error; } file = CreateFile((LPCTSTR) name, access, FILE_SHARE_READ | FILE_SHARE_WRITE, /* file can be read and written also by other processes */ NULL, /* default security attributes */ create_flag, attributes, NULL); /*!< no template file */ if (file == INVALID_HANDLE_VALUE) { *success = FALSE; retry = os_file_handle_error(name, create_mode == OS_FILE_OPEN ? "open" : "create"); if (retry) { goto try_again; } } else { *success = TRUE; } return(file); #else /* __WIN__ */ os_file_t file; int create_flag; ibool retry; try_again: ut_a(name); if (create_mode == OS_FILE_OPEN) { if (access_type == OS_FILE_READ_ONLY) { create_flag = O_RDONLY; } else { create_flag = O_RDWR; } } else if (create_mode == OS_FILE_CREATE) { create_flag = O_RDWR | O_CREAT | O_EXCL; } else if (create_mode == OS_FILE_CREATE_PATH) { /* create subdirs along the path if needed */ *success = os_file_create_subdirs_if_needed(name); if (!*success) { return (-1); } create_flag = O_RDWR | O_CREAT | O_EXCL; create_mode = OS_FILE_CREATE; } else { create_flag = 0; ut_error; } if (create_mode == OS_FILE_CREATE) { file = open(name, create_flag, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); } else { file = open(name, create_flag); } if (file == -1) { *success = FALSE; retry = os_file_handle_error(name, create_mode == OS_FILE_OPEN ? "open" : "create"); if (retry) { goto try_again; } #ifdef USE_FILE_LOCK } else if (access_type == OS_FILE_READ_WRITE && os_file_lock(file, name)) { *success = FALSE; close(file); file = -1; #endif } else { *success = TRUE; } return(file); #endif /* __WIN__ */ } /****************************************************************//** A simple function to open or create a file. 
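Unlike os_file_create_simple(), this variant never sleeps or retries and
prints nothing on failure; the caller must inspect *success and, if
needed, call os_file_get_last_error() itself.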
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN os_file_t os_file_create_simple_no_error_handling( /*====================================*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ ulint create_mode,/*!< in: OS_FILE_OPEN if an existing file is opened (if does not exist, error), or OS_FILE_CREATE if a new file is created (if exists, error) */ ulint access_type,/*!< in: OS_FILE_READ_ONLY, OS_FILE_READ_WRITE, or OS_FILE_READ_ALLOW_DELETE; the last option is used by a backup program reading the file */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ { #ifdef __WIN__ os_file_t file; DWORD create_flag; DWORD access; DWORD attributes = 0; DWORD share_mode = FILE_SHARE_READ | FILE_SHARE_WRITE; ut_a(name); if (create_mode == OS_FILE_OPEN) { create_flag = OPEN_EXISTING; } else if (create_mode == OS_FILE_CREATE) { create_flag = CREATE_NEW; } else { create_flag = 0; ut_error; } if (access_type == OS_FILE_READ_ONLY) { access = GENERIC_READ; } else if (access_type == OS_FILE_READ_WRITE) { access = GENERIC_READ | GENERIC_WRITE; } else if (access_type == OS_FILE_READ_ALLOW_DELETE) { access = GENERIC_READ; share_mode = FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE; /*!< A backup program has to give mysqld the maximum freedom to do what it likes with the file */ } else { access = 0; ut_error; } file = CreateFile((LPCTSTR) name, access, share_mode, NULL, /* default security attributes */ create_flag, attributes, NULL); /*!< no template file */ if (file == INVALID_HANDLE_VALUE) { *success = FALSE; } else { *success = TRUE; } return(file); #else /* __WIN__ */ os_file_t file; int create_flag; ut_a(name); if (create_mode == OS_FILE_OPEN) { if (access_type == OS_FILE_READ_ONLY) { create_flag = O_RDONLY; } else { create_flag = O_RDWR; } } else if (create_mode == OS_FILE_CREATE) { create_flag = O_RDWR | O_CREAT | O_EXCL; } else { create_flag = 0; ut_error; } if (create_mode == OS_FILE_CREATE) { file = open(name, create_flag, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); } else { file = open(name, create_flag); } if (file == -1) { *success = FALSE; #ifdef USE_FILE_LOCK } else if (access_type == OS_FILE_READ_WRITE && os_file_lock(file, name)) { *success = FALSE; close(file); file = -1; #endif } else { *success = TRUE; } return(file); #endif /* __WIN__ */ } /****************************************************************//** Tries to disable OS caching on an opened file descriptor. 
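On Solaris this is attempted with directio(DIRECTIO_ON); elsewhere, where
O_DIRECT is available, with fcntl(F_SETFL, O_DIRECT). A failure is only
logged and the file remains usable, with the OS cache left enabled.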
*/
UNIV_INTERN
void
os_file_set_nocache(
/*================*/
	int		fd		/*!< in: file descriptor to alter */
			__attribute__((unused)),
	const char*	file_name	/*!< in: used in the diagnostic
					message */
			__attribute__((unused)),
	const char*	operation_name __attribute__((unused)))
					/*!< in: "open" or "create"; used
					in the diagnostic message */
{
	/* some versions of Solaris may not have DIRECTIO_ON */
#if defined(UNIV_SOLARIS) && defined(DIRECTIO_ON)
	if (directio(fd, DIRECTIO_ON) == -1) {
		int	errno_save;
		errno_save = (int)errno;
		ut_print_timestamp(stderr);
		fprintf(stderr,
			" InnoDB: Failed to set DIRECTIO_ON "
			"on file %s: %s: %s, continuing anyway\n",
			file_name, operation_name, strerror(errno_save));
	}
#elif defined(O_DIRECT)
	if (fcntl(fd, F_SETFL, O_DIRECT) == -1) {
		int	errno_save;
		errno_save = (int)errno;
		ut_print_timestamp(stderr);
		fprintf(stderr,
			" InnoDB: Failed to set O_DIRECT "
			"on file %s: %s: %s, continuing anyway\n",
			file_name, operation_name, strerror(errno_save));
		if (errno_save == EINVAL) {
			ut_print_timestamp(stderr);
			fprintf(stderr,
				" InnoDB: O_DIRECT is known to result in "
				"'Invalid argument' on Linux on tmpfs, "
				"see MySQL Bug#26662\n");
		}
	}
#endif
}

/****************************************************************//**
Opens an existing file or creates a new one.
@return own: handle to the file, not defined if error, error number
can be retrieved with os_file_get_last_error */
UNIV_INTERN
os_file_t
os_file_create(
/*===========*/
	const char*	name,	/*!< in: name of the file or path as a
				null-terminated string */
	ulint		create_mode,/*!< in: OS_FILE_OPEN if an existing file
				is opened (if does not exist, error), or
				OS_FILE_CREATE if a new file is created
				(if exists, error),
				OS_FILE_OVERWRITE if a new file is created
				or an old overwritten;
				OS_FILE_OPEN_RAW, if a raw device or disk
				partition should be opened */
	ulint		purpose,/*!< in: OS_FILE_AIO, if asynchronous,
				non-buffered i/o is desired,
				OS_FILE_NORMAL, if any normal file;
				NOTE that it also depends on type, os_aio_..
				and srv_..
variables whether we really use async i/o or unbuffered i/o: look in the function source code for the exact rules */ ulint type, /*!< in: OS_DATA_FILE or OS_LOG_FILE */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ { #ifdef __WIN__ os_file_t file; DWORD share_mode = FILE_SHARE_READ; DWORD create_flag; DWORD attributes; ibool retry; DBUG_EXECUTE_IF( "ib_create_table_fail_disk_full", *success = FALSE; SetLastError(ERROR_DISK_FULL); return((os_file_t) -1); ); try_again: ut_a(name); if (create_mode == OS_FILE_OPEN_RAW) { create_flag = OPEN_EXISTING; share_mode = FILE_SHARE_WRITE; } else if (create_mode == OS_FILE_OPEN || create_mode == OS_FILE_OPEN_RETRY) { create_flag = OPEN_EXISTING; } else if (create_mode == OS_FILE_CREATE) { create_flag = CREATE_NEW; } else if (create_mode == OS_FILE_OVERWRITE) { create_flag = CREATE_ALWAYS; } else { create_flag = 0; ut_error; } if (purpose == OS_FILE_AIO) { /* If specified, use asynchronous (overlapped) io and no buffering of writes in the OS */ attributes = 0; #ifdef WIN_ASYNC_IO if (os_aio_use_native_aio) { attributes = attributes | FILE_FLAG_OVERLAPPED; } #endif #ifdef UNIV_NON_BUFFERED_IO # ifndef UNIV_HOTBACKUP if (type == OS_LOG_FILE && srv_flush_log_at_trx_commit == 2) { /* Do not use unbuffered i/o to log files because value 2 denotes that we do not flush the log at every commit, but only once per second */ } else if (srv_win_file_flush_method == SRV_WIN_IO_UNBUFFERED) { attributes = attributes | FILE_FLAG_NO_BUFFERING; } # else /* !UNIV_HOTBACKUP */ attributes = attributes | FILE_FLAG_NO_BUFFERING; # endif /* !UNIV_HOTBACKUP */ #endif /* UNIV_NON_BUFFERED_IO */ } else if (purpose == OS_FILE_NORMAL) { attributes = 0; #ifdef UNIV_NON_BUFFERED_IO # ifndef UNIV_HOTBACKUP if (type == OS_LOG_FILE && srv_flush_log_at_trx_commit == 2) { /* Do not use unbuffered i/o to log files because value 2 denotes that we do not flush the log at every commit, but only once per second */ } else if (srv_win_file_flush_method == SRV_WIN_IO_UNBUFFERED) { attributes = attributes | FILE_FLAG_NO_BUFFERING; } # else /* !UNIV_HOTBACKUP */ attributes = attributes | FILE_FLAG_NO_BUFFERING; # endif /* !UNIV_HOTBACKUP */ #endif /* UNIV_NON_BUFFERED_IO */ } else { attributes = 0; ut_error; } file = CreateFile((LPCTSTR) name, GENERIC_READ | GENERIC_WRITE, /* read and write access */ share_mode, /* File can be read also by other processes; we must give the read permission because of ibbackup. We do not give the write permission to others because if one would succeed to start 2 instances of mysqld on the SAME files, that could cause severe database corruption! When opening raw disk partitions, Microsoft manuals say that we must give also the write permission. */ NULL, /* default security attributes */ create_flag, attributes, NULL); /*!< no template file */ if (file == INVALID_HANDLE_VALUE) { *success = FALSE; /* When srv_file_per_table is on, file creation failure may not be critical to the whole instance. Do not crash the server in case of unknown errors. Please note "srv_file_per_table" is a global variable with no explicit synchronization protection. It could be changed during this execution path. It might not have the same value as the one when building the table definition */ if (srv_file_per_table) { retry = os_file_handle_error_no_exit(name, create_mode == OS_FILE_CREATE ? "create" : "open"); } else { retry = os_file_handle_error(name, create_mode == OS_FILE_CREATE ? 
"create" : "open"); } if (retry) { goto try_again; } } else { *success = TRUE; } return(file); #else /* __WIN__ */ os_file_t file; int create_flag; ibool retry; const char* mode_str = NULL; DBUG_EXECUTE_IF( "ib_create_table_fail_disk_full", *success = FALSE; errno = ENOSPC; return((os_file_t) -1); ); try_again: ut_a(name); if (create_mode == OS_FILE_OPEN || create_mode == OS_FILE_OPEN_RAW || create_mode == OS_FILE_OPEN_RETRY) { mode_str = "OPEN"; create_flag = O_RDWR; } else if (create_mode == OS_FILE_CREATE) { mode_str = "CREATE"; create_flag = O_RDWR | O_CREAT | O_EXCL; } else if (create_mode == OS_FILE_OVERWRITE) { mode_str = "OVERWRITE"; create_flag = O_RDWR | O_CREAT | O_TRUNC; } else { create_flag = 0; ut_error; } ut_a(type == OS_LOG_FILE || type == OS_DATA_FILE); ut_a(purpose == OS_FILE_AIO || purpose == OS_FILE_NORMAL); #ifdef O_SYNC /* We let O_SYNC only affect log files; note that we map O_DSYNC to O_SYNC because the datasync options seemed to corrupt files in 2001 in both Linux and Solaris */ if (type == OS_LOG_FILE && srv_unix_file_flush_method == SRV_UNIX_O_DSYNC) { # if 0 fprintf(stderr, "Using O_SYNC for file %s\n", name); # endif create_flag = create_flag | O_SYNC; } #endif /* O_SYNC */ file = open(name, create_flag, os_innodb_umask); if (file == -1) { *success = FALSE; /* When srv_file_per_table is on, file creation failure may not be critical to the whole instance. Do not crash the server in case of unknown errors. Please note "srv_file_per_table" is a global variable with no explicit synchronization protection. It could be changed during this execution path. It might not have the same value as the one when building the table definition */ if (srv_file_per_table) { retry = os_file_handle_error_no_exit(name, create_mode == OS_FILE_CREATE ? "create" : "open"); } else { retry = os_file_handle_error(name, create_mode == OS_FILE_CREATE ? "create" : "open"); } if (retry) { goto try_again; } else { return(file /* -1 */); } } /* else */ *success = TRUE; /* We disable OS caching (O_DIRECT) only on data files */ if (type != OS_LOG_FILE && srv_unix_file_flush_method == SRV_UNIX_O_DIRECT) { os_file_set_nocache(file, name, mode_str); } #ifdef USE_FILE_LOCK if (create_mode != OS_FILE_OPEN_RAW && os_file_lock(file, name)) { if (create_mode == OS_FILE_OPEN_RETRY) { int i; ut_print_timestamp(stderr); fputs(" InnoDB: Retrying to lock" " the first data file\n", stderr); for (i = 0; i < 100; i++) { os_thread_sleep(1000000); if (!os_file_lock(file, name)) { *success = TRUE; return(file); } } ut_print_timestamp(stderr); fputs(" InnoDB: Unable to open the first data file\n", stderr); } *success = FALSE; close(file); file = -1; } #endif /* USE_FILE_LOCK */ return(file); #endif /* __WIN__ */ } /***********************************************************************//** Deletes a file if it exists. The file has to be closed before calling this. 
@return TRUE if success */ UNIV_INTERN ibool os_file_delete_if_exists( /*=====================*/ const char* name) /*!< in: file path as a null-terminated string */ { #ifdef __WIN__ BOOL ret; ulint count = 0; loop: /* In Windows, deleting an .ibd file may fail if ibbackup is copying it */ ret = DeleteFile((LPCTSTR)name); if (ret) { return(TRUE); } if (GetLastError() == ERROR_FILE_NOT_FOUND) { /* the file does not exist, this not an error */ return(TRUE); } count++; if (count > 100 && 0 == (count % 10)) { fprintf(stderr, "InnoDB: Warning: cannot delete file %s\n" "InnoDB: Are you running ibbackup" " to back up the file?\n", name); os_file_get_last_error(TRUE); /* print error information */ } os_thread_sleep(1000000); /* sleep for a second */ if (count > 2000) { return(FALSE); } goto loop; #else int ret; ret = unlink(name); if (ret != 0 && errno != ENOENT) { os_file_handle_error_no_exit(name, "delete"); return(FALSE); } return(TRUE); #endif } /***********************************************************************//** Deletes a file. The file has to be closed before calling this. @return TRUE if success */ UNIV_INTERN ibool os_file_delete( /*===========*/ const char* name) /*!< in: file path as a null-terminated string */ { #ifdef __WIN__ BOOL ret; ulint count = 0; loop: /* In Windows, deleting an .ibd file may fail if ibbackup is copying it */ ret = DeleteFile((LPCTSTR)name); if (ret) { return(TRUE); } if (GetLastError() == ERROR_FILE_NOT_FOUND) { /* If the file does not exist, we classify this as a 'mild' error and return */ return(FALSE); } count++; if (count > 100 && 0 == (count % 10)) { fprintf(stderr, "InnoDB: Warning: cannot delete file %s\n" "InnoDB: Are you running ibbackup" " to back up the file?\n", name); os_file_get_last_error(TRUE); /* print error information */ } os_thread_sleep(1000000); /* sleep for a second */ if (count > 2000) { return(FALSE); } goto loop; #else int ret; ret = unlink(name); if (ret != 0) { os_file_handle_error_no_exit(name, "delete"); return(FALSE); } return(TRUE); #endif } /***********************************************************************//** Renames a file (can also move it to another directory). It is safest that the file is closed before calling this function. @return TRUE if success */ UNIV_INTERN ibool os_file_rename( /*===========*/ const char* oldpath,/*!< in: old file path as a null-terminated string */ const char* newpath)/*!< in: new file path */ { #ifdef __WIN__ BOOL ret; ret = MoveFile((LPCTSTR)oldpath, (LPCTSTR)newpath); if (ret) { return(TRUE); } os_file_handle_error_no_exit(oldpath, "rename"); return(FALSE); #else int ret; ret = rename(oldpath, newpath); if (ret != 0) { os_file_handle_error_no_exit(oldpath, "rename"); return(FALSE); } return(TRUE); #endif } /***********************************************************************//** Closes a file handle. In case of error, error number can be retrieved with os_file_get_last_error. @return TRUE if success */ UNIV_INTERN ibool os_file_close( /*==========*/ os_file_t file) /*!< in, own: handle to a file */ { #ifdef __WIN__ BOOL ret; ut_a(file); ret = CloseHandle(file); if (ret) { return(TRUE); } os_file_handle_error(NULL, "close"); return(FALSE); #else int ret; ret = close(file); if (ret == -1) { os_file_handle_error(NULL, "close"); return(FALSE); } return(TRUE); #endif } #ifdef UNIV_HOTBACKUP /***********************************************************************//** Closes a file handle. 
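Like os_file_close(), but it prints no message on failure; this variant
is compiled only for InnoDB Hot Backup.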
@return TRUE if success */ UNIV_INTERN ibool os_file_close_no_error_handling( /*============================*/ os_file_t file) /*!< in, own: handle to a file */ { #ifdef __WIN__ BOOL ret; ut_a(file); ret = CloseHandle(file); if (ret) { return(TRUE); } return(FALSE); #else int ret; ret = close(file); if (ret == -1) { return(FALSE); } return(TRUE); #endif } #endif /* UNIV_HOTBACKUP */ /***********************************************************************//** Gets a file size. @return TRUE if success */ UNIV_INTERN ibool os_file_get_size( /*=============*/ os_file_t file, /*!< in: handle to a file */ ulint* size, /*!< out: least significant 32 bits of file size */ ulint* size_high)/*!< out: most significant 32 bits of size */ { #ifdef __WIN__ DWORD high; DWORD low; low = GetFileSize(file, &high); if ((low == 0xFFFFFFFF) && (GetLastError() != NO_ERROR)) { return(FALSE); } *size = low; *size_high = high; return(TRUE); #else off_t offs; offs = lseek(file, 0, SEEK_END); if (offs == ((off_t)-1)) { return(FALSE); } if (sizeof(off_t) > 4) { *size = (ulint)(offs & 0xFFFFFFFFUL); *size_high = (ulint)(offs >> 32); } else { *size = (ulint) offs; *size_high = 0; } return(TRUE); #endif } /***********************************************************************//** Gets file size as a 64-bit integer ib_int64_t. @return size in bytes, -1 if error */ UNIV_INTERN ib_int64_t os_file_get_size_as_iblonglong( /*===========================*/ os_file_t file) /*!< in: handle to a file */ { ulint size; ulint size_high; ibool success; success = os_file_get_size(file, &size, &size_high); if (!success) { return(-1); } return((((ib_int64_t)size_high) << 32) + (ib_int64_t)size); } /***********************************************************************//** Write the specified number of zeros to a newly created file. @return TRUE if success */ UNIV_INTERN ibool os_file_set_size( /*=============*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ os_file_t file, /*!< in: handle to a file */ ulint size, /*!< in: least significant 32 bits of file size */ ulint size_high)/*!< in: most significant 32 bits of size */ { ib_int64_t current_size; ib_int64_t desired_size; ibool ret; byte* buf; byte* buf2; ulint buf_size; ut_a(size == (size & 0xFFFFFFFF)); current_size = 0; desired_size = (ib_int64_t)size + (((ib_int64_t)size_high) << 32); /* Write up to 1 megabyte at a time. 
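(64 pages at most; with the default UNIV_PAGE_SIZE of 16 kB this is
64 * 16 kB = 1 MB, and proportionally less for files smaller than 64
pages.)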
*/ buf_size = ut_min(64, (ulint) (desired_size / UNIV_PAGE_SIZE)) * UNIV_PAGE_SIZE; buf2 = ut_malloc(buf_size + UNIV_PAGE_SIZE); /* Align the buffer for possible raw i/o */ buf = ut_align(buf2, UNIV_PAGE_SIZE); /* Write buffer full of zeros */ memset(buf, 0, buf_size); if (desired_size >= (ib_int64_t)(100 * 1024 * 1024)) { fprintf(stderr, "InnoDB: Progress in MB:"); } while (current_size < desired_size) { ulint n_bytes; if (desired_size - current_size < (ib_int64_t) buf_size) { n_bytes = (ulint) (desired_size - current_size); } else { n_bytes = buf_size; } ret = os_file_write(name, file, buf, (ulint)(current_size & 0xFFFFFFFF), (ulint)(current_size >> 32), n_bytes); if (!ret) { ut_free(buf2); goto error_handling; } /* Print about progress for each 100 MB written */ if ((ib_int64_t) (current_size + n_bytes) / (ib_int64_t)(100 * 1024 * 1024) != current_size / (ib_int64_t)(100 * 1024 * 1024)) { fprintf(stderr, " %lu00", (ulong) ((current_size + n_bytes) / (ib_int64_t)(100 * 1024 * 1024))); } current_size += n_bytes; } if (desired_size >= (ib_int64_t)(100 * 1024 * 1024)) { fprintf(stderr, "\n"); } ut_free(buf2); ret = os_file_flush(file); if (ret) { return(TRUE); } error_handling: return(FALSE); } /***********************************************************************//** Truncates a file at its current position. @return TRUE if success */ UNIV_INTERN ibool os_file_set_eof( /*============*/ FILE* file) /*!< in: file to be truncated */ { #ifdef __WIN__ HANDLE h = (HANDLE) _get_osfhandle(fileno(file)); return(SetEndOfFile(h)); #else /* __WIN__ */ return(!ftruncate(fileno(file), ftell(file))); #endif /* __WIN__ */ } #ifndef __WIN__ /***********************************************************************//** Wrapper to fsync(2) that retries the call on some errors. Returns the value 0 if successful; otherwise the value -1 is returned and the global variable errno is set to indicate the error. @return 0 if success, -1 otherwise */ static int os_file_fsync( /*==========*/ os_file_t file) /*!< in: handle to a file */ { int ret; int failures; ibool retry; failures = 0; do { ret = fsync(file); os_n_fsyncs++; if (ret == -1 && errno == ENOLCK) { if (failures % 100 == 0) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: fsync(): " "No locks available; retrying\n"); } os_thread_sleep(200000 /* 0.2 sec */); failures++; retry = TRUE; } else { retry = FALSE; } } while (retry); return(ret); } #endif /* !__WIN__ */ /***********************************************************************//** Flushes the write buffers of a given file to the disk. 
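This maps to FlushFileBuffers() on Windows, to fcntl(F_FULLFSYNC) on
Mac OS X where available, and to fsync(2) otherwise. An unexplained
flush failure is treated as fatal, because it could leave the database
corrupt on disk.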
@return TRUE if success */ UNIV_INTERN ibool os_file_flush( /*==========*/ os_file_t file) /*!< in, own: handle to a file */ { #ifdef __WIN__ BOOL ret; ut_a(file); os_n_fsyncs++; ret = FlushFileBuffers(file); if (ret) { return(TRUE); } /* Since Windows returns ERROR_INVALID_FUNCTION if the 'file' is actually a raw device, we choose to ignore that error if we are using raw disks */ if (srv_start_raw_disk_in_use && GetLastError() == ERROR_INVALID_FUNCTION) { return(TRUE); } os_file_handle_error(NULL, "flush"); /* It is a fatal error if a file flush does not succeed, because then the database can get corrupt on disk */ ut_error; return(FALSE); #else int ret; #if defined(HAVE_DARWIN_THREADS) # ifndef F_FULLFSYNC /* The following definition is from the Mac OS X 10.3 <sys/fcntl.h> */ # define F_FULLFSYNC 51 /* fsync + ask the drive to flush to the media */ # elif F_FULLFSYNC != 51 # error "F_FULLFSYNC != 51: ABI incompatibility with Mac OS X 10.3" # endif /* Apple has disabled fsync() for internal disk drives in OS X. That caused corruption for a user when he tested a power outage. Let us in OS X use a nonstandard flush method recommended by an Apple engineer. */ if (!srv_have_fullfsync) { /* If we are not on an operating system that supports this, then fall back to a plain fsync. */ ret = os_file_fsync(file); } else { ret = fcntl(file, F_FULLFSYNC, NULL); if (ret) { /* If we are not on a file system that supports this, then fall back to a plain fsync. */ ret = os_file_fsync(file); } } #else ret = os_file_fsync(file); #endif if (ret == 0) { return(TRUE); } /* Since Linux returns EINVAL if the 'file' is actually a raw device, we choose to ignore that error if we are using raw disks */ if (srv_start_raw_disk_in_use && errno == EINVAL) { return(TRUE); } ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: the OS said file flush did not succeed\n"); os_file_handle_error(NULL, "flush"); /* It is a fatal error if a file flush does not succeed, because then the database can get corrupt on disk */ ut_error; return(FALSE); #endif } #ifndef __WIN__ /*******************************************************************//** Does a synchronous read operation in Posix. 
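The 64-bit file offset is passed in as two 32-bit halves and, when
off_t is wider than 4 bytes, recombined as offset + (offset_high << 32).
On systems without pread(2) the seek and the read are serialized with
one of the os_file_seek_mutexes.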
@return number of bytes read, -1 if error */ static ssize_t os_file_pread( /*==========*/ os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ ulint n, /*!< in: number of bytes to read */ ulint offset, /*!< in: least significant 32 bits of file offset from where to read */ ulint offset_high) /*!< in: most significant 32 bits of offset */ { off_t offs; #if defined(HAVE_PREAD) && !defined(HAVE_BROKEN_PREAD) ssize_t n_bytes; #endif /* HAVE_PREAD && !HAVE_BROKEN_PREAD */ ut_a((offset & 0xFFFFFFFFUL) == offset); /* If off_t is > 4 bytes in size, then we assume we can pass a 64-bit address */ if (sizeof(off_t) > 4) { offs = (off_t)offset + (((off_t)offset_high) << 32); } else { offs = (off_t)offset; if (offset_high > 0) { fprintf(stderr, "InnoDB: Error: file read at offset > 4 GB\n"); } } os_n_file_reads++; #if defined(HAVE_PREAD) && !defined(HAVE_BROKEN_PREAD) os_mutex_enter(os_file_count_mutex); os_file_n_pending_preads++; os_n_pending_reads++; os_mutex_exit(os_file_count_mutex); n_bytes = pread(file, buf, (ssize_t)n, offs); os_mutex_enter(os_file_count_mutex); os_file_n_pending_preads--; os_n_pending_reads--; os_mutex_exit(os_file_count_mutex); return(n_bytes); #else { off_t ret_offset; ssize_t ret; #ifndef UNIV_HOTBACKUP ulint i; #endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_reads++; os_mutex_exit(os_file_count_mutex); #ifndef UNIV_HOTBACKUP /* Protect the seek / read operation with a mutex */ i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES; os_mutex_enter(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ ret_offset = lseek(file, offs, SEEK_SET); if (ret_offset < 0) { ret = -1; } else { ret = read(file, buf, (ssize_t)n); } #ifndef UNIV_HOTBACKUP os_mutex_exit(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_reads--; os_mutex_exit(os_file_count_mutex); return(ret); } #endif } /*******************************************************************//** Does a synchronous write operation in Posix. @return number of bytes written, -1 if error */ static ssize_t os_file_pwrite( /*===========*/ os_file_t file, /*!< in: handle to a file */ const void* buf, /*!< in: buffer from where to write */ ulint n, /*!< in: number of bytes to write */ ulint offset, /*!< in: least significant 32 bits of file offset where to write */ ulint offset_high) /*!< in: most significant 32 bits of offset */ { ssize_t ret; off_t offs; ut_a((offset & 0xFFFFFFFFUL) == offset); /* If off_t is > 4 bytes in size, then we assume we can pass a 64-bit address */ if (sizeof(off_t) > 4) { offs = (off_t)offset + (((off_t)offset_high) << 32); } else { offs = (off_t)offset; if (offset_high > 0) { fprintf(stderr, "InnoDB: Error: file write" " at offset > 4 GB\n"); } } os_n_file_writes++; #if defined(HAVE_PWRITE) && !defined(HAVE_BROKEN_PREAD) os_mutex_enter(os_file_count_mutex); os_file_n_pending_pwrites++; os_n_pending_writes++; os_mutex_exit(os_file_count_mutex); ret = pwrite(file, buf, (ssize_t)n, offs); os_mutex_enter(os_file_count_mutex); os_file_n_pending_pwrites--; os_n_pending_writes--; os_mutex_exit(os_file_count_mutex); # ifdef UNIV_DO_FLUSH if (srv_unix_file_flush_method != SRV_UNIX_LITTLESYNC && srv_unix_file_flush_method != SRV_UNIX_NOSYNC && !os_do_not_call_flush_at_each_write) { /* Always do fsync to reduce the probability that when the OS crashes, a database page is only partially physically written to disk. 
*/ ut_a(TRUE == os_file_flush(file)); } # endif /* UNIV_DO_FLUSH */ return(ret); #else { off_t ret_offset; # ifndef UNIV_HOTBACKUP ulint i; # endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_writes++; os_mutex_exit(os_file_count_mutex); # ifndef UNIV_HOTBACKUP /* Protect the seek / write operation with a mutex */ i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES; os_mutex_enter(os_file_seek_mutexes[i]); # endif /* UNIV_HOTBACKUP */ ret_offset = lseek(file, offs, SEEK_SET); if (ret_offset < 0) { ret = -1; goto func_exit; } ret = write(file, buf, (ssize_t)n); # ifdef UNIV_DO_FLUSH if (srv_unix_file_flush_method != SRV_UNIX_LITTLESYNC && srv_unix_file_flush_method != SRV_UNIX_NOSYNC && !os_do_not_call_flush_at_each_write) { /* Always do fsync to reduce the probability that when the OS crashes, a database page is only partially physically written to disk. */ ut_a(TRUE == os_file_flush(file)); } # endif /* UNIV_DO_FLUSH */ func_exit: # ifndef UNIV_HOTBACKUP os_mutex_exit(os_file_seek_mutexes[i]); # endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_writes--; os_mutex_exit(os_file_count_mutex); return(ret); } #endif } #endif /*******************************************************************//** Requests a synchronous positioned read operation. @return TRUE if request was successful, FALSE if fail */ UNIV_INTERN ibool os_file_read( /*=========*/ os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ ulint offset, /*!< in: least significant 32 bits of file offset where to read */ ulint offset_high, /*!< in: most significant 32 bits of offset */ ulint n) /*!< in: number of bytes to read */ { #ifdef __WIN__ BOOL ret; DWORD len; DWORD ret2; DWORD low; DWORD high; ibool retry; #ifndef UNIV_HOTBACKUP ulint i; #endif /* !UNIV_HOTBACKUP */ /* On 64-bit Windows, ulint is 64 bits. But offset and n should be no more than 32 bits. 
*/ ut_a((offset & 0xFFFFFFFFUL) == offset); ut_a((n & 0xFFFFFFFFUL) == n); os_n_file_reads++; os_bytes_read_since_printout += n; try_again: ut_ad(file); ut_ad(buf); ut_ad(n > 0); low = (DWORD) offset; high = (DWORD) offset_high; os_mutex_enter(os_file_count_mutex); os_n_pending_reads++; os_mutex_exit(os_file_count_mutex); #ifndef UNIV_HOTBACKUP /* Protect the seek / read operation with a mutex */ i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES; os_mutex_enter(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ ret2 = SetFilePointer(file, low, &high, FILE_BEGIN); if (ret2 == 0xFFFFFFFF && GetLastError() != NO_ERROR) { #ifndef UNIV_HOTBACKUP os_mutex_exit(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_reads--; os_mutex_exit(os_file_count_mutex); goto error_handling; } ret = ReadFile(file, buf, (DWORD) n, &len, NULL); #ifndef UNIV_HOTBACKUP os_mutex_exit(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_reads--; os_mutex_exit(os_file_count_mutex); if (ret && len == n) { return(TRUE); } #else /* __WIN__ */ ibool retry; ssize_t ret; os_bytes_read_since_printout += n; try_again: ret = os_file_pread(file, buf, n, offset, offset_high); if ((ulint)ret == n) { return(TRUE); } fprintf(stderr, "InnoDB: Error: tried to read %lu bytes at offset %lu %lu.\n" "InnoDB: Was only able to read %ld.\n", (ulong)n, (ulong)offset_high, (ulong)offset, (long)ret); #endif /* __WIN__ */ #ifdef __WIN__ error_handling: #endif retry = os_file_handle_error(NULL, "read"); if (retry) { goto try_again; } fprintf(stderr, "InnoDB: Fatal error: cannot read from file." " OS error number %lu.\n", #ifdef __WIN__ (ulong) GetLastError() #else (ulong) errno #endif ); fflush(stderr); ut_error; return(FALSE); } /*******************************************************************//** Requests a synchronous positioned read operation. This function does not do any error handling. In case of error it returns FALSE. @return TRUE if request was successful, FALSE if fail */ UNIV_INTERN ibool os_file_read_no_error_handling( /*===========================*/ os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ ulint offset, /*!< in: least significant 32 bits of file offset where to read */ ulint offset_high, /*!< in: most significant 32 bits of offset */ ulint n) /*!< in: number of bytes to read */ { #ifdef __WIN__ BOOL ret; DWORD len; DWORD ret2; DWORD low; DWORD high; ibool retry; #ifndef UNIV_HOTBACKUP ulint i; #endif /* !UNIV_HOTBACKUP */ /* On 64-bit Windows, ulint is 64 bits. But offset and n should be no more than 32 bits. 
*/ ut_a((offset & 0xFFFFFFFFUL) == offset); ut_a((n & 0xFFFFFFFFUL) == n); os_n_file_reads++; os_bytes_read_since_printout += n; try_again: ut_ad(file); ut_ad(buf); ut_ad(n > 0); low = (DWORD) offset; high = (DWORD) offset_high; os_mutex_enter(os_file_count_mutex); os_n_pending_reads++; os_mutex_exit(os_file_count_mutex); #ifndef UNIV_HOTBACKUP /* Protect the seek / read operation with a mutex */ i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES; os_mutex_enter(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ ret2 = SetFilePointer(file, low, &high, FILE_BEGIN); if (ret2 == 0xFFFFFFFF && GetLastError() != NO_ERROR) { #ifndef UNIV_HOTBACKUP os_mutex_exit(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_reads--; os_mutex_exit(os_file_count_mutex); goto error_handling; } ret = ReadFile(file, buf, (DWORD) n, &len, NULL); #ifndef UNIV_HOTBACKUP os_mutex_exit(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_reads--; os_mutex_exit(os_file_count_mutex); if (ret && len == n) { return(TRUE); } #else /* __WIN__ */ ibool retry; ssize_t ret; os_bytes_read_since_printout += n; try_again: ret = os_file_pread(file, buf, n, offset, offset_high); if ((ulint)ret == n) { return(TRUE); } #endif /* __WIN__ */ #ifdef __WIN__ error_handling: #endif retry = os_file_handle_error_no_exit(NULL, "read"); if (retry) { goto try_again; } return(FALSE); } /*******************************************************************//** Rewind file to its start, read at most size - 1 bytes from it to str, and NUL-terminate str. All errors are silently ignored. This function is mostly meant to be used with temporary files. */ UNIV_INTERN void os_file_read_string( /*================*/ FILE* file, /*!< in: file to read from */ char* str, /*!< in: buffer where to read */ ulint size) /*!< in: size of buffer */ { size_t flen; if (size == 0) { return; } rewind(file); flen = fread(str, 1, size - 1, file); str[flen] = '\0'; } /*******************************************************************//** Requests a synchronous write operation. @return TRUE if request was successful, FALSE if fail */ UNIV_INTERN ibool os_file_write( /*==========*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ os_file_t file, /*!< in: handle to a file */ const void* buf, /*!< in: buffer from which to write */ ulint offset, /*!< in: least significant 32 bits of file offset where to write */ ulint offset_high, /*!< in: most significant 32 bits of offset */ ulint n) /*!< in: number of bytes to write */ { #ifdef __WIN__ BOOL ret; DWORD len; DWORD ret2; DWORD low; DWORD high; ulint n_retries = 0; ulint err; #ifndef UNIV_HOTBACKUP ulint i; #endif /* !UNIV_HOTBACKUP */ /* On 64-bit Windows, ulint is 64 bits. But offset and n should be no more than 32 bits. 
*/ ut_a((offset & 0xFFFFFFFFUL) == offset); ut_a((n & 0xFFFFFFFFUL) == n); os_n_file_writes++; ut_ad(file); ut_ad(buf); ut_ad(n > 0); retry: low = (DWORD) offset; high = (DWORD) offset_high; os_mutex_enter(os_file_count_mutex); os_n_pending_writes++; os_mutex_exit(os_file_count_mutex); #ifndef UNIV_HOTBACKUP /* Protect the seek / write operation with a mutex */ i = ((ulint) file) % OS_FILE_N_SEEK_MUTEXES; os_mutex_enter(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ ret2 = SetFilePointer(file, low, &high, FILE_BEGIN); if (ret2 == 0xFFFFFFFF && GetLastError() != NO_ERROR) { #ifndef UNIV_HOTBACKUP os_mutex_exit(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_writes--; os_mutex_exit(os_file_count_mutex); ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: File pointer positioning to" " file %s failed at\n" "InnoDB: offset %lu %lu. Operating system" " error number %lu.\n" "InnoDB: Some operating system error numbers" " are described at\n" "InnoDB: " REFMAN "operating-system-error-codes.html\n", name, (ulong) offset_high, (ulong) offset, (ulong) GetLastError()); return(FALSE); } ret = WriteFile(file, buf, (DWORD) n, &len, NULL); /* Always do fsync to reduce the probability that when the OS crashes, a database page is only partially physically written to disk. */ # ifdef UNIV_DO_FLUSH if (!os_do_not_call_flush_at_each_write) { ut_a(TRUE == os_file_flush(file)); } # endif /* UNIV_DO_FLUSH */ #ifndef UNIV_HOTBACKUP os_mutex_exit(os_file_seek_mutexes[i]); #endif /* !UNIV_HOTBACKUP */ os_mutex_enter(os_file_count_mutex); os_n_pending_writes--; os_mutex_exit(os_file_count_mutex); if (ret && len == n) { return(TRUE); } /* If some background file system backup tool is running, then, at least in Windows 2000, we may get here a specific error. Let us retry the operation 100 times, with 1 second waits. 
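(Such a tool can make WriteFile() fail with ERROR_LOCK_VIOLATION, which
is the condition the retry below checks for.)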
*/ if (GetLastError() == ERROR_LOCK_VIOLATION && n_retries < 100) { os_thread_sleep(1000000); n_retries++; goto retry; } if (!os_has_said_disk_full) { err = (ulint)GetLastError(); ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: Write to file %s failed" " at offset %lu %lu.\n" "InnoDB: %lu bytes should have been written," " only %lu were written.\n" "InnoDB: Operating system error number %lu.\n" "InnoDB: Check that your OS and file system" " support files of this size.\n" "InnoDB: Check also that the disk is not full" " or a disk quota exceeded.\n", name, (ulong) offset_high, (ulong) offset, (ulong) n, (ulong) len, (ulong) err); if (strerror((int)err) != NULL) { fprintf(stderr, "InnoDB: Error number %lu means '%s'.\n", (ulong) err, strerror((int)err)); } fprintf(stderr, "InnoDB: Some operating system error numbers" " are described at\n" "InnoDB: " REFMAN "operating-system-error-codes.html\n"); os_has_said_disk_full = TRUE; } return(FALSE); #else ssize_t ret; ret = os_file_pwrite(file, buf, n, offset, offset_high); if ((ulint)ret == n) { return(TRUE); } if (!os_has_said_disk_full) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: Write to file %s failed" " at offset %lu %lu.\n" "InnoDB: %lu bytes should have been written," " only %ld were written.\n" "InnoDB: Operating system error number %lu.\n" "InnoDB: Check that your OS and file system" " support files of this size.\n" "InnoDB: Check also that the disk is not full" " or a disk quota exceeded.\n", name, offset_high, offset, n, (long int)ret, (ulint)errno); if (strerror(errno) != NULL) { fprintf(stderr, "InnoDB: Error number %lu means '%s'.\n", (ulint)errno, strerror(errno)); } fprintf(stderr, "InnoDB: Some operating system error numbers" " are described at\n" "InnoDB: " REFMAN "operating-system-error-codes.html\n"); os_has_said_disk_full = TRUE; } return(FALSE); #endif } /*******************************************************************//** Check the existence and type of the given file. 
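Note that a missing file is not treated as a failure: if stat() fails
with ENOENT or ENOTDIR, the function still returns TRUE and sets *exists
to FALSE; only other stat() errors make it return FALSE.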
@return TRUE if call succeeded */ UNIV_INTERN ibool os_file_status( /*===========*/ const char* path, /*!< in: pathname of the file */ ibool* exists, /*!< out: TRUE if file exists */ os_file_type_t* type) /*!< out: type of the file (if it exists) */ { #ifdef __WIN__ int ret; struct _stat statinfo; ret = _stat(path, &statinfo); if (ret && (errno == ENOENT || errno == ENOTDIR)) { /* file does not exist */ *exists = FALSE; return(TRUE); } else if (ret) { /* file exists, but stat call failed */ os_file_handle_error_no_exit(path, "stat"); return(FALSE); } if (_S_IFDIR & statinfo.st_mode) { *type = OS_FILE_TYPE_DIR; } else if (_S_IFREG & statinfo.st_mode) { *type = OS_FILE_TYPE_FILE; } else { *type = OS_FILE_TYPE_UNKNOWN; } *exists = TRUE; return(TRUE); #else int ret; struct stat statinfo; ret = stat(path, &statinfo); if (ret && (errno == ENOENT || errno == ENOTDIR)) { /* file does not exist */ *exists = FALSE; return(TRUE); } else if (ret) { /* file exists, but stat call failed */ os_file_handle_error_no_exit(path, "stat"); return(FALSE); } if (S_ISDIR(statinfo.st_mode)) { *type = OS_FILE_TYPE_DIR; } else if (S_ISLNK(statinfo.st_mode)) { *type = OS_FILE_TYPE_LINK; } else if (S_ISREG(statinfo.st_mode)) { *type = OS_FILE_TYPE_FILE; } else { *type = OS_FILE_TYPE_UNKNOWN; } *exists = TRUE; return(TRUE); #endif } /*******************************************************************//** This function returns information about the specified file @return TRUE if stat information found */ UNIV_INTERN ibool os_file_get_status( /*===============*/ const char* path, /*!< in: pathname of the file */ os_file_stat_t* stat_info) /*!< information of a file in a directory */ { #ifdef __WIN__ int ret; struct _stat statinfo; ret = _stat(path, &statinfo); if (ret && (errno == ENOENT || errno == ENOTDIR)) { /* file does not exist */ return(FALSE); } else if (ret) { /* file exists, but stat call failed */ os_file_handle_error_no_exit(path, "stat"); return(FALSE); } if (_S_IFDIR & statinfo.st_mode) { stat_info->type = OS_FILE_TYPE_DIR; } else if (_S_IFREG & statinfo.st_mode) { stat_info->type = OS_FILE_TYPE_FILE; } else { stat_info->type = OS_FILE_TYPE_UNKNOWN; } stat_info->ctime = statinfo.st_ctime; stat_info->atime = statinfo.st_atime; stat_info->mtime = statinfo.st_mtime; stat_info->size = statinfo.st_size; return(TRUE); #else int ret; struct stat statinfo; ret = stat(path, &statinfo); if (ret && (errno == ENOENT || errno == ENOTDIR)) { /* file does not exist */ return(FALSE); } else if (ret) { /* file exists, but stat call failed */ os_file_handle_error_no_exit(path, "stat"); return(FALSE); } if (S_ISDIR(statinfo.st_mode)) { stat_info->type = OS_FILE_TYPE_DIR; } else if (S_ISLNK(statinfo.st_mode)) { stat_info->type = OS_FILE_TYPE_LINK; } else if (S_ISREG(statinfo.st_mode)) { stat_info->type = OS_FILE_TYPE_FILE; } else { stat_info->type = OS_FILE_TYPE_UNKNOWN; } stat_info->ctime = statinfo.st_ctime; stat_info->atime = statinfo.st_atime; stat_info->mtime = statinfo.st_mtime; stat_info->size = statinfo.st_size; return(TRUE); #endif } /* path name separator character */ #ifdef __WIN__ # define OS_FILE_PATH_SEPARATOR '\\' #else # define OS_FILE_PATH_SEPARATOR '/' #endif /****************************************************************//** The function os_file_dirname returns a directory component of a null-terminated pathname string. In the usual case, dirname returns the string up to, but not including, the final '/', and basename is the component following the final '/'. 
Trailing '/' characters are not counted as part of the pathname.

If path does not contain a slash, dirname returns the string ".".

Concatenating the string returned by dirname, a "/", and the basename
yields a complete pathname.

The return value is a copy of the directory component of the pathname.
The copy is allocated from the heap. It is the caller's responsibility
to free it after it is no longer needed.

The following list of examples (taken from SUSv2) shows the strings
returned by dirname and basename for different paths:

	path		dirname		basename
	"/usr/lib"	"/usr"		"lib"
	"/usr/"		"/"		"usr"
	"usr"		"."		"usr"
	"/"		"/"		"/"
	"."		"."		"."
	".."		"."		".."

@return own: directory component of the pathname */
UNIV_INTERN
char*
os_file_dirname(
/*============*/
	const char*	path)	/*!< in: pathname */
{
	/* Find the offset of the last slash */
	const char* last_slash = strrchr(path, OS_FILE_PATH_SEPARATOR);
	if (!last_slash) {
		/* No slash in the path, return "." */

		return(mem_strdup("."));
	}

	/* Ok, there is a slash */

	if (last_slash == path) {
		/* last slash is the first char of the path */

		return(mem_strdup("/"));
	}

	/* Non-trivial directory component */

	return(mem_strdupl(path, last_slash - path));
}

/****************************************************************//**
Creates all missing subdirectories along the given path.
@return TRUE if call succeeded, FALSE otherwise */
UNIV_INTERN
ibool
os_file_create_subdirs_if_needed(
/*=============================*/
	const char*	path)	/*!< in: path name */
{
	char*		subdir;
	ibool		success, subdir_exists;
	os_file_type_t	type;

	subdir = os_file_dirname(path);
	if (strlen(subdir) == 1
	    && (*subdir == OS_FILE_PATH_SEPARATOR || *subdir == '.')) {
		/* subdir is root or cwd, nothing to do */
		mem_free(subdir);

		return(TRUE);
	}

	/* Test if subdir exists */
	success = os_file_status(subdir, &subdir_exists, &type);

	if (success && !subdir_exists) {
		/* subdir does not exist, create it */
		success = os_file_create_subdirs_if_needed(subdir);
		if (!success) {
			mem_free(subdir);

			return(FALSE);
		}
		success = os_file_create_directory(subdir, FALSE);
	}

	mem_free(subdir);

	return(success);
}

#ifndef UNIV_HOTBACKUP
/****************************************************************//**
Returns a pointer to the nth slot in the aio array.
@return pointer to slot */
static
os_aio_slot_t*
os_aio_array_get_nth_slot(
/*======================*/
	os_aio_array_t*	array,	/*!< in: aio array */
	ulint		index)	/*!< in: index of the slot */
{
	ut_a(index < array->n_slots);

	return((array->slots) + index);
}

/************************************************************************//**
Creates an aio wait array.
@return own: aio array */
static
os_aio_array_t*
os_aio_array_create(
/*================*/
	ulint	n,		/*!< in: maximum number of pending aio
				operations allowed; n must be divisible
				by n_segments */
	ulint	n_segments)	/*!< in: number of segments in the aio
				array */
{
	os_aio_array_t*	array;
	ulint		i;
	os_aio_slot_t*	slot;
#ifdef WIN_ASYNC_IO
	OVERLAPPED*	over;
#endif
	ut_a(n > 0);
	ut_a(n_segments > 0);

	array = ut_malloc(sizeof(os_aio_array_t));

	array->mutex = os_mutex_create(NULL);
	array->not_full = os_event_create(NULL);
	array->is_empty = os_event_create(NULL);

	os_event_set(array->is_empty);

	array->n_slots = n;
	array->n_segments = n_segments;
	array->n_reserved = 0;
	array->slots = ut_malloc(n * sizeof(os_aio_slot_t));
#ifdef __WIN__
	array->native_events = ut_malloc(n * sizeof(os_native_event_t));
#endif
	for (i = 0; i < n; i++) {
		slot = os_aio_array_get_nth_slot(array, i);

		slot->pos = i;
		slot->reserved = FALSE;
#ifdef WIN_ASYNC_IO
		slot->event = os_event_create(NULL);

		over = &(slot->control);

		over->hEvent = slot->event->handle;

		*((array->native_events) + i) = over->hEvent;
#endif
	}

	return(array);
}

/************************************************************************//**
Frees an aio wait array. */
static
void
os_aio_array_free(
/*==============*/
	os_aio_array_t*	array)	/*!< in, own: array to free */
{
#ifdef WIN_ASYNC_IO
	ulint	i;

	for (i = 0; i < array->n_slots; i++) {
		os_aio_slot_t*	slot = os_aio_array_get_nth_slot(array, i);
		os_event_free(slot->event);
	}
#endif /* WIN_ASYNC_IO */

#ifdef __WIN__
	ut_free(array->native_events);
#endif /* __WIN__ */
	os_mutex_free(array->mutex);
	os_event_free(array->not_full);
	os_event_free(array->is_empty);

	ut_free(array->slots);
	ut_free(array);
}

/***********************************************************************
Initializes the asynchronous io system. Creates one array each for ibuf
and log i/o. Also creates one array each for read and write where each
array is divided logically into n_read_segs and n_write_segs
respectively. The caller must create an i/o handler thread for each
segment in these arrays. This function also creates the sync array.
No i/o handler thread needs to be created for that. */
UNIV_INTERN
void
os_aio_init(
/*========*/
	ulint	n_per_seg,	/*!< in: maximum number of pending aio
				operations allowed per segment */
	ulint	n_read_segs,	/*!< in: number of reader threads */
	ulint	n_write_segs,	/*!< in: number of writer threads */
	ulint	n_slots_sync)	/*!<
				in: number of slots in the sync aio
				array */
{
	ulint	i;
	ulint	n_segments = 2 + n_read_segs + n_write_segs;

	ut_ad(n_segments >= 4);

	os_io_init_simple();

	for (i = 0; i < n_segments; i++) {
		srv_set_io_thread_op_info(i, "not started yet");
	}

	/* fprintf(stderr, "Array n per seg %lu\n", n_per_seg); */

	os_aio_ibuf_array = os_aio_array_create(n_per_seg, 1);

	srv_io_thread_function[0] = "insert buffer thread";

	os_aio_log_array = os_aio_array_create(n_per_seg, 1);

	srv_io_thread_function[1] = "log thread";

	os_aio_read_array = os_aio_array_create(n_read_segs * n_per_seg,
						n_read_segs);
	for (i = 2; i < 2 + n_read_segs; i++) {
		ut_a(i < SRV_MAX_N_IO_THREADS);
		srv_io_thread_function[i] = "read thread";
	}

	os_aio_write_array = os_aio_array_create(n_write_segs * n_per_seg,
						 n_write_segs);
	for (i = 2 + n_read_segs; i < n_segments; i++) {
		ut_a(i < SRV_MAX_N_IO_THREADS);
		srv_io_thread_function[i] = "write thread";
	}

	os_aio_sync_array = os_aio_array_create(n_slots_sync, 1);

	os_aio_n_segments = n_segments;

	os_aio_validate();

	os_aio_segment_wait_events = ut_malloc(n_segments * sizeof(void*));

	for (i = 0; i < n_segments; i++) {
		os_aio_segment_wait_events[i] = os_event_create(NULL);
	}

	os_last_printout = time(NULL);
}

/***********************************************************************
Frees the asynchronous io system. */
UNIV_INTERN
void
os_aio_free(void)
/*=============*/
{
	ulint	i;

	os_aio_array_free(os_aio_ibuf_array);
	os_aio_ibuf_array = NULL;
	os_aio_array_free(os_aio_log_array);
	os_aio_log_array = NULL;
	os_aio_array_free(os_aio_read_array);
	os_aio_read_array = NULL;
	os_aio_array_free(os_aio_write_array);
	os_aio_write_array = NULL;
	os_aio_array_free(os_aio_sync_array);
	os_aio_sync_array = NULL;

	for (i = 0; i < os_aio_n_segments; i++) {
		os_event_free(os_aio_segment_wait_events[i]);
	}

	ut_free(os_aio_segment_wait_events);
	os_aio_segment_wait_events = 0;
	os_aio_n_segments = 0;
}

#ifdef WIN_ASYNC_IO
/************************************************************************//**
Wakes up all async i/o threads in the array in Windows async i/o at
shutdown. */
static
void
os_aio_array_wake_win_aio_at_shutdown(
/*==================================*/
	os_aio_array_t*	array)	/*!< in: aio array */
{
	ulint	i;

	for (i = 0; i < array->n_slots; i++) {

		os_event_set((array->slots + i)->event);
	}
}
#endif

/************************************************************************//**
Wakes up all async i/o threads so that they know to exit themselves in
shutdown. */
UNIV_INTERN
void
os_aio_wake_all_threads_at_shutdown(void)
/*=====================================*/
{
	ulint	i;

#ifdef WIN_ASYNC_IO
	/* This code wakes up all i/o threads in Windows native aio */
	os_aio_array_wake_win_aio_at_shutdown(os_aio_read_array);
	os_aio_array_wake_win_aio_at_shutdown(os_aio_write_array);
	os_aio_array_wake_win_aio_at_shutdown(os_aio_ibuf_array);
	os_aio_array_wake_win_aio_at_shutdown(os_aio_log_array);
#endif
	/* This loop wakes up all simulated i/o threads */

	for (i = 0; i < os_aio_n_segments; i++) {

		os_event_set(os_aio_segment_wait_events[i]);
	}
}

/************************************************************************//**
Waits until there are no pending writes in os_aio_write_array. There can
be other, synchronous, pending writes. */
UNIV_INTERN
void
os_aio_wait_until_no_pending_writes(void)
/*=====================================*/
{
	os_event_wait(os_aio_write_array->is_empty);
}

/**********************************************************************//**
Calculates segment number for a slot.
@return segment number (which is the number used by, for example,
i/o-handler threads) */
static
ulint
os_aio_get_segment_no_from_slot(
/*============================*/
	os_aio_array_t*	array,	/*!< in: aio wait array */
	os_aio_slot_t*	slot)	/*!< in: slot in this array */
{
	ulint	segment;
	ulint	seg_len;

	if (array == os_aio_ibuf_array) {
		segment = 0;

	} else if (array == os_aio_log_array) {
		segment = 1;

	} else if (array == os_aio_read_array) {
		seg_len = os_aio_read_array->n_slots
			/ os_aio_read_array->n_segments;

		segment = 2 + slot->pos / seg_len;
	} else {
		ut_a(array == os_aio_write_array);
		seg_len = os_aio_write_array->n_slots
			/ os_aio_write_array->n_segments;

		segment = os_aio_read_array->n_segments + 2
			+ slot->pos / seg_len;
	}

	return(segment);
}

/**********************************************************************//**
Calculates local segment number and aio array from global segment number.
@return local segment number within the aio array */
static
ulint
os_aio_get_array_and_local_segment(
/*===============================*/
	os_aio_array_t** array,		/*!< out: aio wait array */
	ulint		 global_segment)/*!< in: global segment number */
{
	ulint	segment;

	ut_a(global_segment < os_aio_n_segments);

	if (global_segment == 0) {
		*array = os_aio_ibuf_array;
		segment = 0;

	} else if (global_segment == 1) {
		*array = os_aio_log_array;
		segment = 0;

	} else if (global_segment < os_aio_read_array->n_segments + 2) {
		*array = os_aio_read_array;

		segment = global_segment - 2;
	} else {
		*array = os_aio_write_array;

		segment = global_segment
			- (os_aio_read_array->n_segments + 2);
	}

	return(segment);
}

/*******************************************************************//**
Requests a slot in the aio array. If no slot is available, waits until
the not_full event becomes signaled.
@return pointer to slot */
static
os_aio_slot_t*
os_aio_array_reserve_slot(
/*======================*/
	ulint		type,	/*!< in: OS_FILE_READ or OS_FILE_WRITE */
	os_aio_array_t*	array,	/*!< in: aio array */
	fil_node_t*	message1,/*!< in: message to be passed along with
				the aio operation */
	void*		message2,/*!< in: message to be passed along with
				the aio operation */
	os_file_t	file,	/*!< in: file handle */
	const char*	name,	/*!< in: name of the file or path as a
				null-terminated string */
	void*		buf,	/*!< in: buffer where to read or from which
				to write */
	ulint		offset,	/*!< in: least significant 32 bits of file
				offset */
	ulint		offset_high, /*!< in: most significant 32 bits of
				offset */
	ulint		len)	/*!< in: length of the block to read or
				write */
{
	os_aio_slot_t*	slot;
	ulint		i;
	ulint		slots_per_seg;
	ulint		local_seg;

#ifdef WIN_ASYNC_IO
	OVERLAPPED*	control;

	ut_a((len & 0xFFFFFFFFUL) == len);
#endif

	/* No need of a mutex. Only reading constant fields */
	slots_per_seg = array->n_slots / array->n_segments;

	/* We attempt to keep adjacent blocks in the same local
	segment. This can help in merging IO requests when we are
	doing simulated AIO */
	local_seg = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6))
		% array->n_segments;

loop:
	os_mutex_enter(array->mutex);

	if (array->n_reserved == array->n_slots) {
		os_mutex_exit(array->mutex);

		if (!os_aio_use_native_aio) {
			/* If the handler threads are suspended, wake them
			so that we get more slots */

			os_aio_simulated_wake_handler_threads();
		}

		os_event_wait(array->not_full);

		goto loop;
	}

	/* First try to find a slot in the preferred local segment */
	for (i = local_seg * slots_per_seg; i < array->n_slots; i++) {
		slot = os_aio_array_get_nth_slot(array, i);

		if (slot->reserved == FALSE) {
			goto found;
		}
	}

	/* Fall back to a full scan.
We are guaranteed to find a slot */ for (i = 0;; i++) { slot = os_aio_array_get_nth_slot(array, i); if (slot->reserved == FALSE) { goto found; } } found: ut_a(slot->reserved == FALSE); array->n_reserved++; if (array->n_reserved == 1) { os_event_reset(array->is_empty); } if (array->n_reserved == array->n_slots) { os_event_reset(array->not_full); } slot->reserved = TRUE; slot->reservation_time = time(NULL); slot->message1 = message1; slot->message2 = message2; slot->file = file; slot->name = name; slot->len = len; slot->type = type; slot->buf = buf; slot->offset = offset; slot->offset_high = offset_high; slot->io_already_done = FALSE; #ifdef WIN_ASYNC_IO control = &(slot->control); control->Offset = (DWORD)offset; control->OffsetHigh = (DWORD)offset_high; os_event_reset(slot->event); #endif os_mutex_exit(array->mutex); return(slot); } /*******************************************************************//** Frees a slot in the aio array. */ static void os_aio_array_free_slot( /*===================*/ os_aio_array_t* array, /*!< in: aio array */ os_aio_slot_t* slot) /*!< in: pointer to slot */ { ut_ad(array); ut_ad(slot); os_mutex_enter(array->mutex); ut_ad(slot->reserved); slot->reserved = FALSE; array->n_reserved--; if (array->n_reserved == array->n_slots - 1) { os_event_set(array->not_full); } if (array->n_reserved == 0) { os_event_set(array->is_empty); } #ifdef WIN_ASYNC_IO os_event_reset(slot->event); #endif os_mutex_exit(array->mutex); } /**********************************************************************//** Wakes up a simulated aio i/o-handler thread if it has something to do. */ static void os_aio_simulated_wake_handler_thread( /*=================================*/ ulint global_segment) /*!< in: the number of the segment in the aio arrays */ { os_aio_array_t* array; os_aio_slot_t* slot; ulint segment; ulint n; ulint i; ut_ad(!os_aio_use_native_aio); segment = os_aio_get_array_and_local_segment(&array, global_segment); n = array->n_slots / array->n_segments; /* Look through n slots after the segment * n'th slot */ os_mutex_enter(array->mutex); for (i = 0; i < n; i++) { slot = os_aio_array_get_nth_slot(array, i + segment * n); if (slot->reserved) { /* Found an i/o request */ break; } } os_mutex_exit(array->mutex); if (i < n) { os_event_set(os_aio_segment_wait_events[global_segment]); } } /**********************************************************************//** Wakes up simulated aio i/o-handler threads if they have something to do. */ UNIV_INTERN void os_aio_simulated_wake_handler_threads(void) /*=======================================*/ { ulint i; if (os_aio_use_native_aio) { /* We do not use simulated aio: do nothing */ return; } os_aio_recommend_sleep_for_read_threads = FALSE; for (i = 0; i < os_aio_n_segments; i++) { os_aio_simulated_wake_handler_thread(i); } } /**********************************************************************//** This function can be called if one wants to post a batch of reads and prefers an i/o-handler thread to handle them all at once later. You must call os_aio_simulated_wake_handler_threads later to ensure the threads are not left sleeping! */ UNIV_INTERN void os_aio_simulated_put_read_threads_to_sleep(void) /*============================================*/ { /* The idea of putting background IO threads to sleep is only for Windows when using simulated AIO. Windows XP seems to schedule background threads too eagerly to allow for coalescing during readahead requests. 
*/ #ifdef __WIN__ os_aio_array_t* array; ulint g; if (os_aio_use_native_aio) { /* We do not use simulated aio: do nothing */ return; } os_aio_recommend_sleep_for_read_threads = TRUE; for (g = 0; g < os_aio_n_segments; g++) { os_aio_get_array_and_local_segment(&array, g); if (array == os_aio_read_array) { os_event_reset(os_aio_segment_wait_events[g]); } } #endif /* __WIN__ */ } /*******************************************************************//** Requests an asynchronous i/o operation. @return TRUE if request was queued successfully, FALSE if fail */ UNIV_INTERN ibool os_aio( /*===*/ ulint type, /*!< in: OS_FILE_READ or OS_FILE_WRITE */ ulint mode, /*!< in: OS_AIO_NORMAL, ..., possibly ORed to OS_AIO_SIMULATED_WAKE_LATER: the last flag advises this function not to wake i/o-handler threads, but the caller will do the waking explicitly later, in this way the caller can post several requests in a batch; NOTE that the batch must not be so big that it exhausts the slots in aio arrays! NOTE that a simulated batch may introduce hidden chances of deadlocks, because i/os are not actually handled until all have been posted: use with great caution! */ const char* name, /*!< in: name of the file or path as a null-terminated string */ os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ ulint offset, /*!< in: least significant 32 bits of file offset where to read or write */ ulint offset_high, /*!< in: most significant 32 bits of offset */ ulint n, /*!< in: number of bytes to read or write */ fil_node_t* message1,/*!< in: message for the aio handler (can be used to identify a completed aio operation); ignored if mode is OS_AIO_SYNC */ void* message2)/*!< in: message for the aio handler (can be used to identify a completed aio operation); ignored if mode is OS_AIO_SYNC */ { os_aio_array_t* array; os_aio_slot_t* slot; #ifdef WIN_ASYNC_IO ibool retval; BOOL ret = TRUE; DWORD len = (DWORD) n; struct fil_node_struct * dummy_mess1; void* dummy_mess2; ulint dummy_type; #endif ulint err = 0; ibool retry; ulint wake_later; ut_ad(file); ut_ad(buf); ut_ad(n > 0); ut_ad(n % OS_FILE_LOG_BLOCK_SIZE == 0); ut_ad(offset % OS_FILE_LOG_BLOCK_SIZE == 0); ut_ad(os_aio_validate()); #ifdef WIN_ASYNC_IO ut_ad((n & 0xFFFFFFFFUL) == n); #endif wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER; mode = mode & (~OS_AIO_SIMULATED_WAKE_LATER); if (mode == OS_AIO_SYNC #ifdef WIN_ASYNC_IO && !os_aio_use_native_aio #endif ) { /* This is actually an ordinary synchronous read or write: no need to use an i/o-handler thread. NOTE that if we use Windows async i/o, Windows does not allow us to use ordinary synchronous os_file_read etc. on the same file, therefore we have built a special mechanism for synchronous wait in the Windows case. 
*/

		if (type == OS_FILE_READ) {

			return(os_file_read(file, buf, offset,
					    offset_high, n));
		}

		ut_a(type == OS_FILE_WRITE);

		return(os_file_write(name, file, buf, offset,
				     offset_high, n));
	}

try_again:
	if (mode == OS_AIO_NORMAL) {
		if (type == OS_FILE_READ) {
			array = os_aio_read_array;
		} else {
			array = os_aio_write_array;
		}
	} else if (mode == OS_AIO_IBUF) {
		ut_ad(type == OS_FILE_READ);
		/* Reduce probability of deadlock bugs in connection with ibuf:
		do not let the ibuf i/o handler sleep */

		wake_later = FALSE;

		array = os_aio_ibuf_array;
	} else if (mode == OS_AIO_LOG) {

		array = os_aio_log_array;
	} else if (mode == OS_AIO_SYNC) {
		array = os_aio_sync_array;
	} else {
		array = NULL; /* Eliminate compiler warning */
		ut_error;
	}

	slot = os_aio_array_reserve_slot(type, array, message1, message2, file,
					 name, buf, offset, offset_high, n);
	if (type == OS_FILE_READ) {
		if (os_aio_use_native_aio) {
#ifdef WIN_ASYNC_IO
			os_n_file_reads++;
			os_bytes_read_since_printout += len;

			ret = ReadFile(file, buf, (DWORD)n, &len,
				       &(slot->control));
#endif
		} else {
			if (!wake_later) {
				os_aio_simulated_wake_handler_thread(
					os_aio_get_segment_no_from_slot(
						array, slot));
			}
		}
	} else if (type == OS_FILE_WRITE) {
		if (os_aio_use_native_aio) {
#ifdef WIN_ASYNC_IO
			os_n_file_writes++;
			ret = WriteFile(file, buf, (DWORD)n, &len,
					&(slot->control));
#endif
		} else {
			if (!wake_later) {
				os_aio_simulated_wake_handler_thread(
					os_aio_get_segment_no_from_slot(
						array, slot));
			}
		}
	} else {
		ut_error;
	}

#ifdef WIN_ASYNC_IO
	if (os_aio_use_native_aio) {
		if ((ret && len == n)
		    || (!ret && GetLastError() == ERROR_IO_PENDING)) {
			/* aio was queued successfully! */

			if (mode == OS_AIO_SYNC) {
				/* We want a synchronous i/o operation on a
				file where we also use async i/o: in Windows
				we must use the same wait mechanism as for
				async i/o */

				retval = os_aio_windows_handle(ULINT_UNDEFINED,
							       slot->pos,
							       &dummy_mess1,
							       &dummy_mess2,
							       &dummy_type);

				return(retval);
			}

			return(TRUE);
		}

		err = 1; /* Fall through to the next if */
	}
#endif
	if (err == 0) {
		/* aio was queued successfully! */

		return(TRUE);
	}

	os_aio_array_free_slot(array, slot);

	retry = os_file_handle_error(name,
				     type == OS_FILE_READ
				     ? "aio read" : "aio write");
	if (retry) {

		goto try_again;
	}

	return(FALSE);
}

#ifdef WIN_ASYNC_IO
/**********************************************************************//**
This function is only used in Windows asynchronous i/o.
Waits for an aio operation to complete. This function is used to wait for
completed requests. The aio array of pending requests is divided
into segments. The thread specifies which segment or slot it wants to wait
for. NOTE: this function will also take care of freeing the aio slot,
therefore no other thread is allowed to do the freeing!
@return TRUE if the aio operation succeeded */ UNIV_INTERN ibool os_aio_windows_handle( /*==================*/ ulint segment, /*!< in: the number of the segment in the aio arrays to wait for; segment 0 is the ibuf i/o thread, segment 1 the log i/o thread, then follow the non-ibuf read threads, and as the last are the non-ibuf write threads; if this is ULINT_UNDEFINED, then it means that sync aio is used, and this parameter is ignored */ ulint pos, /*!< this parameter is used only in sync aio: wait for the aio slot at this position */ fil_node_t**message1, /*!< out: the messages passed with the aio request; note that also in the case where the aio operation failed, these output parameters are valid and can be used to restart the operation, for example */ void** message2, ulint* type) /*!< out: OS_FILE_WRITE or ..._READ */ { ulint orig_seg = segment; os_aio_array_t* array; os_aio_slot_t* slot; ulint n; ulint i; ibool ret_val; BOOL ret; DWORD len; BOOL retry = FALSE; if (segment == ULINT_UNDEFINED) { array = os_aio_sync_array; segment = 0; } else { segment = os_aio_get_array_and_local_segment(&array, segment); } /* NOTE! We only access constant fields in os_aio_array. Therefore we do not have to acquire the protecting mutex yet */ ut_ad(os_aio_validate()); ut_ad(segment < array->n_segments); n = array->n_slots / array->n_segments; if (array == os_aio_sync_array) { os_event_wait(os_aio_array_get_nth_slot(array, pos)->event); i = pos; } else { srv_set_io_thread_op_info(orig_seg, "wait Windows aio"); i = os_event_wait_multiple(n, (array->native_events) + segment * n); } os_mutex_enter(array->mutex); slot = os_aio_array_get_nth_slot(array, i + segment * n); ut_a(slot->reserved); if (orig_seg != ULINT_UNDEFINED) { srv_set_io_thread_op_info(orig_seg, "get windows aio return value"); } ret = GetOverlappedResult(slot->file, &(slot->control), &len, TRUE); *message1 = slot->message1; *message2 = slot->message2; *type = slot->type; if (ret && len == slot->len) { ret_val = TRUE; #ifdef UNIV_DO_FLUSH if (slot->type == OS_FILE_WRITE && !os_do_not_call_flush_at_each_write) { ut_a(TRUE == os_file_flush(slot->file)); } #endif /* UNIV_DO_FLUSH */ } else if (os_file_handle_error(slot->name, "Windows aio")) { retry = TRUE; } else { ret_val = FALSE; } os_mutex_exit(array->mutex); if (retry) { /* retry failed read/write operation synchronously. No need to hold array->mutex. */ ut_a((slot->len & 0xFFFFFFFFUL) == slot->len); switch (slot->type) { case OS_FILE_WRITE: ret = WriteFile(slot->file, slot->buf, (DWORD) slot->len, &len, &(slot->control)); break; case OS_FILE_READ: ret = ReadFile(slot->file, slot->buf, (DWORD) slot->len, &len, &(slot->control)); break; default: ut_error; } if (!ret && GetLastError() == ERROR_IO_PENDING) { /* aio was queued successfully! We want a synchronous i/o operation on a file where we also use async i/o: in Windows we must use the same wait mechanism as for async i/o */ ret = GetOverlappedResult(slot->file, &(slot->control), &len, TRUE); } ret_val = ret && len == slot->len; } os_aio_array_free_slot(array, slot); return(ret_val); } #endif /**********************************************************************//** Does simulated aio. This function should be called by an i/o-handler thread. 
@return TRUE if the aio operation succeeded */ UNIV_INTERN ibool os_aio_simulated_handle( /*====================*/ ulint global_segment, /*!< in: the number of the segment in the aio arrays to wait for; segment 0 is the ibuf i/o thread, segment 1 the log i/o thread, then follow the non-ibuf read threads, and as the last are the non-ibuf write threads */ fil_node_t**message1, /*!< out: the messages passed with the aio request; note that also in the case where the aio operation failed, these output parameters are valid and can be used to restart the operation, for example */ void** message2, ulint* type) /*!< out: OS_FILE_WRITE or ..._READ */ { os_aio_array_t* array; ulint segment; os_aio_slot_t* slot; os_aio_slot_t* slot2; os_aio_slot_t* consecutive_ios[OS_AIO_MERGE_N_CONSECUTIVE]; ulint n_consecutive; ulint total_len; ulint offs; ulint lowest_offset; ulint biggest_age; ulint age; byte* combined_buf; byte* combined_buf2; ibool ret; ulint n; ulint i; /* Fix compiler warning */ *consecutive_ios = NULL; segment = os_aio_get_array_and_local_segment(&array, global_segment); restart: /* NOTE! We only access constant fields in os_aio_array. Therefore we do not have to acquire the protecting mutex yet */ srv_set_io_thread_op_info(global_segment, "looking for i/o requests (a)"); ut_ad(os_aio_validate()); ut_ad(segment < array->n_segments); n = array->n_slots / array->n_segments; /* Look through n slots after the segment * n'th slot */ if (array == os_aio_read_array && os_aio_recommend_sleep_for_read_threads) { /* Give other threads chance to add several i/os to the array at once. */ goto recommended_sleep; } os_mutex_enter(array->mutex); srv_set_io_thread_op_info(global_segment, "looking for i/o requests (b)"); /* Check if there is a slot for which the i/o has already been done */ for (i = 0; i < n; i++) { slot = os_aio_array_get_nth_slot(array, i + segment * n); if (slot->reserved && slot->io_already_done) { if (os_aio_print_debug) { fprintf(stderr, "InnoDB: i/o for slot %lu" " already done, returning\n", (ulong) i); } ret = TRUE; goto slot_io_done; } } n_consecutive = 0; /* If there are at least 2 seconds old requests, then pick the oldest one to prevent starvation. If several requests have the same age, then pick the one at the lowest offset. */ biggest_age = 0; lowest_offset = ULINT_MAX; for (i = 0; i < n; i++) { slot = os_aio_array_get_nth_slot(array, i + segment * n); if (slot->reserved) { age = (ulint)difftime(time(NULL), slot->reservation_time); if ((age >= 2 && age > biggest_age) || (age >= 2 && age == biggest_age && slot->offset < lowest_offset)) { /* Found an i/o request */ consecutive_ios[0] = slot; n_consecutive = 1; biggest_age = age; lowest_offset = slot->offset; } } } if (n_consecutive == 0) { /* There were no old requests. 
Look for an i/o request at the lowest offset in the array (we ignore the high 32 bits of the offset in these heuristics) */ lowest_offset = ULINT_MAX; for (i = 0; i < n; i++) { slot = os_aio_array_get_nth_slot(array, i + segment * n); if (slot->reserved && slot->offset < lowest_offset) { /* Found an i/o request */ consecutive_ios[0] = slot; n_consecutive = 1; lowest_offset = slot->offset; } } } if (n_consecutive == 0) { /* No i/o requested at the moment */ goto wait_for_io; } slot = consecutive_ios[0]; /* Check if there are several consecutive blocks to read or write */ consecutive_loop: for (i = 0; i < n; i++) { slot2 = os_aio_array_get_nth_slot(array, i + segment * n); if (slot2->reserved && slot2 != slot && slot2->offset == slot->offset + slot->len /* check that sum does not wrap over */ && slot->offset + slot->len > slot->offset && slot2->offset_high == slot->offset_high && slot2->type == slot->type && slot2->file == slot->file) { /* Found a consecutive i/o request */ consecutive_ios[n_consecutive] = slot2; n_consecutive++; slot = slot2; if (n_consecutive < OS_AIO_MERGE_N_CONSECUTIVE) { goto consecutive_loop; } else { break; } } } srv_set_io_thread_op_info(global_segment, "consecutive i/o requests"); /* We have now collected n_consecutive i/o requests in the array; allocate a single buffer which can hold all data, and perform the i/o */ total_len = 0; slot = consecutive_ios[0]; for (i = 0; i < n_consecutive; i++) { total_len += consecutive_ios[i]->len; } if (n_consecutive == 1) { /* We can use the buffer of the i/o request */ combined_buf = slot->buf; combined_buf2 = NULL; } else { combined_buf2 = ut_malloc(total_len + UNIV_PAGE_SIZE); ut_a(combined_buf2); combined_buf = ut_align(combined_buf2, UNIV_PAGE_SIZE); } /* We release the array mutex for the time of the i/o: NOTE that this assumes that there is just one i/o-handler thread serving a single segment of slots! 
*/

	os_mutex_exit(array->mutex);

	if (slot->type == OS_FILE_WRITE && n_consecutive > 1) {
		/* Copy the buffers to the combined buffer */
		offs = 0;

		for (i = 0; i < n_consecutive; i++) {

			ut_memcpy(combined_buf + offs, consecutive_ios[i]->buf,
				  consecutive_ios[i]->len);
			offs += consecutive_ios[i]->len;
		}
	}

	srv_set_io_thread_op_info(global_segment, "doing file i/o");

	if (os_aio_print_debug) {
		fprintf(stderr,
			"InnoDB: doing i/o of type %lu at offset %lu %lu,"
			" length %lu\n",
			(ulong) slot->type, (ulong) slot->offset_high,
			(ulong) slot->offset, (ulong) total_len);
	}

	/* Do the i/o with ordinary, synchronous i/o functions: */
	if (slot->type == OS_FILE_WRITE) {
		ret = os_file_write(slot->name, slot->file, combined_buf,
				    slot->offset, slot->offset_high,
				    total_len);
	} else {
		ret = os_file_read(slot->file, combined_buf,
				   slot->offset, slot->offset_high,
				   total_len);
	}

	ut_a(ret);
	srv_set_io_thread_op_info(global_segment, "file i/o done");

#if 0
	fprintf(stderr,
		"aio: %lu consecutive %lu:th segment, first offs %lu blocks\n",
		n_consecutive, global_segment, slot->offset / UNIV_PAGE_SIZE);
#endif

	if (slot->type == OS_FILE_READ && n_consecutive > 1) {
		/* Copy the combined buffer to individual buffers */
		offs = 0;

		for (i = 0; i < n_consecutive; i++) {

			ut_memcpy(consecutive_ios[i]->buf, combined_buf + offs,
				  consecutive_ios[i]->len);
			offs += consecutive_ios[i]->len;
		}
	}

	if (combined_buf2) {
		ut_free(combined_buf2);
	}

	os_mutex_enter(array->mutex);

	/* Mark the i/os done in slots */

	for (i = 0; i < n_consecutive; i++) {
		consecutive_ios[i]->io_already_done = TRUE;
	}

	/* We return the messages for the first slot now, and if there were
	several slots, the messages will be returned with subsequent calls
	of this function */

slot_io_done:

	ut_a(slot->reserved);

	*message1 = slot->message1;
	*message2 = slot->message2;

	*type = slot->type;

	os_mutex_exit(array->mutex);

	os_aio_array_free_slot(array, slot);

	return(ret);

wait_for_io:
	srv_set_io_thread_op_info(global_segment, "resetting wait event");

	/* We wait here until there again can be i/os in the segment
	of this thread */

	os_event_reset(os_aio_segment_wait_events[global_segment]);

	os_mutex_exit(array->mutex);

recommended_sleep:
	srv_set_io_thread_op_info(global_segment, "waiting for i/o request");

	os_event_wait(os_aio_segment_wait_events[global_segment]);

	if (os_aio_print_debug) {
		fprintf(stderr,
			"InnoDB: i/o handler thread for i/o"
			" segment %lu wakes up\n",
			(ulong) global_segment);
	}

	goto restart;
}

/**********************************************************************//**
Validates the consistency of an aio array.
@return TRUE if ok */
static
ibool
os_aio_array_validate(
/*==================*/
	os_aio_array_t*	array)	/*!< in: aio wait array */
{
	os_aio_slot_t*	slot;
	ulint		n_reserved	= 0;
	ulint		i;

	ut_a(array);

	os_mutex_enter(array->mutex);

	ut_a(array->n_slots > 0);
	ut_a(array->n_segments > 0);

	for (i = 0; i < array->n_slots; i++) {
		slot = os_aio_array_get_nth_slot(array, i);

		if (slot->reserved) {
			n_reserved++;
			ut_a(slot->len > 0);
		}
	}

	ut_a(array->n_reserved == n_reserved);

	os_mutex_exit(array->mutex);

	return(TRUE);
}

/**********************************************************************//**
Validates the consistency of the aio system.
@return TRUE if ok */ UNIV_INTERN ibool os_aio_validate(void) /*=================*/ { os_aio_array_validate(os_aio_read_array); os_aio_array_validate(os_aio_write_array); os_aio_array_validate(os_aio_ibuf_array); os_aio_array_validate(os_aio_log_array); os_aio_array_validate(os_aio_sync_array); return(TRUE); } /**********************************************************************//** Prints info of the aio arrays. */ UNIV_INTERN void os_aio_print( /*=========*/ FILE* file) /*!< in: file where to print */ { os_aio_array_t* array; os_aio_slot_t* slot; ulint n_reserved; time_t current_time; double time_elapsed; double avg_bytes_read; ulint i; for (i = 0; i < srv_n_file_io_threads; i++) { fprintf(file, "I/O thread %lu state: %s (%s)", (ulong) i, srv_io_thread_op_info[i], srv_io_thread_function[i]); #ifndef __WIN__ if (os_aio_segment_wait_events[i]->is_set) { fprintf(file, " ev set"); } #endif fprintf(file, "\n"); } fputs("Pending normal aio reads:", file); array = os_aio_read_array; loop: ut_a(array); os_mutex_enter(array->mutex); ut_a(array->n_slots > 0); ut_a(array->n_segments > 0); n_reserved = 0; for (i = 0; i < array->n_slots; i++) { slot = os_aio_array_get_nth_slot(array, i); if (slot->reserved) { n_reserved++; #if 0 fprintf(stderr, "Reserved slot, messages %p %p\n", (void*) slot->message1, (void*) slot->message2); #endif ut_a(slot->len > 0); } } ut_a(array->n_reserved == n_reserved); fprintf(file, " %lu", (ulong) n_reserved); os_mutex_exit(array->mutex); if (array == os_aio_read_array) { fputs(", aio writes:", file); array = os_aio_write_array; goto loop; } if (array == os_aio_write_array) { fputs(",\n ibuf aio reads:", file); array = os_aio_ibuf_array; goto loop; } if (array == os_aio_ibuf_array) { fputs(", log i/o's:", file); array = os_aio_log_array; goto loop; } if (array == os_aio_log_array) { fputs(", sync i/o's:", file); array = os_aio_sync_array; goto loop; } putc('\n', file); current_time = time(NULL); time_elapsed = 0.001 + difftime(current_time, os_last_printout); fprintf(file, "Pending flushes (fsync) log: %lu; buffer pool: %lu\n" "%lu OS file reads, %lu OS file writes, %lu OS fsyncs\n", (ulong) fil_n_pending_log_flushes, (ulong) fil_n_pending_tablespace_flushes, (ulong) os_n_file_reads, (ulong) os_n_file_writes, (ulong) os_n_fsyncs); if (os_file_n_pending_preads != 0 || os_file_n_pending_pwrites != 0) { fprintf(file, "%lu pending preads, %lu pending pwrites\n", (ulong) os_file_n_pending_preads, (ulong) os_file_n_pending_pwrites); } if (os_n_file_reads == os_n_file_reads_old) { avg_bytes_read = 0.0; } else { avg_bytes_read = (double) os_bytes_read_since_printout / (os_n_file_reads - os_n_file_reads_old); } fprintf(file, "%.2f reads/s, %lu avg bytes/read," " %.2f writes/s, %.2f fsyncs/s\n", (os_n_file_reads - os_n_file_reads_old) / time_elapsed, (ulong)avg_bytes_read, (os_n_file_writes - os_n_file_writes_old) / time_elapsed, (os_n_fsyncs - os_n_fsyncs_old) / time_elapsed); os_n_file_reads_old = os_n_file_reads; os_n_file_writes_old = os_n_file_writes; os_n_fsyncs_old = os_n_fsyncs; os_bytes_read_since_printout = 0; os_last_printout = current_time; } /**********************************************************************//** Refreshes the statistics used to print per-second averages. 
*/ UNIV_INTERN void os_aio_refresh_stats(void) /*======================*/ { os_n_file_reads_old = os_n_file_reads; os_n_file_writes_old = os_n_file_writes; os_n_fsyncs_old = os_n_fsyncs; os_bytes_read_since_printout = 0; os_last_printout = time(NULL); } #ifdef UNIV_DEBUG /**********************************************************************//** Checks that all slots in the system have been freed, that is, there are no pending io operations. @return TRUE if all free */ UNIV_INTERN ibool os_aio_all_slots_free(void) /*=======================*/ { os_aio_array_t* array; ulint n_res = 0; array = os_aio_read_array; os_mutex_enter(array->mutex); n_res += array->n_reserved; os_mutex_exit(array->mutex); array = os_aio_write_array; os_mutex_enter(array->mutex); n_res += array->n_reserved; os_mutex_exit(array->mutex); array = os_aio_ibuf_array; os_mutex_enter(array->mutex); n_res += array->n_reserved; os_mutex_exit(array->mutex); array = os_aio_log_array; os_mutex_enter(array->mutex); n_res += array->n_reserved; os_mutex_exit(array->mutex); array = os_aio_sync_array; os_mutex_enter(array->mutex); n_res += array->n_reserved; os_mutex_exit(array->mutex); if (n_res == 0) { return(TRUE); } return(FALSE); } #endif /* UNIV_DEBUG */ #endif /* !UNIV_HOTBACKUP */
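
/* Illustrative sketch (not from the upstream source; the concrete segment
counts below are assumed purely for the example): a worked example of the
global i/o segment layout that os_aio_get_segment_no_from_slot() and
os_aio_get_array_and_local_segment() above rely on. With n_read_segs = 4
and n_write_segs = 4, os_aio_init() creates n_segments = 2 + 4 + 4 = 10
global segments:

	global segment 0	-> ibuf array	(local segment 0)
	global segment 1	-> log array	(local segment 0)
	global segments 2..5	-> read array	(local segments 0..3)
	global segments 6..9	-> write array	(local segments 0..3)

A read slot at position pos maps to global segment
2 + pos / (n_slots / n_segments); a write slot maps to
2 + n_read_segs + pos / (n_slots / n_segments). Under this layout the two
functions are exact inverses of each other. */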
Abner-Sun/vx_mysql5.1_git
storage/innodb_plugin/os/os0file.c
C
gpl-2.0
110,683
23.640027
78
0.612127
false
#ifndef __SP_STRING_H__ #define __SP_STRING_H__ /* * string elements * extracted from sp-text */ #include <glibmm/ustring.h> #include "sp-object.h" #define SP_STRING(obj) (dynamic_cast<SPString*>((SPObject*)obj)) #define SP_IS_STRING(obj) (dynamic_cast<const SPString*>((SPObject*)obj) != NULL) class SPString : public SPObject { public: SPString(); virtual ~SPString(); Glib::ustring string; virtual void build(SPDocument* doc, Inkscape::XML::Node* repr); virtual void release(); virtual void read_content(); virtual void update(SPCtx* ctx, unsigned int flags); }; #endif
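
/* Illustrative sketch (not part of the original header; the variable names
 * are assumptions for the example): typical use of the cast macros defined
 * above when inspecting a child node of a text object.
 *
 *     SPObject* child = ...;            // e.g. a child of an SPText node
 *     if (SP_IS_STRING(child)) {
 *         SPString* str = SP_STRING(child);
 *         Glib::ustring const& text = str->string;  // the character data
 *     }
 */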
tik0/inkscapeGrid
src/sp-string.h
C
gpl-2.0
597
18.258065
81
0.690117
false
--[[
# For More Information ....!
# Developer : Aziz < @devss_bot > #Dev
# our channel: @help_tele
]]
do

local Arian = 159280034 -- put your bot's ID here

local function setrank(msg, name, value) -- setrank function
  local hash = nil
  if msg.to.type == 'chat' then
    hash = 'rank:'..msg.to.id..':variables'
  end
  if hash then
    redis:hset(hash, name, value)
    return send_msg('chat#id'..msg.to.id, 'set Rank for ('..name..') To : '..value, ok_cb, true)
  end
end

local function res_user_callback(extra, success, result) -- /info <username> function
  if success == 1 then
    if result.username then
      Username = '@'..result.username
    else
      Username = '----'
    end
    local text = 'Full name : '..(result.first_name or '')..' '..(result.last_name or '')..'\n'
      ..'User name: '..Username..'\n'
      ..'ID : '..result.id..'\n\n'
    local hash = 'rank:'..extra.chat2..':variables'
    local value = redis:hget(hash, result.id)
    if not value then
      if result.id == tonumber(Arian) then
        text = text..'Rank : Executive Admin \n\n'
      elseif is_admin2(result.id) then
        text = text..'Rank : Admin \n\n'
      elseif is_owner2(result.id, extra.chat2) then
        text = text..'Rank : Owner \n\n'
      elseif is_momod2(result.id, extra.chat2) then
        text = text..'Rank : Moderator \n\n'
      else
        text = text..'Rank : Member \n\n'
      end
    else
      text = text..'Rank : '..value..'\n\n'
    end
    local uhash = 'user:'..result.id
    local user = redis:hgetall(uhash)
    local um_hash = 'msgs:'..result.id..':'..extra.chat2
    user_info_msgs = tonumber(redis:get(um_hash) or 0)
    text = text..'Total messages : '..user_info_msgs..'\n\n'
    text = text..'#CHANNEL BOT : @IQ_DEV8'
    send_msg(extra.receiver, text, ok_cb, true)
  else
    send_msg(extra.receiver, ' Username not found.', ok_cb, false)
  end
end

local function action_by_id(extra, success, result) -- /info <ID> function
  if success == 1 then
    if result.username then
      Username = '@'..result.username
    else
      Username = '----'
    end
    local text = 'Full name : '..(result.first_name or '')..' '..(result.last_name or '')..'\n'
      ..'Username: '..Username..'\n'
      ..'ID : '..result.id..'\n\n'
    local hash = 'rank:'..extra.chat2..':variables'
    local value = redis:hget(hash, result.id)
    if not value then
      if result.id == tonumber(Arian) then
        text = text..'Rank : Executive Admin \n\n'
      elseif is_admin2(result.id) then
        text = text..'Rank : Admin \n\n'
      elseif is_owner2(result.id, extra.chat2) then
        text = text..'Rank : Owner \n\n'
      elseif is_momod2(result.id, extra.chat2) then
        text = text..'Rank : Moderator \n\n'
      else
        text = text..'Rank : Member \n\n'
      end
    else
      text = text..'Rank : '..value..'\n\n'
    end
    local uhash = 'user:'..result.id
    local user = redis:hgetall(uhash)
    local um_hash = 'msgs:'..result.id..':'..extra.chat2
    user_info_msgs = tonumber(redis:get(um_hash) or 0)
    text = text..'Total messages : '..user_info_msgs..'\n\n'
    text = text..'#CHANNEL BOT : @IQ_DEV8'
    send_msg(extra.receiver, text, ok_cb, true)
  else
    send_msg(extra.receiver, 'id not found.\nuse : /info @username', ok_cb, false)
  end
end

local function action_by_reply(extra, success, result) -- (reply) /info function
  if result.from.username then
    Username = '@'..result.from.username
  else
    Username = '----'
  end
  local text = 'Full name : '..(result.from.first_name or '')..' '..(result.from.last_name or '')..'\n'
    ..'Username : '..Username..'\n'
    ..'ID : '..result.from.id..'\n\n'
  local hash = 'rank:'..result.to.id..':variables'
  local value = redis:hget(hash, result.from.id)
  if not value then
    if result.from.id == tonumber(Arian) then
      text = text..'Rank : Executive Admin \n\n'
    elseif is_admin2(result.from.id) then
      text = text..'Rank : Admin \n\n'
    elseif is_owner2(result.from.id, result.to.id) then
      text = text..'Rank : Owner \n\n'
    elseif is_momod2(result.from.id, result.to.id) then
      text = text..'Rank : Moderator \n\n'
    else
      text = text..'Rank : Member \n\n'
    end
  else
    text = text..'Rank : '..value..'\n\n'
  end
  local user_info = {}
  local uhash = 'user:'..result.from.id
  local user = redis:hgetall(uhash)
  local um_hash = 'msgs:'..result.from.id..':'..result.to.id
  user_info_msgs = tonumber(redis:get(um_hash) or 0)
  text = text..'Total messages : '..user_info_msgs..'\n\n'
  text = text..'#CHANNEL BOT : @IQ_DEV8'
  send_msg(extra.receiver, text, ok_cb, true)
end

local function action_by_reply2(extra, success, result)
  local value = extra.value
  setrank(result, result.from.id, value)
end

local function run(msg, matches)
  if matches[1]:lower() == 'setrank' then
    local hash = 'usecommands:'..msg.from.id..':'..msg.to.id
    redis:incr(hash)
    if not is_sudo(msg) then
      return "Only for Sudo"
    end
    local receiver = get_receiver(msg)
    local Reply = msg.reply_id
    if msg.reply_id then
      local value = string.sub(matches[2], 1, 1000)
      msgr = get_message(msg.reply_id, action_by_reply2, {receiver=receiver, Reply=Reply, value=value})
    else
      local name = string.sub(matches[2], 1, 50)
      local value = string.sub(matches[3], 1, 1000)
      local text = setrank(msg, name, value)
      return text
    end
  end
  if matches[1]:lower() == 'info' and not matches[2] then
    local receiver = get_receiver(msg)
    local Reply = msg.reply_id
    if msg.reply_id then
      msgr = get_message(msg.reply_id, action_by_reply, {receiver=receiver, Reply=Reply})
    else
      if msg.from.username then
        Username = '@'..msg.from.username
      else
        Username = '----'
      end
      local text = 'First name : '..(msg.from.first_name or '----')..'\n'
      text = text..'Last name : '..(msg.from.last_name or '----')..'\n'
      text = text..'Username : '..Username..'\n'
      text = text..'ID : '..msg.from.id..'\n\n'
      local hash = 'rank:'..msg.to.id..':variables'
      if hash then
        local value = redis:hget(hash, msg.from.id)
        if not value then
          if msg.from.id == tonumber(Arian) then
            text = text..'Rank : Executive Admin \n\n'
          elseif is_sudo(msg) then
            text = text..'Rank : Admin \n\n'
          elseif is_owner(msg) then
            text = text..'Rank : Owner \n\n'
          elseif is_momod(msg) then
            text = text..'Rank : Moderator \n\n'
          else
            text = text..'Rank : Member \n\n'
          end
        else
          text = text..'Rank : '..value..'\n'
        end
      end
      local uhash = 'user:'..msg.from.id
      local user = redis:hgetall(uhash)
      local um_hash = 'msgs:'..msg.from.id..':'..msg.to.id
      user_info_msgs = tonumber(redis:get(um_hash) or 0)
      text = text..'Total messages : '..user_info_msgs..'\n\n'
      if msg.to.type == 'chat' then
        text = text..'Group name : '..msg.to.title..'\n'
        text = text..'Group ID : '..msg.to.id
      end
      text = text..'\n\n#CHANNEL : @help_tele'
      return send_msg(receiver, text, ok_cb, true)
    end
  end
  if matches[1]:lower() == 'info' and matches[2] then
    local user = matches[2]
    local chat2 = msg.to.id
    local receiver = get_receiver(msg)
    if string.match(user, '^%d+$') then
      user_info('user#id'..user, action_by_id, {receiver=receiver, user=user, text=text, chat2=chat2})
    elseif string.match(user, '^@.+$') then
      username = string.gsub(user, '@', '')
      msgr = res_user(username, res_user_callback, {receiver=receiver, user=user, text=text, chat2=chat2})
    end
  end
end

return {
  description = 'Know your information or the info of a chat member.',
  usage = {
    '!info: Return your info and the chat info if you are in one.',
    '(Reply)!info: Return info of the replied user.',
    '!info <id>: Return the info of the <id>.',
    '!info @<user_name>: Return the member @<user_name> information from the current chat.',
    '!setrank <userid> <rank>: change a member\'s rank.',
    '(Reply)!setrank <rank>: change a member\'s rank.',
  },
  patterns = {
    "^([Ii][Nn][Ff][Oo])$",
    "^([Ii][Nn][Ff][Oo]) (.*)$",
    "^([Ss][Ee][Tt][Rr][Aa][Nn][Kk]) (%d+) (.*)$",
    "^([Ss][Ee][Tt][Rr][Aa][Nn][Kk]) (.*)$",
  },
  run = run
}

end
--CHANNEL BOT : @IQ_DEV8
--by @zeoon3
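
--[[ Illustrative sketch (not part of the original plugin; the sample inputs
are assumptions): how the patterns above shape the `matches` table that
run() receives, assuming the framework applies them with string.match after
stripping the command prefix:

  "info"               -> matches = { "info" }                    -- own info
  "info @someuser"     -> matches = { "info", "@someuser" }       -- res_user path
  "info 123456789"     -> matches = { "info", "123456789" }       -- user_info path
  "setrank 123456 VIP" -> matches = { "setrank", "123456", "VIP" }
  "setrank VIP"        -> matches = { "setrank", "VIP" }          -- reply branch
]]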
jokerdevv/Joker-Developer
plugins/super-info.lua
Lua
gpl-2.0
8,139
32.303279
106
0.609354
false
// { dg-options "-std=gnu++11" } // { dg-require-cstdint "" } // // 2010-03-16 Paolo Carlini <paolo.carlini@oracle.com> // // Copyright (C) 2010-2014 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // terms of the GNU General Public License as published by the // Free Software Foundation; either version 3, or (at your option) // any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License along // with this library; see the file COPYING3. If not see // <http://www.gnu.org/licenses/>. // 26.5.8.4.5 Class template fisher_f_distribution [rand.dist.norm.f] #include <random> #include <testsuite_hooks.h> void test01() { bool test __attribute__((unused)) = true; std::fisher_f_distribution<double> u(2.0, 3.0), v, w; VERIFY( v == w ); VERIFY( !(u == v) ); } int main() { test01(); return 0; }
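
// Illustrative note (not part of the original test): per the random number
// distribution requirements, operator== compares the distribution
// parameters (and internal state), so a freshly constructed copy compares
// equal to its source. A minimal sketch, assuming the same headers and
// VERIFY macro as above:
//
//     std::fisher_f_distribution<double> x(2.0, 3.0);
//     auto y = x; // copy
//     VERIFY( x == y );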
mageec/mageec-gcc
libstdc++-v3/testsuite/26_numerics/random/fisher_f_distribution/operators/equal.cc
C++
gpl-2.0
1,215
27.255814
74
0.688066
false
<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE TS><TS version="2.0"> <context> <name>AboutDialog</name> <message> <source>About</source> <translation>Sobre</translation> </message> <message> <source>Authors</source> <translation type="unfinished"></translation> </message> <message> <source>License</source> <translation type="unfinished"></translation> </message> <message> <source>Lightweight WebKit-based web browser</source> <translation type="unfinished"></translation> </message> <message encoding="UTF-8"> <source>&lt;!DOCTYPE HTML PUBLIC &quot;-//W3C//DTD HTML 4.0//EN&quot; &quot;http://www.w3.org/TR/REC-html40/strict.dtd&quot;&gt; &lt;html&gt;&lt;head&gt;&lt;meta name=&quot;qrichtext&quot; content=&quot;1&quot; /&gt;&lt;style type=&quot;text/css&quot;&gt; p, li { white-space: pre-wrap; } &lt;/style&gt;&lt;/head&gt;&lt;body style=&quot; font-family:&apos;DejaVu Sans&apos;; font-size:9pt; font-weight:400; font-style:normal;&quot;&gt; &lt;p style=&quot; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;&quot;&gt;Copyright © 2007-2008 Benjamin C. Meyer &amp;lt;&lt;a href=&quot;mailto:ben@meyerhome.net&quot;&gt;&lt;span style=&quot; text-decoration: underline; color:#0057ae;&quot;&gt;ben@meyerhome.net&lt;/span&gt;&lt;/a&gt;&amp;gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</source> <translation type="unfinished"></translation> </message> <message> <source>&lt;a href=&quot;http://arora-browser.org&quot;&gt;http://arora-browser.org&lt;/a&gt;</source> <translation type="unfinished"></translation> </message> <message> <source>Close</source> <translation type="unfinished"></translation> </message> </context> <context> <name>AddBookmarkDialog</name> <message> <source>Add Bookmark</source> <translation>Adicionar Favoritos</translation> </message> <message> <source>Type a name for the bookmark, and choose where to keep it.</source> <translation>Digite o nome do favorito, e escolhe onde manter ele.</translation> </message> </context> <context> <name>BookmarksDialog</name> <message> <source>Bookmarks</source> <translation>Favoritos</translation> </message> <message> <source>&amp;Remove</source> <translation>&amp;Remover</translation> </message> <message> <source>Add Folder</source> <translation>Adicionar Pasta</translation> </message> <message> <source>Open</source> <translation>Abrir</translation> </message> <message> <source>Open in New Tab</source> <translation>Abrir em nova aba</translation> </message> <message> <source>Delete</source> <translation>Deletar</translation> </message> <message> <source>New Folder</source> <translation>Nova Pasta</translation> </message> </context> <context> <name>BookmarksManager</name> <message> <source>Error when loading bookmarks on line %1, column %2: %3</source> <translation>Erro ao carregar favoritos na linha %1, coluna%2:</translation> </message> <message> <source>Toolbar Bookmarks</source> <translation type="unfinished"></translation> </message> <message> <source>Menu</source> <translation>Menu</translation> </message> <message> <source>Open File</source> <translation>Abrir Arquivo</translation> </message> <message> <source>XBEL (*.xbel *.xml)</source> <translation>XBEL (*.xbel *.xml)</translation> </message> <message> <source>Imported %1</source> <translation>Importado %1</translation> </message> <message> <source>Save File</source> <translation>Salvar Arquivo</translation> </message> <message> <source>%1 Bookmarks.xbel</source> <translation>%1 Favoritos.xbel</translation> </message> <message> <source>Export 
error</source> <translation>Erro na exportação</translation> </message> <message> <source>error saving bookmarks</source> <translation>erro salvando favoritos</translation> </message> <message> <source>Remove Bookmark</source> <translation>Remover Favorito</translation> </message> <message> <source>Insert Bookmark</source> <translation>Inserir Favorito</translation> </message> <message> <source>Name Change</source> <translation>Troca de Nome</translation> </message> <message> <source>Address Change</source> <translation>Troca de Endereço</translation> </message> </context> <context> <name>BookmarksModel</name> <message> <source>Title</source> <translation>Título</translation> </message> <message> <source>Address</source> <translation>Endereço</translation> </message> </context> <context> <name>BookmarksToolBar</name> <message> <source>Bookmark</source> <translation>Favorito</translation> </message> </context> <context> <name>BrowserApplication</name> <message> <source>There are %1 windows and %2 tabs open Do you want to quit anyway?</source> <translation>Existem %1 janelas e %2 abas abertas Você deve sair de qualquer maneira?</translation> </message> <message> <source>Restore failed</source> <translation type="unfinished"></translation> </message> <message> <source>The saved session will not being restored because last time it was restored Arora crashed.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>BrowserMainWindow</name> <message> <source>&amp;File</source> <translation>&amp;Arquivo</translation> </message> <message> <source>&amp;New Window</source> <translation>&amp;Nova Janela</translation> </message> <message> <source>&amp;Open File...</source> <translation>&amp;Abrir Arquivo...</translation> </message> <message> <source>Open &amp;Location...</source> <translation>Abrir &amp;Endereço...</translation> </message> <message> <source>&amp;Save As...</source> <translation>&amp;Salvar Como...</translation> </message> <message> <source>&amp;Import Bookmarks...</source> <translation>&amp;Importar Favoritos</translation> </message> <message> <source>&amp;Export Bookmarks...</source> <translation>&amp;Exportar Favoritos</translation> </message> <message> <source>P&amp;rint Preview...</source> <translation>Imp&amp;rimir Previsão</translation> </message> <message> <source>&amp;Print...</source> <translation>Im&amp;primir</translation> </message> <message> <source>Private &amp;Browsing...</source> <translation type="unfinished"></translation> </message> <message> <source>&amp;Quit</source> <translation>&amp;Sair</translation> </message> <message> <source>&amp;Edit</source> <translation>&amp;Editar</translation> </message> <message> <source>&amp;Undo</source> <translation>&amp;Desfazer</translation> </message> <message> <source>&amp;Redo</source> <translation>&amp;Refazer</translation> </message> <message> <source>Cu&amp;t</source> <translation>&amp;Recortar</translation> </message> <message> <source>&amp;Copy</source> <translation>&amp;Colar</translation> </message> <message> <source>&amp;Paste</source> <translation>&amp;Colar</translation> </message> <message> <source>&amp;Find</source> <translation>&amp;Procurar</translation> </message> <message> <source>&amp;Find Next</source> <translation type="obsolete">&amp;Procurar Próximo</translation> </message> <message> <source>&amp;Find Previous</source> <translation type="obsolete">&amp;Procurar Anterior</translation> </message> <message> <source>&amp;Preferences</source> <translation 
type="obsolete">&amp;Preferências</translation> </message> <message> <source>Ctrl+,</source> <translation>Ctrl+,</translation> </message> <message> <source>&amp;View</source> <translation>&amp;Visualizar</translation> </message> <message> <source>Shift+Ctrl+B</source> <translation>Shift+Ctrl+B</translation> </message> <message> <source>Ctrl+|</source> <translation>Ctrl+|</translation> </message> <message> <source>Ctrl+/</source> <translation>Ctrl+/</translation> </message> <message> <source>&amp;Stop</source> <translation>&amp;Parar</translation> </message> <message> <source>Reload Page</source> <translation type="obsolete">Recarregar Página</translation> </message> <message> <source>&amp;Make Text Bigger</source> <translation type="obsolete">&amp;Fazer Texto Maior</translation> </message> <message> <source>&amp;Make Text Normal</source> <translation type="obsolete">&amp;Fazer Texto Normal</translation> </message> <message> <source>&amp;Make Text Smaller</source> <translation type="obsolete">&amp;Fazer Texto Menor</translation> </message> <message> <source>Page S&amp;ource</source> <translation>Código &amp;Fonte</translation> </message> <message> <source>Ctrl+Alt+U</source> <translation>Ctrl+Alt+U</translation> </message> <message> <source>&amp;Full Screen</source> <translation>&amp;Tela Inteira</translation> </message> <message> <source>Hi&amp;story</source> <translation>Hi&amp;stórico</translation> </message> <message> <source>Back</source> <translation>Voltar</translation> </message> <message> <source>Forward</source> <translation>Avançar</translation> </message> <message> <source>Home</source> <translation>Página Inicial</translation> </message> <message> <source>Restore Last Session</source> <translation>Restaurar Última Sessão</translation> </message> <message> <source>&amp;Bookmarks</source> <translation>&amp;Favoritos</translation> </message> <message> <source>Manage Bookmarks...</source> <translation>Gerenciar Favoritos</translation> </message> <message> <source>Add Bookmark...</source> <translation>Adicionar Favorito...</translation> </message> <message> <source>&amp;Window</source> <translation>&amp;Janela</translation> </message> <message> <source>&amp;Tools</source> <translation>&amp;Ferramentas</translation> </message> <message> <source>Web &amp;Search</source> <translation>&amp;Busca na Web</translation> </message> <message> <source>Ctrl+K</source> <comment>Web Search</comment> <translation>Ctrl+K</translation> </message> <message> <source>&amp;Clear Private Data</source> <translation>&amp;Limpar Dados Pessoais</translation> </message> <message> <source>Ctrl+Shift+Delete</source> <comment>Clear Private Data</comment> <translation>Ctrl+Shift+Delete</translation> </message> <message> <source>Enable Web &amp;Inspector</source> <translation>Ativar Web &amp;Inspector</translation> </message> <message> <source>&amp;Help</source> <translation>&amp;Ajuda</translation> </message> <message> <source>About &amp;Qt</source> <translation>Sobre &amp;Qt</translation> </message> <message> <source>About &amp;Arora</source> <translation>Sobre &amp;Arora</translation> </message> <message> <source>Navigation</source> <translation>Navegação</translation> </message> <message> <source>Show Status Bar</source> <translation>Mostrar Barra de Status</translation> </message> <message> <source>Hide Status Bar</source> <translation>Esconder Barra de Status</translation> </message> <message> <source>Show Toolbar</source> <translation>Mostrar Barra de Ferramentas</translation> </message> <message> 
<source>Hide Toolbar</source> <translation>Esconder Barra de Ferramentas</translation> </message> <message> <source>Show Bookmarks bar</source> <translation type="obsolete">Mostrar barra de Favoritos</translation> </message> <message> <source>Hide Bookmarks bar</source> <translation type="obsolete">Esconder barra de Favoritos</translation> </message> <message> <source>Arora</source> <translation>Arora</translation> </message> <message> <source>%1 - Arora</source> <comment>Page title and Browser name</comment> <translation>%1 - Arora</translation> </message> <message> <source>Open Web Resource</source> <translation>Abrir Recurso Web</translation> </message> <message> <source>Web Resources (*.html *.htm *.svg *.png *.gif *.svgz);;All files (*.*)</source> <translation>Web Resources (*.html *.htm *.svg *.png *.gif *.svgz);;Todos Arquivos (*.*)</translation> </message> <message> <source>Print Document</source> <translation>Imprimir Documento</translation> </message> <message> <source>Are you sure you want to turn on private browsing?</source> <translation type="unfinished"></translation> </message> <message> <source>&lt;b&gt;%1&lt;/b&gt;&lt;br&gt;&lt;br&gt;When private browsing is turned on, some actions concerning your privacy will be disabled:&lt;ul&gt;&lt;li&gt; Webpages are not added to the history.&lt;/li&gt;&lt;li&gt; Items are automatically removed from the Downloads window.&lt;/li&gt;&lt;li&gt; New cookies are not stored, current cookies can&apos;t be accessed.&lt;/li&gt;&lt;li&gt; Site icons won&apos;t be stored, session won&apos;t be saved.&lt;/li&gt;&lt;li&gt; Searches are not addded to the pop-up menu in the search box.&lt;/li&gt;&lt;/ul&gt;Until you close the window, you can still click the Back and Forward buttons to return to the webpages you have opened.</source> <translation type="unfinished"></translation> </message> <message> <source>Are you sure you want to close the window? There are %1 tabs open</source> <translation type="unfinished"></translation> </message> <message> <source>Page Source of %1</source> <translation>Código Fonte da página %1</translation> </message> <message> <source>Web Inspector</source> <translation>Web Inspector</translation> </message> <message> <source>The web inspector will only work correctly for pages that were loaded after enabling. 
Do you want to reload all pages?</source> <translation type="unfinished"></translation> </message> <message> <source>Stop loading the current page</source> <translation>Parar de carregar a página atual</translation> </message> <message> <source>Reload the current page</source> <translation>Recarregar página atual</translation> </message> <message> <source>Downloads</source> <translation>Downloads</translation> </message> <message> <source>Alt+Ctrl+L</source> <comment>Download Manager</comment> <translation>Alt+Ctrl+L</translation> </message> <message> <source>Find &amp;Next</source> <translation type="unfinished"></translation> </message> <message> <source>Find P&amp;revious</source> <translation type="unfinished"></translation> </message> <message> <source>Prefe&amp;rences</source> <translation type="unfinished"></translation> </message> <message> <source>&amp;Reload Page</source> <translation type="unfinished"></translation> </message> <message> <source>Make Text &amp;Bigger</source> <translation type="unfinished"></translation> </message> <message> <source>Make Text &amp;Normal</source> <translation type="unfinished"></translation> </message> <message> <source>Make Text &amp;Smaller</source> <translation type="unfinished"></translation> </message> <message> <source>Show Bookmarks Bar</source> <translation type="unfinished"></translation> </message> <message> <source>Hide Bookmarks Bar</source> <translation type="unfinished"></translation> </message> </context> <context> <name>ClearButton</name> <message> <source>Clear</source> <translation>Limpar</translation> </message> </context> <context> <name>ClearPrivateData</name> <message> <source>Clear Private Data</source> <translation>Limpar Dados Pessoais</translation> </message> <message> <source>Clear the following items:</source> <translation>Limpar os seguintes itens:</translation> </message> <message> <source>&amp;Browsing History</source> <translation>&amp;Navegar pelo Histórico</translation> </message> <message> <source>&amp;Download History</source> <translation>&amp;Baixar Histórico</translation> </message> <message> <source>&amp;Search History</source> <translation>&amp;Buscar Histórico</translation> </message> <message> <source>&amp;Cookies</source> <translation>&amp;Cookies</translation> </message> <message> <source>C&amp;ache</source> <translation>C&amp;ache</translation> </message> <message> <source>Website &amp;Icons</source> <translation>&amp;Ícones do Site</translation> </message> <message> <source>Clear &amp;Private Data</source> <translation>Limpar Informações &amp;Pessoais</translation> </message> <message> <source>&amp;Cancel</source> <translation>&amp;Cancelar</translation> </message> </context> <context> <name>CookieExceptionsModel</name> <message> <source>Website</source> <translation>Site</translation> </message> <message> <source>Status</source> <translation>Status</translation> </message> <message> <source>Allow</source> <translation>Permitir</translation> </message> <message> <source>Block</source> <translation>Bloquear</translation> </message> <message> <source>Allow For Session</source> <translation>Permitir Para Sessão</translation> </message> </context> <context> <name>CookieModel</name> <message> <source>Website</source> <translation>Site</translation> </message> <message> <source>Name</source> <translation>Nome</translation> </message> <message> <source>Path</source> <translation>Caminho</translation> </message> <message> <source>Secure</source> <translation>Seguro</translation> </message> <message> 
<source>Expires</source> <translation>Expira</translation> </message> <message> <source>Contents</source> <translation>Conteúdo</translation> </message> </context> <context> <name>CookiesDialog</name> <message> <source>Cookies</source> <translation>Cookies</translation> </message> <message> <source>&amp;Remove</source> <translation>&amp;Remover</translation> </message> <message> <source>Remove &amp;All Cookies</source> <translation>Remover &amp;Todos Cookies</translation> </message> </context> <context> <name>CookiesExceptionsDialog</name> <message> <source>Cookie Exceptions</source> <translation>Exceção Cookie</translation> </message> <message> <source>New Exception</source> <translation>Nova Exceção</translation> </message> <message> <source>Domain:</source> <translation>Domínio:</translation> </message> <message> <source>Block</source> <translation>Bloquear</translation> </message> <message> <source>Allow For Session</source> <translation>Permitir para essa Sessão</translation> </message> <message> <source>Allow</source> <translation>Permitir</translation> </message> <message> <source>Exceptions</source> <translation>Exceções</translation> </message> <message> <source>&amp;Remove</source> <translation>&amp;Remover</translation> </message> <message> <source>Remove &amp;All</source> <translation>Remover &amp;Todos</translation> </message> </context> <context> <name>DownloadDialog</name> <message> <source>Downloads</source> <translation>Downloads</translation> </message> <message> <source>Clean up</source> <translation>Limpar</translation> </message> <message> <source>0 Items</source> <translation>0 itens</translation> </message> </context> <context> <name>DownloadItem</name> <message> <source>Form</source> <translation>Formulário</translation> </message> <message> <source>Ico</source> <translation>Ico</translation> </message> <message> <source>Filename</source> <translation>Nome do Arquivo</translation> </message> <message> <source>Try Again</source> <translation>Tentar Novamente</translation> </message> <message> <source>Stop</source> <translation>Parar</translation> </message> <message> <source>Open</source> <translation>Abrir</translation> </message> <message> <source>Save File</source> <translation>Salvar Arquivo</translation> </message> <message> <source>Download canceled: %1</source> <translation>Download cancelado: %1</translation> </message> <message> <source>Error opening save file: %1</source> <translation>Erro abrindo arquivo saldo: %1</translation> </message> <message> <source>Error saving: %1</source> <translation>Erro salvando: %1</translation> </message> <message> <source>Network Error: %1</source> <translation>Erro na Rede: %1</translation> </message> <message> <source>seconds</source> <translation>segundos</translation> </message> <message> <source>minutes</source> <translation type="obsolete">minutos</translation> </message> <message> <source>- %4 %5 remaining</source> <translation type="obsolete">- %4 %5 restante</translation> </message> <message> <source>%1 of %2 (%3/sec) %4</source> <translation>%1 de %2 (%3/segundo) %4</translation> </message> <message> <source>?</source> <translation>?</translation> </message> <message> <source>%1 of %2 - Stopped</source> <translation>%1 de %2 - Parado</translation> </message> <message> <source>bytes</source> <translation>bytes</translation> </message> <message> <source>kB</source> <translation>kB</translation> </message> <message> <source>MB</source> <translation>MB</translation> </message> <message numerus="yes"> <source>- %n 
minutes remaining</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> <message numerus="yes"> <source>- %n seconds remaining</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> </context> <context> <name>DownloadManager</name> <message> <source>1 Download</source> <translation type="obsolete">1 Download</translation> </message> <message> <source>%1 Downloads</source> <translation type="obsolete">%1 Downloads</translation> </message> <message numerus="yes"> <source>%n Download(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> </context> <context> <name>HistoryDialog</name> <message> <source>History</source> <translation>Histórico</translation> </message> <message> <source>&amp;Remove</source> <translation>&amp;Remover</translation> </message> <message> <source>Remove &amp;All</source> <translation>Remover &amp;Todos</translation> </message> <message> <source>Open</source> <translation>Abrir</translation> </message> <message> <source>Copy</source> <translation>Copiar</translation> </message> <message> <source>Delete</source> <translation>Deletar</translation> </message> </context> <context> <name>HistoryMenu</name> <message> <source>Show All History</source> <translation>Mostrar Todos Histórico</translation> </message> <message> <source>Clear History</source> <translation>Limpar Histórico</translation> </message> </context> <context> <name>HistoryModel</name> <message> <source>Title</source> <translation>Título</translation> </message> <message> <source>Address</source> <translation>Endereço</translation> </message> </context> <context> <name>HistoryTreeModel</name> <message> <source>Earlier Today</source> <translation>Hoje mais cedo</translation> </message> <message> <source>%1 items</source> <translation type="obsolete">%1 itens</translation> </message> <message numerus="yes"> <source>%n item(s)</source> <translation type="unfinished"> <numerusform></numerusform> </translation> </message> </context> <context> <name>NetworkAccessManager</name> <message> <source>&lt;qt&gt;Enter username and password for &quot;%1&quot; at %2&lt;/qt&gt;</source> <translation>&lt;qt&gt;Informe usuário e senha para \&quot;%1\&quot; e \&quot;%2\&quot;&lt;/qt&gt;</translation> </message> <message> <source>&lt;qt&gt;Connect to proxy &quot;%1&quot; using:&lt;/qt&gt;</source> <translation>&lt;qt&gt;Conectar a proxy \&quot;%1\&quot; usando:&lt;/qt&gt;</translation> </message> <message> <source>SSL Errors: %1 %2 Do you want to ignore these errors?</source> <translation>Erros SSL: %1 %2 Você deseja ignorar esses erros?</translation> </message> <message> <source>Do you want to accept all these certificates?</source> <translation>Você deseja aceitar todos esses certificados?</translation> </message> </context> <context> <name>PasswordDialog</name> <message> <source>Authentication Required</source> <translation>Atenticação Requerida</translation> </message> <message> <source>DUMMY ICON</source> <translation type="unfinished"></translation> </message> <message> <source>INTRO TEXT DUMMY</source> <translation>Texto de Introdução</translation> </message> <message> <source>Username:</source> <translation>Usuário:</translation> </message> <message> <source>Password:</source> <translation>Senha:</translation> </message> </context> <context> <name>ProxyDialog</name> <message> <source>Proxy Authentication</source> <translation>Autenticação Proxy</translation> </message> <message> 
<source>ICON</source> <translation>ICON</translation> </message> <message> <source>Connect to proxy</source> <translation>Conectar a proxy</translation> </message> <message> <source>Username:</source> <translation>Usuário:</translation> </message> <message> <source>Password:</source> <translation>Senha:</translation> </message> </context> <context> <name>QObject</name> <message> <source>The file is not an XBEL version 1.0 file.</source> <translation>O arquivo não é um arquivo XBEL versão 1.0</translation> </message> <message> <source>Unknown title</source> <translation>Título desconhecido</translation> </message> </context> <context> <name>SearchBanner</name> <message> <source>Form</source> <translation>Formulário</translation> </message> <message> <source>TextLabel</source> <translation>TextLabel</translation> </message> <message> <source>&lt;</source> <translation>&lt;</translation> </message> <message> <source>&gt;</source> <translation>&gt;</translation> </message> <message> <source>Done</source> <translation>Feito</translation> </message> </context> <context> <name>SearchLineEdit</name> <message> <source>Search</source> <translation>Busca</translation> </message> </context> <context> <name>Settings</name> <message> <source>Settings</source> <translation>Preferências</translation> </message> <message> <source>General</source> <translation>Geral</translation> </message> <message> <source>Home:</source> <translation>Págtina Inicial:</translation> </message> <message> <source>Set to current page</source> <translation>Setar a página atual</translation> </message> <message> <source>Remove history items:</source> <translation>Remover itens do histórico:</translation> </message> <message> <source>After one day</source> <translation>Depois de um dia</translation> </message> <message> <source>After one week</source> <translation>Depois de uma semana</translation> </message> <message> <source>After two weeks</source> <translation>Depois de 2 semanas</translation> </message> <message> <source>After one month</source> <translation>Depois de um mês</translation> </message> <message> <source>After one year</source> <translation>Depois de um ano</translation> </message> <message> <source>Manually</source> <translation>Manualmente</translation> </message> <message> <source>Open links from applications:</source> <translation>Abrir links de uma aplicação:</translation> </message> <message> <source>In a tab in the current window</source> <translation>Em uma aba na janela atual</translation> </message> <message> <source>In a new window</source> <translation>Em uma nova janela</translation> </message> <message> <source>Downloads</source> <translation>Downloads</translation> </message> <message> <source>Ask for a destination each time</source> <translation>Perguntar um destino todas as vezes</translation> </message> <message> <source>Use this destination:</source> <translation>Use esse destino:</translation> </message> <message> <source>Appearance</source> <translation>Aparência</translation> </message> <message> <source>Standard font:</source> <translation>Fonte padrão:</translation> </message> <message> <source>Times 16</source> <translation>Times 16</translation> </message> <message> <source>Select...</source> <translation>Selecionar...</translation> </message> <message> <source>Fixed-width font:</source> <translation>Fonte com largura fixa:</translation> </message> <message> <source>Courier 13</source> <translation>Courier 13</translation> </message> <message> <source>Privacy</source> 
<translation>Privacidade</translation> </message> <message> <source>Web Content</source> <translation>Conteúdo Web</translation> </message> <message> <source>Enable Plugins</source> <translation>Permitir Plugins</translation> </message> <message> <source>Enable Javascript</source> <translation>Permitir JavaScript</translation> </message> <message> <source>Cookies</source> <translation>Cookies</translation> </message> <message> <source>Accept Cookies:</source> <translation>Aceitar Cookies:</translation> </message> <message> <source>Always</source> <translation>Sempre</translation> </message> <message> <source>Never</source> <translation>Nunca</translation> </message> <message> <source>Only from sites you navigate to</source> <translation>Apenas sites que você navegar</translation> </message> <message> <source>Exceptions...</source> <translation>Exceções...</translation> </message> <message> <source>Keep until:</source> <translation>Manter até:</translation> </message> <message> <source>They expire</source> <translation>Eles expirarem</translation> </message> <message> <source>I exit the application</source> <translation>Eu fecho a aplicação</translation> </message> <message> <source>At most 90 days</source> <translation>Mais de 90 dias</translation> </message> <message> <source>Cookies...</source> <translation>Cookies...</translation> </message> <message> <source>Proxy</source> <translation>Proxy</translation> </message> <message> <source>Enable proxy</source> <translation>Ativar proxy</translation> </message> <message> <source>Type:</source> <translation>Tipo:</translation> </message> <message> <source>Socks5</source> <translation>Socks5</translation> </message> <message> <source>Http</source> <translation>Http</translation> </message> <message> <source>Host:</source> <translation>Host:</translation> </message> <message> <source>Port:</source> <translation>Porta:</translation> </message> <message> <source>User Name:</source> <translation>Usuário:</translation> </message> <message> <source>Password:</source> <translation>Senha:</translation> </message> <message> <source>Advanced</source> <translation>Avançado</translation> </message> <message> <source>Style Sheet:</source> <translation>Folha de Estilo:</translation> </message> <message> <source>On startup:</source> <translation type="unfinished"></translation> </message> <message> <source>Show my home page</source> <translation type="unfinished"></translation> </message> <message> <source>Show a blank page</source> <translation type="unfinished"></translation> </message> <message> <source>Restore windows and tabs from last time</source> <translation type="unfinished"></translation> </message> <message> <source>On application exit</source> <translation type="unfinished"></translation> </message> <message> <source>Enable Images</source> <translation type="unfinished"></translation> </message> <message> <source>Tabs</source> <translation type="unfinished"></translation> </message> <message> <source>Select tabs and windows as they are created</source> <translation type="unfinished"></translation> </message> <message> <source>Confirm when closing multiple tabs</source> <translation type="unfinished"></translation> </message> </context> <context> <name>TabBar</name> <message> <source>New &amp;Tab</source> <translation>Nova &amp;Aba</translation> </message> <message> <source>Duplicate Tab</source> <translation>Duplicar Aba</translation> </message> <message> <source>&amp;Close Tab</source> <translation>&amp;Fechar Aba</translation> </message> 
<message> <source>Close &amp;Other Tabs</source> <translation>Fechar &amp;Outras Abas</translation> </message> <message> <source>Reload Tab</source> <translation>Recarregar Aba</translation> </message> <message> <source>Reload All Tabs</source> <translation>Recarregar Todas as Abas</translation> </message> <message> <source>Show Tab Bar</source> <translation type="unfinished"></translation> </message> <message> <source>Hide Tab Bar</source> <translation type="unfinished"></translation> </message> </context> <context> <name>TabWidget</name> <message> <source>New &amp;Tab</source> <translation>Nova &amp;Aba</translation> </message> <message> <source>&amp;Close Tab</source> <translation>&amp;Fechar Aba</translation> </message> <message> <source>Show Next Tab</source> <translation>Mostrar Próxima Aba</translation> </message> <message> <source>Show Previous Tab</source> <translation>Mostrar Aba Anterior</translation> </message> <message> <source>Recently Closed Tabs</source> <translation>Abas Recentemente Fechadas</translation> </message> <message> <source>(Untitled)</source> <translation>(Sem Título)</translation> </message> <message> <source>Do you really want to close this page?</source> <translation>Você deseja fechar essa página?</translation> </message> <message> <source>You have modified this page and when closing it you would lose the modification. Do you really want to close this page? </source> <translation>Você modificou essa página e quando fechar você perderá as modificações. Você realmente deseja fechar essa página?</translation> </message> </context> <context> <name>ToolbarSearch</name> <message> <source>Google</source> <translation>Google</translation> </message> <message> <source>No Recent Searches</source> <translation>Sem Pesquisas Recentes</translation> </message> <message> <source>Recent Searches</source> <translation>Pesquisas Recentes</translation> </message> <message> <source>Clear Recent Searches</source> <translation>Limpar Pesquisas Recentes</translation> </message> </context> <context> <name>WebPage</name> <message> <source>Error loading page: %1</source> <translation>Erro carregando página: %1</translation> </message> </context> <context> <name>WebView</name> <message> <source>Open in New &amp;Window</source> <translation>Abrir em uma Nova &amp;Janela</translation> </message> <message> <source>Open in New &amp;Tab</source> <translation>Abrir em uma Nova &amp;Aba</translation> </message> <message> <source>Save Lin&amp;k</source> <translation>Salvar Lin&amp;k</translation> </message> <message> <source>&amp;Bookmark This Link</source> <translation>&amp;Adicionar Link a Favoritos</translation> </message> <message> <source>&amp;Copy Link Location</source> <translation>&amp;Copar Endereço do Link</translation> </message> <message> <source>Open Image in New &amp;Window</source> <translation>Abrir Imagem em uma Nova &amp;Janela</translation> </message> <message> <source>Open Image in New &amp;Tab</source> <translation>Abrir imagem em uma Nova &amp;Aba</translation> </message> <message> <source>&amp;Save Image</source> <translation>&amp;Salvar Imagem</translation> </message> <message> <source>&amp;Copy Image</source> <translation>&amp;Copiar Imagem</translation> </message> <message> <source>C&amp;opy Image Location</source> <translation>C&amp;opiar Endereço da Imagem</translation> </message> <message> <source>Loading...</source> <translation type="unfinished"></translation> </message> </context> <context> <name>WebViewSearch</name> <message> <source>Not Found</source> 
<translation>Não Encontrado</translation> </message> </context> </TS>
ckleow2000/arora
src/locale/pt_BR.ts
TypeScript
gpl-2.0
42,009
29.638158
691
0.614225
false
/* ScummVM - Graphic Adventure Engine * * ScummVM is the legal property of its developers, whose names * are too numerous to list here. Please refer to the COPYRIGHT * file distributed with this source distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #ifndef __FRAMFS_SAVE_MANAGER__ #define __FRAMFS_SAVE_MANAGER__ #include <common/savefile.h> #include <common/zlib.h> #include <framfs.h> // N64 FramFS library bool fram_deleteSaveGame(const char *filename); class InFRAMSave : public Common::InSaveFile { private: FRAMFILE *fd; uint32 read(void *buf, uint32 cnt) override; bool skip(uint32 offset) override; bool seek(int64 offs, int whence) override; public: InFRAMSave() : fd(NULL) { } ~InFRAMSave() { if (fd != NULL) framfs_close(fd); } bool eos() const override { return framfs_eof(fd); } void clearErr() override { framfs_clearerr(fd); } int64 pos() const override { return framfs_tell(fd); } int64 size() const override { return fd->size; } bool readSaveGame(const char *filename) { fd = framfs_open(filename, "r"); return (fd != NULL); } }; class OutFRAMSave : public Common::WriteStream { private: FRAMFILE *fd; public: uint32 write(const void *buf, uint32 cnt); virtual int64 pos() const { return framfs_tell(fd); } OutFRAMSave(const char *_filename) : fd(NULL) { fd = framfs_open(_filename, "w"); } ~OutFRAMSave() { if (fd != NULL) { finalize(); framfs_close(fd); } } bool err() const { if (fd) return (framfs_error(fd) == 1); else return true; } void clearErr() { framfs_clearerr(fd); } void finalize() { framfs_flush(fd); } }; class FRAMSaveManager : public Common::SaveFileManager { public: void updateSavefilesList(Common::StringArray &lockedFiles) override { // this method is used to lock saves while cloud syncing // as there is no network on N64, this method wouldn't be used // thus it's not implemtented } Common::InSaveFile *openRawFile(const Common::String &filename) override { InFRAMSave *s = new InFRAMSave(); if (s->readSaveGame(filename.c_str())) { return s; } else { delete s; return 0; } } Common::OutSaveFile *openForSaving(const Common::String &filename, bool compress = true) override { OutFRAMSave *s = new OutFRAMSave(filename.c_str()); if (!s->err()) { return new Common::OutSaveFile(compress ? Common::wrapCompressedWriteStream(s) : s); } else { delete s; return 0; } } Common::InSaveFile *openForLoading(const Common::String &filename) override { InFRAMSave *s = new InFRAMSave(); if (s->readSaveGame(filename.c_str())) { return Common::wrapCompressedReadStream(s); } else { delete s; return 0; } } bool removeSavefile(const Common::String &filename) override { return ::fram_deleteSaveGame(filename.c_str()); } Common::StringArray listSavefiles(const Common::String &pattern) override; bool exists(const Common::String &filename) override { return InFRAMSave().readSaveGame(filename.c_str()); } }; #endif
vanfanel/scummvm
backends/platform/n64/framfs_save_manager.h
C
gpl-2.0
3,715
23.281046
100
0.696097
false
/* * AMLOGIC Audio/Video streaming port driver. * * * Author: Simon Zheng <simon.zheng@amlogic.com> * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <mach/am_regs.h> #include <plat/io.h> #include <linux/ctype.h> #include <linux/amports/ptsserv.h> #include <linux/amports/amstream.h> #include <linux/amports/canvas.h> #include <linux/amports/vframe.h> #include <linux/amports/vframe_provider.h> #include <linux/amports/vframe_receiver.h> #include "vdec_reg.h" #include <linux/delay.h> #define ENC_CANVAS_OFFSET AMVENC_CANVAS_INDEX #define LOG_LEVEL_VAR 1 #define debug_level(level, x...) \ do { \ if (level >= LOG_LEVEL_VAR) \ printk(x); \ } while (0); #define PUT_INTERVAL (HZ/100) #ifdef CONFIG_AM_VDEC_MJPEG_LOG #define AMLOG #define LOG_LEVEL_VAR amlog_level_avc #define LOG_MASK_VAR amlog_mask_avc #define LOG_LEVEL_ERROR 0 #define LOG_LEVEL_INFO 1 #define LOG_LEVEL_DESC "0:ERROR, 1:INFO" #endif #include <linux/amlog.h> MODULE_AMLOG(LOG_LEVEL_ERROR, 0, LOG_LEVEL_DESC, LOG_DEFAULT_MASK_DESC); #include "encoder.h" #include "amvdec.h" #include "encoder_mc.h" static int avc_device_major = 0; static struct class *amvenc_avc_class; static struct device *amvenc_avc_dev; #define DRIVER_NAME "amvenc_avc" #define MODULE_NAME "amvenc_avc" #define DEVICE_NAME "amvenc_avc" /* protocol register usage #define ENCODER_STATUS HENC_SCRATCH_0 : encode stage #define MEM_OFFSET_REG HENC_SCRATCH_1 : assit buffer physical address #define DEBUG_REG HENC_SCRATCH_2 : debug register #define MB_COUNT HENC_SCRATCH_3 : MB encoding number */ /*output buffer define*/ static unsigned BitstreamStart; static unsigned BitstreamEnd; //static unsigned BitstreamIntAddr; /*input buffer define*/ static unsigned dct_buff_start_addr; static unsigned dct_buff_end_addr; /*deblock buffer define*/ //static unsigned dblk_buf_addr; static unsigned dblk_buf_canvas; /*reference buffer define*/ //static unsigned ref_buf_addr ; //192 static unsigned ref_buf_canvas; //((192<<16)|(192<<8)|(192<<0)) /*microcode assitant buffer*/ static unsigned assit_buffer_offset; //static struct dec_sysinfo avc_amstream_dec_info; static u32 stat; //static u32 cur_stage; static u32 frame_start;//0: processing 1:restart static u32 quant = 28; static u32 encoder_width = 1280; static u32 encoder_height = 720; static void avc_prot_init(void); static s32 avc_poweron(void); static void dma_flush(unsigned buf_start , unsigned buf_size ); //static void avc_local_init(void); static int idr_pic_id = 0; //need reset as 0 for IDR static u32 frame_number = 0 ; //need plus each frame static u32 pic_order_cnt_lsb = 0 ; //need reset as 0 for IDR and plus 2 for NON-IDR static u32 log2_max_pic_order_cnt_lsb = 4 ; static u32 log2_max_frame_num =4 ; static u32 anc0_buffer_id =0; static u32 qppicture =26; static u32 process_irq = 0; static int encode_inited = 0; static int encode_opened = 0; static const char avc_dec_id[] = "avc-dev"; #define AMVENC_BUFFER_LEVEL_480P 0 #define AMVENC_BUFFER_LEVEL_720P 1 #define AMVENC_BUFFER_LEVEL_1080P 2 typedef struct { u32 buf_start; u32 buf_size; } Buff_t; typedef struct { u32 lev_id; u32 min_buffsize; u32 max_width; u32 max_height; Buff_t dct; Buff_t dec0_y; Buff_t dec0_uv; Buff_t dec1_y; Buff_t dec1_uv; Buff_t assit; Buff_t bitstream; } BuffInfo_t; const BuffInfo_t amvenc_buffspec[]={ { .lev_id = AMVENC_BUFFER_LEVEL_480P, 
.max_width = 640, .max_height = 480, .min_buffsize = 0x400000, .dct = { .buf_start = 0, .buf_size = 0xfe000, }, .dec0_y = { .buf_start = 0x100000, .buf_size = 0x50000, }, .dec0_uv = { .buf_start = 0x150000, .buf_size = 0x30000, }, .dec1_y = { .buf_start = 0x180000, .buf_size = 0x50000, }, .dec1_uv = { .buf_start = 0x1d0000, .buf_size = 0x30000, }, .assit = { .buf_start = 0x240000, .buf_size = 0xc0000, }, .bitstream = { .buf_start = 0x300000, .buf_size = 0x100000, } },{ .lev_id = AMVENC_BUFFER_LEVEL_720P, .max_width = 1280, .max_height = 720, .min_buffsize = 0x800000, .dct = { .buf_start = 0, .buf_size = 0x2f8000, }, .dec0_y = { .buf_start = 0x300000, .buf_size = 0xf0000, }, .dec0_uv = { .buf_start = 0x400000, .buf_size = 0x80000, }, .dec1_y = { .buf_start = 0x480000, .buf_size = 0xf0000, }, .dec1_uv = { .buf_start = 0x580000, .buf_size = 0x80000, }, .assit = { .buf_start = 0x640000, .buf_size = 0xc0000, }, .bitstream = { .buf_start = 0x700000, .buf_size = 0x100000, } },{ .lev_id = AMVENC_BUFFER_LEVEL_1080P, .max_width = 1920, .max_height = 1088, .min_buffsize = 0xf00000, .dct = { .buf_start = 0, .buf_size = 0x6ba000, }, .dec0_y = { .buf_start = 0x6d0000, .buf_size = 0x1fe000, }, .dec0_uv = { .buf_start = 0x8d0000, .buf_size = 0xff000, }, .dec1_y = { .buf_start = 0x9d0000, .buf_size = 0x1fe000, }, .dec1_uv = { .buf_start = 0xbd0000, .buf_size = 0xff000, }, .assit = { .buf_start = 0xd10000, .buf_size = 0xc0000, }, .bitstream = { .buf_start = 0xe00000, .buf_size = 0x100000, } } }; typedef struct { u32 buf_start; u32 buf_size; u8 cur_buf_lev; BuffInfo_t* bufspec; } EncBuffer_t; static EncBuffer_t gAmvencbuff = {0,0,0,NULL}; static void avc_canvas_init(void); void amvenc_reset(void); /* static DEFINE_SPINLOCK(lock); static void avc_put_timer_func(unsigned long arg) { struct timer_list *timer = (struct timer_list *)arg; timer->expires = jiffies + PUT_INTERVAL; add_timer(timer); } */ int avc_dec_status(struct vdec_status *vstatus) { return 0; } /*output stream buffer setting*/ static void avc_init_output_buffer(void) { WRITE_HREG(VLC_VB_MEM_CTL ,((1<<31)|(0x3f<<24)|(0x20<<16)|(2<<0)) ); WRITE_HREG(VLC_VB_START_PTR, BitstreamStart); WRITE_HREG(VLC_VB_WR_PTR, BitstreamStart); WRITE_HREG(VLC_VB_SW_RD_PTR, BitstreamStart); WRITE_HREG(VLC_VB_END_PTR, BitstreamEnd); WRITE_HREG(VLC_VB_CONTROL, 1); WRITE_HREG(VLC_VB_CONTROL, ((0<<14)|(7<<3)|(1<<1)|(0<<0))); } /*input dct buffer setting*/ static void avc_init_input_buffer(void) { WRITE_HREG(QDCT_MB_START_PTR ,dct_buff_start_addr ); WRITE_HREG(QDCT_MB_END_PTR, dct_buff_end_addr); WRITE_HREG(QDCT_MB_WR_PTR, dct_buff_start_addr); WRITE_HREG(QDCT_MB_RD_PTR, dct_buff_start_addr); WRITE_HREG(QDCT_MB_BUFF, 0); } /*input reference buffer setting*/ static void avc_init_reference_buffer(int canvas) { WRITE_HREG(ANC0_CANVAS_ADDR ,canvas); WRITE_HREG(VLC_HCMD_CONFIG ,0); } static void avc_init_assit_buffer(void) { WRITE_HREG(MEM_OFFSET_REG,assit_buffer_offset); //memory offset ? 
} /*deblock buffer setting, same as INI_CANVAS*/ static void avc_init_dblk_buffer(int canvas) { WRITE_HREG(REC_CANVAS_ADDR,canvas); WRITE_HREG(DBKR_CANVAS_ADDR,canvas); WRITE_HREG(DBKW_CANVAS_ADDR,canvas); } /*same as INIT_ENCODER*/ static void avc_init_encoder(void) { WRITE_HREG(VLC_TOTAL_BYTES, 0); WRITE_HREG(VLC_CONFIG, 0x07); WRITE_HREG(VLC_INT_CONTROL, 0); //WRITE_HREG(ENCODER_STATUS,ENCODER_IDLE); WRITE_HREG(HCODEC_ASSIST_AMR1_INT0, 0x15); WRITE_HREG(HCODEC_ASSIST_AMR1_INT1, 0x8); WRITE_HREG(HCODEC_ASSIST_AMR1_INT3, 0x14); WRITE_HREG(IDR_PIC_ID ,idr_pic_id); WRITE_HREG(FRAME_NUMBER ,frame_number); WRITE_HREG(PIC_ORDER_CNT_LSB,pic_order_cnt_lsb); log2_max_pic_order_cnt_lsb= 4; log2_max_frame_num = 4; WRITE_HREG(LOG2_MAX_PIC_ORDER_CNT_LSB , log2_max_pic_order_cnt_lsb); WRITE_HREG(LOG2_MAX_FRAME_NUM , log2_max_frame_num); WRITE_HREG(ANC0_BUFFER_ID, anc0_buffer_id); WRITE_HREG(QPPICTURE, qppicture); } /****************************************/ static void avc_canvas_init(void) { u32 canvas_width, canvas_height; int start_addr = gAmvencbuff.buf_start; canvas_width = encoder_width; canvas_height = encoder_height; /*input dct buffer config */ dct_buff_start_addr = start_addr+gAmvencbuff.bufspec->dct.buf_start; //(w>>4)*(h>>4)*864 dct_buff_end_addr = dct_buff_start_addr + gAmvencbuff.bufspec->dct.buf_size -1 ; debug_level(0,"dct_buff_start_addr is %x \n",dct_buff_start_addr); canvas_config(ENC_CANVAS_OFFSET, start_addr + gAmvencbuff.bufspec->dec0_y.buf_start, canvas_width, canvas_height, CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); canvas_config(1 + ENC_CANVAS_OFFSET, start_addr + gAmvencbuff.bufspec->dec0_uv.buf_start, canvas_width , canvas_height/2, CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); /*here the third plane use the same address as the second plane*/ canvas_config(2 + ENC_CANVAS_OFFSET, start_addr + gAmvencbuff.bufspec->dec0_uv.buf_start, canvas_width , canvas_height/2, CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); canvas_config(3 + ENC_CANVAS_OFFSET, start_addr + gAmvencbuff.bufspec->dec1_y.buf_start, canvas_width, canvas_height, CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); canvas_config(4 + ENC_CANVAS_OFFSET, start_addr + gAmvencbuff.bufspec->dec1_uv.buf_start, canvas_width , canvas_height/2, CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); /*here the third plane use the same address as the second plane*/ canvas_config(5 + ENC_CANVAS_OFFSET, start_addr + gAmvencbuff.bufspec->dec1_uv.buf_start, canvas_width , canvas_height/2, CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); assit_buffer_offset = start_addr + gAmvencbuff.bufspec->assit.buf_start; debug_level(0,"assit_buffer_offset is %x \n",assit_buffer_offset); /*output stream buffer config*/ BitstreamStart = start_addr + gAmvencbuff.bufspec->bitstream.buf_start; BitstreamEnd = BitstreamStart + gAmvencbuff.bufspec->bitstream.buf_size -1; debug_level(0,"BitstreamStart is %x \n",BitstreamStart); dblk_buf_canvas = ((ENC_CANVAS_OFFSET+2) <<16)|((ENC_CANVAS_OFFSET + 1) <<8)|(ENC_CANVAS_OFFSET); ref_buf_canvas = ((ENC_CANVAS_OFFSET +5) <<16)|((ENC_CANVAS_OFFSET + 4) <<8)|(ENC_CANVAS_OFFSET +3); debug_level(0,"dblk_buf_canvas is %d ; ref_buf_canvas is %d \n",dblk_buf_canvas , ref_buf_canvas); } /* static void init_scaler(void) { } static void avc_local_init(void) { } */ static int encoder_status; static irqreturn_t enc_isr(int irq, void *dev_id) { int temp_canvas; WRITE_HREG(HCODEC_ASSIST_MBOX1_CLR_REG, 1); encoder_status = READ_HREG(ENCODER_STATUS); if((encoder_status == ENCODER_IDR_DONE) ||(encoder_status == ENCODER_NON_IDR_DONE) ||(encoder_status == 
ENCODER_SEQUENCE_DONE) ||(encoder_status == ENCODER_PICTURE_DONE)){ debug_level(0,"encoder stage is %d\n",encoder_status); } if(((encoder_status == ENCODER_IDR_DONE) ||(encoder_status == ENCODER_NON_IDR_DONE))&&(!process_irq)){ temp_canvas = dblk_buf_canvas; dblk_buf_canvas = ref_buf_canvas; ref_buf_canvas = temp_canvas; //current dblk buffer as next reference buffer frame_start = 1; frame_number ++; pic_order_cnt_lsb += 2; process_irq = 1; debug_level(0,"encoder is done %d\n",encoder_status); } return IRQ_HANDLED; } int avc_endian = 6; static void avc_prot_init(void) { unsigned int data32; int pic_width, pic_height; int pic_mb_nr; int pic_mbx, pic_mby; int i_pic_qp, p_pic_qp; int i_pic_qp_c, p_pic_qp_c; pic_width = encoder_width; pic_height = encoder_height; pic_mb_nr = 0; pic_mbx = 0; pic_mby = 0; i_pic_qp = quant; p_pic_qp = quant; WRITE_HREG(VLC_PIC_SIZE, pic_width | (pic_height<<16)); WRITE_HREG(VLC_PIC_POSITION, (pic_mb_nr<<16) | (pic_mby << 8) | (pic_mbx <<0)); //start mb switch (i_pic_qp) { // synopsys parallel_case full_case case 0 : i_pic_qp_c = 0; break; case 1 : i_pic_qp_c = 1; break; case 2 : i_pic_qp_c = 2; break; case 3 : i_pic_qp_c = 3; break; case 4 : i_pic_qp_c = 4; break; case 5 : i_pic_qp_c = 5; break; case 6 : i_pic_qp_c = 6; break; case 7 : i_pic_qp_c = 7; break; case 8 : i_pic_qp_c = 8; break; case 9 : i_pic_qp_c = 9; break; case 10 : i_pic_qp_c = 10; break; case 11 : i_pic_qp_c = 11; break; case 12 : i_pic_qp_c = 12; break; case 13 : i_pic_qp_c = 13; break; case 14 : i_pic_qp_c = 14; break; case 15 : i_pic_qp_c = 15; break; case 16 : i_pic_qp_c = 16; break; case 17 : i_pic_qp_c = 17; break; case 18 : i_pic_qp_c = 18; break; case 19 : i_pic_qp_c = 19; break; case 20 : i_pic_qp_c = 20; break; case 21 : i_pic_qp_c = 21; break; case 22 : i_pic_qp_c = 22; break; case 23 : i_pic_qp_c = 23; break; case 24 : i_pic_qp_c = 24; break; case 25 : i_pic_qp_c = 25; break; case 26 : i_pic_qp_c = 26; break; case 27 : i_pic_qp_c = 27; break; case 28 : i_pic_qp_c = 28; break; case 29 : i_pic_qp_c = 29; break; case 30 : i_pic_qp_c = 29; break; case 31 : i_pic_qp_c = 30; break; case 32 : i_pic_qp_c = 31; break; case 33 : i_pic_qp_c = 32; break; case 34 : i_pic_qp_c = 32; break; case 35 : i_pic_qp_c = 33; break; case 36 : i_pic_qp_c = 34; break; case 37 : i_pic_qp_c = 34; break; case 38 : i_pic_qp_c = 35; break; case 39 : i_pic_qp_c = 35; break; case 40 : i_pic_qp_c = 36; break; case 41 : i_pic_qp_c = 36; break; case 42 : i_pic_qp_c = 37; break; case 43 : i_pic_qp_c = 37; break; case 44 : i_pic_qp_c = 37; break; case 45 : i_pic_qp_c = 38; break; case 46 : i_pic_qp_c = 38; break; case 47 : i_pic_qp_c = 38; break; case 48 : i_pic_qp_c = 39; break; case 49 : i_pic_qp_c = 39; break; case 50 : i_pic_qp_c = 39; break; default : i_pic_qp_c = 39; break; // should only be 51 or more (when index_offset) } switch (p_pic_qp) { // synopsys parallel_case full_case case 0 : p_pic_qp_c = 0; break; case 1 : p_pic_qp_c = 1; break; case 2 : p_pic_qp_c = 2; break; case 3 : p_pic_qp_c = 3; break; case 4 : p_pic_qp_c = 4; break; case 5 : p_pic_qp_c = 5; break; case 6 : p_pic_qp_c = 6; break; case 7 : p_pic_qp_c = 7; break; case 8 : p_pic_qp_c = 8; break; case 9 : p_pic_qp_c = 9; break; case 10 : p_pic_qp_c = 10; break; case 11 : p_pic_qp_c = 11; break; case 12 : p_pic_qp_c = 12; break; case 13 : p_pic_qp_c = 13; break; case 14 : p_pic_qp_c = 14; break; case 15 : p_pic_qp_c = 15; break; case 16 : p_pic_qp_c = 16; break; case 17 : p_pic_qp_c = 17; break; case 18 : p_pic_qp_c = 18; break; case 19 : p_pic_qp_c = 
19; break; case 20 : p_pic_qp_c = 20; break; case 21 : p_pic_qp_c = 21; break; case 22 : p_pic_qp_c = 22; break; case 23 : p_pic_qp_c = 23; break; case 24 : p_pic_qp_c = 24; break; case 25 : p_pic_qp_c = 25; break; case 26 : p_pic_qp_c = 26; break; case 27 : p_pic_qp_c = 27; break; case 28 : p_pic_qp_c = 28; break; case 29 : p_pic_qp_c = 29; break; case 30 : p_pic_qp_c = 29; break; case 31 : p_pic_qp_c = 30; break; case 32 : p_pic_qp_c = 31; break; case 33 : p_pic_qp_c = 32; break; case 34 : p_pic_qp_c = 32; break; case 35 : p_pic_qp_c = 33; break; case 36 : p_pic_qp_c = 34; break; case 37 : p_pic_qp_c = 34; break; case 38 : p_pic_qp_c = 35; break; case 39 : p_pic_qp_c = 35; break; case 40 : p_pic_qp_c = 36; break; case 41 : p_pic_qp_c = 36; break; case 42 : p_pic_qp_c = 37; break; case 43 : p_pic_qp_c = 37; break; case 44 : p_pic_qp_c = 37; break; case 45 : p_pic_qp_c = 38; break; case 46 : p_pic_qp_c = 38; break; case 47 : p_pic_qp_c = 38; break; case 48 : p_pic_qp_c = 39; break; case 49 : p_pic_qp_c = 39; break; case 50 : p_pic_qp_c = 39; break; default : p_pic_qp_c = 39; break; // should only be 51 or more (when index_offset) } WRITE_HREG(QDCT_Q_QUANT_I, (i_pic_qp_c<<22) | (i_pic_qp<<16) | ((i_pic_qp_c%6)<<12)|((i_pic_qp_c/6)<<8)|((i_pic_qp%6)<<4)|((i_pic_qp/6)<<0)); WRITE_HREG(QDCT_Q_QUANT_P, (p_pic_qp_c<<22) | (p_pic_qp<<16) | ((p_pic_qp_c%6)<<12)|((p_pic_qp_c/6)<<8)|((p_pic_qp%6)<<4)|((p_pic_qp/6)<<0)); //avc_init_input_buffer(); WRITE_HREG(IGNORE_CONFIG , (1<<31) | // ignore_lac_coeff_en (1<<26) | // ignore_lac_coeff_else (<1) (1<<21) | // ignore_lac_coeff_2 (<1) (2<<16) | // ignore_lac_coeff_1 (<2) (1<<15) | // ignore_cac_coeff_en (1<<10) | // ignore_cac_coeff_else (<1) (1<<5) | // ignore_cac_coeff_2 (<1) (2<<0)); // ignore_cac_coeff_1 (<2) WRITE_HREG(IGNORE_CONFIG_2, (1<<31) | // ignore_t_lac_coeff_en (1<<26) | // ignore_t_lac_coeff_else (<1) (1<<21) | // ignore_t_lac_coeff_2 (<1) (5<<16) | // ignore_t_lac_coeff_1 (<5) (0<<0)); WRITE_HREG(QDCT_MB_CONTROL, (1<<9) | // mb_info_soft_reset (1<<0)); // mb read buffer soft reset WRITE_HREG(QDCT_MB_CONTROL, (0<<28) | // ignore_t_p8x8 (0<<27) | // zero_mc_out_null_non_skipped_mb (0<<26) | // no_mc_out_null_non_skipped_mb (0<<25) | // mc_out_even_skipped_mb (0<<24) | // mc_out_wait_cbp_ready (0<<23) | // mc_out_wait_mb_type_ready (1<<22) | // i_pred_int_enable (1<<19) | // i_pred_enable (1<<20) | // ie_sub_enable (1<<18) | // iq_enable (1<<17) | // idct_enable (1<<14) | // mb_pause_enable (1<<13) | // q_enable (1<<12) | // dct_enable (1<<10) | // mb_info_en (avc_endian<<3) | // endian (1<<1) | // mb_read_en (0<<0)); // soft reset WRITE_HREG(CURR_CANVAS_CTRL,0); //debug_level(0,"current endian is %d \n" , avc_endian); data32 = READ_HREG(VLC_CONFIG); data32 = data32 | (1<<0); // set pop_coeff_even_all_zero WRITE_HREG(VLC_CONFIG , data32); /* clear mailbox interrupt */ WRITE_HREG(HCODEC_ASSIST_MBOX1_CLR_REG, 1); /* enable mailbox interrupt */ WRITE_HREG(HCODEC_ASSIST_MBOX1_MASK, 1); } void amvenc_reset(void) { READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); WRITE_VREG(DOS_SW_RESET1, (1<<2)|(1<<6)|(1<<7)|(1<<8)|(1<<16)|(1<<17)); WRITE_VREG(DOS_SW_RESET1, 0); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); } void amvenc_start(void) { READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); WRITE_VREG(DOS_SW_RESET1, (1<<12)|(1<<11)); WRITE_VREG(DOS_SW_RESET1, 0); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); WRITE_HREG(MPSR, 0x0001); 
} void amvenc_stop(void) { ulong timeout = jiffies + HZ; WRITE_HREG(MPSR, 0); WRITE_HREG(CPSR, 0); while (READ_HREG(IMEM_DMA_CTRL) & 0x8000) { if (time_after(jiffies, timeout)) { break; } } READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); WRITE_VREG(DOS_SW_RESET1, (1<<12)|(1<<11)|(1<<2)|(1<<6)|(1<<7)|(1<<8)|(1<<16)|(1<<17)); //WRITE_VREG(DOS_SW_RESET1, (1<<12)|(1<<11)); WRITE_VREG(DOS_SW_RESET1, 0); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); READ_VREG(DOS_SW_RESET1); } static void __iomem *mc_addr=NULL; static unsigned mc_addr_map; #define MC_SIZE (4096 * 4) s32 amvenc_loadmc(const u32 *p) { ulong timeout; s32 ret = 0 ; mc_addr_map = assit_buffer_offset; mc_addr = ioremap_wc(mc_addr_map,MC_SIZE); memcpy(mc_addr, p, MC_SIZE); debug_level(0,"address 0 is 0x%x\n", *((u32*)mc_addr)); debug_level(0,"address 1 is 0x%x\n", *((u32*)mc_addr + 1)); debug_level(0,"address 2 is 0x%x\n", *((u32*)mc_addr + 2)); debug_level(0,"address 3 is 0x%x\n", *((u32*)mc_addr + 3)); WRITE_HREG(MPSR, 0); WRITE_HREG(CPSR, 0); /* Read CBUS register for timing */ timeout = READ_HREG(MPSR); timeout = READ_HREG(MPSR); timeout = jiffies + HZ; WRITE_HREG(IMEM_DMA_ADR, mc_addr_map); WRITE_HREG(IMEM_DMA_COUNT, 0x1000); WRITE_HREG(IMEM_DMA_CTRL, (0x8000 | (7 << 16))); while (READ_HREG(IMEM_DMA_CTRL) & 0x8000) { if (time_before(jiffies, timeout)) { schedule(); } else { debug_level(1,"hcodec load mc error\n"); ret = -EBUSY; break; } } iounmap(mc_addr); mc_addr=NULL; return ret; } #define DMC_SEC_PORT8_RANGE0 0x840 #define DMC_SEC_CTRL 0x829 void enable_hcoder_ddr_access(void) { WRITE_SEC_REG(DMC_SEC_PORT8_RANGE0 , 0xffff); WRITE_SEC_REG(DMC_SEC_CTRL , 0x80000000); } static s32 avc_poweron(void) { enable_hcoder_ddr_access(); // Enable Dos internal clock gating hvdec_clock_enable(); mdelay(10); return 0; } static s32 avc_poweroff(void) { hvdec_clock_disable(); return 0; } static s32 avc_init(void) { int r; avc_poweron(); avc_canvas_init(); WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1,0x2); debug_level(1,"start to load microcode\n"); if (amvenc_loadmc(encoder_mc) < 0) { //amvdec_disable(); return -EBUSY; } debug_level(1,"succeed to load microcode\n"); //avc_canvas_init(); frame_start = 0; idr_pic_id = 0 ; frame_number = 0 ; process_irq = 0; pic_order_cnt_lsb = 0 ; encoder_status = ENCODER_IDLE ; amvenc_reset(); avc_init_encoder(); avc_init_input_buffer(); //dct buffer setting avc_init_output_buffer(); //output stream buffer avc_prot_init(); r = request_irq(INT_AMVENCODER, enc_isr, IRQF_SHARED, "enc-irq", (void *)avc_dec_id);//INT_MAILBOX_1A avc_init_dblk_buffer(dblk_buf_canvas); //decoder buffer , need set before each frame start avc_init_reference_buffer(ref_buf_canvas); //reference buffer , need set before each frame start avc_init_assit_buffer(); //assitant buffer for microcode WRITE_HREG(ENCODER_STATUS , ENCODER_IDLE); amvenc_start(); encode_inited = 1; return 0; } void amvenc_avc_start_cmd(int cmd, unsigned* input_info) { if((cmd == ENCODER_IDR)||(cmd == ENCODER_SEQUENCE)){ pic_order_cnt_lsb = 0; frame_number = 0; } if(frame_number > 65535){ frame_number = 0; } #if 0 if((idr_pic_id == 0)&&(cmd == ENCODER_IDR)) frame_start = 1; #endif if(frame_start){ frame_start = 0; encoder_status = ENCODER_IDLE ; //WRITE_HREG(HENC_SCRATCH_3,0); //mb count //WRITE_HREG(VLC_TOTAL_BYTES ,0); //offset in bitstream buffer amvenc_reset(); avc_init_encoder(); if(cmd == ENCODER_IDR){ idr_pic_id ++; } if(idr_pic_id > 65535){ idr_pic_id = 0; } avc_init_input_buffer(); avc_init_output_buffer(); avc_prot_init(); 
avc_init_assit_buffer(); debug_level(0,"begin to new frame\n"); } avc_init_dblk_buffer(dblk_buf_canvas); avc_init_reference_buffer(ref_buf_canvas); encoder_status = cmd; WRITE_HREG(ENCODER_STATUS , cmd); if((cmd == ENCODER_IDR)||(cmd == ENCODER_NON_IDR)){ process_irq = 0; } debug_level(0,"amvenc_avc_start\n"); } void amvenc_avc_stop(void) { //WRITE_HREG(MPSR, 0); amvenc_stop(); avc_poweroff(); debug_level(1,"amvenc_avc_stop\n"); } static int amvenc_avc_open(struct inode *inode, struct file *file) { int r = 0; debug_level(1,"avc open\n"); if(encode_opened>0){ debug_level(1, "amvenc_avc open busy.\n"); return -EBUSY; } encode_opened++; #if 0 if (avc_poweron() < 0) { amlog_level(LOG_LEVEL_ERROR, "amvenc_avc init failed.\n"); encode_opened--; return -ENODEV; } #endif return r; } static int amvenc_avc_release(struct inode *inode, struct file *file) { if(encode_inited){ free_irq(INT_AMVENCODER, (void *)avc_dec_id); //amvdec_disable(); amvenc_avc_stop(); encode_inited = 0; } if(encode_opened>0) encode_opened--; debug_level(1,"avc release\n"); return 0; } static void dma_flush(unsigned buf_start , unsigned buf_size ) { //dma_sync_single_for_cpu(amvenc_avc_dev,buf_start, buf_size, DMA_TO_DEVICE); dma_sync_single_for_device(amvenc_avc_dev,buf_start ,buf_size, DMA_TO_DEVICE); } static void cache_flush(unsigned buf_start , unsigned buf_size ) { dma_sync_single_for_cpu(amvenc_avc_dev , buf_start, buf_size, DMA_FROM_DEVICE); //dma_sync_single_for_device(amvenc_avc_dev ,buf_start , buf_size, DMA_FROM_DEVICE); } static long amvenc_avc_ioctl(struct file *file, unsigned int cmd, ulong arg) { int r = 0; int amrisc_cmd = 0; unsigned* offset; unsigned* addr_info; unsigned buf_start; switch (cmd) { case AMVENC_AVC_IOC_GET_ADDR: if((ref_buf_canvas & 0xff) == (ENC_CANVAS_OFFSET)){ *((unsigned*)arg) = 1; }else{ *((unsigned*)arg) = 2; } break; case AMVENC_AVC_IOC_INPUT_UPDATE: offset = (unsigned*)arg ; WRITE_HREG(QDCT_MB_WR_PTR, (dct_buff_start_addr+ *offset)); break; case AMVENC_AVC_IOC_NEW_CMD: amrisc_cmd = *((unsigned*)arg) ; amvenc_avc_start_cmd(amrisc_cmd, NULL); break; case AMVENC_AVC_IOC_GET_STAGE: *((unsigned*)arg) = encoder_status; break; case AMVENC_AVC_IOC_GET_OUTPUT_SIZE: *((unsigned*)arg) = READ_HREG(VLC_TOTAL_BYTES); break; case AMVENC_AVC_IOC_SET_QUANT: quant = *((unsigned*)arg) ; break; case AMVENC_AVC_IOC_SET_ENCODER_WIDTH: if(*((unsigned*)arg)>gAmvencbuff.bufspec->max_width) *((unsigned*)arg) = gAmvencbuff.bufspec->max_width; else encoder_width = *((unsigned*)arg) ; break; case AMVENC_AVC_IOC_SET_ENCODER_HEIGHT: if(*((unsigned*)arg)>gAmvencbuff.bufspec->max_height) *((unsigned*)arg) = gAmvencbuff.bufspec->max_height; else encoder_height = *((unsigned*)arg) ; break; case AMVENC_AVC_IOC_CONFIG_INIT: avc_init(); break; case AMVENC_AVC_IOC_FLUSH_CACHE: addr_info = (unsigned*)arg ; switch(addr_info[0]){ case 0: buf_start = dct_buff_start_addr; break; case 1: buf_start = dct_buff_start_addr + gAmvencbuff.bufspec->dec0_y.buf_start; break; case 2: buf_start = dct_buff_start_addr + gAmvencbuff.bufspec->dec1_y.buf_start; break; case 3: buf_start = BitstreamStart ; break; default: buf_start = dct_buff_start_addr; break; } dma_flush(buf_start + addr_info[1] ,addr_info[2] - addr_info[1]); break; case AMVENC_AVC_IOC_FLUSH_DMA: addr_info = (unsigned*)arg ; switch(addr_info[0]){ case 0: buf_start = dct_buff_start_addr; break; case 1: buf_start = dct_buff_start_addr + gAmvencbuff.bufspec->dec0_y.buf_start; break; case 2: buf_start = dct_buff_start_addr + gAmvencbuff.bufspec->dec1_y.buf_start; break; case 3: 
buf_start = BitstreamStart ; break; default: buf_start = dct_buff_start_addr; break; } cache_flush(buf_start + addr_info[1] ,addr_info[2] - addr_info[1]); break; case AMVENC_AVC_IOC_GET_BUFFINFO: addr_info = (unsigned*)arg; addr_info[0] = gAmvencbuff.buf_size; addr_info[1] = gAmvencbuff.bufspec->dct.buf_start; addr_info[2] = gAmvencbuff.bufspec->dct.buf_size; addr_info[3] = gAmvencbuff.bufspec->dec0_y.buf_start; addr_info[4] = gAmvencbuff.bufspec->dec0_y.buf_size; addr_info[5] = gAmvencbuff.bufspec->dec0_uv.buf_start; addr_info[6] = gAmvencbuff.bufspec->dec0_uv.buf_size; addr_info[7] = gAmvencbuff.bufspec->dec1_y.buf_start; addr_info[8] = gAmvencbuff.bufspec->dec1_y.buf_size; addr_info[9] = gAmvencbuff.bufspec->dec1_uv.buf_start; addr_info[10] = gAmvencbuff.bufspec->dec1_uv.buf_size; addr_info[11] = gAmvencbuff.bufspec->bitstream.buf_start; addr_info[12] = gAmvencbuff.bufspec->bitstream.buf_size; break; case AMVENC_AVC_IOC_GET_DEVINFO: strncpy((char *)arg,AMVENC_DEV_VERSION,strlen(AMVENC_DEV_VERSION)); break; default: r= -1; break; } return r; } static int avc_mmap(struct file *filp, struct vm_area_struct *vma) { unsigned long off = vma->vm_pgoff << PAGE_SHIFT; unsigned vma_size = vma->vm_end - vma->vm_start; if (vma_size == 0) { debug_level(1,"vma_size is 0 \n"); return -EAGAIN; } off += gAmvencbuff.buf_start; debug_level(0,"vma_size is %d , off is %ld \n" , vma_size ,off); vma->vm_flags |= VM_RESERVED | VM_IO; //vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) { debug_level(1,"set_cached: failed remap_pfn_range\n"); return -EAGAIN; } return 0; } const static struct file_operations amvenc_avc_fops = { .owner = THIS_MODULE, .open = amvenc_avc_open, .mmap = avc_mmap, .release = amvenc_avc_release, .unlocked_ioctl = amvenc_avc_ioctl, }; int init_avc_device(void) { int r =0; r =register_chrdev(0,DEVICE_NAME,&amvenc_avc_fops); if(r<=0) { amlog_level(LOG_LEVEL_HIGH,"register amvenc_avc device error\r\n"); return r ; } avc_device_major= r ; amvenc_avc_class = class_create(THIS_MODULE, DEVICE_NAME); amvenc_avc_dev = device_create(amvenc_avc_class, NULL, MKDEV(avc_device_major, 0), NULL, DEVICE_NAME); return r; } int uninit_avc_device(void) { device_destroy(amvenc_avc_class, MKDEV(avc_device_major, 0)); class_destroy(amvenc_avc_class); unregister_chrdev(avc_device_major, DEVICE_NAME); return 0; } static int amvenc_avc_probe(struct platform_device *pdev) { struct resource *mem; amlog_level(LOG_LEVEL_INFO, "amvenc_avc probe start.\n"); if (!(mem = platform_get_resource(pdev, IORESOURCE_MEM, 0))) { amlog_level(LOG_LEVEL_ERROR, "amvenc_avc memory resource undefined.\n"); return -EFAULT; } gAmvencbuff.buf_start = mem->start; gAmvencbuff.buf_size = mem->end - mem->start + 1; if(gAmvencbuff.buf_size>=amvenc_buffspec[AMVENC_BUFFER_LEVEL_1080P].min_buffsize){ gAmvencbuff.cur_buf_lev = AMVENC_BUFFER_LEVEL_1080P; gAmvencbuff.bufspec = (BuffInfo_t*)&amvenc_buffspec[AMVENC_BUFFER_LEVEL_1080P]; }else if(gAmvencbuff.buf_size>=amvenc_buffspec[AMVENC_BUFFER_LEVEL_720P].min_buffsize){ gAmvencbuff.cur_buf_lev = AMVENC_BUFFER_LEVEL_720P; gAmvencbuff.bufspec= (BuffInfo_t*)&amvenc_buffspec[AMVENC_BUFFER_LEVEL_720P]; }else if(gAmvencbuff.buf_size>=amvenc_buffspec[AMVENC_BUFFER_LEVEL_480P].min_buffsize){ gAmvencbuff.cur_buf_lev = AMVENC_BUFFER_LEVEL_480P; gAmvencbuff.bufspec= (BuffInfo_t*)&amvenc_buffspec[AMVENC_BUFFER_LEVEL_480P]; }else{ gAmvencbuff.buf_start = 0; gAmvencbuff.buf_size = 0; 
amlog_level(LOG_LEVEL_ERROR, "amvenc_avc memory resource too small, size is %d.\n",gAmvencbuff.buf_size); return -EFAULT; } debug_level(1,"amvenc_avc memory config sucess, buff size is 0x%x, level is %s\n",gAmvencbuff.buf_size,(gAmvencbuff.cur_buf_lev == 0)?"480P":(gAmvencbuff.cur_buf_lev == 1)?"720P":"1080P"); init_avc_device(); amlog_level(LOG_LEVEL_INFO, "amvenc_avc probe end.\n"); return 0; } static int amvenc_avc_remove(struct platform_device *pdev) { uninit_avc_device(); amlog_level(LOG_LEVEL_INFO, "amvenc_avc remove.\n"); return 0; } /****************************************/ static struct platform_driver amvenc_avc_driver = { .probe = amvenc_avc_probe, .remove = amvenc_avc_remove, .driver = { .name = DRIVER_NAME, } }; static struct codec_profile_t amvenc_avc_profile = { .name = "avc", .profile = "" }; static int __init amvenc_avc_driver_init_module(void) { amlog_level(LOG_LEVEL_INFO, "amvenc_avc module init\n"); if (platform_driver_register(&amvenc_avc_driver)) { amlog_level(LOG_LEVEL_ERROR, "failed to register amvenc_avc driver\n"); return -ENODEV; } vcodec_profile_register(&amvenc_avc_profile); return 0; } static void __exit amvenc_avc_driver_remove_module(void) { amlog_level(LOG_LEVEL_INFO, "amvenc_avc module remove.\n"); platform_driver_unregister(&amvenc_avc_driver); } /****************************************/ module_param(stat, uint, 0664); MODULE_PARM_DESC(stat, "\n amvenc_avc stat \n"); module_init(amvenc_avc_driver_init_module); module_exit(amvenc_avc_driver_remove_module); MODULE_DESCRIPTION("AMLOGIC AVC Video Encoder Driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("simon.zheng <simon.zheng@amlogic.com>");
TinyHTPC/openlinux-amlogic_M6
drivers/amlogic/amports/encoder.c
C
gpl-2.0
34,043
29.126549
193
0.592838
false
/* workspace.c- Workspace management
 *
 * Window Maker window manager
 *
 * Copyright (c) 1997-2003 Alfredo K. Kojima
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "wconfig.h"

#include <X11/Xlib.h>
#include <X11/Xutil.h>

#ifdef USE_XSHAPE
#include <X11/extensions/shape.h>
#endif

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>

#include "WindowMaker.h"
#include "framewin.h"
#include "window.h"
#include "icon.h"
#include "misc.h"
#include "menu.h"
#include "application.h"
#include "dock.h"
#include "actions.h"
#include "workspace.h"
#include "appicon.h"
#include "wmspec.h"
#include "xinerama.h"
#include "event.h"
#include "wsmap.h"
#include "dialog.h"

#define MC_DESTROY_LAST 1
#define MC_LAST_USED 2
/* index of the first workspace menu entry */
#define MC_WORKSPACE1 3

#define WORKSPACE_NAME_DISPLAY_PADDING 32

static WMPropList *dWorkspaces = NULL;
static WMPropList *dClip, *dName;

static void make_keys(void)
{
	if (dWorkspaces != NULL)
		return;

	dWorkspaces = WMCreatePLString("Workspaces");
	dName = WMCreatePLString("Name");
	dClip = WMCreatePLString("Clip");
}

void wWorkspaceMake(WScreen * scr, int count)
{
	while (count > 0) {
		wWorkspaceNew(scr);
		count--;
	}
}

int wWorkspaceNew(WScreen *scr)
{
	WWorkspace *wspace, **list;
	int i;

	if (scr->workspace_count < MAX_WORKSPACES) {
		scr->workspace_count++;

		wspace = wmalloc(sizeof(WWorkspace));
		wspace->name = NULL;
		wspace->clip = NULL;

		if (!wspace->name) {
			static const char *new_name = NULL;
			static size_t name_length;

			if (new_name == NULL) {
				new_name = _("Workspace %i");
				name_length = strlen(new_name) + 8;
			}
			wspace->name = wmalloc(name_length);
			snprintf(wspace->name, name_length, new_name, scr->workspace_count);
		}

		if (!wPreferences.flags.noclip)
			wspace->clip = wDockCreate(scr, WM_CLIP, NULL);

		list = wmalloc(sizeof(WWorkspace *) * scr->workspace_count);

		for (i = 0; i < scr->workspace_count - 1; i++)
			list[i] = scr->workspaces[i];

		list[i] = wspace;
		if (scr->workspaces)
			wfree(scr->workspaces);
		scr->workspaces = list;

		wWorkspaceMenuUpdate(scr, scr->workspace_menu);
		wWorkspaceMenuUpdate(scr, scr->clip_ws_menu);
		wNETWMUpdateDesktop(scr);
		WMPostNotificationName(WMNWorkspaceCreated, scr, (void *)(uintptr_t) (scr->workspace_count - 1));
		XFlush(dpy);

		return scr->workspace_count - 1;
	}

	return -1;
}

Bool wWorkspaceDelete(WScreen * scr, int workspace)
{
	WWindow *tmp;
	WWorkspace **list;
	int i, j;

	if (workspace <= 0)
		return False;

	/* verify if workspace is in use by some window */
	tmp = scr->focused_window;
	while (tmp) {
		if (!IS_OMNIPRESENT(tmp) && tmp->frame->workspace == workspace) {
			char buf[256];

			snprintf(buf, sizeof(buf), _("Workspace \"%s\" in use; cannot delete"),
				 scr->workspaces[workspace]->name);
			wMessageDialog(scr, _("Error"), buf, _("OK"), NULL, NULL);
			return False;
		}
		tmp = tmp->prev;
	}

	if (!wPreferences.flags.noclip) {
		wDockDestroy(scr->workspaces[workspace]->clip);
		scr->workspaces[workspace]->clip = NULL;
	}

	list = wmalloc(sizeof(WWorkspace *) * (scr->workspace_count - 1));
	j = 0;
	for (i = 0; i < scr->workspace_count; i++) {
		if (i != workspace) {
			list[j++] = scr->workspaces[i];
		} else {
			if (scr->workspaces[i]->name)
				wfree(scr->workspaces[i]->name);
			if (scr->workspaces[i]->map)
				RReleaseImage(scr->workspaces[i]->map);
			wfree(scr->workspaces[i]);
		}
	}
	wfree(scr->workspaces);
	scr->workspaces = list;
	scr->workspace_count--;

	/* update menu */
	wWorkspaceMenuUpdate(scr, scr->workspace_menu);
	/* clip workspace menu */
	wWorkspaceMenuUpdate(scr, scr->clip_ws_menu);

	/* update also window menu */
	if (scr->workspace_submenu) {
		WMenu *menu = scr->workspace_submenu;

		i = menu->entry_no;
		while (i > scr->workspace_count)
			wMenuRemoveItem(menu, --i);
		wMenuRealize(menu);
	}
	/* and clip menu */
	if (scr->clip_submenu) {
		WMenu *menu = scr->clip_submenu;

		i = menu->entry_no;
		while (i > scr->workspace_count)
			wMenuRemoveItem(menu, --i);
		wMenuRealize(menu);
	}
	wNETWMUpdateDesktop(scr);
	WMPostNotificationName(WMNWorkspaceDestroyed, scr, (void *)(uintptr_t) (scr->workspace_count - 1));

	if (scr->current_workspace >= scr->workspace_count)
		wWorkspaceChange(scr, scr->workspace_count - 1);
	if (scr->last_workspace >= scr->workspace_count)
		scr->last_workspace = 0;

	return True;
}

typedef struct WorkspaceNameData {
	int count;
	RImage *back;
	RImage *text;
	time_t timeout;
} WorkspaceNameData;

static void hideWorkspaceName(void *data)
{
	WScreen *scr = (WScreen *) data;

	if (!scr->workspace_name_data || scr->workspace_name_data->count == 0
	    || time(NULL) > scr->workspace_name_data->timeout) {
		XUnmapWindow(dpy, scr->workspace_name);

		if (scr->workspace_name_data) {
			RReleaseImage(scr->workspace_name_data->back);
			RReleaseImage(scr->workspace_name_data->text);
			wfree(scr->workspace_name_data);

			scr->workspace_name_data = NULL;
		}
		scr->workspace_name_timer = NULL;
	} else {
		RImage *img = RCloneImage(scr->workspace_name_data->back);
		Pixmap pix;

		scr->workspace_name_timer = WMAddTimerHandler(WORKSPACE_NAME_FADE_DELAY, hideWorkspaceName, scr);

		RCombineImagesWithOpaqueness(img, scr->workspace_name_data->text,
					     scr->workspace_name_data->count * 255 / 10);

		RConvertImage(scr->rcontext, img, &pix);

		RReleaseImage(img);

		XSetWindowBackgroundPixmap(dpy, scr->workspace_name, pix);
		XClearWindow(dpy, scr->workspace_name);
		XFreePixmap(dpy, pix);
		XFlush(dpy);

		scr->workspace_name_data->count--;
	}
}

static void showWorkspaceName(WScreen * scr, int workspace)
{
	WorkspaceNameData *data;
	RXImage *ximg;
	Pixmap text, mask;
	int w, h;
	int px, py;
	char *name = scr->workspaces[workspace]->name;
	int len = strlen(name);
	int x, y;
#ifdef USE_XINERAMA
	int head;
	WMRect rect;
	int xx, yy;
#endif

	if (wPreferences.workspace_name_display_position == WD_NONE || scr->workspace_count < 2)
		return;

	if (scr->workspace_name_timer) {
		WMDeleteTimerHandler(scr->workspace_name_timer);
		XUnmapWindow(dpy, scr->workspace_name);
		XFlush(dpy);
	}
	scr->workspace_name_timer = WMAddTimerHandler(WORKSPACE_NAME_DELAY, hideWorkspaceName, scr);

	if (scr->workspace_name_data) {
		RReleaseImage(scr->workspace_name_data->back);
		RReleaseImage(scr->workspace_name_data->text);
		wfree(scr->workspace_name_data);
	}

	data = wmalloc(sizeof(WorkspaceNameData));
	data->back = NULL;

	w = WMWidthOfString(scr->workspace_name_font, name, len);
	h = WMFontHeight(scr->workspace_name_font);

#ifdef USE_XINERAMA
	head = wGetHeadForPointerLocation(scr);
	rect = wGetRectForHead(scr, head);
	if (scr->xine_info.count) {
		xx = rect.pos.x + (scr->xine_info.screens[head].size.width - (w + 4)) / 2;
		yy = rect.pos.y + (scr->xine_info.screens[head].size.height - (h + 4)) / 2;
	} else {
		xx = (scr->scr_width - (w + 4)) / 2;
		yy = (scr->scr_height - (h + 4)) / 2;
	}
#endif

	switch (wPreferences.workspace_name_display_position) {
	case WD_TOP:
#ifdef USE_XINERAMA
		px = xx;
#else
		px = (scr->scr_width - (w + 4)) / 2;
#endif
		py = WORKSPACE_NAME_DISPLAY_PADDING;
		break;
	case WD_BOTTOM:
#ifdef USE_XINERAMA
		px = xx;
#else
		px = (scr->scr_width - (w + 4)) / 2;
#endif
		py = scr->scr_height - (h + 4 + WORKSPACE_NAME_DISPLAY_PADDING);
		break;
	case WD_TOPLEFT:
		px = WORKSPACE_NAME_DISPLAY_PADDING;
		py = WORKSPACE_NAME_DISPLAY_PADDING;
		break;
	case WD_TOPRIGHT:
		px = scr->scr_width - (w + 4 + WORKSPACE_NAME_DISPLAY_PADDING);
		py = WORKSPACE_NAME_DISPLAY_PADDING;
		break;
	case WD_BOTTOMLEFT:
		px = WORKSPACE_NAME_DISPLAY_PADDING;
		py = scr->scr_height - (h + 4 + WORKSPACE_NAME_DISPLAY_PADDING);
		break;
	case WD_BOTTOMRIGHT:
		px = scr->scr_width - (w + 4 + WORKSPACE_NAME_DISPLAY_PADDING);
		py = scr->scr_height - (h + 4 + WORKSPACE_NAME_DISPLAY_PADDING);
		break;
	case WD_CENTER:
	default:
#ifdef USE_XINERAMA
		px = xx;
		py = yy;
#else
		px = (scr->scr_width - (w + 4)) / 2;
		py = (scr->scr_height - (h + 4)) / 2;
#endif
		break;
	}
	XResizeWindow(dpy, scr->workspace_name, w + 4, h + 4);
	XMoveWindow(dpy, scr->workspace_name, px, py);

	text = XCreatePixmap(dpy, scr->w_win, w + 4, h + 4, scr->w_depth);
	mask = XCreatePixmap(dpy, scr->w_win, w + 4, h + 4, 1);

	/*XSetForeground(dpy, scr->mono_gc, 0);
	   XFillRectangle(dpy, mask, scr->mono_gc, 0, 0, w+4, h+4); */

	XFillRectangle(dpy, text, WMColorGC(scr->black), 0, 0, w + 4, h + 4);

	for (x = 0; x <= 4; x++)
		for (y = 0; y <= 4; y++)
			WMDrawString(scr->wmscreen, text, scr->white, scr->workspace_name_font, x, y, name, len);

	XSetForeground(dpy, scr->mono_gc, 1);
	XSetBackground(dpy, scr->mono_gc, 0);

	XCopyPlane(dpy, text, mask, scr->mono_gc, 0, 0, w + 4, h + 4, 0, 0, 1 << (scr->w_depth - 1));

	/*XSetForeground(dpy, scr->mono_gc, 1); */
	XSetBackground(dpy, scr->mono_gc, 1);

	XFillRectangle(dpy, text, WMColorGC(scr->black), 0, 0, w + 4, h + 4);

	WMDrawString(scr->wmscreen, text, scr->white, scr->workspace_name_font, 2, 2, name, len);

#ifdef USE_XSHAPE
	if (w_global.xext.shape.supported)
		XShapeCombineMask(dpy, scr->workspace_name, ShapeBounding, 0, 0, mask, ShapeSet);
#endif
	XSetWindowBackgroundPixmap(dpy, scr->workspace_name, text);
	XClearWindow(dpy, scr->workspace_name);

	data->text = RCreateImageFromDrawable(scr->rcontext, text, None);

	XFreePixmap(dpy, text);
	XFreePixmap(dpy, mask);

	if (!data->text) {
		XMapRaised(dpy, scr->workspace_name);
		XFlush(dpy);

		goto erro;
	}

	ximg = RGetXImage(scr->rcontext, scr->root_win, px, py, data->text->width, data->text->height);
	if (!ximg)
		goto erro;

	XMapRaised(dpy, scr->workspace_name);
	XFlush(dpy);

	data->back = RCreateImageFromXImage(scr->rcontext, ximg->image, NULL);
	RDestroyXImage(scr->rcontext, ximg);

	if (!data->back) {
		goto erro;
	}

	data->count = 10;

	/* set a timeout for the effect */
	data->timeout = time(NULL) + 2 + (WORKSPACE_NAME_DELAY + WORKSPACE_NAME_FADE_DELAY * data->count) / 1000;

	scr->workspace_name_data = data;

	return;

 erro:
	if (scr->workspace_name_timer)
		WMDeleteTimerHandler(scr->workspace_name_timer);

	if (data->text)
		RReleaseImage(data->text);
	if (data->back)
		RReleaseImage(data->back);
	wfree(data);

	scr->workspace_name_data = NULL;

	scr->workspace_name_timer = WMAddTimerHandler(WORKSPACE_NAME_DELAY + 10 * WORKSPACE_NAME_FADE_DELAY,
						      hideWorkspaceName, scr);
}

void wWorkspaceChange(WScreen *scr, int workspace)
{
	if (scr->flags.startup || scr->flags.startup2 || scr->flags.ignore_focus_events)
		return;

	if (workspace != scr->current_workspace)
		wWorkspaceForceChange(scr, workspace);
}

void wWorkspaceRelativeChange(WScreen * scr, int amount)
{
	int w;

	/* While the deiconify animation is going on the window is
	 * still "flying" to its final position and we don't want to
	 * change workspace before the animation finishes, otherwise
	 * the window will land in the new workspace */
	if (w_global.ignore_workspace_change)
		return;

	w = scr->current_workspace + amount;

	if (amount < 0) {
		if (w >= 0) {
			wWorkspaceChange(scr, w);
		} else if (wPreferences.ws_cycle) {
			wWorkspaceChange(scr, scr->workspace_count + w);
		}
	} else if (amount > 0) {
		if (w < scr->workspace_count) {
			wWorkspaceChange(scr, w);
		} else if (wPreferences.ws_advance) {
			wWorkspaceChange(scr, WMIN(w, MAX_WORKSPACES - 1));
		} else if (wPreferences.ws_cycle) {
			wWorkspaceChange(scr, w % scr->workspace_count);
		}
	}
}

void wWorkspaceForceChange(WScreen * scr, int workspace)
{
	WWindow *tmp, *foc = NULL, *foc2 = NULL;

	if (workspace >= MAX_WORKSPACES || workspace < 0)
		return;

	if (wPreferences.enable_workspace_pager && !w_global.process_workspacemap_event)
		wWorkspaceMapUpdate(scr);

	SendHelperMessage(scr, 'C', workspace + 1, NULL);

	if (workspace > scr->workspace_count - 1)
		wWorkspaceMake(scr, workspace - scr->workspace_count + 1);

	wClipUpdateForWorkspaceChange(scr, workspace);

	scr->last_workspace = scr->current_workspace;
	scr->current_workspace = workspace;

	wWorkspaceMenuUpdate(scr, scr->workspace_menu);
	wWorkspaceMenuUpdate(scr, scr->clip_ws_menu);

	tmp = scr->focused_window;
	if (tmp != NULL) {
		WWindow **toUnmap;
		int toUnmapSize, toUnmapCount;

		if ((IS_OMNIPRESENT(tmp) && (tmp->flags.mapped || tmp->flags.shaded) &&
		     !WFLAGP(tmp, no_focusable)) || tmp->flags.changing_workspace) {
			foc = tmp;
		}

		toUnmapSize = 16;
		toUnmapCount = 0;
		toUnmap = wmalloc(toUnmapSize * sizeof(WWindow *));

		/* foc2 = tmp; will fix annoyance with gnome panel
		 * but will create annoyance for every other application
		 */
		while (tmp) {
			if (tmp->frame->workspace != workspace && !tmp->flags.selected) {
				/* unmap windows not on this workspace */
				if ((tmp->flags.mapped || tmp->flags.shaded) &&
				    !IS_OMNIPRESENT(tmp) && !tmp->flags.changing_workspace) {
					if (toUnmapCount == toUnmapSize) {
						toUnmapSize *= 2;
						toUnmap = wrealloc(toUnmap, toUnmapSize * sizeof(WWindow *));
					}
					toUnmap[toUnmapCount++] = tmp;
				}
				/* also unmap miniwindows not on this workspace */
				if (!wPreferences.sticky_icons && tmp->flags.miniaturized &&
				    tmp->icon && !IS_OMNIPRESENT(tmp)) {
					XUnmapWindow(dpy, tmp->icon->core->window);
					tmp->icon->mapped = 0;
				}
				/* update current workspace of omnipresent windows */
				if (IS_OMNIPRESENT(tmp)) {
					WApplication *wapp = wApplicationOf(tmp->main_window);

					tmp->frame->workspace = workspace;

					if (wapp) {
						wapp->last_workspace = workspace;
					}
					if (!foc2 && (tmp->flags.mapped || tmp->flags.shaded)) {
						foc2 = tmp;
					}
				}
			} else {
				/* change selected windows' workspace */
				if (tmp->flags.selected) {
					wWindowChangeWorkspace(tmp, workspace);
					if (!tmp->flags.miniaturized && !foc) {
						foc = tmp;
					}
				} else {
					if (!tmp->flags.hidden) {
						if (!(tmp->flags.mapped || tmp->flags.miniaturized)) {
							/* remap windows that are on this workspace */
							wWindowMap(tmp);
							if (!foc && !WFLAGP(tmp, no_focusable)) {
								foc = tmp;
							}
						}
						/* Also map miniwindow if not omnipresent */
						if (!wPreferences.sticky_icons &&
						    tmp->flags.miniaturized && !IS_OMNIPRESENT(tmp) && tmp->icon) {
							tmp->icon->mapped = 1;
							XMapWindow(dpy, tmp->icon->core->window);
						}
					}
				}
			}
			tmp = tmp->prev;
		}

		while (toUnmapCount > 0) {
			wWindowUnmap(toUnmap[--toUnmapCount]);
		}
		wfree(toUnmap);

		/* Gobble up events unleashed by our mapping & unmapping.
		 * These may trigger various grab-initiated focus &
		 * crossing events. However, we don't care about them,
		 * and ignore their focus implications altogether to avoid
		 * flicker.
		 */
		scr->flags.ignore_focus_events = 1;
		ProcessPendingEvents();
		scr->flags.ignore_focus_events = 0;

		if (!foc)
			foc = foc2;

		/*
		 * Check that the window we want to focus still exists, because the application owning it
		 * could decide to unmap/destroy it in response to unmap any of its other window following
		 * the workspace change, this happening during our 'ProcessPendingEvents' loop.
		 */
		if (foc != NULL) {
			WWindow *parse;
			Bool found;

			found = False;
			for (parse = scr->focused_window; parse != NULL; parse = parse->prev) {
				if (parse == foc) {
					found = True;
					break;
				}
			}
			if (!found)
				foc = NULL;
		}

		if (scr->focused_window->flags.mapped && !foc) {
			foc = scr->focused_window;
		}
		if (wPreferences.focus_mode == WKF_CLICK) {
			wSetFocusTo(scr, foc);
		} else {
			unsigned int mask;
			int foo;
			Window bar, win;
			WWindow *tmp;

			tmp = NULL;
			if (XQueryPointer(dpy, scr->root_win, &bar, &win, &foo, &foo, &foo, &foo, &mask)) {
				tmp = wWindowFor(win);
			}

			/* If there's a window under the pointer, focus it.
			 * (we ate all other focus events above, so it's
			 * certainly not focused). Otherwise focus last
			 * focused, or the root (depending on sloppiness)
			 */
			if (!tmp && wPreferences.focus_mode == WKF_SLOPPY) {
				wSetFocusTo(scr, foc);
			} else {
				wSetFocusTo(scr, tmp);
			}
		}
	}

	/* We need to always arrange icons when changing workspace, even if
	 * no autoarrange icons, because else the icons in different workspaces
	 * can be superposed.
	 * This can be avoided if appicons are also workspace specific.
	 */
	if (!wPreferences.sticky_icons)
		wArrangeIcons(scr, False);

	if (scr->dock)
		wAppIconPaint(scr->dock->icon_array[0]);

	if (!wPreferences.flags.noclip && (scr->workspaces[workspace]->clip->auto_collapse ||
					   scr->workspaces[workspace]->clip->auto_raise_lower)) {
		/* to handle enter notify. This will also */
		XUnmapWindow(dpy, scr->clip_icon->icon->core->window);
		XMapWindow(dpy, scr->clip_icon->icon->core->window);
	} else if (scr->clip_icon != NULL) {
		wClipIconPaint(scr->clip_icon);
	}

	wScreenUpdateUsableArea(scr);
	wNETWMUpdateDesktop(scr);
	showWorkspaceName(scr, workspace);

	WMPostNotificationName(WMNWorkspaceChanged, scr, (void *)(uintptr_t) workspace);

	/* XSync(dpy, False); */
}

static void switchWSCommand(WMenu * menu, WMenuEntry * entry)
{
	wWorkspaceChange(menu->frame->screen_ptr, (long)entry->clientdata);
}

static void lastWSCommand(WMenu *menu, WMenuEntry *entry)
{
	/* Parameter not used, but tell the compiler that it is ok */
	(void) entry;

	wWorkspaceChange(menu->frame->screen_ptr, menu->frame->screen_ptr->last_workspace);
}

static void deleteWSCommand(WMenu *menu, WMenuEntry *entry)
{
	/* Parameter not used, but tell the compiler that it is ok */
	(void) entry;

	wWorkspaceDelete(menu->frame->screen_ptr, menu->frame->screen_ptr->workspace_count - 1);
}

static void newWSCommand(WMenu *menu, WMenuEntry *foo)
{
	int ws;

	/* Parameter not used, but tell the compiler that it is ok */
	(void) foo;

	ws = wWorkspaceNew(menu->frame->screen_ptr);

	/* autochange workspace */
	if (ws >= 0)
		wWorkspaceChange(menu->frame->screen_ptr, ws);
}

void wWorkspaceRename(WScreen *scr, int workspace, const char *name)
{
	char buf[MAX_WORKSPACENAME_WIDTH + 1];
	char *tmp;

	if (workspace >= scr->workspace_count)
		return;

	/* trim white spaces */
	tmp = wtrimspace(name);

	if (strlen(tmp) == 0) {
		snprintf(buf, sizeof(buf), _("Workspace %i"), workspace + 1);
	} else {
		strncpy(buf, tmp, MAX_WORKSPACENAME_WIDTH);
	}
	buf[MAX_WORKSPACENAME_WIDTH] = 0;
	wfree(tmp);

	/* update workspace */
	wfree(scr->workspaces[workspace]->name);
	scr->workspaces[workspace]->name = wstrdup(buf);

	if (scr->clip_ws_menu) {
		if (strcmp(scr->clip_ws_menu->entries[workspace + MC_WORKSPACE1]->text, buf) != 0) {
			wfree(scr->clip_ws_menu->entries[workspace + MC_WORKSPACE1]->text);
			scr->clip_ws_menu->entries[workspace + MC_WORKSPACE1]->text = wstrdup(buf);
			wMenuRealize(scr->clip_ws_menu);
		}
	}
	if (scr->workspace_menu) {
		if (strcmp(scr->workspace_menu->entries[workspace + MC_WORKSPACE1]->text, buf) != 0) {
			wfree(scr->workspace_menu->entries[workspace + MC_WORKSPACE1]->text);
			scr->workspace_menu->entries[workspace + MC_WORKSPACE1]->text = wstrdup(buf);
			wMenuRealize(scr->workspace_menu);
		}
	}

	if (scr->clip_icon)
		wClipIconPaint(scr->clip_icon);

	WMPostNotificationName(WMNWorkspaceNameChanged, scr, (void *)(uintptr_t) workspace);
}

/* callback for when menu entry is edited */
static void onMenuEntryEdited(WMenu * menu, WMenuEntry * entry)
{
	char *tmp;

	tmp = entry->text;
	wWorkspaceRename(menu->frame->screen_ptr, (long)entry->clientdata, tmp);
}

WMenu *wWorkspaceMenuMake(WScreen * scr, Bool titled)
{
	WMenu *wsmenu;
	WMenuEntry *entry;

	wsmenu = wMenuCreate(scr, titled ? _("Workspaces") : NULL, False);
	if (!wsmenu) {
		wwarning(_("could not create Workspace menu"));
		return NULL;
	}

	/* callback to be called when an entry is edited */
	wsmenu->on_edit = onMenuEntryEdited;

	wMenuAddCallback(wsmenu, _("New"), newWSCommand, NULL);
	wMenuAddCallback(wsmenu, _("Destroy Last"), deleteWSCommand, NULL);

	entry = wMenuAddCallback(wsmenu, _("Last Used"), lastWSCommand, NULL);
	entry->rtext = GetShortcutKey(wKeyBindings[WKBD_LASTWORKSPACE]);

	return wsmenu;
}

void wWorkspaceMenuUpdate(WScreen * scr, WMenu * menu)
{
	int i;
	long ws;
	char title[MAX_WORKSPACENAME_WIDTH + 1];
	WMenuEntry *entry;
	int tmp;

	if (!menu)
		return;

	if (menu->entry_no < scr->workspace_count + MC_WORKSPACE1) {
		/* new workspace(s) added */
		i = scr->workspace_count - (menu->entry_no - MC_WORKSPACE1);
		ws = menu->entry_no - MC_WORKSPACE1;
		while (i > 0) {
			wstrlcpy(title, scr->workspaces[ws]->name, MAX_WORKSPACENAME_WIDTH);

			entry = wMenuAddCallback(menu, title, switchWSCommand, (void *)ws);
			entry->flags.indicator = 1;
			entry->flags.editable = 1;

			i--;
			ws++;
		}
	} else if (menu->entry_no > scr->workspace_count + MC_WORKSPACE1) {
		/* removed workspace(s) */
		for (i = menu->entry_no - 1; i >= scr->workspace_count + MC_WORKSPACE1; i--)
			wMenuRemoveItem(menu, i);
	}

	for (i = 0; i < scr->workspace_count; i++) {
		/* workspace shortcut labels */
		if (i / 10 == scr->current_workspace / 10)
			menu->entries[i + MC_WORKSPACE1]->rtext =
			    GetShortcutKey(wKeyBindings[WKBD_WORKSPACE1 + (i % 10)]);
		else
			menu->entries[i + MC_WORKSPACE1]->rtext = NULL;

		menu->entries[i + MC_WORKSPACE1]->flags.indicator_on = 0;
	}
	menu->entries[scr->current_workspace + MC_WORKSPACE1]->flags.indicator_on = 1;
	wMenuRealize(menu);

	/* don't let user destroy current workspace */
	if (scr->current_workspace == scr->workspace_count - 1)
		wMenuSetEnabled(menu, MC_DESTROY_LAST, False);
	else
		wMenuSetEnabled(menu, MC_DESTROY_LAST, True);

	/* back to last workspace */
	if (scr->workspace_count && scr->last_workspace != scr->current_workspace)
		wMenuSetEnabled(menu, MC_LAST_USED, True);
	else
		wMenuSetEnabled(menu, MC_LAST_USED, False);

	tmp = menu->frame->top_width + 5;
	/* if menu got unreachable, bring it to a visible place */
	if (menu->frame_x < tmp - (int)menu->frame->core->width)
		wMenuMove(menu, tmp - (int)menu->frame->core->width, menu->frame_y, False);

	wMenuPaint(menu);
}

void wWorkspaceSaveState(WScreen * scr, WMPropList * old_state)
{
	WMPropList *parr, *pstr, *wks_state, *old_wks_state, *foo, *bar;
	int i;

	make_keys();

	old_wks_state = WMGetFromPLDictionary(old_state, dWorkspaces);
	parr = WMCreatePLArray(NULL);
	for (i = 0; i < scr->workspace_count; i++) {
		pstr = WMCreatePLString(scr->workspaces[i]->name);
		wks_state = WMCreatePLDictionary(dName, pstr, NULL);
		WMReleasePropList(pstr);
		if (!wPreferences.flags.noclip) {
			pstr = wClipSaveWorkspaceState(scr, i);
			WMPutInPLDictionary(wks_state, dClip, pstr);
			WMReleasePropList(pstr);
		} else if (old_wks_state != NULL) {
			foo = WMGetFromPLArray(old_wks_state, i);
			if (foo != NULL) {
				bar = WMGetFromPLDictionary(foo, dClip);
				if (bar != NULL)
					WMPutInPLDictionary(wks_state, dClip, bar);
			}
		}
		WMAddToPLArray(parr, wks_state);
		WMReleasePropList(wks_state);
	}
	WMPutInPLDictionary(scr->session_state, dWorkspaces, parr);
	WMReleasePropList(parr);
}

void wWorkspaceRestoreState(WScreen *scr)
{
	WMPropList *parr, *pstr, *wks_state, *clip_state;
	int i, j;

	make_keys();

	if (scr->session_state == NULL)
		return;

	parr = WMGetFromPLDictionary(scr->session_state, dWorkspaces);

	if (!parr)
		return;

	for (i = 0; i < WMIN(WMGetPropListItemCount(parr), MAX_WORKSPACES); i++) {
		wks_state = WMGetFromPLArray(parr, i);
		if (WMIsPLDictionary(wks_state))
			pstr = WMGetFromPLDictionary(wks_state, dName);
		else
			pstr = wks_state;

		if (i >= scr->workspace_count)
			wWorkspaceNew(scr);

		if (scr->workspace_menu) {
			wfree(scr->workspace_menu->entries[i + MC_WORKSPACE1]->text);
			scr->workspace_menu->entries[i + MC_WORKSPACE1]->text = wstrdup(WMGetFromPLString(pstr));
			scr->workspace_menu->flags.realized = 0;
		}

		wfree(scr->workspaces[i]->name);
		scr->workspaces[i]->name = wstrdup(WMGetFromPLString(pstr));
		if (!wPreferences.flags.noclip) {
			int added_omnipresent_icons = 0;

			clip_state = WMGetFromPLDictionary(wks_state, dClip);
			if (scr->workspaces[i]->clip)
				wDockDestroy(scr->workspaces[i]->clip);

			scr->workspaces[i]->clip = wDockRestoreState(scr, clip_state, WM_CLIP);
			if (i > 0)
				wDockHideIcons(scr->workspaces[i]->clip);

			/* We set the global icons here, because scr->workspaces[i]->clip
			 * was not valid in wDockRestoreState().
			 * There we only set icon->omnipresent to know which icons we
			 * need to set here.
			 */
			for (j = 0; j < scr->workspaces[i]->clip->max_icons; j++) {
				WAppIcon *aicon = scr->workspaces[i]->clip->icon_array[j];
				int k;

				if (!aicon || !aicon->omnipresent)
					continue;
				aicon->omnipresent = 0;
				if (wClipMakeIconOmnipresent(aicon, True) != WO_SUCCESS)
					continue;
				if (i == 0)
					continue;

				/* Move this appicon from workspace i to workspace 0 */
				scr->workspaces[i]->clip->icon_array[j] = NULL;
				scr->workspaces[i]->clip->icon_count--;

				added_omnipresent_icons++;
				/* If there are too many omnipresent appicons, we are in trouble */
				assert(scr->workspaces[0]->clip->icon_count + added_omnipresent_icons
				       <= scr->workspaces[0]->clip->max_icons);

				/* Find first free spot on workspace 0 */
				for (k = 0; k < scr->workspaces[0]->clip->max_icons; k++)
					if (scr->workspaces[0]->clip->icon_array[k] == NULL)
						break;

				scr->workspaces[0]->clip->icon_array[k] = aicon;
				aicon->dock = scr->workspaces[0]->clip;
			}
			scr->workspaces[0]->clip->icon_count += added_omnipresent_icons;
		}

		WMPostNotificationName(WMNWorkspaceNameChanged, scr, (void *)(uintptr_t) i);
	}
}

/* Returns the workspace number for a given workspace name */
int wGetWorkspaceNumber(WScreen *scr, const char *value)
{
	int w, i;

	if (sscanf(value, "%i", &w) != 1) {
		w = -1;
		for (i = 0; i < scr->workspace_count; i++) {
			if (strcmp(scr->workspaces[i]->name, value) == 0) {
				w = i;
				break;
			}
		}
	} else {
		w--;
	}

	return w;
}
crmafra/wmaker
src/workspace.c
C
gpl-2.0
26,746
26.291837
106
0.664062
false
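A note on wWorkspaceForceChange() above: it defers every unmap into the toUnmap scratch array (initial capacity 16, doubled on demand) and flushes the whole batch after the scan, so the X server sees one burst of unmaps instead of interleaved map/unmap traffic. A standalone sketch of that grow-by-doubling collection pattern, with hypothetical names and allocation checks elided (Window Maker's wmalloc()/wrealloc() abort on failure):

/* Minimal sketch of the doubling scratch list used by wWorkspaceForceChange().
 * Item, collect() and want() are hypothetical stand-ins; allocation failure
 * handling is omitted for brevity. */
#include <stdlib.h>

typedef struct { int id; } Item;

static Item **collect(Item **src, int nsrc, int (*want)(Item *), int *count_out)
{
	int size = 16, count = 0;		/* same initial size as toUnmap */
	Item **list = malloc(size * sizeof(Item *));

	for (int i = 0; i < nsrc; i++) {
		if (!want(src[i]))
			continue;
		if (count == size) {		/* full: double the capacity */
			size *= 2;
			list = realloc(list, size * sizeof(Item *));
		}
		list[count++] = src[i];
	}
	*count_out = count;
	return list;				/* caller applies the batch, then frees */
}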
<?php defined('WPSS_PATH') or die();?>
<?php $quiz = new WPSS_Quiz((int) $_GET['id']); ?>
<?php $util = new WPSS_Util(); ?>
<?php $page = empty($_GET['wpss_results_page']) ? 1 : (int) $_GET['wpss_results_page']; ?>
<?php $offset = ($page - 1) * WPSS_Result::PAGED; ?>
<?php $results = WPSS_Result::where( array( 'quiz_id' => $quiz->id), $offset ); ?>

<!-- Admin question index -->
<div class="wrap wpss">

	<img class="left" src="<?php echo WPSS_URL.'assets/images/wpss_admin.png'?>" />
	<h2 class="left"><?php echo $quiz->title;?> Results</h2>
	<div class="clear"></div>
	<hr />

	<p class="wpss-breadcrumb">
		<a href="<?php echo $util->admin_url('','','');?>">Quizzes</a> &raquo;
		<a href="<?php echo $util->admin_url('quiz', 'edit', $quiz->id);?>"><?php echo $quiz->title;?></a> &raquo;
		<a class="current">Results</a>
	</p>

	<table class="widefat">
		<thead>
			<tr>
				<th>id</th>
				<th>View</th>
				<th>Fields</th>
				<th>Score</th>
				<th>Route</th>
				<th>IP Address</th>
				<th>Submitted At</th>
				<th>Delete</th>
			</tr>
		</thead>
		<tfoot>
			<tr>
				<th>id</th>
				<th>View</th>
				<th>Fields</th>
				<th>Score</th>
				<th>Route</th>
				<th>IP Address</th>
				<th>Submitted At</th>
				<th>Delete</th>
			</tr>
		</tfoot>
		<tbody>
			<?php foreach( $results as $n => $result ): ?>
			<tr>
				<td><?php echo $result->id;?></td>
				<td>
					<strong>
						<a class="row-title" href="<?php echo $util->admin_url('result', 'show', $result->id);?>">Show</a>
					</strong>
				</td>
				<td><?php echo WPSS_Result::custom_field_values_preview( $result->data['field_results'] ); ?></td>
				<td><?php echo $result->score;?></td>
				<td>
					<?php if( !empty($result->data['route_results']) && !empty($result->data['route_results']['name']) ): ?>
						<?php echo $result->data['route_results']['name']; ?>
					<?php endif;?>
				</td>
				<td><?php echo $result->ip_address;?></td>
				<td><?php echo $result->submitted_at;?></td>
				<td>
					<form method="post" action="<?php echo WPSS_DELETE_RESULT.$result->id;?>">
						<?php wp_nonce_field("wpss_crud", "wpss_crud"); ?>
						<input type="hidden" name="wpss[result_id]" value="<?php echo $result->id;?>" />
						<input class="button-secondary" type="submit" name="wpss[delete]" value="Delete" onclick="return confirm('Are you sure you want to delete this result?')">
					</form>
				</td>
			</tr>
			<?php endforeach;?>
		</tbody>
	</table>

	<!-- Pagination -->
	<div class="tablenav bottom">
		<div class="alignleft actions bulkactions">
			<a class="button-primary" href="#" onclick="alert('Upgrade to the extended version to use this feature.')">Export all results for this quiz</a>&nbsp;
			<a class="button" href="<?php echo $util->admin_url('','');?>">&lsaquo; back</a>&nbsp;
		</div>
		<div class="alignleft actions"></div>
		<div class="tablenav-pages">
			<span class="displaying-num"><?php echo WPSS_Result::count( $quiz->id );?> items</span>
			<span class="pagination-links">
				<a class="first-page <?php echo $page == 1 ? 'disabled' : '';?>" href="<?php echo $util->admin_url('quiz', 'results_index', $quiz->id);?>">&laquo;</a>
				<a class="prev-page <?php echo $page == 1 ? 'disabled' : '';?>" href="<?php echo $util->admin_url('quiz', 'results_index', $quiz->id);?>&wpss_results_page=<?php echo $page - 1;?>">&lsaquo;</a>
				<span class="paging-input"><?php echo $page;?> of <span class="total-pages"><?php echo WPSS_Result::pages($quiz->id);?></span></span>
				<a class="next-page <?php echo $page == WPSS_Result::pages($quiz->id) ? 'disabled' : '';?>" href="<?php echo $util->admin_url('quiz', 'results_index', $quiz->id);?>&wpss_results_page=<?php echo $page + 1;?>">&rsaquo;</a>
				<a class="last-page <?php echo $page == WPSS_Result::pages($quiz->id) ? 'disabled' : '';?>" href="<?php echo $util->admin_url('quiz', 'results_index', $quiz->id);?>&wpss_results_page=<?php echo WPSS_Result::pages($quiz->id);?>">&raquo;</a>
			</span>
		</div>
		<br class="clear">
	</div>
</div>
JohnBueno/cafequill
wp-content/plugins/wordpress-simple-survey/admin/results/index.php
PHP
gpl-2.0
4,514
38.946903
247
0.508418
false
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <title>liblinphone: Initializing liblinphone</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td style="padding-left: 0.5em;"> <div id="projectname">liblinphone &#160;<span id="projectnumber">3.5.0</span> </div> </td> </tr> </tbody> </table> </div> <!-- Generated by Doxygen 1.7.5.1 --> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="pages.html"><span>Related&#160;Pages</span></a></li> <li><a href="modules.html"><span>Modules</span></a></li> <li><a href="annotated.html"><span>Data&#160;Structures</span></a></li> <li><a href="dirs.html"><span>Directories</span></a></li> </ul> </div> </div> <div class="header"> <div class="summary"> <a href="#nested-classes">Data Structures</a> &#124; <a href="#typedef-members">Typedefs</a> &#124; <a href="#enum-members">Enumerations</a> &#124; <a href="#func-members">Functions</a> </div> <div class="headertitle"> <div class="title">Initializing liblinphone</div> </div> </div> <div class="contents"> <table class="memberdecls"> <tr><td colspan="2"><h2><a name="nested-classes"></a> Data Structures</h2></td></tr> <tr><td class="memItemLeft" align="right" valign="top">struct &#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="struct__LinphoneVTable.html">_LinphoneVTable</a></td></tr> <tr><td colspan="2"><h2><a name="typedef-members"></a> Typedefs</h2></td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef struct _LinphoneCore&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a></td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef enum <a class="el" href="group__initializing.html#ga3bca1685790c41a1729e88af318ffb1c">_LinphoneGlobalState</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gae852b5b31d6d5e673de9d2c9b63aca7e">LinphoneGlobalState</a></td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga43f7be5965383fae304e3053ffdc7e7f">LinphoneGlobalStateCb</a> )(struct _LinphoneCore *lc, <a class="el" href="group__initializing.html#gae852b5b31d6d5e673de9d2c9b63aca7e">LinphoneGlobalState</a> gstate, const char *message)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gacf692730f4aceefa92422d128cc4c069">LinphoneCallStateCb</a> )(struct _LinphoneCore *lc, LinphoneCall *call, <a class="el" href="group__call__control.html#ga37dad1a4f935d9cc26ec1f9d2b23691a">LinphoneCallState</a> cstate, const char *message)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gab6e92e471a14b2dc5c513e2cab066133">CallEncryptionChangedCb</a> )(struct _LinphoneCore *lc, 
LinphoneCall *call, bool_t on, const char *authentication_token)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gae2118ee5fdce307bd608a2d40bfa3829">LinphoneRegistrationStateCb</a> )(struct _LinphoneCore *lc, <a class="el" href="group__proxies.html#ga1879a0d59e068457b688138da97f2d0b">LinphoneProxyConfig</a> *cfg, <a class="el" href="group__proxies.html#ga698cb4228fac724b9e7c3cc5f499f7a8">LinphoneRegistrationState</a> cstate, const char *message)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga1efc828e75f40ceb0498aa113404d9c5">ShowInterfaceCb</a> )(struct _LinphoneCore *lc)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gaa641595735c115fe26902aa31a5b8d44">DisplayStatusCb</a> )(struct _LinphoneCore *lc, const char *message)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga30f12b0b8f25375dd7338cd920d7e107">DisplayMessageCb</a> )(struct _LinphoneCore *lc, const char *message)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga4de4ea100f488d83d874325bccc5f82d">DisplayUrlCb</a> )(struct _LinphoneCore *lc, const char *message, const char *url)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga7b33594493052b415638d601e0e0c4f4">LinphoneCoreCbFunc</a> )(struct _LinphoneCore *lc, void *user_data)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gae2fb7f03c3ce67ad4362cc71c8ce8de6">NotifyReceivedCb</a> )(struct _LinphoneCore *lc, LinphoneCall *call, const char *from, const char *event)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga6cd8753afd60d2f2b34e6b7f3be1666a">NotifyPresenceReceivedCb</a> )(struct _LinphoneCore *lc, <a class="el" href="group__buddy__list.html#ga2f0508bab6d8ded431ce8b1a679a1522">LinphoneFriend</a> *lf)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gacea6471f30e2fcc601cec428fde57e9a">NewSubscribtionRequestCb</a> )(struct _LinphoneCore *lc, <a class="el" href="group__buddy__list.html#ga2f0508bab6d8ded431ce8b1a679a1522">LinphoneFriend</a> *lf, const char *url)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga2673e7baaa3ddec6190902fc76204b29">AuthInfoRequested</a> )(struct _LinphoneCore *lc, const char *realm, const char *username)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" 
href="group__initializing.html#ga13c18ca9ea1289dcb40b47d149fba765">CallLogUpdated</a> )(struct _LinphoneCore *lc, struct <a class="el" href="struct__LinphoneCallLog.html">_LinphoneCallLog</a> *newcl)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga07b7506ff003f7fc9b819f95f3d1588f">TextMessageReceived</a> )(<a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *lc, <a class="el" href="group__chatroom.html#ga37902f3fc715b7f6451c241d0efd0299">LinphoneChatRoom</a> *room, const <a class="el" href="group__linphone__address.html#gaa017e66840f5ee578168bd469b6c3e91">LinphoneAddress</a> *from, const char *message)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gacd78bd518b92f4368afccda59c43b4c8">DtmfReceived</a> )(struct _LinphoneCore *lc, LinphoneCall *call, int dtmf)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga4e9744cebbbd4287dfe7cfde41e91c45">ReferReceived</a> )(struct _LinphoneCore *lc, const char *refer_to)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef void(*&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gab9d0aefcdffb933ddf9d8664ff740529">BuddyInfoUpdated</a> )(struct _LinphoneCore *lc, <a class="el" href="group__buddy__list.html#ga2f0508bab6d8ded431ce8b1a679a1522">LinphoneFriend</a> *lf)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">typedef struct <a class="el" href="struct__LinphoneVTable.html">_LinphoneVTable</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga189ef1b7d6caff6493dfd8311ec3e378">LinphoneCoreVTable</a></td></tr> <tr><td colspan="2"><h2><a name="enum-members"></a> Enumerations</h2></td></tr> <tr><td class="memItemLeft" align="right" valign="top">enum &#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gaf7346e9b3a064155f3d5d3811d684fb2">_LinphoneReason</a> { , <br/> &#160;&#160;<a class="el" href="group__initializing.html#ggaf7346e9b3a064155f3d5d3811d684fb2ac71a4d9109f8eb8a9f5961109d505331">LinphoneReasonNoResponse</a>, <br/> &#160;&#160;<a class="el" href="group__initializing.html#ggaf7346e9b3a064155f3d5d3811d684fb2ad26546a581f2bfd58410f1f3d141666a">LinphoneReasonBadCredentials</a>, <br/> &#160;&#160;<a class="el" href="group__initializing.html#ggaf7346e9b3a064155f3d5d3811d684fb2a442fce67ace6491eee2f4a94177f34c6">LinphoneReasonDeclined</a> <br/> }</td></tr> <tr><td class="memItemLeft" align="right" valign="top">enum &#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga3bca1685790c41a1729e88af318ffb1c">_LinphoneGlobalState</a> </td></tr> <tr><td colspan="2"><h2><a name="func-members"></a> Functions</h2></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#gac93ed982ae84a5a282a703c8433ca42f">linphone_core_new</a> (const <a class="el" 
href="group__initializing.html#ga189ef1b7d6caff6493dfd8311ec3e378">LinphoneCoreVTable</a> *vtable, const char *config_path, const char *factory_config_path, void *userdata)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga7bdac8ac386483fc4e0876e4c5d13755">linphone_core_iterate</a> (<a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *lc)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">void *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga705206c56f9be737ce1eac53e5ba8b76">linphone_core_get_user_data</a> (<a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *lc)</td></tr> <tr><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__initializing.html#ga5e3f2f87c8d8262caa1fa90bbd504008">linphone_core_destroy</a> (<a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *lc)</td></tr> </table> <hr/><h2>Typedef Documentation</h2> <a class="anchor" id="gaa5cf635b82dd338e7ee2dd3599d05f0a"></a><!-- doxytag: member="linphonecore.h::LinphoneCore" ref="gaa5cf635b82dd338e7ee2dd3599d05f0a" args="" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef struct _LinphoneCore <a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a></td> </tr> </table> </div> <div class="memdoc"> <p>Linphone core main object created by function <a class="el" href="group__initializing.html#gac93ed982ae84a5a282a703c8433ca42f">linphone_core_new()</a> . </p> </div> </div> <a class="anchor" id="gae852b5b31d6d5e673de9d2c9b63aca7e"></a><!-- doxytag: member="linphonecore.h::LinphoneGlobalState" ref="gae852b5b31d6d5e673de9d2c9b63aca7e" args="" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef enum <a class="el" href="group__initializing.html#ga3bca1685790c41a1729e88af318ffb1c">_LinphoneGlobalState</a> <a class="el" href="group__initializing.html#gae852b5b31d6d5e673de9d2c9b63aca7e">LinphoneGlobalState</a></td> </tr> </table> </div> <div class="memdoc"> <p>LinphoneGlobalState describes the global state of the LinphoneCore object. 
It is notified via the <a class="el" href="struct__LinphoneVTable.html#a96effce0671ecb15f6e8ee693632e29b">LinphoneCoreVTable::global_state_changed</a> </p> </div> </div> <a class="anchor" id="ga43f7be5965383fae304e3053ffdc7e7f"></a><!-- doxytag: member="linphonecore.h::LinphoneGlobalStateCb" ref="ga43f7be5965383fae304e3053ffdc7e7f" args=")(struct _LinphoneCore *lc, LinphoneGlobalState gstate, const char *message)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga43f7be5965383fae304e3053ffdc7e7f">LinphoneGlobalStateCb</a>)(struct _LinphoneCore *lc, <a class="el" href="group__initializing.html#gae852b5b31d6d5e673de9d2c9b63aca7e">LinphoneGlobalState</a> gstate, const char *message)</td> </tr> </table> </div> <div class="memdoc"> <p>Global state notification callback prototype </p> </div> </div> <a class="anchor" id="gacf692730f4aceefa92422d128cc4c069"></a><!-- doxytag: member="linphonecore.h::LinphoneCallStateCb" ref="gacf692730f4aceefa92422d128cc4c069" args=")(struct _LinphoneCore *lc, LinphoneCall *call, LinphoneCallState cstate, const char *message)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#gacf692730f4aceefa92422d128cc4c069">LinphoneCallStateCb</a>)(struct _LinphoneCore *lc, LinphoneCall *call, <a class="el" href="group__call__control.html#ga37dad1a4f935d9cc26ec1f9d2b23691a">LinphoneCallState</a> cstate, const char *message)</td> </tr> </table> </div> <div class="memdoc"> <p>Call state notification callback prototype </p> </div> </div> <a class="anchor" id="gab6e92e471a14b2dc5c513e2cab066133"></a><!-- doxytag: member="linphonecore.h::CallEncryptionChangedCb" ref="gab6e92e471a14b2dc5c513e2cab066133" args=")(struct _LinphoneCore *lc, LinphoneCall *call, bool_t on, const char *authentication_token)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#gab6e92e471a14b2dc5c513e2cab066133">CallEncryptionChangedCb</a>)(struct _LinphoneCore *lc, LinphoneCall *call, bool_t on, const char *authentication_token)</td> </tr> </table> </div> <div class="memdoc"> <p>Call encryption changed callback prototype </p> </div> </div> <a class="anchor" id="gae2118ee5fdce307bd608a2d40bfa3829"></a><!-- doxytag: member="linphonecore.h::LinphoneRegistrationStateCb" ref="gae2118ee5fdce307bd608a2d40bfa3829" args=")(struct _LinphoneCore *lc, LinphoneProxyConfig *cfg, LinphoneRegistrationState cstate, const char *message)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#gae2118ee5fdce307bd608a2d40bfa3829">LinphoneRegistrationStateCb</a>)(struct _LinphoneCore *lc, <a class="el" href="group__proxies.html#ga1879a0d59e068457b688138da97f2d0b">LinphoneProxyConfig</a> *cfg, <a class="el" href="group__proxies.html#ga698cb4228fac724b9e7c3cc5f499f7a8">LinphoneRegistrationState</a> cstate, const char *message)</td> </tr> </table> </div> <div class="memdoc"> <p>Registration state notification callback prototype </p> </div> </div> <a class="anchor" id="ga1efc828e75f40ceb0498aa113404d9c5"></a><!-- doxytag: member="linphonecore.h::ShowInterfaceCb" ref="ga1efc828e75f40ceb0498aa113404d9c5" args=")(struct _LinphoneCore *lc)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr>
<td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga1efc828e75f40ceb0498aa113404d9c5">ShowInterfaceCb</a>)(struct _LinphoneCore *lc)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="gaa641595735c115fe26902aa31a5b8d44"></a><!-- doxytag: member="linphonecore.h::DisplayStatusCb" ref="gaa641595735c115fe26902aa31a5b8d44" args=")(struct _LinphoneCore *lc, const char *message)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#gaa641595735c115fe26902aa31a5b8d44">DisplayStatusCb</a>)(struct _LinphoneCore *lc, const char *message)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="ga30f12b0b8f25375dd7338cd920d7e107"></a><!-- doxytag: member="linphonecore.h::DisplayMessageCb" ref="ga30f12b0b8f25375dd7338cd920d7e107" args=")(struct _LinphoneCore *lc, const char *message)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga30f12b0b8f25375dd7338cd920d7e107">DisplayMessageCb</a>)(struct _LinphoneCore *lc, const char *message)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="ga4de4ea100f488d83d874325bccc5f82d"></a><!-- doxytag: member="linphonecore.h::DisplayUrlCb" ref="ga4de4ea100f488d83d874325bccc5f82d" args=")(struct _LinphoneCore *lc, const char *message, const char *url)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga4de4ea100f488d83d874325bccc5f82d">DisplayUrlCb</a>)(struct _LinphoneCore *lc, const char *message, const char *url)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="ga7b33594493052b415638d601e0e0c4f4"></a><!-- doxytag: member="linphonecore.h::LinphoneCoreCbFunc" ref="ga7b33594493052b415638d601e0e0c4f4" args=")(struct _LinphoneCore *lc, void *user_data)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga7b33594493052b415638d601e0e0c4f4">LinphoneCoreCbFunc</a>)(struct _LinphoneCore *lc, void *user_data)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="gae2fb7f03c3ce67ad4362cc71c8ce8de6"></a><!-- doxytag: member="linphonecore.h::NotifyReceivedCb" ref="gae2fb7f03c3ce67ad4362cc71c8ce8de6" args=")(struct _LinphoneCore *lc, LinphoneCall *call, const char *from, const char *event)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#gae2fb7f03c3ce67ad4362cc71c8ce8de6">NotifyReceivedCb</a>)(struct _LinphoneCore *lc, LinphoneCall *call, const char *from, const char *event)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="ga6cd8753afd60d2f2b34e6b7f3be1666a"></a><!-- doxytag: member="linphonecore.h::NotifyPresenceReceivedCb" ref="ga6cd8753afd60d2f2b34e6b7f3be1666a" args=")(struct _LinphoneCore *lc, LinphoneFriend *lf)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" 
href="group__initializing.html#ga6cd8753afd60d2f2b34e6b7f3be1666a">NotifyPresenceReceivedCb</a>)(struct _LinphoneCore *lc, <a class="el" href="group__buddy__list.html#ga2f0508bab6d8ded431ce8b1a679a1522">LinphoneFriend</a> *lf)</td> </tr> </table> </div> <div class="memdoc"> <p>Report status change for a friend previously <a class="el" href="group__buddy__list.html#ga3a8e9184320402082bf35226ae8d25b8">added </a> to <a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a>. </p> <dl><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">lc</td><td><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> object . </td></tr> <tr><td class="paramname">lf</td><td>Updated <a class="el" href="group__buddy__list.html#ga2f0508bab6d8ded431ce8b1a679a1522">LinphoneFriend</a> . </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="gacea6471f30e2fcc601cec428fde57e9a"></a><!-- doxytag: member="linphonecore.h::NewSubscribtionRequestCb" ref="gacea6471f30e2fcc601cec428fde57e9a" args=")(struct _LinphoneCore *lc, LinphoneFriend *lf, const char *url)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#gacea6471f30e2fcc601cec428fde57e9a">NewSubscribtionRequestCb</a>)(struct _LinphoneCore *lc, <a class="el" href="group__buddy__list.html#ga2f0508bab6d8ded431ce8b1a679a1522">LinphoneFriend</a> *lf, const char *url)</td> </tr> </table> </div> <div class="memdoc"> <p>Reports that a new subscription request has been received and wait for a decision. <br/> Status on this subscription request is notified by <a class="el" href="group__buddy__list.html#ga9ab0c9222edcc0a8e25fb068aee88bc0">changing policy </a> for this friend </p> <dl><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">lc</td><td><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> object </td></tr> <tr><td class="paramname">lf</td><td><a class="el" href="group__buddy__list.html#ga2f0508bab6d8ded431ce8b1a679a1522">LinphoneFriend</a> corresponding to the subscriber </td></tr> <tr><td class="paramname">url</td><td>of the subscriber Callback prototype </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="ga2673e7baaa3ddec6190902fc76204b29"></a><!-- doxytag: member="linphonecore.h::AuthInfoRequested" ref="ga2673e7baaa3ddec6190902fc76204b29" args=")(struct _LinphoneCore *lc, const char *realm, const char *username)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga2673e7baaa3ddec6190902fc76204b29">AuthInfoRequested</a>)(struct _LinphoneCore *lc, const char *realm, const char *username)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="ga13c18ca9ea1289dcb40b47d149fba765"></a><!-- doxytag: member="linphonecore.h::CallLogUpdated" ref="ga13c18ca9ea1289dcb40b47d149fba765" args=")(struct _LinphoneCore *lc, struct _LinphoneCallLog *newcl)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga13c18ca9ea1289dcb40b47d149fba765">CallLogUpdated</a>)(struct _LinphoneCore *lc, struct <a class="el" href="struct__LinphoneCallLog.html">_LinphoneCallLog</a> *newcl)</td> </tr> </table> </div> <div 
class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="ga07b7506ff003f7fc9b819f95f3d1588f"></a><!-- doxytag: member="linphonecore.h::TextMessageReceived" ref="ga07b7506ff003f7fc9b819f95f3d1588f" args=")(LinphoneCore *lc, LinphoneChatRoom *room, const LinphoneAddress *from, const char *message)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga07b7506ff003f7fc9b819f95f3d1588f">TextMessageReceived</a>)(<a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *lc, <a class="el" href="group__chatroom.html#ga37902f3fc715b7f6451c241d0efd0299">LinphoneChatRoom</a> *room, const <a class="el" href="group__linphone__address.html#gaa017e66840f5ee578168bd469b6c3e91">LinphoneAddress</a> *from, const char *message)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype</p> <dl><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">lc</td><td><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> object </td></tr> <tr><td class="paramname">room</td><td><a class="el" href="group__chatroom.html#ga37902f3fc715b7f6451c241d0efd0299">LinphoneChatRoom</a> involved in this conversation. Can be be created by the framework in case <a class="el" href="group__linphone__address.html#gaa017e66840f5ee578168bd469b6c3e91">the from </a> is not present in any chat room. </td></tr> <tr><td class="paramname">from</td><td><a class="el" href="group__linphone__address.html#gaa017e66840f5ee578168bd469b6c3e91">LinphoneAddress</a> from </td></tr> <tr><td class="paramname">message</td><td>incoming message </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="gacd78bd518b92f4368afccda59c43b4c8"></a><!-- doxytag: member="linphonecore.h::DtmfReceived" ref="gacd78bd518b92f4368afccda59c43b4c8" args=")(struct _LinphoneCore *lc, LinphoneCall *call, int dtmf)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#gacd78bd518b92f4368afccda59c43b4c8">DtmfReceived</a>)(struct _LinphoneCore *lc, LinphoneCall *call, int dtmf)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="ga4e9744cebbbd4287dfe7cfde41e91c45"></a><!-- doxytag: member="linphonecore.h::ReferReceived" ref="ga4e9744cebbbd4287dfe7cfde41e91c45" args=")(struct _LinphoneCore *lc, const char *refer_to)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#ga4e9744cebbbd4287dfe7cfde41e91c45">ReferReceived</a>)(struct _LinphoneCore *lc, const char *refer_to)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback prototype </p> </div> </div> <a class="anchor" id="gab9d0aefcdffb933ddf9d8664ff740529"></a><!-- doxytag: member="linphonecore.h::BuddyInfoUpdated" ref="gab9d0aefcdffb933ddf9d8664ff740529" args=")(struct _LinphoneCore *lc, LinphoneFriend *lf)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef void(* <a class="el" href="group__initializing.html#gab9d0aefcdffb933ddf9d8664ff740529">BuddyInfoUpdated</a>)(struct _LinphoneCore *lc, <a class="el" href="group__buddy__list.html#ga2f0508bab6d8ded431ce8b1a679a1522">LinphoneFriend</a> *lf)</td> </tr> </table> </div> <div class="memdoc"> <p>Callback 
prototype </p> </div> </div> <a class="anchor" id="ga189ef1b7d6caff6493dfd8311ec3e378"></a><!-- doxytag: member="linphonecore.h::LinphoneCoreVTable" ref="ga189ef1b7d6caff6493dfd8311ec3e378" args="" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">typedef struct <a class="el" href="struct__LinphoneVTable.html">_LinphoneVTable</a> <a class="el" href="group__initializing.html#ga189ef1b7d6caff6493dfd8311ec3e378">LinphoneCoreVTable</a></td> </tr> </table> </div> <div class="memdoc"> <p>This structure holds all callbacks that the application should implement. None is mandatory. </p> </div> </div> <hr/><h2>Enumeration Type Documentation</h2> <a class="anchor" id="gaf7346e9b3a064155f3d5d3811d684fb2"></a><!-- doxytag: member="linphonecore.h::_LinphoneReason" ref="gaf7346e9b3a064155f3d5d3811d684fb2" args="" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">enum <a class="el" href="group__initializing.html#gaf7346e9b3a064155f3d5d3811d684fb2">_LinphoneReason</a></td> </tr> </table> </div> <div class="memdoc"> <p>Enum describing failure reasons. </p> <dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0"> <tr><td valign="top"><em><a class="anchor" id="ggaf7346e9b3a064155f3d5d3811d684fb2ac71a4d9109f8eb8a9f5961109d505331"></a><!-- doxytag: member="LinphoneReasonNoResponse" ref="ggaf7346e9b3a064155f3d5d3811d684fb2ac71a4d9109f8eb8a9f5961109d505331" args="" -->LinphoneReasonNoResponse</em>&nbsp;</td><td> <p>No response received from remote </p> </td></tr> <tr><td valign="top"><em><a class="anchor" id="ggaf7346e9b3a064155f3d5d3811d684fb2ad26546a581f2bfd58410f1f3d141666a"></a><!-- doxytag: member="LinphoneReasonBadCredentials" ref="ggaf7346e9b3a064155f3d5d3811d684fb2ad26546a581f2bfd58410f1f3d141666a" args="" -->LinphoneReasonBadCredentials</em>&nbsp;</td><td> <p>Authentication failed due to bad or missing credentials </p> </td></tr> <tr><td valign="top"><em><a class="anchor" id="ggaf7346e9b3a064155f3d5d3811d684fb2a442fce67ace6491eee2f4a94177f34c6"></a><!-- doxytag: member="LinphoneReasonDeclined" ref="ggaf7346e9b3a064155f3d5d3811d684fb2a442fce67ace6491eee2f4a94177f34c6" args="" -->LinphoneReasonDeclined</em>&nbsp;</td><td> <p>The call has been declined </p> </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="ga3bca1685790c41a1729e88af318ffb1c"></a><!-- doxytag: member="linphonecore.h::_LinphoneGlobalState" ref="ga3bca1685790c41a1729e88af318ffb1c" args="" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">enum <a class="el" href="group__initializing.html#ga3bca1685790c41a1729e88af318ffb1c">_LinphoneGlobalState</a></td> </tr> </table> </div> <div class="memdoc"> <p>LinphoneGlobalState describes the global state of the LinphoneCore object. 
It is notified via the <a class="el" href="struct__LinphoneVTable.html#a96effce0671ecb15f6e8ee693632e29b">LinphoneCoreVTable::global_state_changed</a> </p> </div> </div> <hr/><h2>Function Documentation</h2> <a class="anchor" id="gac93ed982ae84a5a282a703c8433ca42f"></a><!-- doxytag: member="linphonecore.c::linphone_core_new" ref="gac93ed982ae84a5a282a703c8433ca42f" args="(const LinphoneCoreVTable *vtable, const char *config_path, const char *factory_config_path, void *userdata)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname"><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a>* linphone_core_new </td> <td>(</td> <td class="paramtype">const <a class="el" href="group__initializing.html#ga189ef1b7d6caff6493dfd8311ec3e378">LinphoneCoreVTable</a> *&#160;</td> <td class="paramname"><em>vtable</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">const char *&#160;</td> <td class="paramname"><em>config_path</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">const char *&#160;</td> <td class="paramname"><em>factory_config_path</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">void *&#160;</td> <td class="paramname"><em>userdata</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div> <div class="memdoc"> <p>Instantiates a LinphoneCore object.</p> <p>The LinphoneCore object is the primary handle for doing all phone actions. It should be unique within your application. </p> <dl><dt><b>Parameters:</b></dt><dd> <table class="params"> <tr><td class="paramname">vtable</td><td>a LinphoneCoreVTable structure holding your application callbacks </td></tr> <tr><td class="paramname">config_path</td><td>a path to a config file. If it does not exist, it will be created. The config file is used to store all settings, call logs, friends, proxies... so that all these settings become persistent over the life of the LinphoneCore object. It is allowed to set a NULL config file. In that case LinphoneCore will not store any settings. </td></tr> <tr><td class="paramname">factory_config_path</td><td>a path to a read-only config file that can be used to store hard-coded preferences such as proxy settings or internal preferences. The settings in this factory file always override those in the normal config file. It is OPTIONAL, use NULL if unneeded. </td></tr> <tr><td class="paramname">userdata</td><td>an opaque user pointer that can be retrieved at any time (for example in callbacks) using <a class="el" href="group__initializing.html#ga705206c56f9be737ce1eac53e5ba8b76">linphone_core_get_user_data()</a>. </td></tr> </table> </dd> </dl> </div> </div> <a class="anchor" id="ga7bdac8ac386483fc4e0876e4c5d13755"></a><!-- doxytag: member="linphonecore.c::linphone_core_iterate" ref="ga7bdac8ac386483fc4e0876e4c5d13755" args="(LinphoneCore *lc)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">void linphone_core_iterate </td> <td>(</td> <td class="paramtype"><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *&#160;</td> <td class="paramname"><em>lc</em></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Main loop function. 
It is crucial that your application call it periodically.</p> <p><a class="el" href="group__initializing.html#ga7bdac8ac386483fc4e0876e4c5d13755">linphone_core_iterate()</a> performs various background tasks:</p> <ul> <li>receives SIP messages</li> <li>handles timers and timeouts</li> <li>performs registration to proxies</li> <li>retries authentication</li> </ul> <p>The application MUST call this function periodically, in its main loop. Be careful that this function must be called from the same thread as other liblinphone methods. If it is not the case, make sure all liblinphone calls are serialized with a mutex.</p> </div> </div> <a class="anchor" id="ga705206c56f9be737ce1eac53e5ba8b76"></a><!-- doxytag: member="linphonecore.c::linphone_core_get_user_data" ref="ga705206c56f9be737ce1eac53e5ba8b76" args="(LinphoneCore *lc)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">void* linphone_core_get_user_data </td> <td>(</td> <td class="paramtype"><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *&#160;</td> <td class="paramname"><em>lc</em></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Retrieves the user pointer that was given to <a class="el" href="group__initializing.html#gac93ed982ae84a5a282a703c8433ca42f">linphone_core_new()</a> </p> </div> </div> <a class="anchor" id="ga5e3f2f87c8d8262caa1fa90bbd504008"></a><!-- doxytag: member="linphonecore.c::linphone_core_destroy" ref="ga5e3f2f87c8d8262caa1fa90bbd504008" args="(LinphoneCore *lc)" --> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">void linphone_core_destroy </td> <td>(</td> <td class="paramtype"><a class="el" href="group__initializing.html#gaa5cf635b82dd338e7ee2dd3599d05f0a">LinphoneCore</a> *&#160;</td> <td class="paramname"><em>lc</em></td><td>)</td> <td></td> </tr> </table> </div> <div class="memdoc"> <p>Destroys a LinphoneCore </p> </div> </div> </div> <hr class="footer"/><address class="footer"><small> Generated on Mon Feb 6 2012 18:52:20 for liblinphone by &#160;<a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/> </a> 1.7.5.1 </small></address> </body> </html>
GargoyleSoftware/voip-client-ios
submodules/build-i386-apple-darwin/linphone/coreapi/help/doc/html/group__initializing.html
HTML
gpl-2.0
37,261
62.477002
585
0.70771
false
/* * linux/mm/page_alloc.c * * Manages the free list, the system allocates free pages here. * Note that kmalloc() lives in slab.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) */ #include <linux/stddef.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/interrupt.h> #include <linux/pagemap.h> #include <linux/jiffies.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/kmemcheck.h> #include <linux/kasan.h> #include <linux/module.h> #include <linux/suspend.h> #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/ratelimit.h> #include <linux/oom.h> #include <linux/notifier.h> #include <linux/topology.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/memory_hotplug.h> #include <linux/nodemask.h> #include <linux/vmalloc.h> #include <linux/vmstat.h> #include <linux/mempolicy.h> #include <linux/stop_machine.h> #include <linux/sort.h> #include <linux/pfn.h> #include <linux/backing-dev.h> #include <linux/fault-inject.h> #include <linux/page-isolation.h> #include <linux/page_ext.h> #include <linux/debugobjects.h> #include <linux/kmemleak.h> #include <linux/compaction.h> #include <trace/events/kmem.h> #include <linux/prefetch.h> #include <linux/mm_inline.h> #include <linux/migrate.h> #include <linux/page_ext.h> #include <linux/hugetlb.h> #include <linux/sched/rt.h> #include <linux/page_owner.h> #include <linux/kthread.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/div64.h> #include "internal.h" /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ static DEFINE_MUTEX(pcp_batch_high_lock); #define MIN_PERCPU_PAGELIST_FRACTION (8) #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID DEFINE_PER_CPU(int, numa_node); EXPORT_PER_CPU_SYMBOL(numa_node); #endif #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() * defined in <linux/topology.h>. */ DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ EXPORT_PER_CPU_SYMBOL(_numa_mem_); int _node_numa_mem_[MAX_NUMNODES]; #endif /* * Array of node states. 
*/ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { [N_POSSIBLE] = NODE_MASK_ALL, [N_ONLINE] = { { [0] = 1UL } }, #ifndef CONFIG_NUMA [N_NORMAL_MEMORY] = { { [0] = 1UL } }, #ifdef CONFIG_HIGHMEM [N_HIGH_MEMORY] = { { [0] = 1UL } }, #endif #ifdef CONFIG_MOVABLE_NODE [N_MEMORY] = { { [0] = 1UL } }, #endif [N_CPU] = { { [0] = 1UL } }, #endif /* NUMA */ }; EXPORT_SYMBOL(node_states); /* Protect totalram_pages and zone->managed_pages */ static DEFINE_SPINLOCK(managed_page_count_lock); unsigned long totalram_pages __read_mostly; unsigned long totalreserve_pages __read_mostly; unsigned long totalcma_pages __read_mostly; /* * When calculating the number of globally allowed dirty pages, there * is a certain number of per-zone reserves that should not be * considered dirtyable memory. This is the sum of those reserves * over all existing zones that contribute dirtyable memory. */ unsigned long dirty_balance_reserve __read_mostly; int percpu_pagelist_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; /* * A cached value of the page's pageblock's migratetype, used when the page is * put on a pcplist. Used to avoid the pageblock migratetype lookup when * freeing from pcplists in most cases, at the cost of possibly becoming stale. * Also the migratetype set in the page does not necessarily match the pcplist * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any * other index - this ensures that it will be put on the correct CMA freelist. */ static inline int get_pcppage_migratetype(struct page *page) { return page->index; } static inline void set_pcppage_migratetype(struct page *page, int migratetype) { page->index = migratetype; } #ifdef CONFIG_PM_SLEEP /* * The following functions are used by the suspend/hibernate code to temporarily * change gfp_allowed_mask in order to avoid using I/O during memory allocations * while devices are suspended. To avoid races with the suspend/hibernate code, * they should always be called with pm_mutex held (gfp_allowed_mask also should * only be modified with pm_mutex held, unless the suspend/hibernate code is * guaranteed not to run in parallel with that modification). 
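 *
 * A minimal usage sketch (illustrative only, not code from this file) of
 * the expected call pattern around hibernation, with pm_mutex held as the
 * WARN_ON()s below require:
 *
 *   mutex_lock(&pm_mutex);
 *   pm_restrict_gfp_mask();     - masks __GFP_IO and __GFP_FS out
 *   (suspend devices, write the image)
 *   pm_restore_gfp_mask();      - puts the saved mask back
 *   mutex_unlock(&pm_mutex);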
*/ static gfp_t saved_gfp_mask; void pm_restore_gfp_mask(void) { WARN_ON(!mutex_is_locked(&pm_mutex)); if (saved_gfp_mask) { gfp_allowed_mask = saved_gfp_mask; saved_gfp_mask = 0; } } void pm_restrict_gfp_mask(void) { WARN_ON(!mutex_is_locked(&pm_mutex)); WARN_ON(saved_gfp_mask); saved_gfp_mask = gfp_allowed_mask; gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); } bool pm_suspended_storage(void) { if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) return false; return true; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE unsigned int pageblock_order __read_mostly; #endif static void __free_pages_ok(struct page *page, unsigned int order); /* * results with 256, 32 in the lowmem_reserve sysctl: * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) * 1G machine -> (16M dma, 784M normal, 224M high) * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA * * TBD: should special case ZONE_DMA32 machines here - in those we normally * don't need any ZONE_NORMAL reservation */ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { #ifdef CONFIG_ZONE_DMA 256, #endif #ifdef CONFIG_ZONE_DMA32 256, #endif #ifdef CONFIG_HIGHMEM 32, #endif 32, }; EXPORT_SYMBOL(totalram_pages); static char * const zone_names[MAX_NR_ZONES] = { #ifdef CONFIG_ZONE_DMA "DMA", #endif #ifdef CONFIG_ZONE_DMA32 "DMA32", #endif "Normal", #ifdef CONFIG_HIGHMEM "HighMem", #endif "Movable", #ifdef CONFIG_ZONE_DEVICE "Device", #endif }; static void free_compound_page(struct page *page); compound_page_dtor * const compound_page_dtors[] = { NULL, free_compound_page, #ifdef CONFIG_HUGETLB_PAGE free_huge_page, #endif }; int min_free_kbytes = 1024; int user_min_free_kbytes = -1; static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; static unsigned long __meminitdata dma_reserve; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; static unsigned long __initdata required_kernelcore; static unsigned long __initdata required_movablecore; static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; EXPORT_SYMBOL(movable_zone); #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ #if MAX_NUMNODES > 1 int nr_node_ids __read_mostly = MAX_NUMNODES; int nr_online_nodes __read_mostly = 1; EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT static inline void reset_deferred_meminit(pg_data_t *pgdat) { pgdat->first_deferred_pfn = ULONG_MAX; } /* Returns true if the struct page for the pfn is uninitialised */ static inline bool __meminit early_page_uninitialised(unsigned long pfn) { int nid = early_pfn_to_nid(pfn); if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) return true; return false; } static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid) { if (pfn >= NODE_DATA(nid)->first_deferred_pfn) return true; return false; } /* * Returns false when the remaining initialisation should be deferred until * later in the boot cycle when it can be parallelised. 
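 *
 * Worked example of the "2G" threshold used below (illustrative numbers):
 * with 4K pages (PAGE_SHIFT == 12), 2UL << (30 - PAGE_SHIFT) is
 * 2UL << 18 = 524288 struct pages, which covers exactly 2G of memory.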
*/ static inline bool update_defer_init(pg_data_t *pgdat, unsigned long pfn, unsigned long zone_end, unsigned long *nr_initialised) { /* Always populate low zones for address-constrained allocations */ if (zone_end < pgdat_end_pfn(pgdat)) return true; /* Initialise at least 2G of the highest zone */ (*nr_initialised)++; if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) && (pfn & (PAGES_PER_SECTION - 1)) == 0) { pgdat->first_deferred_pfn = pfn; return false; } return true; } #else static inline void reset_deferred_meminit(pg_data_t *pgdat) { } static inline bool early_page_uninitialised(unsigned long pfn) { return false; } static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid) { return false; } static inline bool update_defer_init(pg_data_t *pgdat, unsigned long pfn, unsigned long zone_end, unsigned long *nr_initialised) { return true; } #endif void set_pageblock_migratetype(struct page *page, int migratetype) { if (unlikely(page_group_by_mobility_disabled && migratetype < MIGRATE_PCPTYPES)) migratetype = MIGRATE_UNMOVABLE; set_pageblock_flags_group(page, (unsigned long)migratetype, PB_migrate, PB_migrate_end); } #ifdef CONFIG_DEBUG_VM static int page_outside_zone_boundaries(struct zone *zone, struct page *page) { int ret = 0; unsigned seq; unsigned long pfn = page_to_pfn(page); unsigned long sp, start_pfn; do { seq = zone_span_seqbegin(zone); start_pfn = zone->zone_start_pfn; sp = zone->spanned_pages; if (!zone_spans_pfn(zone, pfn)) ret = 1; } while (zone_span_seqretry(zone, seq)); if (ret) pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", pfn, zone_to_nid(zone), zone->name, start_pfn, start_pfn + sp); return ret; } static int page_is_consistent(struct zone *zone, struct page *page) { if (!pfn_valid_within(page_to_pfn(page))) return 0; if (zone != page_zone(page)) return 0; return 1; } /* * Temporary debugging check for pages not lying within a given zone. */ static int bad_range(struct zone *zone, struct page *page) { if (page_outside_zone_boundaries(zone, page)) return 1; if (!page_is_consistent(zone, page)) return 1; return 0; } #else static inline int bad_range(struct zone *zone, struct page *page) { return 0; } #endif static void bad_page(struct page *page, const char *reason, unsigned long bad_flags) { static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* Don't complain about poisoned pages */ if (PageHWPoison(page)) { page_mapcount_reset(page); /* remove PageBuddy */ return; } /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; goto out; } if (nr_unshown) { printk(KERN_ALERT "BUG: Bad page state: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", current->comm, page_to_pfn(page)); dump_page_badflags(page, reason, bad_flags); print_modules(); dump_stack(); out: /* Leave bad fields for debug, except PageBuddy could make trouble */ page_mapcount_reset(page); /* remove PageBuddy */ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } /* * Higher-order pages are called "compound pages". They are structured thusly: * * The first PAGE_SIZE page is called the "head page" and has PG_head set. * * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded * in bit 0 of page->compound_head. 
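 * (Illustrative sketch of that encoding - the helper itself lives in the
 * core mm headers, not in this file: set_compound_head() effectively does
 *   page->compound_head = (unsigned long)head | 1;
 * so PageTail() is just a test of that low bit.)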
The rest of the bits are a pointer to the head page. * * The first tail page's ->compound_dtor holds the offset in array of compound * page destructors. See compound_page_dtors. * * The first tail page's ->compound_order holds the order of allocation. * This usage means that zero-order pages may not be compound. */ static void free_compound_page(struct page *page) { __free_pages_ok(page, compound_order(page)); } void prep_compound_page(struct page *page, unsigned int order) { int i; int nr_pages = 1 << order; set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); set_compound_order(page, order); __SetPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; set_page_count(p, 0); set_compound_head(p, page); } } #ifdef CONFIG_DEBUG_PAGEALLOC unsigned int _debug_guardpage_minorder; bool _debug_pagealloc_enabled __read_mostly; bool _debug_guardpage_enabled __read_mostly; static int __init early_debug_pagealloc(char *buf) { if (!buf) return -EINVAL; if (strcmp(buf, "on") == 0) _debug_pagealloc_enabled = true; return 0; } early_param("debug_pagealloc", early_debug_pagealloc); static bool need_debug_guardpage(void) { /* If we don't use debug_pagealloc, we don't need guard page */ if (!debug_pagealloc_enabled()) return false; return true; } static void init_debug_guardpage(void) { if (!debug_pagealloc_enabled()) return; _debug_guardpage_enabled = true; } struct page_ext_operations debug_guardpage_ops = { .need = need_debug_guardpage, .init = init_debug_guardpage, }; static int __init debug_guardpage_minorder_setup(char *buf) { unsigned long res; if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { printk(KERN_ERR "Bad debug_guardpage_minorder value\n"); return 0; } _debug_guardpage_minorder = res; printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res); return 0; } __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup); static inline void set_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype) { struct page_ext *page_ext; if (!debug_guardpage_enabled()) return; page_ext = lookup_page_ext(page); __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); INIT_LIST_HEAD(&page->lru); set_page_private(page, order); /* Guard pages are not available for any usage */ __mod_zone_freepage_state(zone, -(1 << order), migratetype); } static inline void clear_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype) { struct page_ext *page_ext; if (!debug_guardpage_enabled()) return; page_ext = lookup_page_ext(page); __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); set_page_private(page, 0); if (!is_migrate_isolate(migratetype)) __mod_zone_freepage_state(zone, (1 << order), migratetype); } #else struct page_ext_operations debug_guardpage_ops = { NULL, }; static inline void set_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype) {} static inline void clear_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype) {} #endif static inline void set_page_order(struct page *page, unsigned int order) { set_page_private(page, order); __SetPageBuddy(page); } static inline void rmv_page_order(struct page *page) { __ClearPageBuddy(page); set_page_private(page, 0); } /* * This function checks whether a page is free && is the buddy. * We can coalesce a page and its buddy if * (a) the buddy is not in a hole && * (b) the buddy is in the buddy system && * (c) a page and its buddy have the same order && * (d) a page and its buddy are in the same zone. 
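 *
 * For orientation, a sketch of the buddy relation (mirroring what
 * __find_buddy_index() in mm/internal.h computes): the buddy of page_idx
 * at a given order differs from it in exactly one bit,
 *   buddy_idx = page_idx ^ (1 << order);
 * e.g. at order 2 the buddy of index 8 is index 12, and vice versa.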
* * For recording whether a page is in the buddy system, we set ->_mapcount * PAGE_BUDDY_MAPCOUNT_VALUE. * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is * serialized by zone->lock. * * For recording page's order, we use page_private(page). */ static inline int page_is_buddy(struct page *page, struct page *buddy, unsigned int order) { if (!pfn_valid_within(page_to_pfn(buddy))) return 0; if (page_is_guard(buddy) && page_order(buddy) == order) { if (page_zone_id(page) != page_zone_id(buddy)) return 0; VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); return 1; } if (PageBuddy(buddy) && page_order(buddy) == order) { /* * zone check is done late to avoid uselessly * calculating zone/node ids for pages that could * never merge. */ if (page_zone_id(page) != page_zone_id(buddy)) return 0; VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); return 1; } return 0; } /* * Freeing function for a buddy system allocator. * * The concept of a buddy system is to maintain direct-mapped table * (containing bit values) for memory blocks of various "orders". * The bottom level table contains the map for the smallest allocatable * units of memory (here, pages), and each level above it describes * pairs of units from the levels below, hence, "buddies". * At a high level, all that happens here is marking the table entry * at the bottom level available, and propagating the changes upward * as necessary, plus some accounting needed to play nicely with other * parts of the VM system. * At each level, we keep a list of pages, which are heads of continuous * free pages of length of (1 << order) and marked with _mapcount * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page) * field. * So when we are allocating or freeing one, we can derive the state of the * other. That is, if we allocate a small block, and both were * free, the remainder of the region must be split into blocks. * If a block is freed, and its buddy is also free, then this * triggers coalescing into a block of larger size. * * -- nyc */ static inline void __free_one_page(struct page *page, unsigned long pfn, struct zone *zone, unsigned int order, int migratetype) { unsigned long page_idx; unsigned long combined_idx; unsigned long uninitialized_var(buddy_idx); struct page *buddy; unsigned int max_order; max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1); VM_BUG_ON(!zone_is_initialized(zone)); VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); VM_BUG_ON(migratetype == -1); if (likely(!is_migrate_isolate(migratetype))) __mod_zone_freepage_state(zone, 1 << order, migratetype); page_idx = pfn & ((1 << MAX_ORDER) - 1); VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); VM_BUG_ON_PAGE(bad_range(zone, page), page); continue_merging: while (order < max_order - 1) { buddy_idx = __find_buddy_index(page_idx, order); buddy = page + (buddy_idx - page_idx); if (!page_is_buddy(page, buddy, order)) goto done_merging; /* * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, * merge with it and move up one order. */ if (page_is_guard(buddy)) { clear_page_guard(zone, buddy, order, migratetype); } else { list_del(&buddy->lru); zone->free_area[order].nr_free--; rmv_page_order(buddy); } combined_idx = buddy_idx & page_idx; page = page + (combined_idx - page_idx); page_idx = combined_idx; order++; } if (max_order < MAX_ORDER) { /* If we are here, it means order is >= pageblock_order. * We want to prevent merge between freepages on isolate * pageblock and normal pageblock. 
Without this, pageblock * isolation could cause incorrect freepage or CMA accounting. * * We don't want to hit this code for the more frequent * low-order merging. */ if (unlikely(has_isolate_pageblock(zone))) { int buddy_mt; buddy_idx = __find_buddy_index(page_idx, order); buddy = page + (buddy_idx - page_idx); buddy_mt = get_pageblock_migratetype(buddy); if (migratetype != buddy_mt && (is_migrate_isolate(migratetype) || is_migrate_isolate(buddy_mt))) goto done_merging; } max_order++; goto continue_merging; } done_merging: set_page_order(page, order); /* * If this is not the largest possible page, check if the buddy * of the next-highest order is free. If it is, it's possible * that pages are being freed that will coalesce soon. In case, * that is happening, add the free page to the tail of the list * so it's less likely to be used soon and more likely to be merged * as a higher order page */ if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { struct page *higher_page, *higher_buddy; combined_idx = buddy_idx & page_idx; higher_page = page + (combined_idx - page_idx); buddy_idx = __find_buddy_index(combined_idx, order + 1); higher_buddy = higher_page + (buddy_idx - combined_idx); if (page_is_buddy(higher_page, higher_buddy, order + 1)) { list_add_tail(&page->lru, &zone->free_area[order].free_list[migratetype]); goto out; } } list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); out: zone->free_area[order].nr_free++; } static inline int free_pages_check(struct page *page) { const char *bad_reason = NULL; unsigned long bad_flags = 0; if (unlikely(page_mapcount(page))) bad_reason = "nonzero mapcount"; if (unlikely(page->mapping != NULL)) bad_reason = "non-NULL mapping"; if (unlikely(atomic_read(&page->_count) != 0)) bad_reason = "nonzero _count"; if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) { bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; bad_flags = PAGE_FLAGS_CHECK_AT_FREE; } #ifdef CONFIG_MEMCG if (unlikely(page->mem_cgroup)) bad_reason = "page still charged to cgroup"; #endif if (unlikely(bad_reason)) { bad_page(page, bad_reason, bad_flags); return 1; } page_cpupid_reset_last(page); if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; return 0; } /* * Frees a number of pages from the PCP lists * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. * * If the zone was previously in an "all pages pinned" state then look to * see if this freeing clears that state. * * And clear the zone's pages_scanned counter, to hold off the "all pages are * pinned" detection logic. */ static void free_pcppages_bulk(struct zone *zone, int count, struct per_cpu_pages *pcp) { int migratetype = 0; int batch_free = 0; int to_free = count; unsigned long nr_scanned; spin_lock(&zone->lock); nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); if (nr_scanned) __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); while (to_free) { struct page *page; struct list_head *list; /* * Remove pages from lists in a round-robin fashion. A * batch_free count is maintained that is incremented when an * empty list is encountered. This is so more pages are freed * off fuller lists instead of spinning excessively around empty * lists */ do { batch_free++; if (++migratetype == MIGRATE_PCPTYPES) migratetype = 0; list = &pcp->lists[migratetype]; } while (list_empty(list)); /* This is the only non-empty list. Free them all. 
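 * (Illustration: MIGRATE_PCPTYPES is 3 here, so when a probe walks past
 * two empty lists before finding a populated one, batch_free reaches 3
 * and the whole remaining to_free budget is drained from that one list.)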
*/ if (batch_free == MIGRATE_PCPTYPES) batch_free = to_free; do { int mt; /* migratetype of the to-be-freed page */ page = list_entry(list->prev, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); mt = get_pcppage_migratetype(page); /* MIGRATE_ISOLATE page should not go to pcplists */ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); /* Pageblock could have been isolated meanwhile */ if (unlikely(has_isolate_pageblock(zone))) mt = get_pageblock_migratetype(page); __free_one_page(page, page_to_pfn(page), zone, 0, mt); trace_mm_page_pcpu_drain(page, 0, mt); } while (--to_free && --batch_free && !list_empty(list)); } spin_unlock(&zone->lock); } static void free_one_page(struct zone *zone, struct page *page, unsigned long pfn, unsigned int order, int migratetype) { unsigned long nr_scanned; spin_lock(&zone->lock); nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); if (nr_scanned) __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); if (unlikely(has_isolate_pageblock(zone) || is_migrate_isolate(migratetype))) { migratetype = get_pfnblock_migratetype(page, pfn); } __free_one_page(page, pfn, zone, order, migratetype); spin_unlock(&zone->lock); } static int free_tail_pages_check(struct page *head_page, struct page *page) { int ret = 1; /* * We rely on page->lru.next never having bit 0 set, unless the page * is PageTail(). Let's make sure that's true even for poisoned ->lru. */ BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); if (!IS_ENABLED(CONFIG_DEBUG_VM)) { ret = 0; goto out; } if (unlikely(!PageTail(page))) { bad_page(page, "PageTail not set", 0); goto out; } if (unlikely(compound_head(page) != head_page)) { bad_page(page, "compound_head not consistent", 0); goto out; } ret = 0; out: clear_compound_head(page); return ret; } static void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long zone, int nid) { set_page_links(page, zone, nid, pfn); init_page_count(page); page_mapcount_reset(page); page_cpupid_reset_last(page); INIT_LIST_HEAD(&page->lru); #ifdef WANT_PAGE_VIRTUAL /* The shift won't overflow because ZONE_NORMAL is below 4G. */ if (!is_highmem_idx(zone)) set_page_address(page, __va(pfn << PAGE_SHIFT)); #endif } static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone, int nid) { return __init_single_page(pfn_to_page(pfn), pfn, zone, nid); } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT static void init_reserved_page(unsigned long pfn) { pg_data_t *pgdat; int nid, zid; if (!early_page_uninitialised(pfn)) return; nid = early_pfn_to_nid(pfn); pgdat = NODE_DATA(nid); for (zid = 0; zid < MAX_NR_ZONES; zid++) { struct zone *zone = &pgdat->node_zones[zid]; if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) break; } __init_single_pfn(pfn, zid, nid); } #else static inline void init_reserved_page(unsigned long pfn) { } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ /* * Initialised pages do not have PageReserved set. This function is * called for each range allocated by the bootmem allocator and * marks the pages PageReserved. The remaining valid pages are later * sent to the buddy page allocator. 
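 *
 * PFN rounding sketch (illustrative numbers): PFN_DOWN() shifts the start
 * address down to a page frame number and PFN_UP() rounds the end address
 * up, so with 4K pages a range [0x1100, 0x3100) yields start_pfn = 1 and
 * end_pfn = 4, reserving the three page frames the range touches.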
*/ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) { unsigned long start_pfn = PFN_DOWN(start); unsigned long end_pfn = PFN_UP(end); for (; start_pfn < end_pfn; start_pfn++) { if (pfn_valid(start_pfn)) { struct page *page = pfn_to_page(start_pfn); init_reserved_page(start_pfn); /* Avoid false-positive PageTail() */ INIT_LIST_HEAD(&page->lru); SetPageReserved(page); } } } static bool free_pages_prepare(struct page *page, unsigned int order) { bool compound = PageCompound(page); int i, bad = 0; VM_BUG_ON_PAGE(PageTail(page), page); VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); trace_mm_page_free(page, order); kmemcheck_free_shadow(page, order); kasan_free_pages(page, order); if (PageAnon(page)) page->mapping = NULL; bad += free_pages_check(page); for (i = 1; i < (1 << order); i++) { if (compound) bad += free_tail_pages_check(page, page + i); bad += free_pages_check(page + i); } if (bad) return false; reset_page_owner(page, order); if (!PageHighMem(page)) { debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order); debug_check_no_obj_freed(page_address(page), PAGE_SIZE << order); } arch_free_page(page, order); kernel_map_pages(page, 1 << order, 0); return true; } static void __free_pages_ok(struct page *page, unsigned int order) { unsigned long flags; int migratetype; unsigned long pfn = page_to_pfn(page); if (!free_pages_prepare(page, order)) return; migratetype = get_pfnblock_migratetype(page, pfn); local_irq_save(flags); __count_vm_events(PGFREE, 1 << order); free_one_page(page_zone(page), page, pfn, order, migratetype); local_irq_restore(flags); } static void __init __free_pages_boot_core(struct page *page, unsigned long pfn, unsigned int order) { unsigned int nr_pages = 1 << order; struct page *p = page; unsigned int loop; prefetchw(p); for (loop = 0; loop < (nr_pages - 1); loop++, p++) { prefetchw(p + 1); __ClearPageReserved(p); set_page_count(p, 0); } __ClearPageReserved(p); set_page_count(p, 0); page_zone(page)->managed_pages += nr_pages; set_page_refcounted(page); __free_pages(page, order); } #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; int __meminit early_pfn_to_nid(unsigned long pfn) { static DEFINE_SPINLOCK(early_pfn_lock); int nid; spin_lock(&early_pfn_lock); nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); if (nid < 0) nid = first_online_node; spin_unlock(&early_pfn_lock); return nid; } #endif #ifdef CONFIG_NODES_SPAN_OTHER_NODES static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node, struct mminit_pfnnid_cache *state) { int nid; nid = __early_pfn_to_nid(pfn, state); if (nid >= 0 && nid != node) return false; return true; } /* Only safe to use early in boot when initialisation is single-threaded */ static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node) { return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache); } #else static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node) { return true; } static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node, struct mminit_pfnnid_cache *state) { return true; } #endif void __init __free_pages_bootmem(struct page *page, unsigned long pfn, unsigned int order) { if (early_page_uninitialised(pfn)) return; return __free_pages_boot_core(page, pfn, order); } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT static void __init deferred_free_range(struct page *page, unsigned long pfn, int nr_pages) { int i; if 
(!page) return; /* Free a large naturally-aligned chunk if possible */ if (nr_pages == MAX_ORDER_NR_PAGES && (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) { set_pageblock_migratetype(page, MIGRATE_MOVABLE); __free_pages_boot_core(page, pfn, MAX_ORDER-1); return; } for (i = 0; i < nr_pages; i++, page++, pfn++) __free_pages_boot_core(page, pfn, 0); } /* Completion tracking for deferred_init_memmap() threads */ static atomic_t pgdat_init_n_undone __initdata; static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); static inline void __init pgdat_init_report_one_done(void) { if (atomic_dec_and_test(&pgdat_init_n_undone)) complete(&pgdat_init_all_done_comp); } /* Initialise remaining memory on a node */ static int __init deferred_init_memmap(void *data) { pg_data_t *pgdat = data; int nid = pgdat->node_id; struct mminit_pfnnid_cache nid_init_state = { }; unsigned long start = jiffies; unsigned long nr_pages = 0; unsigned long walk_start, walk_end; int i, zid; struct zone *zone; unsigned long first_init_pfn = pgdat->first_deferred_pfn; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); if (first_init_pfn == ULONG_MAX) { pgdat_init_report_one_done(); return 0; } /* Bind memory initialisation thread to a local node if possible */ if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(current, cpumask); /* Sanity check boundaries */ BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); pgdat->first_deferred_pfn = ULONG_MAX; /* Only the highest zone is deferred so find it */ for (zid = 0; zid < MAX_NR_ZONES; zid++) { zone = pgdat->node_zones + zid; if (first_init_pfn < zone_end_pfn(zone)) break; } for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) { unsigned long pfn, end_pfn; struct page *page = NULL; struct page *free_base_page = NULL; unsigned long free_base_pfn = 0; int nr_to_free = 0; end_pfn = min(walk_end, zone_end_pfn(zone)); pfn = first_init_pfn; if (pfn < walk_start) pfn = walk_start; if (pfn < zone->zone_start_pfn) pfn = zone->zone_start_pfn; for (; pfn < end_pfn; pfn++) { if (!pfn_valid_within(pfn)) goto free_range; /* * Ensure pfn_valid is checked every * MAX_ORDER_NR_PAGES for memory holes */ if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) { if (!pfn_valid(pfn)) { page = NULL; goto free_range; } } if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) { page = NULL; goto free_range; } /* Minimise pfn page lookups and scheduler checks */ if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) { page++; } else { nr_pages += nr_to_free; deferred_free_range(free_base_page, free_base_pfn, nr_to_free); free_base_page = NULL; free_base_pfn = nr_to_free = 0; page = pfn_to_page(pfn); cond_resched(); } if (page->flags) { VM_BUG_ON(page_zone(page) != zone); goto free_range; } __init_single_page(page, pfn, zid, nid); if (!free_base_page) { free_base_page = page; free_base_pfn = pfn; nr_to_free = 0; } nr_to_free++; /* Where possible, batch up pages for a single free */ continue; free_range: /* Free the current block of pages to allocator */ nr_pages += nr_to_free; deferred_free_range(free_base_page, free_base_pfn, nr_to_free); free_base_page = NULL; free_base_pfn = nr_to_free = 0; } first_init_pfn = max(end_pfn, first_init_pfn); } /* Sanity check that the next zone really is unpopulated */ WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, jiffies_to_msecs(jiffies - start)); pgdat_init_report_one_done(); return 0; } void __init page_alloc_init_late(void) { int 
nid; /* There will be num_node_state(N_MEMORY) threads */ atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); for_each_node_state(nid, N_MEMORY) { kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); } /* Block until all are initialised */ wait_for_completion(&pgdat_init_all_done_comp); /* Reinit limits that are based on free pages after the kernel is up */ files_maxfiles_init(); } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_CMA /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ void __init init_cma_reserved_pageblock(struct page *page) { unsigned i = pageblock_nr_pages; struct page *p = page; do { __ClearPageReserved(p); set_page_count(p, 0); } while (++p, --i); set_pageblock_migratetype(page, MIGRATE_CMA); if (pageblock_order >= MAX_ORDER) { i = pageblock_nr_pages; p = page; do { set_page_refcounted(p); __free_pages(p, MAX_ORDER - 1); p += MAX_ORDER_NR_PAGES; } while (i -= MAX_ORDER_NR_PAGES); } else { set_page_refcounted(page); __free_pages(page, pageblock_order); } adjust_managed_page_count(page, pageblock_nr_pages); } #endif /* * The order of subdivision here is critical for the IO subsystem. * Please do not alter this order without good reasons and regression * testing. Specifically, as large blocks of memory are subdivided, * the order in which smaller blocks are delivered depends on the order * they're subdivided in this function. This is the primary factor * influencing the order in which pages are delivered to the IO * subsystem according to empirical testing, and this is also justified * by considering the behavior of a buddy system containing a single * large block of memory acted on by a series of small allocations. * This behavior is a critical factor in sglist merging's success. * * -- nyc */ static inline void expand(struct zone *zone, struct page *page, int low, int high, struct free_area *area, int migratetype) { unsigned long size = 1 << high; while (high > low) { area--; high--; size >>= 1; VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && debug_guardpage_enabled() && high < debug_guardpage_minorder()) { /* * Mark as guard pages (or page), that will allow to * merge back to allocator when buddy will be freed. 
* Corresponding page table entries will not be touched, * pages will stay not present in virtual address space */ set_page_guard(zone, &page[size], high, migratetype); continue; } list_add(&page[size].lru, &area->free_list[migratetype]); area->nr_free++; set_page_order(&page[size], high); } } /* * This page is about to be returned from the page allocator */ static inline int check_new_page(struct page *page) { const char *bad_reason = NULL; unsigned long bad_flags = 0; if (unlikely(page_mapcount(page))) bad_reason = "nonzero mapcount"; if (unlikely(page->mapping != NULL)) bad_reason = "non-NULL mapping"; if (unlikely(atomic_read(&page->_count) != 0)) bad_reason = "nonzero _count"; if (unlikely(page->flags & __PG_HWPOISON)) { bad_reason = "HWPoisoned (hardware-corrupted)"; bad_flags = __PG_HWPOISON; } if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; bad_flags = PAGE_FLAGS_CHECK_AT_PREP; } #ifdef CONFIG_MEMCG if (unlikely(page->mem_cgroup)) bad_reason = "page still charged to cgroup"; #endif if (unlikely(bad_reason)) { bad_page(page, bad_reason, bad_flags); return 1; } return 0; } static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, int alloc_flags) { int i; for (i = 0; i < (1 << order); i++) { struct page *p = page + i; if (unlikely(check_new_page(p))) return 1; } set_page_private(page, 0); set_page_refcounted(page); arch_alloc_page(page, order); kernel_map_pages(page, 1 << order, 1); kasan_alloc_pages(page, order); if (gfp_flags & __GFP_ZERO) for (i = 0; i < (1 << order); i++) clear_highpage(page + i); if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); set_page_owner(page, order, gfp_flags); /* * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to * allocate the page. The expectation is that the caller is taking * steps that will free more memory. The caller should avoid the page * being used for !PFMEMALLOC purposes. 
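 *
 * (For illustration of the preparation above, not an additional
 * guarantee: an order-2 request with __GFP_ZERO | __GFP_COMP comes back
 * as four zeroed pages linked into one compound page headed by page[0].)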
*/ if (alloc_flags & ALLOC_NO_WATERMARKS) set_page_pfmemalloc(page); else clear_page_pfmemalloc(page); return 0; } /* * Go through the free lists for the given migratetype and remove * the smallest available page from the freelists */ static inline struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, int migratetype) { unsigned int current_order; struct free_area *area; struct page *page; /* Find a page of the appropriate size in the preferred list */ for (current_order = order; current_order < MAX_ORDER; ++current_order) { area = &(zone->free_area[current_order]); if (list_empty(&area->free_list[migratetype])) continue; page = list_entry(area->free_list[migratetype].next, struct page, lru); list_del(&page->lru); rmv_page_order(page); area->nr_free--; expand(zone, page, order, current_order, area, migratetype); set_pcppage_migratetype(page, migratetype); return page; } return NULL; } /* * This array describes the order in which lists are fallen back to when * the free lists for the desired migratetype are depleted */ static int fallbacks[MIGRATE_TYPES][4] = { [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, #ifdef CONFIG_CMA [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */ #endif #ifdef CONFIG_MEMORY_ISOLATION [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */ #endif }; #ifdef CONFIG_CMA static struct page *__rmqueue_cma_fallback(struct zone *zone, unsigned int order) { return __rmqueue_smallest(zone, order, MIGRATE_CMA); } #else static inline struct page *__rmqueue_cma_fallback(struct zone *zone, unsigned int order) { return NULL; } #endif /* * Move the free pages in a range to the free lists of the requested type. * Note that start_page and end_page are not aligned on a pageblock * boundary. If alignment is required, use move_freepages_block() */ int move_freepages(struct zone *zone, struct page *start_page, struct page *end_page, int migratetype) { struct page *page; unsigned int order; int pages_moved = 0; #ifndef CONFIG_HOLES_IN_ZONE /* * page_zone is not safe to call in this context when * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant * anyway as we check zone boundaries in move_freepages_block(). 
* Remove at a later date when no bug reports exist related to * grouping pages by mobility */ VM_BUG_ON(page_zone(start_page) != page_zone(end_page)); #endif for (page = start_page; page <= end_page;) { /* Make sure we are not inadvertently changing nodes */ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); if (!pfn_valid_within(page_to_pfn(page))) { page++; continue; } if (!PageBuddy(page)) { page++; continue; } order = page_order(page); list_move(&page->lru, &zone->free_area[order].free_list[migratetype]); page += 1 << order; pages_moved += 1 << order; } return pages_moved; } int move_freepages_block(struct zone *zone, struct page *page, int migratetype) { unsigned long start_pfn, end_pfn; struct page *start_page, *end_page; start_pfn = page_to_pfn(page); start_pfn = start_pfn & ~(pageblock_nr_pages-1); start_page = pfn_to_page(start_pfn); end_page = start_page + pageblock_nr_pages - 1; end_pfn = start_pfn + pageblock_nr_pages - 1; /* Do not cross zone boundaries */ if (!zone_spans_pfn(zone, start_pfn)) start_page = page; if (!zone_spans_pfn(zone, end_pfn)) return 0; return move_freepages(zone, start_page, end_page, migratetype); } static void change_pageblock_range(struct page *pageblock_page, int start_order, int migratetype) { int nr_pageblocks = 1 << (start_order - pageblock_order); while (nr_pageblocks--) { set_pageblock_migratetype(pageblock_page, migratetype); pageblock_page += pageblock_nr_pages; } } /* * When we are falling back to another migratetype during allocation, try to * steal extra free pages from the same pageblocks to satisfy further * allocations, instead of polluting multiple pageblocks. * * If we are stealing a relatively large buddy page, it is likely there will * be more free pages in the pageblock, so try to steal them all. For * reclaimable and unmovable allocations, we steal regardless of page size, * as fragmentation caused by those allocations polluting movable pageblocks * is worse than movable allocations stealing from unmovable and reclaimable * pageblocks. */ static bool can_steal_fallback(unsigned int order, int start_mt) { /* * This order check is intentional even though a more relaxed * order check follows below. The reason is that we can actually * steal a whole pageblock if this condition is met, but the check * below doesn't guarantee it and is just a heuristic, so it could * be changed at any time. */ if (order >= pageblock_order) return true; if (order >= pageblock_order / 2 || start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE || page_group_by_mobility_disabled) return true; return false; } /* * This function implements the actual steal behaviour. If the order is large * enough, we can steal a whole pageblock. If not, we first move the freepages * in this pageblock and check whether half of the pages are moved. If they * are, we can change the migratetype of the pageblock and permanently * use its pages as the requested migratetype in the future. 
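 *
 * Worked example (illustrative, assuming an x86-style pageblock_order of
 * 9): a pageblock spans 512 pages, so move_freepages_block() must report
 * at least 1 << (9 - 1) = 256 moved pages before the block's migratetype
 * is rewritten to the stealing type below.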
*/ static void steal_suitable_fallback(struct zone *zone, struct page *page, int start_type) { unsigned int current_order = page_order(page); int pages; /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) { change_pageblock_range(page, current_order, start_type); return; } pages = move_freepages_block(zone, page, start_type); /* Claim the whole block if over half of it is free */ if (pages >= (1 << (pageblock_order-1)) || page_group_by_mobility_disabled) set_pageblock_migratetype(page, start_type); } /* * Check whether there is a suitable fallback freepage with requested order. * If only_stealable is true, this function returns fallback_mt only if * we can steal other freepages all together. This would help to reduce * fragmentation due to mixed migratetype pages in one pageblock. */ int find_suitable_fallback(struct free_area *area, unsigned int order, int migratetype, bool only_stealable, bool *can_steal) { int i; int fallback_mt; if (area->nr_free == 0) return -1; *can_steal = false; for (i = 0;; i++) { fallback_mt = fallbacks[migratetype][i]; if (fallback_mt == MIGRATE_TYPES) break; if (list_empty(&area->free_list[fallback_mt])) continue; if (can_steal_fallback(order, migratetype)) *can_steal = true; if (!only_stealable) return fallback_mt; if (*can_steal) return fallback_mt; } return -1; } /* * Reserve a pageblock for exclusive use of high-order atomic allocations if * there are no empty page blocks that contain a page with a suitable order */ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, unsigned int alloc_order) { int mt; unsigned long max_managed, flags; /* * Limit the number reserved to 1 pageblock or roughly 1% of a zone. * Check is race-prone but harmless. */ max_managed = (zone->managed_pages / 100) + pageblock_nr_pages; if (zone->nr_reserved_highatomic >= max_managed) return; spin_lock_irqsave(&zone->lock, flags); /* Recheck the nr_reserved_highatomic limit under the lock */ if (zone->nr_reserved_highatomic >= max_managed) goto out_unlock; /* Yoink! */ mt = get_pageblock_migratetype(page); if (mt != MIGRATE_HIGHATOMIC && !is_migrate_isolate(mt) && !is_migrate_cma(mt)) { zone->nr_reserved_highatomic += pageblock_nr_pages; set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); move_freepages_block(zone, page, MIGRATE_HIGHATOMIC); } out_unlock: spin_unlock_irqrestore(&zone->lock, flags); } /* * Used when an allocation is about to fail under memory pressure. This * potentially hurts the reliability of high-order allocations when under * intense memory pressure but failed atomic allocations should be easier * to recover from than an OOM. */ static void unreserve_highatomic_pageblock(const struct alloc_context *ac) { struct zonelist *zonelist = ac->zonelist; unsigned long flags; struct zoneref *z; struct zone *zone; struct page *page; int order; for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, ac->nodemask) { /* Preserve at least one pageblock */ if (zone->nr_reserved_highatomic <= pageblock_nr_pages) continue; spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) { struct free_area *area = &(zone->free_area[order]); if (list_empty(&area->free_list[MIGRATE_HIGHATOMIC])) continue; page = list_entry(area->free_list[MIGRATE_HIGHATOMIC].next, struct page, lru); /* * It should never happen but changes to locking could * inadvertently allow a per-cpu drain to add pages * to MIGRATE_HIGHATOMIC while unreserving so be safe * and watch for underflows. 
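 * (Hence the min() just below: if the counter already dropped to, say,
 * 300 pages while pageblock_nr_pages is 512, only 300 is subtracted and
 * nr_reserved_highatomic bottoms out at zero instead of wrapping.)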
*/ zone->nr_reserved_highatomic -= min(pageblock_nr_pages, zone->nr_reserved_highatomic); /* * Convert to ac->migratetype and avoid the normal * pageblock stealing heuristics. Minimally, the caller * is doing the work and needs the pages. More * importantly, if the block was always converted to * MIGRATE_UNMOVABLE or another type then the number * of pageblocks that cannot be completely freed * may increase. */ set_pageblock_migratetype(page, ac->migratetype); move_freepages_block(zone, page, ac->migratetype); spin_unlock_irqrestore(&zone->lock, flags); return; } spin_unlock_irqrestore(&zone->lock, flags); } } /* Remove an element from the buddy allocator from the fallback list */ static inline struct page * __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) { struct free_area *area; unsigned int current_order; struct page *page; int fallback_mt; bool can_steal; /* Find the largest possible block of pages in the other list */ for (current_order = MAX_ORDER-1; current_order >= order && current_order <= MAX_ORDER-1; --current_order) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, start_migratetype, false, &can_steal); if (fallback_mt == -1) continue; page = list_entry(area->free_list[fallback_mt].next, struct page, lru); if (can_steal) steal_suitable_fallback(zone, page, start_migratetype); /* Remove the page from the freelists */ area->nr_free--; list_del(&page->lru); rmv_page_order(page); expand(zone, page, order, current_order, area, start_migratetype); /* * The pcppage_migratetype may differ from pageblock's * migratetype depending on the decisions in * find_suitable_fallback(). This is OK as long as it does not * differ for MIGRATE_CMA pageblocks. Those can be used as * fallback only via special __rmqueue_cma_fallback() function */ set_pcppage_migratetype(page, start_migratetype); trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); return page; } return NULL; } /* * Do the hard work of removing an element from the buddy allocator. * Call me with the zone->lock already held. */ static struct page *__rmqueue(struct zone *zone, unsigned int order, int migratetype, gfp_t gfp_flags) { struct page *page; page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page)) { if (migratetype == MIGRATE_MOVABLE) page = __rmqueue_cma_fallback(zone, order); if (!page) page = __rmqueue_fallback(zone, order, migratetype); } trace_mm_page_alloc_zone_locked(page, order, migratetype); return page; } /* * Obtain a specified number of elements from the buddy allocator, all under * a single hold of the lock, for efficiency. Add them to the supplied list. * Returns the number of new pages which were placed at *list. */ static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, bool cold) { int i; spin_lock(&zone->lock); for (i = 0; i < count; ++i) { struct page *page = __rmqueue(zone, order, migratetype, 0); if (unlikely(page == NULL)) break; /* * Split buddy pages returned by expand() are received here * in physical page order. The page is added to the caller's * list and the list head then moves forward. From the caller's * perspective, the linked list is ordered by page number in * some conditions. This is useful for IO devices that can * merge IO requests if the physical pages are ordered * properly. 
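 *
 * (Ordering sketch, illustrative: after three order-0 pulls the pcp list
 * is head -> page1 -> page2 -> page3, because each iteration advances the
 * "list" cursor to the page it just added and inserts the next page after
 * it.)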
*/ if (likely(!cold)) list_add(&page->lru, list); else list_add_tail(&page->lru, list); list = &page->lru; if (is_migrate_cma(get_pcppage_migratetype(page))) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order)); } __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); spin_unlock(&zone->lock); return i; } #ifdef CONFIG_NUMA /* * Called from the vmstat counter updater to drain pagesets of this * currently executing processor on remote nodes after they have * expired. * * Note that this function must be called with the thread pinned to * a single processor. */ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { unsigned long flags; int to_drain, batch; local_irq_save(flags); batch = READ_ONCE(pcp->batch); to_drain = min(pcp->count, batch); if (to_drain > 0) { free_pcppages_bulk(zone, to_drain, pcp); pcp->count -= to_drain; } local_irq_restore(flags); } #endif /* * Drain pcplists of the indicated processor and zone. * * The processor must either be the current processor and the * thread pinned to the current processor or a processor that * is not online. */ static void drain_pages_zone(unsigned int cpu, struct zone *zone) { unsigned long flags; struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; local_irq_save(flags); pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; if (pcp->count) { free_pcppages_bulk(zone, pcp->count, pcp); pcp->count = 0; } local_irq_restore(flags); } /* * Drain pcplists of all zones on the indicated processor. * * The processor must either be the current processor and the * thread pinned to the current processor or a processor that * is not online. */ static void drain_pages(unsigned int cpu) { struct zone *zone; for_each_populated_zone(zone) { drain_pages_zone(cpu, zone); } } /* * Spill all of this CPU's per-cpu pages back into the buddy allocator. * * The CPU has to be pinned. When zone parameter is non-NULL, spill just * the single zone's pages. */ void drain_local_pages(struct zone *zone) { int cpu = smp_processor_id(); if (zone) drain_pages_zone(cpu, zone); else drain_pages(cpu); } /* * Spill all the per-cpu pages from all CPUs back into the buddy allocator. * * When zone parameter is non-NULL, spill just the single zone's pages. * * Note that this code is protected against sending an IPI to an offline * CPU but does not guarantee sending an IPI to newly hotplugged CPUs: * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but * nothing keeps CPUs from showing up after we populated the cpumask and * before the call to on_each_cpu_mask(). 
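 *
 * (Typical call patterns, for orientation rather than an API contract:
 * drain_all_pages(zone) ahead of isolating or offlining pages of a single
 * zone, drain_all_pages(NULL) when every zone's pcplists must be flushed.)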
 */
void drain_all_pages(struct zone *zone)
{
	int cpu;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpu to drain that CPU's pcps and on_each_cpu_mask
	 * disables preemption as part of its processing
	 */
	for_each_online_cpu(cpu) {
		struct per_cpu_pageset *pcp;
		struct zone *z;
		bool has_pcps = false;

		if (zone) {
			pcp = per_cpu_ptr(zone->pageset, cpu);
			if (pcp->pcp.count)
				has_pcps = true;
		} else {
			for_each_populated_zone(z) {
				pcp = per_cpu_ptr(z->pageset, cpu);
				if (pcp->pcp.count) {
					has_pcps = true;
					break;
				}
			}
		}

		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}
	on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
								zone, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	unsigned int order, t;
	struct list_head *curr;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == true ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, bool cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	unsigned long pfn = page_to_pfn(page);
	int migratetype;

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	set_pcppage_migratetype(page, migratetype);
	local_irq_save(flags);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, pfn, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	if (!cold)
		list_add(&page->lru, &pcp->lists[migratetype]);
	else
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		unsigned long batch = READ_ONCE(pcp->batch);
		free_pcppages_bulk(zone, batch, pcp);
		pcp->count -= batch;
	}

out:
	local_irq_restore(flags);
}

/*
 * Free a list of 0-order pages
 */
void free_hot_cold_page_list(struct list_head *list, bool cold)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		trace_mm_page_free_batched(page, cold);
		free_hot_cold_page(page, cold);
	}
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
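 *
 * A minimal usage sketch (illustrative, not from the original source);
 * the order-2 page is non-compound because __GFP_COMP is not passed, and
 * each resulting sub-page must then be freed on its own:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}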
 */
void split_page(struct page *page, unsigned int order)
{
	int i;
	gfp_t gfp_mask;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	gfp_mask = get_page_owner_gfp(page);
	set_page_owner(page, 0, gfp_mask);
	for (i = 1; i < (1 << order); i++) {
		set_page_refcounted(page + i);
		set_page_owner(page + i, 0, gfp_mask);
	}
}
EXPORT_SYMBOL_GPL(split_page);

int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/* Obey watermarks as if the page was being allocated */
		watermark = low_wmark_pages(zone) + (1 << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);

	set_page_owner(page, order, __GFP_MOVABLE);

	/* Set the pageblock if the isolated page is at least a pageblock */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	int nr_pages;

	order = page_order(page);

	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);
	return nr_pages;
}

/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, int alloc_flags, int migratetype)
{
	unsigned long flags;
	struct page *page;
	bool cold = ((gfp_flags & __GFP_COLD) != 0);

	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);

		page = NULL;
		if (alloc_flags & ALLOC_HARDER) {
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
			if (page)
				trace_mm_page_alloc_zone_locked(page, order, migratetype);
		}
		if (!page)
			page = __rmqueue(zone, order, migratetype, gfp_flags);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
		__mod_zone_freepage_state(zone, -(1 << order),
					  get_pcppage_migratetype(page));
	}

	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone, gfp_flags);
	local_irq_restore(flags);

	VM_BUG_ON_PAGE(bad_range(zone, page), page);
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
	struct fault_attr attr;

	bool ignore_gfp_highmem;
	bool ignore_gfp_reclaim;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_reclaim = true,
	.ignore_gfp_highmem = true,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_reclaim &&
			(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
				&fail_page_alloc.ignore_gfp_reclaim))
		goto fail;
	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				&fail_page_alloc.ignore_gfp_highmem))
		goto fail;
	if (!debugfs_create_u32("min-order", mode, dir,
				&fail_page_alloc.min_order))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return false;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
static bool __zone_watermark_ok(struct zone *z, unsigned int order,
			unsigned long mark, int classzone_idx, int alloc_flags,
			long free_pages)
{
	long min = mark;
	int o;
	const int alloc_harder = (alloc_flags & ALLOC_HARDER);

	/* free_pages may go negative - that's OK */
	free_pages -= (1 << order) - 1;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;

	/*
	 * If the caller does not have rights to ALLOC_HARDER then subtract
	 * the high-atomic reserves. This will over-estimate the size of the
	 * atomic reserve but it avoids a search.
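	 *
	 * Worked example (hypothetical numbers, ignoring ALLOC_HIGH): with
	 * min = 128 and nr_reserved_highatomic = 64, a normal request checks
	 * free_pages - 64 against the mark, while an ALLOC_HARDER request
	 * keeps the reserved pages visible and instead lowers min to
	 * 128 - 128/4 = 96.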
*/ if (likely(!alloc_harder)) free_pages -= z->nr_reserved_highatomic; else min -= min / 4; #ifdef CONFIG_CMA /* If allocation can't use CMA areas don't use free CMA pages */ if (!(alloc_flags & ALLOC_CMA)) free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); #endif /* * Check watermarks for an order-0 allocation request. If these * are not met, then a high-order request also cannot go ahead * even if a suitable page happened to be free. */ if (free_pages <= min + z->lowmem_reserve[classzone_idx]) return false; /* If this is an order-0 request then the watermark is fine */ if (!order) return true; /* For a high-order request, check at least one suitable page is free */ for (o = order; o < MAX_ORDER; o++) { struct free_area *area = &z->free_area[o]; int mt; if (!area->nr_free) continue; if (alloc_harder) return true; for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { if (!list_empty(&area->free_list[mt])) return true; } #ifdef CONFIG_CMA if ((alloc_flags & ALLOC_CMA) && !list_empty(&area->free_list[MIGRATE_CMA])) { return true; } #endif } return false; } bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx, int alloc_flags) { return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, zone_page_state(z, NR_FREE_PAGES)); } bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx) { long free_pages = zone_page_state(z, NR_FREE_PAGES); if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); return __zone_watermark_ok(z, order, mark, classzone_idx, 0, free_pages); } #ifdef CONFIG_NUMA static bool zone_local(struct zone *local_zone, struct zone *zone) { return local_zone->node == zone->node; } static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) { return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < RECLAIM_DISTANCE; } #else /* CONFIG_NUMA */ static bool zone_local(struct zone *local_zone, struct zone *zone) { return true; } static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) { return true; } #endif /* CONFIG_NUMA */ static void reset_alloc_batches(struct zone *preferred_zone) { struct zone *zone = preferred_zone->zone_pgdat->node_zones; do { mod_zone_page_state(zone, NR_ALLOC_BATCH, high_wmark_pages(zone) - low_wmark_pages(zone) - atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); } while (zone++ != preferred_zone); } /* * get_page_from_freelist goes through the zonelist trying to allocate * a page. */ static struct page * get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac) { struct zonelist *zonelist = ac->zonelist; struct zoneref *z; struct page *page = NULL; struct zone *zone; int nr_fair_skipped = 0; bool zonelist_rescan; zonelist_scan: zonelist_rescan = false; /* * Scan zonelist, looking for a zone with enough free. * See also __cpuset_node_allowed() comment in kernel/cpuset.c. */ for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, ac->nodemask) { unsigned long mark; if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && !cpuset_zone_allowed(zone, gfp_mask)) continue; /* * Distribute pages in proportion to the individual * zone size to ensure fair page aging. The zone a * page was allocated in should have no effect on the * time the page has in memory before being reclaimed. 
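		 *
		 * Illustrative example (hypothetical sizes): with a 3GB
		 * Normal zone and a 1GB DMA32 zone on one node, the
		 * NR_ALLOC_BATCH budgets are proportional to zone size, so
		 * roughly three quarters of the interleaved allocations land
		 * in Normal before either batch is depleted.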
*/ if (alloc_flags & ALLOC_FAIR) { if (!zone_local(ac->preferred_zone, zone)) break; if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) { nr_fair_skipped++; continue; } } /* * When allocating a page cache page for writing, we * want to get it from a zone that is within its dirty * limit, such that no single zone holds more than its * proportional share of globally allowed dirty pages. * The dirty limits take into account the zone's * lowmem reserves and high watermark so that kswapd * should be able to balance it without having to * write pages from its LRU list. * * This may look like it could increase pressure on * lower zones by failing allocations in higher zones * before they are full. But the pages that do spill * over are limited as the lower zones are protected * by this very same mechanism. It should not become * a practical burden to them. * * XXX: For now, allow allocations to potentially * exceed the per-zone dirty limit in the slowpath * (spread_dirty_pages unset) before going into reclaim, * which is important when on a NUMA setup the allowed * zones are together not big enough to reach the * global limit. The proper fix for these situations * will require awareness of zones in the * dirty-throttling and the flusher threads. */ if (ac->spread_dirty_pages && !zone_dirty_ok(zone)) continue; mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; if (!zone_watermark_ok(zone, order, mark, ac->classzone_idx, alloc_flags)) { int ret; /* Checked here to keep the fast path fast */ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); if (alloc_flags & ALLOC_NO_WATERMARKS) goto try_this_zone; if (zone_reclaim_mode == 0 || !zone_allows_reclaim(ac->preferred_zone, zone)) continue; ret = zone_reclaim(zone, gfp_mask, order); switch (ret) { case ZONE_RECLAIM_NOSCAN: /* did not scan */ continue; case ZONE_RECLAIM_FULL: /* scanned but unreclaimable */ continue; default: /* did we reclaim enough */ if (zone_watermark_ok(zone, order, mark, ac->classzone_idx, alloc_flags)) goto try_this_zone; continue; } } try_this_zone: page = buffered_rmqueue(ac->preferred_zone, zone, order, gfp_mask, alloc_flags, ac->migratetype); if (page) { if (prep_new_page(page, order, gfp_mask, alloc_flags)) goto try_this_zone; /* * If this is a high-order atomic allocation then check * if the pageblock should be reserved for the future */ if (unlikely(order && (alloc_flags & ALLOC_HARDER))) reserve_highatomic_pageblock(page, zone, order); return page; } } /* * The first pass makes sure allocations are spread fairly within the * local node. However, the local node might have free pages left * after the fairness batches are exhausted, and remote zones haven't * even been considered yet. Try once more without fairness, and * include remote zones now, before entering the slowpath and waking * kswapd: prefer spilling to a remote zone over swapping locally. */ if (alloc_flags & ALLOC_FAIR) { alloc_flags &= ~ALLOC_FAIR; if (nr_fair_skipped) { zonelist_rescan = true; reset_alloc_batches(ac->preferred_zone); } if (nr_online_nodes > 1) zonelist_rescan = true; } if (zonelist_rescan) goto zonelist_scan; return NULL; } /* * Large machines with many possible nodes should not always dump per-node * meminfo in irq context. */ static inline bool should_suppress_show_mem(void) { bool ret = false; #if NODES_SHIFT > 8 ret = in_interrupt(); #endif return ret; } static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) 
{ unsigned int filter = SHOW_MEM_FILTER_NODES; if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || debug_guardpage_minorder() > 0) return; /* * This documents exceptions given to allocations in certain * contexts that are allowed to allocate outside current's set * of allowed nodes. */ if (!(gfp_mask & __GFP_NOMEMALLOC)) if (test_thread_flag(TIF_MEMDIE) || (current->flags & (PF_MEMALLOC | PF_EXITING))) filter &= ~SHOW_MEM_FILTER_NODES; if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) filter &= ~SHOW_MEM_FILTER_NODES; if (fmt) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_warn("%pV", &vaf); va_end(args); } pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n", current->comm, order, gfp_mask); dump_stack(); if (!should_suppress_show_mem()) show_mem(filter); } static inline struct page * __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress) { struct oom_control oc = { .zonelist = ac->zonelist, .nodemask = ac->nodemask, .gfp_mask = gfp_mask, .order = order, }; struct page *page; *did_some_progress = 0; /* * Acquire the oom lock. If that fails, somebody else is * making progress for us. */ if (!mutex_trylock(&oom_lock)) { *did_some_progress = 1; schedule_timeout_uninterruptible(1); return NULL; } /* * Go through the zonelist yet one more time, keep very high watermark * here, this is only to catch a parallel oom killing, we must fail if * we're still under heavy pressure. */ page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); if (page) goto out; if (!(gfp_mask & __GFP_NOFAIL)) { /* Coredumps can quickly deplete all memory reserves */ if (current->flags & PF_DUMPCORE) goto out; /* The OOM killer will not help higher order allocs */ if (order > PAGE_ALLOC_COSTLY_ORDER) goto out; /* The OOM killer does not needlessly kill tasks for lowmem */ if (ac->high_zoneidx < ZONE_NORMAL) goto out; /* The OOM killer does not compensate for IO-less reclaim */ if (!(gfp_mask & __GFP_FS)) { /* * XXX: Page reclaim didn't yield anything, * and the OOM killer can't be invoked, but * keep looping as per tradition. 
*/ *did_some_progress = 1; goto out; } if (pm_suspended_storage()) goto out; /* The OOM killer may not free memory on a specific node */ if (gfp_mask & __GFP_THISNODE) goto out; } /* Exhausted what can be done so it's blamo time */ if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) *did_some_progress = 1; out: mutex_unlock(&oom_lock); return page; } #ifdef CONFIG_COMPACTION /* Try memory compaction for high-order allocations before reclaim */ static struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac, enum migrate_mode mode, int *contended_compaction, bool *deferred_compaction) { unsigned long compact_result; struct page *page; if (!order) return NULL; current->flags |= PF_MEMALLOC; compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, mode, contended_compaction); current->flags &= ~PF_MEMALLOC; switch (compact_result) { case COMPACT_DEFERRED: *deferred_compaction = true; /* fall-through */ case COMPACT_SKIPPED: return NULL; default: break; } /* * At least in one zone compaction wasn't deferred or skipped, so let's * count a compaction stall */ count_vm_event(COMPACTSTALL); page = get_page_from_freelist(gfp_mask, order, alloc_flags & ~ALLOC_NO_WATERMARKS, ac); if (page) { struct zone *zone = page_zone(page); zone->compact_blockskip_flush = false; compaction_defer_reset(zone, order, true); count_vm_event(COMPACTSUCCESS); return page; } /* * It's bad if compaction run occurs and fails. The most likely reason * is that pages exist, but not enough to satisfy watermarks. */ count_vm_event(COMPACTFAIL); cond_resched(); return NULL; } #else static inline struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac, enum migrate_mode mode, int *contended_compaction, bool *deferred_compaction) { return NULL; } #endif /* CONFIG_COMPACTION */ /* Perform direct synchronous page reclaim */ static int __perform_reclaim(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac) { struct reclaim_state reclaim_state; int progress; cond_resched(); /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); current->flags |= PF_MEMALLOC; lockdep_set_current_reclaim_state(gfp_mask); reclaim_state.reclaimed_slab = 0; current->reclaim_state = &reclaim_state; progress = try_to_free_pages(ac->zonelist, order, gfp_mask, ac->nodemask); current->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); current->flags &= ~PF_MEMALLOC; cond_resched(); return progress; } /* The really slow allocator path where we enter direct reclaim */ static inline struct page * __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress) { struct page *page = NULL; bool drained = false; *did_some_progress = __perform_reclaim(gfp_mask, order, ac); if (unlikely(!(*did_some_progress))) return NULL; retry: page = get_page_from_freelist(gfp_mask, order, alloc_flags & ~ALLOC_NO_WATERMARKS, ac); /* * If an allocation failed after direct reclaim, it could be because * pages are pinned on the per-cpu lists or in high alloc reserves. 
	 * Shrink them and try again
	 */
	if (!page && !drained) {
		unreserve_highatomic_pageblock(ac);
		drain_all_pages(NULL);
		drained = true;
		goto retry;
	}

	return page;
}

/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
				const struct alloc_context *ac)
{
	struct page *page;

	do {
		page = get_page_from_freelist(gfp_mask, order,
						ALLOC_NO_WATERMARKS, ac);

		if (!page && gfp_mask & __GFP_NOFAIL)
			wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
									HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
}

static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
						ac->high_zoneidx, ac->nodemask)
		wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
}

static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

	if (gfp_mask & __GFP_ATOMIC) {
		/*
		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
		 * if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
		 * comment for __cpuset_node_allowed().
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (gfp_mask & __GFP_MEMALLOC)
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (!in_interrupt() &&
				((current->flags & PF_MEMALLOC) ||
				 unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}
#ifdef CONFIG_CMA
	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}

bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
}

static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
{
	return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
}

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
						struct alloc_context *ac)
{
	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
	struct page *page = NULL;
	int alloc_flags;
	unsigned long pages_reclaimed = 0;
	unsigned long did_some_progress;
	enum migrate_mode migration_mode = MIGRATE_ASYNC;
	bool deferred_compaction = false;
	int contended_compaction = COMPACT_CONTENDED_NONE;

	/*
	 * In the slowpath, we sanity check order to avoid ever trying to
	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
	 * be using allocators in order of preference for an area that is
	 * too large.
	 */
	if (order >= MAX_ORDER) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * We also sanity check to catch abuse of atomic reserves being used by
	 * callers that are not in atomic context.
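	 *
	 * For example (illustrative): a caller passing
	 * __GFP_ATOMIC | __GFP_DIRECT_RECLAIM from process context trips
	 * the warning below and has the bogus __GFP_ATOMIC stripped, so the
	 * request no longer dips into the atomic reserves.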
	 */
	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
		gfp_mask &= ~__GFP_ATOMIC;

	/*
	 * If this allocation cannot block and it is for a specific node, then
	 * fail early.  There's no need to wakeup kswapd or retry for a
	 * speculative node-specific allocation.
	 */
	if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim)
		goto nopage;

retry:
	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		wake_all_kswapds(order, ac);

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);

	/*
	 * Find the true preferred zone if the allocation is unconstrained by
	 * cpusets.
	 */
	if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
		struct zoneref *preferred_zoneref;
		preferred_zoneref = first_zones_zonelist(ac->zonelist,
				ac->high_zoneidx, NULL, &ac->preferred_zone);
		ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
	}

	/* This is the last chance, in general, before the goto nopage. */
	page = get_page_from_freelist(gfp_mask, order,
				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
	if (page)
		goto got_pg;

	/* Allocate without watermarks if the context allows */
	if (alloc_flags & ALLOC_NO_WATERMARKS) {
		/*
		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
		 * the allocation is high priority and this type of
		 * allocation is system rather than user oriented
		 */
		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
		page = __alloc_pages_high_priority(gfp_mask, order, ac);
		if (page) {
			goto got_pg;
		}
	}

	/* Caller is not willing to reclaim, we can't balance anything */
	if (!can_direct_reclaim) {
		/*
		 * All existing users of the deprecated __GFP_NOFAIL are
		 * blockable, so warn of any new users that actually allow this
		 * type of allocation to fail.
		 */
		WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
		goto nopage;
	}

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC)
		goto nopage;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
		goto nopage;

	/*
	 * Try direct compaction. The first pass is asynchronous. Subsequent
	 * attempts after direct reclaim are synchronous
	 */
	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
					migration_mode,
					&contended_compaction,
					&deferred_compaction);
	if (page)
		goto got_pg;

	/* Checks for THP-specific high-order allocations */
	if (is_thp_gfp_mask(gfp_mask)) {
		/*
		 * If compaction is deferred for high-order allocations, it is
		 * because sync compaction recently failed. If this is the case
		 * and the caller requested a THP allocation, we do not want
		 * to heavily disrupt the system, so we fail the allocation
		 * instead of entering direct reclaim.
		 */
		if (deferred_compaction)
			goto nopage;

		/*
		 * In all zones where compaction was attempted (and not
		 * deferred or skipped), lock contention has been detected.
		 * For THP allocation we do not want to disrupt the others
		 * so we fallback to base pages instead.
		 */
		if (contended_compaction == COMPACT_CONTENDED_LOCK)
			goto nopage;

		/*
		 * If compaction was aborted due to need_resched(), we do not
		 * want to further increase allocation latency, unless it is
		 * khugepaged trying to collapse.
*/ if (contended_compaction == COMPACT_CONTENDED_SCHED && !(current->flags & PF_KTHREAD)) goto nopage; } /* * It can become very expensive to allocate transparent hugepages at * fault, so use asynchronous memory compaction for THP unless it is * khugepaged trying to collapse. */ if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD)) migration_mode = MIGRATE_SYNC_LIGHT; /* Try direct reclaim and then allocating */ page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, &did_some_progress); if (page) goto got_pg; /* Do not loop if specifically requested */ if (gfp_mask & __GFP_NORETRY) goto noretry; /* Keep reclaiming pages as long as there is reasonable progress */ pages_reclaimed += did_some_progress; if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) || ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) { /* Wait for some write requests to complete then retry */ wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50); goto retry; } /* Reclaim has failed us, start killing things */ page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); if (page) goto got_pg; /* Retry as long as the OOM killer is making progress */ if (did_some_progress) goto retry; noretry: /* * High-order allocations do not necessarily loop after * direct reclaim and reclaim/compaction depends on compaction * being called after reclaim so call directly if necessary */ page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, migration_mode, &contended_compaction, &deferred_compaction); if (page) goto got_pg; nopage: warn_alloc_failed(gfp_mask, order, NULL); got_pg: return page; } /* * This is the 'heart' of the zoned buddy allocator. */ struct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, nodemask_t *nodemask) { struct zoneref *preferred_zoneref; struct page *page = NULL; unsigned int cpuset_mems_cookie; int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */ struct alloc_context ac = { .high_zoneidx = gfp_zone(gfp_mask), .nodemask = nodemask, .migratetype = gfpflags_to_migratetype(gfp_mask), }; gfp_mask &= gfp_allowed_mask; lockdep_trace_alloc(gfp_mask); might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); if (should_fail_alloc_page(gfp_mask, order)) return NULL; /* * Check the zones suitable for the gfp_mask contain at least one * valid zone. It's possible to have an empty zonelist as a result * of __GFP_THISNODE and a memoryless node */ if (unlikely(!zonelist->_zonerefs->zone)) return NULL; if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); /* We set it here, as __alloc_pages_slowpath might have changed it */ ac.zonelist = zonelist; /* Dirty zone balancing only done in the fast path */ ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); /* The preferred zone is used for statistics later */ preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx, ac.nodemask ? : &cpuset_current_mems_allowed, &ac.preferred_zone); if (!ac.preferred_zone) goto out; ac.classzone_idx = zonelist_zone_idx(preferred_zoneref); /* First allocation attempt */ alloc_mask = gfp_mask|__GFP_HARDWALL; page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); if (unlikely(!page)) { /* * Runtime PM, block IO and its error handling path * can deadlock because I/O on the device might not * complete. 
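		 *
		 * memalloc_noio_flags() clears __GFP_IO and __GFP_FS for
		 * tasks that have set PF_MEMALLOC_NOIO, e.g. (illustrative
		 * sketch, not from the original source):
		 *
		 *	unsigned int noio_flag = memalloc_noio_save();
		 *	... resume work that must not recurse into block I/O ...
		 *	memalloc_noio_restore(noio_flag);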
*/ alloc_mask = memalloc_noio_flags(gfp_mask); ac.spread_dirty_pages = false; page = __alloc_pages_slowpath(alloc_mask, order, &ac); } if (kmemcheck_enabled && page) kmemcheck_pagealloc_alloc(page, order, gfp_mask); trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); out: /* * When updating a task's mems_allowed, it is possible to race with * parallel threads in such a way that an allocation can fail while * the mask is being updated. If a page allocation is about to fail, * check if the cpuset changed during allocation and if so, retry. */ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); /* * Common helper functions. */ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { struct page *page; /* * __get_free_pages() returns a 32-bit address, which cannot represent * a highmem page */ VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); page = alloc_pages(gfp_mask, order); if (!page) return 0; return (unsigned long) page_address(page); } EXPORT_SYMBOL(__get_free_pages); unsigned long get_zeroed_page(gfp_t gfp_mask) { return __get_free_pages(gfp_mask | __GFP_ZERO, 0); } EXPORT_SYMBOL(get_zeroed_page); void __free_pages(struct page *page, unsigned int order) { if (put_page_testzero(page)) { if (order == 0) free_hot_cold_page(page, false); else __free_pages_ok(page, order); } } EXPORT_SYMBOL(__free_pages); void free_pages(unsigned long addr, unsigned int order) { if (addr != 0) { VM_BUG_ON(!virt_addr_valid((void *)addr)); __free_pages(virt_to_page((void *)addr), order); } } EXPORT_SYMBOL(free_pages); /* * Page Fragment: * An arbitrary-length arbitrary-offset area of memory which resides * within a 0 or higher order page. Multiple fragments within that page * are individually refcounted, in the page's reference counter. * * The page_frag functions below provide a simple allocation framework for * page fragments. This is used by the network stack and network device * drivers to provide a backing region of memory for use as either an * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. */ static struct page *__page_frag_refill(struct page_frag_cache *nc, gfp_t gfp_mask) { struct page *page = NULL; gfp_t gfp = gfp_mask; #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC; page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, PAGE_FRAG_CACHE_MAX_ORDER); nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; #endif if (unlikely(!page)) page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); nc->va = page ? page_address(page) : NULL; return page; } void *__alloc_page_frag(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask) { unsigned int size = PAGE_SIZE; struct page *page; int offset; if (unlikely(!nc->va)) { refill: page = __page_frag_refill(nc, gfp_mask); if (!page) return NULL; #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) /* if size can vary use size else just use PAGE_SIZE */ size = nc->size; #endif /* Even if we own the page, we do not use atomic_set(). * This would break get_page_unless_zero() users. 
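		 *
		 * Worked example (hypothetical numbers): for a 32KB cache
		 * page, _count is biased up front from 1 to 32768 to match
		 * the initial pagecnt_bias, so each fragment handed out owns
		 * one reference without a further atomic operation per
		 * allocation.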
		 */
		atomic_add(size - 1, &page->_count);

		/* reset page count bias and offset to start of new frag */
		nc->pfmemalloc = page_is_pfmemalloc(page);
		nc->pagecnt_bias = size;
		nc->offset = size;
	}

	offset = nc->offset - fragsz;
	if (unlikely(offset < 0)) {
		page = virt_to_page(nc->va);

		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
			goto refill;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* OK, page count is 0, we can safely set it */
		atomic_set(&page->_count, size);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->offset = offset;

	return nc->va + offset;
}
EXPORT_SYMBOL(__alloc_page_frag);

/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void __free_page_frag(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	if (unlikely(put_page_testzero(page)))
		__free_pages_ok(page, compound_order(page));
}
EXPORT_SYMBOL(__free_page_frag);

/*
 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
 * of the current memory cgroup.
 *
 * It should be used when the caller would like to use kmalloc, but since the
 * allocation is large, it has to fall back to the page allocator.
 */
struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
		__free_pages(page, order);
		page = NULL;
	}
	return page;
}

struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(nid, gfp_mask, order);
	if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
		__free_pages(page, order);
		page = NULL;
	}
	return page;
}

/*
 * __free_kmem_pages and free_kmem_pages will free pages allocated with
 * alloc_kmem_pages.
 */
void __free_kmem_pages(struct page *page, unsigned int order)
{
	memcg_kmem_uncharge(page, order);
	__free_pages(page, order);
}

void free_kmem_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_kmem_pages(virt_to_page((void *)addr), order);
	}
}

static void *make_alloc_exact(unsigned long addr, unsigned int order,
		size_t size)
{
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
 * back.
 */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages_node(nid, gfp_mask, order);
	if (!p)
		return NULL;
	return make_alloc_exact((unsigned long)page_address(p), order, size);
}

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);

/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index.  For each
 * zone, the number of pages is calculated as:
 *     managed_pages - high_pages
 */
static unsigned long nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned long sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->managed_pages;
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the high
 * watermark within ZONE_DMA and ZONE_NORMAL.
 */
unsigned long nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

/**
 * nr_free_pagecache_pages - count number of pages beyond high watermark
 *
 * nr_free_pagecache_pages() counts the number of pages which are beyond the
 * high watermark within all zones.
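 *
 * Illustrative arithmetic (hypothetical numbers): per nr_free_zone_pages(),
 * a zone with managed_pages = 262144 and a high watermark of 4096 pages
 * contributes 262144 - 4096 = 258048 pages to this total.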
*/ unsigned long nr_free_pagecache_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); } static inline void show_node(struct zone *zone) { if (IS_ENABLED(CONFIG_NUMA)) printk("Node %d ", zone_to_nid(zone)); } void si_meminfo(struct sysinfo *val) { val->totalram = totalram_pages; val->sharedram = global_page_state(NR_SHMEM); val->freeram = global_page_state(NR_FREE_PAGES); val->bufferram = nr_blockdev_pages(); val->totalhigh = totalhigh_pages; val->freehigh = nr_free_highpages(); val->mem_unit = PAGE_SIZE; } EXPORT_SYMBOL(si_meminfo); #ifdef CONFIG_NUMA void si_meminfo_node(struct sysinfo *val, int nid) { int zone_type; /* needs to be signed */ unsigned long managed_pages = 0; pg_data_t *pgdat = NODE_DATA(nid); for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) managed_pages += pgdat->node_zones[zone_type].managed_pages; val->totalram = managed_pages; val->sharedram = node_page_state(nid, NR_SHMEM); val->freeram = node_page_state(nid, NR_FREE_PAGES); #ifdef CONFIG_HIGHMEM val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages; val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], NR_FREE_PAGES); #else val->totalhigh = 0; val->freehigh = 0; #endif val->mem_unit = PAGE_SIZE; } #endif /* * Determine whether the node should be displayed or not, depending on whether * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). */ bool skip_free_areas_node(unsigned int flags, int nid) { bool ret = false; unsigned int cpuset_mems_cookie; if (!(flags & SHOW_MEM_FILTER_NODES)) goto out; do { cpuset_mems_cookie = read_mems_allowed_begin(); ret = !node_isset(nid, cpuset_current_mems_allowed); } while (read_mems_allowed_retry(cpuset_mems_cookie)); out: return ret; } #define K(x) ((x) << (PAGE_SHIFT-10)) static void show_migration_types(unsigned char type) { static const char types[MIGRATE_TYPES] = { [MIGRATE_UNMOVABLE] = 'U', [MIGRATE_MOVABLE] = 'M', [MIGRATE_RECLAIMABLE] = 'E', [MIGRATE_HIGHATOMIC] = 'H', #ifdef CONFIG_CMA [MIGRATE_CMA] = 'C', #endif #ifdef CONFIG_MEMORY_ISOLATION [MIGRATE_ISOLATE] = 'I', #endif }; char tmp[MIGRATE_TYPES + 1]; char *p = tmp; int i; for (i = 0; i < MIGRATE_TYPES; i++) { if (type & (1 << i)) *p++ = types[i]; } *p = '\0'; printk("(%s) ", tmp); } /* * Show free area list (used inside shift_scroll-lock stuff) * We also calculate the percentage fragmentation. We do this by counting the * memory on each free list with the exception of the first item on the list. * * Bits in @filter: * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's * cpuset. 
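 *
 * A minimal usage sketch (illustrative, not from the original source):
 *
 *	show_free_areas(0);			- dump all nodes
 *	show_free_areas(SHOW_MEM_FILTER_NODES);	- honour the caller's cpuset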
*/ void show_free_areas(unsigned int filter) { unsigned long free_pcp = 0; int cpu; struct zone *zone; for_each_populated_zone(zone) { if (skip_free_areas_node(filter, zone_to_nid(zone))) continue; for_each_online_cpu(cpu) free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; } printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" " active_file:%lu inactive_file:%lu isolated_file:%lu\n" " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" " slab_reclaimable:%lu slab_unreclaimable:%lu\n" " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" " free:%lu free_pcp:%lu free_cma:%lu\n", global_page_state(NR_ACTIVE_ANON), global_page_state(NR_INACTIVE_ANON), global_page_state(NR_ISOLATED_ANON), global_page_state(NR_ACTIVE_FILE), global_page_state(NR_INACTIVE_FILE), global_page_state(NR_ISOLATED_FILE), global_page_state(NR_UNEVICTABLE), global_page_state(NR_FILE_DIRTY), global_page_state(NR_WRITEBACK), global_page_state(NR_UNSTABLE_NFS), global_page_state(NR_SLAB_RECLAIMABLE), global_page_state(NR_SLAB_UNRECLAIMABLE), global_page_state(NR_FILE_MAPPED), global_page_state(NR_SHMEM), global_page_state(NR_PAGETABLE), global_page_state(NR_BOUNCE), global_page_state(NR_FREE_PAGES), free_pcp, global_page_state(NR_FREE_CMA_PAGES)); for_each_populated_zone(zone) { int i; if (skip_free_areas_node(filter, zone_to_nid(zone))) continue; free_pcp = 0; for_each_online_cpu(cpu) free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; show_node(zone); printk("%s" " free:%lukB" " min:%lukB" " low:%lukB" " high:%lukB" " active_anon:%lukB" " inactive_anon:%lukB" " active_file:%lukB" " inactive_file:%lukB" " unevictable:%lukB" " isolated(anon):%lukB" " isolated(file):%lukB" " present:%lukB" " managed:%lukB" " mlocked:%lukB" " dirty:%lukB" " writeback:%lukB" " mapped:%lukB" " shmem:%lukB" " slab_reclaimable:%lukB" " slab_unreclaimable:%lukB" " kernel_stack:%lukB" " pagetables:%lukB" " unstable:%lukB" " bounce:%lukB" " free_pcp:%lukB" " local_pcp:%ukB" " free_cma:%lukB" " writeback_tmp:%lukB" " pages_scanned:%lu" " all_unreclaimable? %s" "\n", zone->name, K(zone_page_state(zone, NR_FREE_PAGES)), K(min_wmark_pages(zone)), K(low_wmark_pages(zone)), K(high_wmark_pages(zone)), K(zone_page_state(zone, NR_ACTIVE_ANON)), K(zone_page_state(zone, NR_INACTIVE_ANON)), K(zone_page_state(zone, NR_ACTIVE_FILE)), K(zone_page_state(zone, NR_INACTIVE_FILE)), K(zone_page_state(zone, NR_UNEVICTABLE)), K(zone_page_state(zone, NR_ISOLATED_ANON)), K(zone_page_state(zone, NR_ISOLATED_FILE)), K(zone->present_pages), K(zone->managed_pages), K(zone_page_state(zone, NR_MLOCK)), K(zone_page_state(zone, NR_FILE_DIRTY)), K(zone_page_state(zone, NR_WRITEBACK)), K(zone_page_state(zone, NR_FILE_MAPPED)), K(zone_page_state(zone, NR_SHMEM)), K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), zone_page_state(zone, NR_KERNEL_STACK) * THREAD_SIZE / 1024, K(zone_page_state(zone, NR_PAGETABLE)), K(zone_page_state(zone, NR_UNSTABLE_NFS)), K(zone_page_state(zone, NR_BOUNCE)), K(free_pcp), K(this_cpu_read(zone->pageset->pcp.count)), K(zone_page_state(zone, NR_FREE_CMA_PAGES)), K(zone_page_state(zone, NR_WRITEBACK_TEMP)), K(zone_page_state(zone, NR_PAGES_SCANNED)), (!zone_reclaimable(zone) ? 
"yes" : "no") ); printk("lowmem_reserve[]:"); for (i = 0; i < MAX_NR_ZONES; i++) printk(" %ld", zone->lowmem_reserve[i]); printk("\n"); } for_each_populated_zone(zone) { unsigned int order; unsigned long nr[MAX_ORDER], flags, total = 0; unsigned char types[MAX_ORDER]; if (skip_free_areas_node(filter, zone_to_nid(zone))) continue; show_node(zone); printk("%s: ", zone->name); spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) { struct free_area *area = &zone->free_area[order]; int type; nr[order] = area->nr_free; total += nr[order] << order; types[order] = 0; for (type = 0; type < MIGRATE_TYPES; type++) { if (!list_empty(&area->free_list[type])) types[order] |= 1 << type; } } spin_unlock_irqrestore(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) { printk("%lu*%lukB ", nr[order], K(1UL) << order); if (nr[order]) show_migration_types(types[order]); } printk("= %lukB\n", K(total)); } hugetlb_show_meminfo(); printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); show_swap_cache_info(); } static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) { zoneref->zone = zone; zoneref->zone_idx = zone_idx(zone); } /* * Builds allocation fallback zone lists. * * Add all populated zones of a node to the zonelist. */ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int nr_zones) { struct zone *zone; enum zone_type zone_type = MAX_NR_ZONES; do { zone_type--; zone = pgdat->node_zones + zone_type; if (populated_zone(zone)) { zoneref_set_zone(zone, &zonelist->_zonerefs[nr_zones++]); check_highest_zone(zone_type); } } while (zone_type); return nr_zones; } /* * zonelist_order: * 0 = automatic detection of better ordering. * 1 = order by ([node] distance, -zonetype) * 2 = order by (-zonetype, [node] distance) * * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create * the same zonelist. So only NUMA can configure this param. */ #define ZONELIST_ORDER_DEFAULT 0 #define ZONELIST_ORDER_NODE 1 #define ZONELIST_ORDER_ZONE 2 /* zonelist order in the kernel. * set_zonelist_order() will set this to NODE or ZONE. */ static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; #ifdef CONFIG_NUMA /* The value user specified ....changed by config */ static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; /* string for sysctl */ #define NUMA_ZONELIST_ORDER_LEN 16 char numa_zonelist_order[16] = "default"; /* * interface for configure zonelist ordering. * command line option "numa_zonelist_order" * = "[dD]efault - default, automatic configuration. 
* = "[nN]ode - order by node locality, then by zone within node * = "[zZ]one - order by zone, then by locality within zone */ static int __parse_numa_zonelist_order(char *s) { if (*s == 'd' || *s == 'D') { user_zonelist_order = ZONELIST_ORDER_DEFAULT; } else if (*s == 'n' || *s == 'N') { user_zonelist_order = ZONELIST_ORDER_NODE; } else if (*s == 'z' || *s == 'Z') { user_zonelist_order = ZONELIST_ORDER_ZONE; } else { printk(KERN_WARNING "Ignoring invalid numa_zonelist_order value: " "%s\n", s); return -EINVAL; } return 0; } static __init int setup_numa_zonelist_order(char *s) { int ret; if (!s) return 0; ret = __parse_numa_zonelist_order(s); if (ret == 0) strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); return ret; } early_param("numa_zonelist_order", setup_numa_zonelist_order); /* * sysctl handler for numa_zonelist_order */ int numa_zonelist_order_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { char saved_string[NUMA_ZONELIST_ORDER_LEN]; int ret; static DEFINE_MUTEX(zl_order_mutex); mutex_lock(&zl_order_mutex); if (write) { if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) { ret = -EINVAL; goto out; } strcpy(saved_string, (char *)table->data); } ret = proc_dostring(table, write, buffer, length, ppos); if (ret) goto out; if (write) { int oldval = user_zonelist_order; ret = __parse_numa_zonelist_order((char *)table->data); if (ret) { /* * bogus value. restore saved string */ strncpy((char *)table->data, saved_string, NUMA_ZONELIST_ORDER_LEN); user_zonelist_order = oldval; } else if (oldval != user_zonelist_order) { mutex_lock(&zonelists_mutex); build_all_zonelists(NULL, NULL); mutex_unlock(&zonelists_mutex); } } out: mutex_unlock(&zl_order_mutex); return ret; } #define MAX_NODE_LOAD (nr_online_nodes) static int node_load[MAX_NUMNODES]; /** * find_next_best_node - find the next node that should appear in a given node's fallback list * @node: node whose fallback list we're appending * @used_node_mask: nodemask_t of already used nodes * * We use a number of factors to determine which is the next node that should * appear on a given node's fallback list. The node should not have appeared * already in @node's fallback list, and it should be the next closest node * according to the distance array (which contains arbitrary distance values * from each node to each node in the system), and should also prefer nodes * with no CPUs, since presumably they'll have very little allocation pressure * on them otherwise. * It returns -1 if no node is found. 
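 *
 * Illustrative example (hypothetical distances): appending to node 0's list
 * with node_distance() = 10 to itself and 20 to nodes 1 and 2, node 0 is
 * picked first; of the two equidistant remaining nodes, a headless node 2
 * beats node 1 because nodes with CPUs carry PENALTY_FOR_NODE_WITH_CPUS.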
 */
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = NUMA_NO_NODE;
	const struct cpumask *tmp = cpumask_of_node(0);

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = cpumask_of_node(n);
		if (!cpumask_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}

/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
		;
	j = build_zonelists_node(NODE_DATA(node), zonelist, j);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	int j;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[1];
	j = build_zonelists_node(pgdat, zonelist, 0);
	zonelist->_zonerefs[j].zone = NULL;
	zonelist->_zonerefs[j].zone_idx = 0;
}

/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */
static int node_order[MAX_NUMNODES];

static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
	int pos, j, node;
	int zone_type;		/* needs to be signed */
	struct zone *z;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	pos = 0;
	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
		for (j = 0; j < nr_nodes; j++) {
			node = node_order[j];
			z = &NODE_DATA(node)->node_zones[zone_type];
			if (populated_zone(z)) {
				zoneref_set_zone(z,
					&zonelist->_zonerefs[pos++]);
				check_highest_zone(zone_type);
			}
		}
	}
	zonelist->_zonerefs[pos].zone = NULL;
	zonelist->_zonerefs[pos].zone_idx = 0;
}

#if defined(CONFIG_64BIT)
/*
 * Devices that require DMA32/DMA are relatively rare and do not justify a
 * penalty to every machine in case the specialised case applies. Default
 * to Node-ordering on 64-bit NUMA machines
 */
static int default_zonelist_order(void)
{
	return ZONELIST_ORDER_NODE;
}
#else
/*
 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
 * by the kernel. If processes running on node 0 deplete the low memory zone
 * then reclaim will occur more frequently, increasing stalls and potentially
 * making the system easier to OOM if a large percentage of the zone is under
 * writeback or dirty. The problem is significantly worse if CONFIG_HIGHPTE is
 * not set. Hence, default to zone ordering on 32-bit.
*/ static int default_zonelist_order(void) { return ZONELIST_ORDER_ZONE; } #endif /* CONFIG_64BIT */ static void set_zonelist_order(void) { if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) current_zonelist_order = default_zonelist_order(); else current_zonelist_order = user_zonelist_order; } static void build_zonelists(pg_data_t *pgdat) { int j, node, load; enum zone_type i; nodemask_t used_mask; int local_node, prev_node; struct zonelist *zonelist; unsigned int order = current_zonelist_order; /* initialize zonelists */ for (i = 0; i < MAX_ZONELISTS; i++) { zonelist = pgdat->node_zonelists + i; zonelist->_zonerefs[0].zone = NULL; zonelist->_zonerefs[0].zone_idx = 0; } /* NUMA-aware ordering of nodes */ local_node = pgdat->node_id; load = nr_online_nodes; prev_node = local_node; nodes_clear(used_mask); memset(node_order, 0, sizeof(node_order)); j = 0; while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { /* * We don't want to pressure a particular node. * So adding penalty to the first node in same * distance group to make it round-robin. */ if (node_distance(local_node, node) != node_distance(local_node, prev_node)) node_load[node] = load; prev_node = node; load--; if (order == ZONELIST_ORDER_NODE) build_zonelists_in_node_order(pgdat, node); else node_order[j++] = node; /* remember order */ } if (order == ZONELIST_ORDER_ZONE) { /* calculate node order -- i.e., DMA last! */ build_zonelists_in_zone_order(pgdat, j); } build_thisnode_zonelists(pgdat); } #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * Return node id of node used for "local" allocations. * I.e., first node id of first zone in arg node's generic zonelist. * Used for initializing percpu 'numa_mem', which is used primarily * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. */ int local_memory_node(int node) { struct zone *zone; (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL), gfp_zone(GFP_KERNEL), NULL, &zone); return zone->node; } #endif #else /* CONFIG_NUMA */ static void set_zonelist_order(void) { current_zonelist_order = ZONELIST_ORDER_ZONE; } static void build_zonelists(pg_data_t *pgdat) { int node, local_node; enum zone_type j; struct zonelist *zonelist; local_node = pgdat->node_id; zonelist = &pgdat->node_zonelists[0]; j = build_zonelists_node(pgdat, zonelist, 0); /* * Now we build the zonelist so that it contains the zones * of all the other nodes. * We don't want to pressure a particular node, so when * building the zones for node N, we make sure that the * zones coming right after the local ones are those from * node N+1 (modulo N) */ for (node = local_node + 1; node < MAX_NUMNODES; node++) { if (!node_online(node)) continue; j = build_zonelists_node(NODE_DATA(node), zonelist, j); } for (node = 0; node < local_node; node++) { if (!node_online(node)) continue; j = build_zonelists_node(NODE_DATA(node), zonelist, j); } zonelist->_zonerefs[j].zone = NULL; zonelist->_zonerefs[j].zone_idx = 0; } #endif /* CONFIG_NUMA */ /* * Boot pageset table. One per cpu which is going to be used for all * zones and all nodes. The parameters will be set in such a way * that an item put on a list will immediately be handed over to * the buddy list. This is safe since pageset manipulation is done * with interrupts disabled. * * The boot_pagesets must be kept even after bootup is complete for * unused processors and/or zones. They do play a role for bootstrapping * hotplugged processors. 
* * zoneinfo_show() and maybe other functions do * not check if the processor is online before following the pageset pointer. * Other parts of the kernel may not check if the zone is available. */ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); static void setup_zone_pageset(struct zone *zone); /* * Global mutex to protect against size modification of zonelists * as well as to serialize pageset setup for the new populated zone. */ DEFINE_MUTEX(zonelists_mutex); /* return values int ....just for stop_machine() */ static int __build_all_zonelists(void *data) { int nid; int cpu; pg_data_t *self = data; #ifdef CONFIG_NUMA memset(node_load, 0, sizeof(node_load)); #endif if (self && !node_online(self->node_id)) { build_zonelists(self); } for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); build_zonelists(pgdat); } /* * Initialize the boot_pagesets that are going to be used * for bootstrapping processors. The real pagesets for * each zone will be allocated later when the per cpu * allocator is available. * * boot_pagesets are used also for bootstrapping offline * cpus if the system is already booted because the pagesets * are needed to initialize allocators on a specific cpu too. * F.e. the percpu allocator needs the page allocator which * needs the percpu allocator in order to allocate its pagesets * (a chicken-egg dilemma). */ for_each_possible_cpu(cpu) { setup_pageset(&per_cpu(boot_pageset, cpu), 0); #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * We now know the "local memory node" for each node-- * i.e., the node of the first zone in the generic zonelist. * Set up numa_mem percpu variable for on-line cpus. During * boot, only the boot cpu should be on-line; we'll init the * secondary cpus' numa_mem as they come on-line. During * node/memory hotplug, we'll fixup all on-line cpus. */ if (cpu_online(cpu)) set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); #endif } return 0; } static noinline void __init build_all_zonelists_init(void) { __build_all_zonelists(NULL); mminit_verify_zonelist(); cpuset_init_current_mems_allowed(); } /* * Called with zonelists_mutex held always * unless system_state == SYSTEM_BOOTING. * * __ref due to (1) call of __meminit annotated setup_zone_pageset * [we're only called with non-NULL zone through __meminit paths] and * (2) call of __init annotated helper build_all_zonelists_init * [protected by SYSTEM_BOOTING]. */ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) { set_zonelist_order(); if (system_state == SYSTEM_BOOTING) { build_all_zonelists_init(); } else { #ifdef CONFIG_MEMORY_HOTPLUG if (zone) setup_zone_pageset(zone); #endif /* we have to stop all cpus to guarantee there is no user of zonelist */ stop_machine(__build_all_zonelists, pgdat, NULL); /* cpuset refresh routine should be here */ } vm_total_pages = nr_free_pagecache_pages(); /* * Disable grouping by mobility if the number of pages in the * system is too low to allow the mechanism to work. It would be * more accurate, but expensive to check per-zone. This check is * made on memory-hotadd so a system can start with mobility * disabled and enable it later */ if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) page_group_by_mobility_disabled = 1; else page_group_by_mobility_disabled = 0; pr_info("Built %i zonelists in %s order, mobility grouping %s. " "Total pages: %ld\n", nr_online_nodes, zonelist_order_name[current_zonelist_order], page_group_by_mobility_disabled ? 
"off" : "on", vm_total_pages); #ifdef CONFIG_NUMA pr_info("Policy zone: %s\n", zone_names[policy_zone]); #endif } /* * Helper functions to size the waitqueue hash table. * Essentially these want to choose hash table sizes sufficiently * large so that collisions trying to wait on pages are rare. * But in fact, the number of active page waitqueues on typical * systems is ridiculously low, less than 200. So this is even * conservative, even though it seems large. * * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to * waitqueues, i.e. the size of the waitq table given the number of pages. */ #define PAGES_PER_WAITQUEUE 256 #ifndef CONFIG_MEMORY_HOTPLUG static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) { unsigned long size = 1; pages /= PAGES_PER_WAITQUEUE; while (size < pages) size <<= 1; /* * Once we have dozens or even hundreds of threads sleeping * on IO we've got bigger problems than wait queue collision. * Limit the size of the wait table to a reasonable size. */ size = min(size, 4096UL); return max(size, 4UL); } #else /* * A zone's size might be changed by hot-add, so it is not possible to determine * a suitable size for its wait_table. So we use the maximum size now. * * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: * * i386 (preemption config) : 4096 x 16 = 64Kbyte. * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. * * The maximum entries are prepared when a zone's memory is (512K + 256) pages * or more by the traditional way. (See above). It equals: * * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. * ia64(16K page size) : = ( 8G + 4M)byte. * powerpc (64K page size) : = (32G +16M)byte. */ static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) { return 4096UL; } #endif /* * This is an integer logarithm so that shifts can be used later * to extract the more random high bits from the multiplicative * hash function before the remainder is taken. */ static inline unsigned long wait_table_bits(unsigned long size) { return ffz(~size); } /* * Initially all pages are reserved - free ones are freed * up by free_all_bootmem() once the early boot process is * done. Non-atomic initialization, single-pass. */ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, unsigned long start_pfn, enum memmap_context context) { pg_data_t *pgdat = NODE_DATA(nid); unsigned long end_pfn = start_pfn + size; unsigned long pfn; struct zone *z; unsigned long nr_initialised = 0; if (highest_memmap_pfn < end_pfn - 1) highest_memmap_pfn = end_pfn - 1; z = &pgdat->node_zones[zone]; for (pfn = start_pfn; pfn < end_pfn; pfn++) { /* * There can be holes in boot-time mem_map[]s * handed to this function. They do not * exist on hotplugged memory. */ if (context == MEMMAP_EARLY) { if (!early_pfn_valid(pfn)) continue; if (!early_pfn_in_nid(pfn, nid)) continue; if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised)) break; } /* * Mark the block movable so that blocks are reserved for * movable at startup. This will force kernel allocations * to reserve their blocks rather than leaking throughout * the address space during boot when many long-lived * kernel allocations are made. * * bitmap is created for zone's valid pfn range. but memmap * can be created for invalid pages (for alignment) * check here not to call set_pageblock_migratetype() against * pfn out of zone. 
		 */
		if (!(pfn & (pageblock_nr_pages - 1))) {
			struct page *page = pfn_to_page(pfn);

			__init_single_page(page, pfn, zone, nid);
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		} else {
			__init_single_pfn(pfn, zone, nid);
		}
	}
}

static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif

static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone. But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is. So guess.
	 */
	batch = zone->managed_pages / 1024;
	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks. As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}

/*
 * pcp->high and pcp->batch values are related and dependent on one another:
 * ->batch must never be higher than ->high.
 * The following function updates them in a safe manner without read side
 * locking.
 *
 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 * those fields changing asynchronously (according to the above rule).
 *
 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
 * outside of boot time (or some other assurance that no concurrent updaters
 * exist).
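 *
 * As a worked example (assuming 4KiB pages): for a 1GiB zone,
 * zone_batchsize() above computes 262144/1024 = 256, caps it at
 * 512KiB/4KiB = 128, quarters it to 32, and rounds down to 2^n - 1,
 * giving ->batch = 31; pageset_set_batch() below then sets
 * ->high = 6 * 31 = 186 pages.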
*/ static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, unsigned long batch) { /* start with a fail safe value for batch */ pcp->batch = 1; smp_wmb(); /* Update high, then batch, in order */ pcp->high = high; smp_wmb(); pcp->batch = batch; } /* a companion to pageset_set_high() */ static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) { pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); } static void pageset_init(struct per_cpu_pageset *p) { struct per_cpu_pages *pcp; int migratetype; memset(p, 0, sizeof(*p)); pcp = &p->pcp; pcp->count = 0; for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) INIT_LIST_HEAD(&pcp->lists[migratetype]); } static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) { pageset_init(p); pageset_set_batch(p, batch); } /* * pageset_set_high() sets the high water mark for hot per_cpu_pagelist * to the value high for the pageset p. */ static void pageset_set_high(struct per_cpu_pageset *p, unsigned long high) { unsigned long batch = max(1UL, high / 4); if ((high / 4) > (PAGE_SHIFT * 8)) batch = PAGE_SHIFT * 8; pageset_update(&p->pcp, high, batch); } static void pageset_set_high_and_batch(struct zone *zone, struct per_cpu_pageset *pcp) { if (percpu_pagelist_fraction) pageset_set_high(pcp, (zone->managed_pages / percpu_pagelist_fraction)); else pageset_set_batch(pcp, zone_batchsize(zone)); } static void __meminit zone_pageset_init(struct zone *zone, int cpu) { struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); pageset_init(pcp); pageset_set_high_and_batch(zone, pcp); } static void __meminit setup_zone_pageset(struct zone *zone) { int cpu; zone->pageset = alloc_percpu(struct per_cpu_pageset); for_each_possible_cpu(cpu) zone_pageset_init(zone, cpu); } /* * Allocate per cpu pagesets and initialize them. * Before this call only boot pagesets were available. */ void __init setup_per_cpu_pageset(void) { struct zone *zone; for_each_populated_zone(zone) setup_zone_pageset(zone); } static noinline __init_refok int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) { int i; size_t alloc_size; /* * The per-page waitqueue mechanism uses hashed waitqueues * per zone. */ zone->wait_table_hash_nr_entries = wait_table_hash_nr_entries(zone_size_pages); zone->wait_table_bits = wait_table_bits(zone->wait_table_hash_nr_entries); alloc_size = zone->wait_table_hash_nr_entries * sizeof(wait_queue_head_t); if (!slab_is_available()) { zone->wait_table = (wait_queue_head_t *) memblock_virt_alloc_node_nopanic( alloc_size, zone->zone_pgdat->node_id); } else { /* * This case means that a zone whose size was 0 gets new memory * via memory hot-add. * But it may be the case that a new node was hot-added. In * this case vmalloc() will not be able to use this new node's * memory - this wait_table must be initialized to use this new * node itself as well. * To use this new node's memory, further consideration will be * necessary. */ zone->wait_table = vmalloc(alloc_size); } if (!zone->wait_table) return -ENOMEM; for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) init_waitqueue_head(zone->wait_table + i); return 0; } static __meminit void zone_pcp_init(struct zone *zone) { /* * per cpu subsystem is not up at this point. The following code * relies on the ability of the linker to provide the * offset of a (static) per cpu variable into the per cpu area. 
	 */
	zone->pageset = &boot_pageset;

	if (populated_zone(zone))
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
			zone->name, zone->present_pages,
					 zone_batchsize(zone));
}

int __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret;
	ret = zone_wait_table_init(zone, size);
	if (ret)
		return ret;
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);

	return 0;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != -1) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

/**
 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
 *
 * If an architecture guarantees that all ranges registered contain no holes
 * and may be freed, this function may be used instead of calling
 * memblock_free_early_nid() manually.
 */
void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
		start_pfn = min(start_pfn, max_low_pfn);
		end_pfn = min(end_pfn, max_low_pfn);

		if (start_pfn < end_pfn)
			memblock_free_early_nid(PFN_PHYS(start_pfn),
					(end_pfn - start_pfn) << PAGE_SHIFT,
					this_nid);
	}
}

/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered contain no holes and may
 * be freed, this function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
		memory_present(this_nid, start_pfn, end_pfn);
}

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
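 *
 * For example (illustrative values): a node whose memblock ranges cover
 * pfns [0x1000, 0x2000) and [0x8000, 0x9000) yields *start_pfn = 0x1000
 * and *end_pfn = 0x9000 - the hole in between is included in the span.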
 */
void __meminit get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}

/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonically
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independent of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
				*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *ignored)
{
	unsigned long zone_start_pfn, zone_end_pfn;

	/* When hot-adding a new node from cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	/* Get the start and end of the zone */
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				&zone_start_pfn, &zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
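 *
 * For example (illustrative values): for the range [0, 1000) on a node
 * whose memblock ranges cover pfns [0, 200) and [600, 1000), nr_absent
 * starts at 1000 and each range subtracts its clamped size, leaving
 * 1000 - 200 - 400 = 400 pages of holes.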
 */
unsigned long __meminit __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *ignored)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	unsigned long zone_start_pfn, zone_end_pfn;

	/* When hot-adding a new node from cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}

#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zones_size)
{
	return zones_size[zone_type];
}

static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn,
						unsigned long *zones_size,
						unsigned long *zholes_size)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long size, real_size;

		size = zone_spanned_pages_in_node(pgdat->node_id, i,
						  node_start_pfn,
						  node_end_pfn,
						  zones_size);
		real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
						  node_start_pfn, node_end_pfn,
						  zholes_size);
		zone->spanned_pages = size;
		zone->present_pages = real_size;

		totalpages += size;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long.
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, and finally
 * round what is now in bits up to the nearest long in bits; then return it
 * in bytes.
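 *
 * As a worked example (assuming 4KiB pages, pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): a pageblock-aligned 1GiB zone has 262144
 * pages, i.e. 512 pageblocks, needing 512 * 4 = 2048 bits; 2048 is
 * already a multiple of 64, so this returns 2048 / 8 = 256 bytes.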
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}

static void __init setup_usemap(struct pglist_data *pgdat,
				struct zone *zone,
				unsigned long zone_start_pfn,
				unsigned long zonesize)
{
	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
	zone->pageblock_flags = NULL;
	if (usemapsize)
		zone->pageblock_flags =
			memblock_virt_alloc_node_nopanic(usemapsize,
							 pgdat->node_id);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
				unsigned long zone_start_pfn, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __paginginit set_pageblock_order(void)
{
	unsigned int order;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	if (HPAGE_SHIFT > PAGE_SHIFT)
		order = HUGETLB_PAGE_ORDER;
	else
		order = MAX_ORDER - 1;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64 and
	 * powerpc.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __paginginit set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
						   unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment, because the memmap pages for each
	 * populated region may not be naturally aligned on a page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 */
static void __paginginit free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	pgdat_resize_init(pgdat);
#ifdef CONFIG_NUMA_BALANCING
	spin_lock_init(&pgdat->numabalancing_migrate_lock);
	pgdat->numabalancing_migrate_nr_pages = 0;
	pgdat->numabalancing_migrate_next_window = jiffies;
#endif
	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);
	pgdat_page_ext_init(pgdat);

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, freesize, memmap_pages;

		size = zone->spanned_pages;
		realsize = freesize = zone->present_pages;

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap.
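		 * (As a rough illustration, assuming 4KiB pages and a
		 * 64-byte struct page: a 1GiB zone of 262144 pages needs
		 * 262144 * 64 bytes = 16MiB of memmap, i.e. 4096 pages.)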
This affects the watermark * and per-cpu initialisations */ memmap_pages = calc_memmap_size(size, realsize); if (!is_highmem_idx(j)) { if (freesize >= memmap_pages) { freesize -= memmap_pages; if (memmap_pages) printk(KERN_DEBUG " %s zone: %lu pages used for memmap\n", zone_names[j], memmap_pages); } else printk(KERN_WARNING " %s zone: %lu pages exceeds freesize %lu\n", zone_names[j], memmap_pages, freesize); } /* Account for reserved pages */ if (j == 0 && freesize > dma_reserve) { freesize -= dma_reserve; printk(KERN_DEBUG " %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); } if (!is_highmem_idx(j)) nr_kernel_pages += freesize; /* Charge for highmem memmap if there are enough kernel pages */ else if (nr_kernel_pages > memmap_pages * 2) nr_kernel_pages -= memmap_pages; nr_all_pages += freesize; /* * Set an approximate value for lowmem here, it will be adjusted * when the bootmem allocator frees pages into the buddy system. * And all highmem pages will be managed by the buddy system. */ zone->managed_pages = is_highmem_idx(j) ? realsize : freesize; #ifdef CONFIG_NUMA zone->node = nid; zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) / 100; zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; #endif zone->name = zone_names[j]; spin_lock_init(&zone->lock); spin_lock_init(&zone->lru_lock); zone_seqlock_init(zone); zone->zone_pgdat = pgdat; zone_pcp_init(zone); /* For bootup, initialized properly in watermark setup */ mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); lruvec_init(&zone->lruvec); if (!size) continue; set_pageblock_order(); setup_usemap(pgdat, zone, zone_start_pfn, size); ret = init_currently_empty_zone(zone, zone_start_pfn, size); BUG_ON(ret); memmap_init(size, nid, j, zone_start_pfn); zone_start_pfn += size; } } static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) { unsigned long __maybe_unused start = 0; unsigned long __maybe_unused offset = 0; /* Skip empty nodes */ if (!pgdat->node_spanned_pages) return; #ifdef CONFIG_FLAT_NODE_MEM_MAP start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); offset = pgdat->node_start_pfn - start; /* ia64 gets its own node_mem_map, before this, without bootmem */ if (!pgdat->node_mem_map) { unsigned long size, end; struct page *map; /* * The zone's endpoints aren't required to be MAX_ORDER * aligned but the node_mem_map endpoints must be in order * for the buddy allocator to function correctly. 
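		 *
		 * For example (illustrative values, with
		 * MAX_ORDER_NR_PAGES == 1024): a node spanning pfns
		 * [1000, 5000) gets start = 0 and offset = 1000, and end
		 * is aligned up to 5120, so the map covers pfns [0, 5120)
		 * and node_mem_map points at entry 1000 of that map.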
*/ end = pgdat_end_pfn(pgdat); end = ALIGN(end, MAX_ORDER_NR_PAGES); size = (end - start) * sizeof(struct page); map = alloc_remap(pgdat->node_id, size); if (!map) map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id); pgdat->node_mem_map = map + offset; } #ifndef CONFIG_NEED_MULTIPLE_NODES /* * With no DISCONTIG, the global mem_map is just set as node 0's */ if (pgdat == NODE_DATA(0)) { mem_map = NODE_DATA(0)->node_mem_map; #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) if (page_to_pfn(mem_map) != pgdat->node_start_pfn) mem_map -= offset; #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ } #endif #endif /* CONFIG_FLAT_NODE_MEM_MAP */ } void __paginginit free_area_init_node(int nid, unsigned long *zones_size, unsigned long node_start_pfn, unsigned long *zholes_size) { pg_data_t *pgdat = NODE_DATA(nid); unsigned long start_pfn = 0; unsigned long end_pfn = 0; /* pg_data_t should be reset to zero when it's allocated */ WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); reset_deferred_meminit(pgdat); pgdat->node_id = nid; pgdat->node_start_pfn = node_start_pfn; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, (u64)start_pfn << PAGE_SHIFT, end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); #endif calculate_node_totalpages(pgdat, start_pfn, end_pfn, zones_size, zholes_size); alloc_node_mem_map(pgdat); #ifdef CONFIG_FLAT_NODE_MEM_MAP printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", nid, (unsigned long)pgdat, (unsigned long)pgdat->node_mem_map); #endif free_area_init_core(pgdat); } #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP #if MAX_NUMNODES > 1 /* * Figure out the number of possible node ids. */ void __init setup_nr_node_ids(void) { unsigned int highest; highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); nr_node_ids = highest + 1; } #endif /** * node_map_pfn_alignment - determine the maximum internode alignment * * This function should be called after node map is populated and sorted. * It calculates the maximum power of two alignment which can distinguish * all the nodes. * * For example, if all nodes are 1GiB and aligned to 1GiB, the return value * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is * shifted, 1GiB is enough and this function will indicate so. * * This is used to test whether pfn -> nid mapping of the chosen memory * model has fine enough granularity to avoid incorrect mapping for the * populated node map. * * Returns the determined alignment in pfn's. 0 if there is no alignment * requirement (single node). */ unsigned long __init node_map_pfn_alignment(void) { unsigned long accl_mask = 0, last_end = 0; unsigned long start, end, mask; int last_nid = -1; int i, nid; for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { if (!start || last_nid < 0 || last_nid == nid) { last_nid = nid; last_end = end; continue; } /* * Start with a mask granular enough to pin-point to the * start pfn and tick off bits one-by-one until it becomes * too coarse to separate the current node from the last. 
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}

/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
	unsigned long min_pfn = ULONG_MAX;
	unsigned long start_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
		min_pfn = min(min_pfn, start_pfn);

	if (min_pfn == ULONG_MAX) {
		printk(KERN_WARNING
			"Could not find start_pfn for node %d\n", nid);
		return 0;
	}

	return min_pfn;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * memblock_set_node().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}

/*
 * Find the PFN at which the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_memblock(memory, r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = r->nid;

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If movablecore=nn[KMG] was specified, calculate the corresponding
	 * size of kernelcore so that memory usable for any allocation type
	 * is evenly spread. If both kernelcore and movablecore are
	 * specified, then the value of kernelcore will be used for
	 * required_kernelcore if it's greater than what movablecore would
	 * have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
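	 *
	 * As an illustration (hypothetical layout): with two 8GiB nodes
	 * and kernelcore=4G, the loop below aims at kernelcore_node =
	 * 2GiB per node, so roughly the last 6GiB of each node ends up
	 * in ZONE_MOVABLE.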
*/ if (!required_kernelcore || required_kernelcore >= totalpages) goto out; /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; restart: /* Spread kernelcore memory as evenly as possible throughout nodes */ kernelcore_node = required_kernelcore / usable_nodes; for_each_node_state(nid, N_MEMORY) { unsigned long start_pfn, end_pfn; /* * Recalculate kernelcore_node if the division per node * now exceeds what is necessary to satisfy the requested * amount of memory for the kernel */ if (required_kernelcore < kernelcore_node) kernelcore_node = required_kernelcore / usable_nodes; /* * As the map is walked, we track how much memory is usable * by the kernel using kernelcore_remaining. When it is * 0, the rest of the node is usable by ZONE_MOVABLE */ kernelcore_remaining = kernelcore_node; /* Go through each range of PFNs within this node */ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { unsigned long size_pages; start_pfn = max(start_pfn, zone_movable_pfn[nid]); if (start_pfn >= end_pfn) continue; /* Account for what is only usable for kernelcore */ if (start_pfn < usable_startpfn) { unsigned long kernel_pages; kernel_pages = min(end_pfn, usable_startpfn) - start_pfn; kernelcore_remaining -= min(kernel_pages, kernelcore_remaining); required_kernelcore -= min(kernel_pages, required_kernelcore); /* Continue if range is now fully accounted */ if (end_pfn <= usable_startpfn) { /* * Push zone_movable_pfn to the end so * that if we have to rebalance * kernelcore across nodes, we will * not double account here */ zone_movable_pfn[nid] = end_pfn; continue; } start_pfn = usable_startpfn; } /* * The usable PFN range for ZONE_MOVABLE is from * start_pfn->end_pfn. Calculate size_pages as the * number of pages used as kernelcore */ size_pages = end_pfn - start_pfn; if (size_pages > kernelcore_remaining) size_pages = kernelcore_remaining; zone_movable_pfn[nid] = start_pfn + size_pages; /* * Some kernelcore has been met, update counts and * break if the kernelcore for this node has been * satisfied */ required_kernelcore -= min(required_kernelcore, size_pages); kernelcore_remaining -= size_pages; if (!kernelcore_remaining) break; } } /* * If there is still required_kernelcore, we do another pass with one * less node in the count. This will push zone_movable_pfn[nid] further * along on the nodes that still have memory until kernelcore is * satisfied */ usable_nodes--; if (usable_nodes && required_kernelcore > usable_nodes) goto restart; out2: /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ for (nid = 0; nid < MAX_NUMNODES; nid++) zone_movable_pfn[nid] = roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); out: /* restore the node_state */ node_states[N_MEMORY] = saved_node_state; } /* Any regular or high memory on that node ? */ static void check_for_memory(pg_data_t *pgdat, int nid) { enum zone_type zone_type; if (N_MEMORY == N_NORMAL_MEMORY) return; for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { struct zone *zone = &pgdat->node_zones[zone_type]; if (populated_zone(zone)) { node_set_state(nid, N_HIGH_MEMORY); if (N_NORMAL_MEMORY != N_HIGH_MEMORY && zone_type <= ZONE_NORMAL) node_set_state(nid, N_NORMAL_MEMORY); break; } } } /** * free_area_init_nodes - Initialise all pg_data_t and zone data * @max_zone_pfn: an array of max PFNs for each zone * * This will call free_area_init_node() for each active node in the system. 
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes are calculated. If the maximum PFN
 * between two adjacent zones matches, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));
	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
	for (i = 1; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		arch_zone_lowest_possible_pfn[i] =
			arch_zone_highest_possible_pfn[i-1];
		arch_zone_highest_possible_pfn[i] =
			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
	}
	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	pr_info("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		pr_info("  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			pr_cont("empty\n");
		else
			pr_cont("[mem %#018Lx-%#018Lx]\n",
				(u64)arch_zone_lowest_possible_pfn[i]
					<< PAGE_SHIFT,
				((u64)arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	pr_info("Movable zone start for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			pr_info("  Node %d: %#018Lx\n", i,
			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
	}

	/* Print out the early node map */
	pr_info("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			((u64)end_pfn << PAGE_SHIFT) - 1);

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid, NULL,
				find_min_pfn_for_node(nid), NULL);

		/* Any memory on that node */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_MEMORY);
		check_for_memory(pgdat, nid);
	}
}

static int __init cmdline_parse_core(char *p, unsigned long *core)
{
	unsigned long long coremem;
	if (!p)
		return -EINVAL;
	coremem = memparse(p, &p);
	*core = coremem >> PAGE_SHIFT;

	/* Paranoid check that UL is enough for the coremem value */
	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

	return 0;
}

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	return cmdline_parse_core(p, &required_kernelcore);
}

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
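 *
 * For example (illustrative): on an 8GiB machine, booting with
 * movablecore=2G makes find_zone_movable_pfns_for_nodes() derive
 * required_kernelcore as roughly 8GiB - 2GiB = 6GiB, so about 2GiB of
 * memory ends up in ZONE_MOVABLE.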
*/ static int __init cmdline_parse_movablecore(char *p) { return cmdline_parse_core(p, &required_movablecore); } early_param("kernelcore", cmdline_parse_kernelcore); early_param("movablecore", cmdline_parse_movablecore); #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ void adjust_managed_page_count(struct page *page, long count) { spin_lock(&managed_page_count_lock); page_zone(page)->managed_pages += count; totalram_pages += count; #ifdef CONFIG_HIGHMEM if (PageHighMem(page)) totalhigh_pages += count; #endif spin_unlock(&managed_page_count_lock); } EXPORT_SYMBOL(adjust_managed_page_count); unsigned long free_reserved_area(void *start, void *end, int poison, char *s) { void *pos; unsigned long pages = 0; start = (void *)PAGE_ALIGN((unsigned long)start); end = (void *)((unsigned long)end & PAGE_MASK); for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { if ((unsigned int)poison <= 0xFF) memset(pos, poison, PAGE_SIZE); free_reserved_page(virt_to_page(pos)); } if (pages && s) pr_info("Freeing %s memory: %ldK (%p - %p)\n", s, pages << (PAGE_SHIFT - 10), start, end); return pages; } EXPORT_SYMBOL(free_reserved_area); #ifdef CONFIG_HIGHMEM void free_highmem_page(struct page *page) { __free_reserved_page(page); totalram_pages++; page_zone(page)->managed_pages++; totalhigh_pages++; } #endif void __init mem_init_print_info(const char *str) { unsigned long physpages, codesize, datasize, rosize, bss_size; unsigned long init_code_size, init_data_size; physpages = get_num_physpages(); codesize = _etext - _stext; datasize = _edata - _sdata; rosize = __end_rodata - __start_rodata; bss_size = __bss_stop - __bss_start; init_data_size = __init_end - __init_begin; init_code_size = _einittext - _sinittext; /* * Detect special cases and adjust section sizes accordingly: * 1) .init.* may be embedded into .data sections * 2) .init.text.* may be out of [__init_begin, __init_end], * please refer to arch/tile/kernel/vmlinux.lds.S. * 3) .rodata.* may be embedded into .text or .data sections. */ #define adj_init_size(start, end, size, pos, adj) \ do { \ if (start <= pos && pos < end && size > adj) \ size -= adj; \ } while (0) adj_init_size(__init_begin, __init_end, init_data_size, _sinittext, init_code_size); adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); #undef adj_init_size pr_info("Memory: %luK/%luK available " "(%luK kernel code, %luK rwdata, %luK rodata, " "%luK init, %luK bss, %luK reserved, %luK cma-reserved" #ifdef CONFIG_HIGHMEM ", %luK highmem" #endif "%s%s)\n", nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10), codesize >> 10, datasize >> 10, rosize >> 10, (init_data_size + init_code_size) >> 10, bss_size >> 10, (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10), totalcma_pages << (PAGE_SHIFT-10), #ifdef CONFIG_HIGHMEM totalhigh_pages << (PAGE_SHIFT-10), #endif str ? ", " : "", str ? str : ""); } /** * set_dma_reserve - set the specified number of pages reserved in the first zone * @new_dma_reserve: The number of pages to mark reserved * * The per-cpu batchsize and zone watermarks are determined by managed_pages. * In the DMA zone, a significant percentage may be consumed by kernel image * and other unfreeable allocations which can skew the watermarks badly. 
This * function may optionally be used to account for unfreeable pages in the * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and * smaller per-cpu batchsize. */ void __init set_dma_reserve(unsigned long new_dma_reserve) { dma_reserve = new_dma_reserve; } void __init free_area_init(unsigned long *zones_size) { free_area_init_node(0, zones_size, __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); } static int page_alloc_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { int cpu = (unsigned long)hcpu; if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { lru_add_drain_cpu(cpu); drain_pages(cpu); /* * Spill the event counters of the dead processor * into the current processors event counters. * This artificially elevates the count of the current * processor. */ vm_events_fold_cpu(cpu); /* * Zero the differential counters of the dead processor * so that the vm statistics are consistent. * * This is only okay since the processor is dead and cannot * race with what we are doing. */ cpu_vm_stats_fold(cpu); } return NOTIFY_OK; } void __init page_alloc_init(void) { hotcpu_notifier(page_alloc_cpu_notify, 0); } /* * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio * or min_free_kbytes changes. */ static void calculate_totalreserve_pages(void) { struct pglist_data *pgdat; unsigned long reserve_pages = 0; enum zone_type i, j; for_each_online_pgdat(pgdat) { for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; long max = 0; /* Find valid and maximum lowmem_reserve in the zone */ for (j = i; j < MAX_NR_ZONES; j++) { if (zone->lowmem_reserve[j] > max) max = zone->lowmem_reserve[j]; } /* we treat the high watermark as reserved pages. */ max += high_wmark_pages(zone); if (max > zone->managed_pages) max = zone->managed_pages; reserve_pages += max; /* * Lowmem reserves are not available to * GFP_HIGHUSER page cache allocations and * kswapd tries to balance zones to their high * watermark. As a result, neither should be * regarded as dirtyable memory, to prevent a * situation where reclaim has to clean pages * in order to balance the zones. */ zone->dirty_balance_reserve = max; } } dirty_balance_reserve = reserve_pages; totalreserve_pages = reserve_pages; } /* * setup_per_zone_lowmem_reserve - called whenever * sysctl_lowmem_reserve_ratio changes. Ensures that each zone * has a correct pages reserved value, so an adequate number of * pages are left in the zone after a successful __alloc_pages(). 
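 *
 * As a worked example (illustrative values): with
 * sysctl_lowmem_reserve_ratio[DMA32] == 256 and a Normal zone of
 * 1048576 managed pages above it, the DMA32 zone gets
 * lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 = 4096 pages (16MiB with
 * 4KiB pages) held back from allocations that could have used Normal.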
*/ static void setup_per_zone_lowmem_reserve(void) { struct pglist_data *pgdat; enum zone_type j, idx; for_each_online_pgdat(pgdat) { for (j = 0; j < MAX_NR_ZONES; j++) { struct zone *zone = pgdat->node_zones + j; unsigned long managed_pages = zone->managed_pages; zone->lowmem_reserve[j] = 0; idx = j; while (idx) { struct zone *lower_zone; idx--; if (sysctl_lowmem_reserve_ratio[idx] < 1) sysctl_lowmem_reserve_ratio[idx] = 1; lower_zone = pgdat->node_zones + idx; lower_zone->lowmem_reserve[j] = managed_pages / sysctl_lowmem_reserve_ratio[idx]; managed_pages += lower_zone->managed_pages; } } } /* update totalreserve_pages */ calculate_totalreserve_pages(); } static void __setup_per_zone_wmarks(void) { unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; /* Calculate total number of !ZONE_HIGHMEM pages */ for_each_zone(zone) { if (!is_highmem(zone)) lowmem_pages += zone->managed_pages; } for_each_zone(zone) { u64 tmp; spin_lock_irqsave(&zone->lock, flags); tmp = (u64)pages_min * zone->managed_pages; do_div(tmp, lowmem_pages); if (is_highmem(zone)) { /* * __GFP_HIGH and PF_MEMALLOC allocations usually don't * need highmem pages, so cap pages_min to a small * value here. * * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) * deltas control asynch page reclaim, and so should * not be capped for highmem. */ unsigned long min_pages; min_pages = zone->managed_pages / 1024; min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); zone->watermark[WMARK_MIN] = min_pages; } else { /* * If it's a lowmem zone, reserve a number of pages * proportionate to the zone's size. */ zone->watermark[WMARK_MIN] = tmp; } zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); __mod_zone_page_state(zone, NR_ALLOC_BATCH, high_wmark_pages(zone) - low_wmark_pages(zone) - atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); spin_unlock_irqrestore(&zone->lock, flags); } /* update totalreserve_pages */ calculate_totalreserve_pages(); } /** * setup_per_zone_wmarks - called when min_free_kbytes changes * or when memory is hot-{added|removed} * * Ensures that the watermark[min,low,high] values for each zone are set * correctly with respect to min_free_kbytes. */ void setup_per_zone_wmarks(void) { mutex_lock(&zonelists_mutex); __setup_per_zone_wmarks(); mutex_unlock(&zonelists_mutex); } /* * The inactive anon list should be small enough that the VM never has to * do too much work, but large enough that each inactive page has a chance * to be referenced again before it is swapped out. * * The inactive_anon ratio is the target ratio of ACTIVE_ANON to * INACTIVE_ANON pages on this zone's LRU, maintained by the * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of * the anonymous pages are kept on the inactive list. * * total target max * memory ratio inactive anon * ------------------------------------- * 10MB 1 5MB * 100MB 1 50MB * 1GB 3 250MB * 10GB 10 0.9GB * 100GB 31 3GB * 1TB 101 10GB * 10TB 320 32GB */ static void __meminit calculate_zone_inactive_ratio(struct zone *zone) { unsigned int gb, ratio; /* Zone size in gigabytes */ gb = zone->managed_pages >> (30 - PAGE_SHIFT); if (gb) ratio = int_sqrt(10 * gb); else ratio = 1; zone->inactive_ratio = ratio; } static void __meminit setup_per_zone_inactive_ratio(void) { struct zone *zone; for_each_zone(zone) calculate_zone_inactive_ratio(zone); } /* * Initialise min_free_kbytes. 
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
				new_min_free_kbytes, user_min_free_kbytes);
	}
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
core_initcall(init_per_zone_wmark_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->managed_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->managed_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation to the
 * minimum watermarks. The lowmem reserve ratio can only make sense
 * as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to buddy allocator.
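 *
 * As a worked example (assuming 4KiB pages, so PAGE_SHIFT == 12): with
 * percpu_pagelist_fraction = 8 on a zone of 262144 managed pages,
 * pageset_set_high() gets high = 32768; since 32768/4 exceeds
 * PAGE_SHIFT * 8 = 96, the batch is capped at 96 pages.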
*/ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { struct zone *zone; int old_percpu_pagelist_fraction; int ret; mutex_lock(&pcp_batch_high_lock); old_percpu_pagelist_fraction = percpu_pagelist_fraction; ret = proc_dointvec_minmax(table, write, buffer, length, ppos); if (!write || ret < 0) goto out; /* Sanity checking to avoid pcp imbalance */ if (percpu_pagelist_fraction && percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) { percpu_pagelist_fraction = old_percpu_pagelist_fraction; ret = -EINVAL; goto out; } /* No change? */ if (percpu_pagelist_fraction == old_percpu_pagelist_fraction) goto out; for_each_populated_zone(zone) { unsigned int cpu; for_each_possible_cpu(cpu) pageset_set_high_and_batch(zone, per_cpu_ptr(zone->pageset, cpu)); } out: mutex_unlock(&pcp_batch_high_lock); return ret; } #ifdef CONFIG_NUMA int hashdist = HASHDIST_DEFAULT; static int __init set_hashdist(char *str) { if (!str) return 0; hashdist = simple_strtoul(str, &str, 0); return 1; } __setup("hashdist=", set_hashdist); #endif /* * allocate a large system hash table from bootmem * - it is assumed that the hash table must contain an exact power-of-2 * quantity of entries * - limit is the number of hash buckets, not the total allocation size */ void *__init alloc_large_system_hash(const char *tablename, unsigned long bucketsize, unsigned long numentries, int scale, int flags, unsigned int *_hash_shift, unsigned int *_hash_mask, unsigned long low_limit, unsigned long high_limit) { unsigned long long max = high_limit; unsigned long log2qty, size; void *table = NULL; /* allow the kernel cmdline to have a say */ if (!numentries) { /* round applicable memory size up to nearest megabyte */ numentries = nr_kernel_pages; /* It isn't necessary when PAGE_SIZE >= 1MB */ if (PAGE_SHIFT < 20) numentries = round_up(numentries, (1<<20)/PAGE_SIZE); /* limit to 1 bucket per 2^scale bytes of low memory */ if (scale > PAGE_SHIFT) numentries >>= (scale - PAGE_SHIFT); else numentries <<= (PAGE_SHIFT - scale); /* Make sure we've got at least a 0-order allocation.. 
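	 * (For scale (illustrative, 4KiB pages): with nr_kernel_pages =
	 * 1048576 - about 4GiB - and scale = 13, numentries was shifted
	 * down by scale - PAGE_SHIFT = 1 above, leaving 524288 buckets
	 * before the power-of-two rounding below.)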
*/ if (unlikely(flags & HASH_SMALL)) { /* Makes no sense without HASH_EARLY */ WARN_ON(!(flags & HASH_EARLY)); if (!(numentries >> *_hash_shift)) { numentries = 1UL << *_hash_shift; BUG_ON(!numentries); } } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) numentries = PAGE_SIZE / bucketsize; } numentries = roundup_pow_of_two(numentries); /* limit allocation size to 1/16 total memory by default */ if (max == 0) { max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; do_div(max, bucketsize); } max = min(max, 0x80000000ULL); if (numentries < low_limit) numentries = low_limit; if (numentries > max) numentries = max; log2qty = ilog2(numentries); do { size = bucketsize << log2qty; if (flags & HASH_EARLY) table = memblock_virt_alloc_nopanic(size, 0); else if (hashdist) table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); else { /* * If bucketsize is not a power-of-two, we may free * some pages at the end of hash table which * alloc_pages_exact() automatically does */ if (get_order(size) < MAX_ORDER) { table = alloc_pages_exact(size, GFP_ATOMIC); kmemleak_alloc(table, size, 1, GFP_ATOMIC); } } } while (!table && size > PAGE_SIZE && --log2qty); if (!table) panic("Failed to allocate %s hash table\n", tablename); printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", tablename, (1UL << log2qty), ilog2(size) - PAGE_SHIFT, size); if (_hash_shift) *_hash_shift = log2qty; if (_hash_mask) *_hash_mask = (1 << log2qty) - 1; return table; } /* Return a pointer to the bitmap storing bits affecting a block of pages */ static inline unsigned long *get_pageblock_bitmap(struct zone *zone, unsigned long pfn) { #ifdef CONFIG_SPARSEMEM return __pfn_to_section(pfn)->pageblock_flags; #else return zone->pageblock_flags; #endif /* CONFIG_SPARSEMEM */ } static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) { #ifdef CONFIG_SPARSEMEM pfn &= (PAGES_PER_SECTION-1); return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #else pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages); return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; #endif /* CONFIG_SPARSEMEM */ } /** * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages * @page: The page within the block of interest * @pfn: The target page frame number * @end_bitidx: The last bit of interest to retrieve * @mask: mask of bits that the caller is interested in * * Return: pageblock_bits flags */ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, unsigned long end_bitidx, unsigned long mask) { struct zone *zone; unsigned long *bitmap; unsigned long bitidx, word_bitidx; unsigned long word; zone = page_zone(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); word_bitidx = bitidx / BITS_PER_LONG; bitidx &= (BITS_PER_LONG-1); word = bitmap[word_bitidx]; bitidx += end_bitidx; return (word >> (BITS_PER_LONG - bitidx - 1)) & mask; } /** * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages * @page: The page within the block of interest * @flags: The flags to set * @pfn: The target page frame number * @end_bitidx: The last bit of interest * @mask: mask of bits that the caller is interested in */ void set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, unsigned long end_bitidx, unsigned long mask) { struct zone *zone; unsigned long *bitmap; unsigned long bitidx, word_bitidx; unsigned long old_word, word; BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); zone = 
page_zone(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); word_bitidx = bitidx / BITS_PER_LONG; bitidx &= (BITS_PER_LONG-1); VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); bitidx += end_bitidx; mask <<= (BITS_PER_LONG - bitidx - 1); flags <<= (BITS_PER_LONG - bitidx - 1); word = READ_ONCE(bitmap[word_bitidx]); for (;;) { old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); if (word == old_word) break; word = old_word; } } /* * This function checks whether pageblock includes unmovable pages or not. * If @count is not zero, it is okay to include less @count unmovable pages * * PageLRU check without isolation or lru_lock could race so that * MIGRATE_MOVABLE block might include unmovable pages. It means you can't * expect this function should be exact. */ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, bool skip_hwpoisoned_pages) { unsigned long pfn, iter, found; int mt; /* * For avoiding noise data, lru_add_drain_all() should be called * If ZONE_MOVABLE, the zone never contains unmovable pages */ if (zone_idx(zone) == ZONE_MOVABLE) return false; mt = get_pageblock_migratetype(page); if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt)) return false; pfn = page_to_pfn(page); for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { unsigned long check = pfn + iter; if (!pfn_valid_within(check)) continue; page = pfn_to_page(check); /* * Hugepages are not in LRU lists, but they're movable. * We need not scan over tail pages bacause we don't * handle each tail page individually in migration. */ if (PageHuge(page)) { iter = round_up(iter + 1, 1<<compound_order(page)) - 1; continue; } /* * We can't use page_count without pin a page * because another CPU can free compound page. * This check already skips compound tails of THP * because their page->_count is zero at all time. */ if (!atomic_read(&page->_count)) { if (PageBuddy(page)) iter += (1 << page_order(page)) - 1; continue; } /* * The HWPoisoned page may be not in buddy system, and * page_count() is not 0. */ if (skip_hwpoisoned_pages && PageHWPoison(page)) continue; if (!PageLRU(page)) found++; /* * If there are RECLAIMABLE pages, we need to check * it. But now, memory offline itself doesn't call * shrink_node_slabs() and it still to be fixed. */ /* * If the page is not RAM, page_count()should be 0. * we don't need more check. This is an _used_ not-movable page. * * The problematic thing here is PG_reserved pages. PG_reserved * is set to both of a memory hole page and a _used_ kernel * page at boot. */ if (found > count) return true; } return false; } bool is_pageblock_removable_nolock(struct page *page) { struct zone *zone; unsigned long pfn; /* * We have to be careful here because we are iterating over memory * sections which are not zone aware so we might end up outside of * the zone but still within the section. * We have to take care about the node as well. If the node is offline * its NODE_DATA will be NULL - see page_zone. 
*/ if (!node_online(page_to_nid(page))) return false; zone = page_zone(page); pfn = page_to_pfn(page); if (!zone_spans_pfn(zone, pfn)) return false; return !has_unmovable_pages(zone, page, 0, true); } #ifdef CONFIG_CMA static unsigned long pfn_max_align_down(unsigned long pfn) { return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, pageblock_nr_pages) - 1); } static unsigned long pfn_max_align_up(unsigned long pfn) { return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, pageblock_nr_pages)); } /* [start, end) must belong to a single zone. */ static int __alloc_contig_migrate_range(struct compact_control *cc, unsigned long start, unsigned long end) { /* This function is based on compact_zone() from compaction.c. */ unsigned long nr_reclaimed; unsigned long pfn = start; unsigned int tries = 0; int ret = 0; migrate_prep(); while (pfn < end || !list_empty(&cc->migratepages)) { if (fatal_signal_pending(current)) { ret = -EINTR; break; } if (list_empty(&cc->migratepages)) { cc->nr_migratepages = 0; pfn = isolate_migratepages_range(cc, pfn, end); if (!pfn) { ret = -EINTR; break; } tries = 0; } else if (++tries == 5) { ret = ret < 0 ? ret : -EBUSY; break; } nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, &cc->migratepages); cc->nr_migratepages -= nr_reclaimed; ret = migrate_pages(&cc->migratepages, alloc_migrate_target, NULL, 0, cc->mode, MR_CMA); } if (ret < 0) { putback_movable_pages(&cc->migratepages); return ret; } return 0; } /** * alloc_contig_range() -- tries to allocate given range of pages * @start: start PFN to allocate * @end: one-past-the-last PFN to allocate * @migratetype: migratetype of the underlaying pageblocks (either * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks * in range must have the same migratetype and it must * be either of the two. * * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES * aligned, however it's the caller's responsibility to guarantee that * we are the only thread that changes migrate type of pageblocks the * pages fall in. * * The PFN range must belong to a single zone. * * Returns zero on success or negative error code. On success all * pages which PFN is in [start, end) are allocated for the caller and * need to be freed with free_contig_range(). */ int alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype) { unsigned long outer_start, outer_end; unsigned int order; int ret = 0; struct compact_control cc = { .nr_migratepages = 0, .order = -1, .zone = page_zone(pfn_to_page(start)), .mode = MIGRATE_SYNC, .ignore_skip_hint = true, }; INIT_LIST_HEAD(&cc.migratepages); /* * What we do here is we mark all pageblocks in range as * MIGRATE_ISOLATE. Because pageblock and max order pages may * have different sizes, and due to the way page allocator * work, we align the range to biggest of the two pages so * that page allocator won't try to merge buddies from * different pageblocks and change MIGRATE_ISOLATE to some * other migration type. * * Once the pageblocks are marked as MIGRATE_ISOLATE, we * migrate the pages from an unaligned range (ie. pages that * we are interested in). This will put all the pages in * range back to page allocator as MIGRATE_ISOLATE. * * When this is done, we take the pages in range from page * allocator removing them from the buddy system. This way * page allocator will never consider using them. 
* * This lets us mark the pageblocks back as * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the * aligned range but not in the unaligned, original range are * put back to page allocator so that buddy can use them. */ ret = start_isolate_page_range(pfn_max_align_down(start), pfn_max_align_up(end), migratetype, false); if (ret) return ret; ret = __alloc_contig_migrate_range(&cc, start, end); if (ret) goto done; /* * Pages from [start, end) are within a MAX_ORDER_NR_PAGES * aligned blocks that are marked as MIGRATE_ISOLATE. What's * more, all pages in [start, end) are free in page allocator. * What we are going to do is to allocate all pages from * [start, end) (that is remove them from page allocator). * * The only problem is that pages at the beginning and at the * end of interesting range may be not aligned with pages that * page allocator holds, ie. they can be part of higher order * pages. Because of this, we reserve the bigger range and * once this is done free the pages we are not interested in. * * We don't have to hold zone->lock here because the pages are * isolated thus they won't get removed from buddy. */ lru_add_drain_all(); drain_all_pages(cc.zone); order = 0; outer_start = start; while (!PageBuddy(pfn_to_page(outer_start))) { if (++order >= MAX_ORDER) { ret = -EBUSY; goto done; } outer_start &= ~0UL << order; } /* Make sure the range is really isolated. */ if (test_pages_isolated(outer_start, end, false)) { ret = -EBUSY; goto done; } /* Grab isolated pages from freelists. */ outer_end = isolate_freepages_range(&cc, outer_start, end); if (!outer_end) { ret = -EBUSY; goto done; } /* Free head and tail (if any) */ if (start != outer_start) free_contig_range(outer_start, start - outer_start); if (end != outer_end) free_contig_range(end, outer_end - end); done: undo_isolate_page_range(pfn_max_align_down(start), pfn_max_align_up(end), migratetype); return ret; } void free_contig_range(unsigned long pfn, unsigned nr_pages) { unsigned int count = 0; for (; nr_pages--; pfn++) { struct page *page = pfn_to_page(pfn); count += page_count(page) != 1; __free_page(page); } WARN(count != 0, "%d pages are still in use!\n", count); } #endif #ifdef CONFIG_MEMORY_HOTPLUG /* * The zone indicated has a new number of managed_pages; batch sizes and percpu * page high values need to be recalulated. */ void __meminit zone_pcp_update(struct zone *zone) { unsigned cpu; mutex_lock(&pcp_batch_high_lock); for_each_possible_cpu(cpu) pageset_set_high_and_batch(zone, per_cpu_ptr(zone->pageset, cpu)); mutex_unlock(&pcp_batch_high_lock); } #endif void zone_pcp_reset(struct zone *zone) { unsigned long flags; int cpu; struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ local_irq_save(flags); if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); drain_zonestat(zone, pset); } free_percpu(zone->pageset); zone->pageset = &boot_pageset; } local_irq_restore(flags); } #ifdef CONFIG_MEMORY_HOTREMOVE /* * All pages in the range must be isolated before calling this. 
*/ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) { struct page *page; struct zone *zone; unsigned int order, i; unsigned long pfn; unsigned long flags; /* find the first valid pfn */ for (pfn = start_pfn; pfn < end_pfn; pfn++) if (pfn_valid(pfn)) break; if (pfn == end_pfn) return; zone = page_zone(pfn_to_page(pfn)); spin_lock_irqsave(&zone->lock, flags); pfn = start_pfn; while (pfn < end_pfn) { if (!pfn_valid(pfn)) { pfn++; continue; } page = pfn_to_page(pfn); /* * The HWPoisoned page may be not in buddy system, and * page_count() is not 0. */ if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { pfn++; SetPageReserved(page); continue; } BUG_ON(page_count(page)); BUG_ON(!PageBuddy(page)); order = page_order(page); #ifdef CONFIG_DEBUG_VM printk(KERN_INFO "remove from free list %lx %d %lx\n", pfn, 1 << order, end_pfn); #endif list_del(&page->lru); rmv_page_order(page); zone->free_area[order].nr_free--; for (i = 0; i < (1 << order); i++) SetPageReserved((page+i)); pfn += (1 << order); } spin_unlock_irqrestore(&zone->lock, flags); } #endif #ifdef CONFIG_MEMORY_FAILURE bool is_free_buddy_page(struct page *page) { struct zone *zone = page_zone(page); unsigned long pfn = page_to_pfn(page); unsigned long flags; unsigned int order; spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < MAX_ORDER; order++) { struct page *page_head = page - (pfn & ((1 << order) - 1)); if (PageBuddy(page_head) && page_order(page_head) >= order) break; } spin_unlock_irqrestore(&zone->lock, flags); return order < MAX_ORDER; } #endif
adafruit/adafruit-raspberrypi-linux
mm/page_alloc.c
C
gpl-2.0
193,960
26.984418
106
0.68161
false
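
The min_free_kbytes heuristic in the file above is easy to reproduce outside the kernel. A minimal userspace C sketch, with a hypothetical isqrt() helper standing in for the kernel's int_sqrt(), prints the same table the comment documents:

#include <stdio.h>

/* Integer square root (bit-by-bit method), standing in for int_sqrt(). */
static unsigned long isqrt(unsigned long x)
{
    unsigned long r = 0, bit = 1UL << 30;

    while (bit > x)
        bit >>= 2;
    while (bit) {
        if (x >= r + bit) {
            x -= r + bit;
            r = (r >> 1) + bit;
        } else {
            r >>= 1;
        }
        bit >>= 2;
    }
    return r;
}

int main(void)
{
    /* lowmem sizes from the kernel comment, in MB */
    unsigned long mb[] = { 16, 32, 64, 128, 256, 512, 1024,
                           2048, 4096, 8192, 16384 };
    for (unsigned i = 0; i < sizeof(mb) / sizeof(mb[0]); i++) {
        unsigned long lowmem_kbytes = mb[i] * 1024;
        unsigned long min_free = isqrt(lowmem_kbytes * 16);

        /* same clamping as init_per_zone_wmark_min() */
        if (min_free < 128)
            min_free = 128;
        if (min_free > 65536)
            min_free = 65536;
        printf("%6luMB: %6luk\n", mb[i], min_free);
    }
    return 0;
}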
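
set_pfnblock_flags_mask() above updates a shared bitmap word with a lock-free compare-and-swap retry loop. The same pattern, sketched as a userspace analogue with C11 atomics instead of the kernel's cmpxchg() (names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Replace the bits selected by @mask in *word with @flags, retrying if
 * another thread modified the word between our read and our CAS. This
 * mirrors the loop at the end of set_pfnblock_flags_mask(). */
static void set_bits_masked(_Atomic unsigned long *word,
                            unsigned long flags, unsigned long mask)
{
    unsigned long old = atomic_load(word);

    for (;;) {
        unsigned long new = (old & ~mask) | (flags & mask);

        /* on failure, 'old' is refreshed with the current value */
        if (atomic_compare_exchange_weak(word, &old, new))
            break;
    }
}

int main(void)
{
    _Atomic unsigned long w = 0xf0f0UL;

    set_bits_masked(&w, 0x000fUL, 0x00ffUL); /* rewrite low byte only */
    printf("0x%lx\n", atomic_load(&w));      /* prints 0xf00f */
    return 0;
}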
/* Generated By:JJTree: Do not edit this line. ASTBindingValue.java Version 4.3 */
/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package com.bigdata.rdf.sail.sparql.ast;

public class ASTBindingValue extends SimpleNode {

    public ASTBindingValue(int id) {
        super(id);
    }

    public ASTBindingValue(SyntaxTreeBuilder p, int id) {
        super(p, id);
    }

    /** Accept the visitor. **/
    public Object jjtAccept(SyntaxTreeBuilderVisitor visitor, Object data)
        throws VisitorException
    {
        return visitor.visit(this, data);
    }
}
/* JavaCC - OriginalChecksum=ef5d563071f07ef864ef92ccf25d35e7 (do not edit this line) */
wikimedia/wikidata-query-blazegraph
sparql-grammar/src/main/java/com/bigdata/rdf/sail/sparql/ast/ASTBindingValue.java
Java
gpl-2.0
760
34.190476
166
0.730263
false
<?php

namespace Sabre\VObject;

use Sabre\Xml;

/**
 * iCalendar/vCard/jCal/jCard/xCal/xCard writer object.
 *
 * This object provides a few (static) convenience methods to quickly access
 * the serializers.
 *
 * @copyright Copyright (C) fruux GmbH (https://fruux.com/)
 * @author Ivan Enderlin
 * @license http://sabre.io/license/ Modified BSD License
 */
class Writer {

    /**
     * Serializes a vCard or iCalendar object.
     *
     * @return string
     */
    public static function write(Component $component) {
        return $component->serialize();
    }

    /**
     * Serializes a jCal or jCard object.
     *
     * @param int $options
     *
     * @return string
     */
    public static function writeJson(Component $component, $options = 0) {
        return json_encode($component, $options);
    }

    /**
     * Serializes a xCal or xCard object.
     *
     * @return string
     */
    public static function writeXml(Component $component) {
        $writer = new Xml\Writer();
        $writer->openMemory();
        $writer->setIndent(true);
        $writer->startDocument('1.0', 'utf-8');

        if ($component instanceof Component\VCalendar) {
            $writer->startElement('icalendar');
            $writer->writeAttribute('xmlns', Parser\XML::XCAL_NAMESPACE);
        } else {
            $writer->startElement('vcards');
            $writer->writeAttribute('xmlns', Parser\XML::XCARD_NAMESPACE);
        }

        $component->xmlSerialize($writer);

        $writer->endElement();

        return $writer->outputMemory();
    }
}
smartcitiescommunity/Civikmind
vendor/sabre/vobject/lib/Writer.php
PHP
gpl-2.0
1,584
22.294118
76
0.594066
false
#ifndef _NICK_OBJECT_H_
#define _NICK_OBJECT_H_

#include <Python.h>
#include "base-objects.h"

/* forward */
struct _NICK_REC;

typedef struct {
    PyIrssi_HEAD(struct _NICK_REC)
} PyNick;

extern PyTypeObject PyNickType;

int nick_object_init(void);
PyObject *pynick_sub_new(void *nick, PyTypeObject *subclass);
PyObject *pynick_new(void *nick);

#define pynick_check(op) PyObject_TypeCheck(op, &PyNickType)

#endif
mahmoudimus/irssi-python
src/objects/nick-object.h
C
gpl-2.0
418
18
61
0.729665
false
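
The header above wraps irssi's NICK_REC in a CPython object through the PyIrssi_HEAD macro. A compile-only sketch of the same wrapping pattern, with hypothetical names in place of the irssi types (assumes only the CPython headers):

#include <Python.h>

/* A stand-in C record, playing the role of irssi's NICK_REC. */
struct my_rec { int refcount; };

/* Wrapper object: a PyObject header plus a pointer to the wrapped
 * record -- the shape a macro like PyIrssi_HEAD() expands to. */
typedef struct {
    PyObject_HEAD
    struct my_rec *data;
} PyMyRec;

static PyTypeObject PyMyRecType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "mymod.MyRec",
    .tp_basicsize = sizeof(PyMyRec),
    .tp_flags = Py_TPFLAGS_DEFAULT,
};

/* Type check in the style of pynick_check(). */
#define pymyrec_check(op) PyObject_TypeCheck(op, &PyMyRecType)

/* Factory in the style of pynick_new(): allocate a wrapper and attach
 * the C record to it. */
static PyObject *pymyrec_new(struct my_rec *rec)
{
    PyMyRec *self = PyObject_New(PyMyRec, &PyMyRecType);

    if (self)
        self->data = rec;
    return (PyObject *)self;
}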
SECTION "sec", ROM0 DS $100 jp $200 DS $100 ld sp, $FFFE ld d, $13 ld a, $12 or a,a ;borra las banderas ld a, d push af ;the value expected is $1300
franmolinaca/papiGB
tests/asm/test_LDrr_ad.asm
Assembly
gpl-2.0
216
15.615385
36
0.458333
false
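
The test relies on OR A,A leaving A unchanged while clearing the N, H and C flags (Z mirrors the result), so the later PUSH AF stores $1300: A=$13 in the high byte, F=$00 in the low byte. A small C model of that flag arithmetic (helper names are illustrative; Game Boy flag layout is Z=bit 7, N=bit 6, H=bit 5, C=bit 4):

#include <stdio.h>
#include <stdint.h>

#define FLAG_Z 0x80  /* zero */
#define FLAG_N 0x40  /* subtract */
#define FLAG_H 0x20  /* half-carry */
#define FLAG_C 0x10  /* carry */

/* OR r: A |= r; Z set from the result, N/H/C cleared. */
static void or_a(uint8_t *a, uint8_t *f, uint8_t r)
{
    *a |= r;
    *f = (*a == 0) ? FLAG_Z : 0;
}

int main(void)
{
    uint8_t a = 0x12, f = 0xff, d = 0x13;

    or_a(&a, &f, a);                     /* OR A,A: clears N/H/C, Z=0 since A=0x12 */
    a = d;                               /* LD A,D */
    uint16_t af = (uint16_t)a << 8 | f;  /* what PUSH AF stores */
    printf("AF = $%04X\n", af);          /* prints AF = $1300 */
    return 0;
}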
/*
 * Copyright (C) 2005-2006 Martin Willi
 * Copyright (C) 2005 Jan Hutter
 * Hochschule fuer Technik Rapperswil
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.  See <http://www.fsf.org/copyleft/gpl.txt>.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

/**
 * @defgroup ke_payload ke_payload
 * @{ @ingroup payloads
 */

#ifndef KE_PAYLOAD_H_
#define KE_PAYLOAD_H_

typedef struct ke_payload_t ke_payload_t;

#include <library.h>
#include <encoding/payloads/payload.h>
#include <encoding/payloads/transform_substructure.h>
#include <collections/linked_list.h>
#include <crypto/diffie_hellman.h>

/**
 * Class representing an IKEv1 or IKEv2 key exchange payload.
 */
struct ke_payload_t {

    /**
     * The payload_t interface.
     */
    payload_t payload_interface;

    /**
     * Returns the key exchange data of this KE payload.
     *
     * @return	chunk_t pointing to internal data
     */
    chunk_t (*get_key_exchange_data) (ke_payload_t *this);

    /**
     * Gets the Diffie-Hellman Group Number of this KE payload (IKEv2 only).
     *
     * @return	DH Group Number of this payload
     */
    diffie_hellman_group_t (*get_dh_group_number) (ke_payload_t *this);

    /**
     * Destroys a ke_payload_t object.
     */
    void (*destroy) (ke_payload_t *this);
};

/**
 * Creates an empty ke_payload_t object.
 *
 * @param type	PLV2_KEY_EXCHANGE or PLV1_KEY_EXCHANGE
 * @return		ke_payload_t object
 */
ke_payload_t *ke_payload_create(payload_type_t type);

/**
 * Creates a ke_payload_t from a diffie_hellman_t.
 *
 * @param type	PLV2_KEY_EXCHANGE or PLV1_KEY_EXCHANGE
 * @param dh	diffie hellman object containing group and key
 * @return		ke_payload_t object
 */
ke_payload_t *ke_payload_create_from_diffie_hellman(payload_type_t type,
                                                    diffie_hellman_t *dh);

#endif /** KE_PAYLOAD_H_ @}*/
Bruno-M-/strongswan
src/libcharon/encoding/payloads/ke_payload.h
C
gpl-2.0
2,189
26.024691
77
0.697122
false
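
Going only by the declarations in this header, consuming a KE payload might look like the sketch below. How the diffie_hellman_t is obtained is outside this header and assumed here; only the functions declared above are used:

#include <encoding/payloads/ke_payload.h>

/* Sketch: build a KE payload from an existing diffie_hellman_t and read
 * it back. Construction of 'dh' is assumed to happen elsewhere. */
static void ke_payload_roundtrip(diffie_hellman_t *dh)
{
    ke_payload_t *ke;
    chunk_t data;
    diffie_hellman_group_t group;

    ke = ke_payload_create_from_diffie_hellman(PLV2_KEY_EXCHANGE, dh);
    if (!ke)
        return;

    data = ke->get_key_exchange_data(ke);   /* public DH value */
    group = ke->get_dh_group_number(ke);    /* IKEv2 group number */

    /* ... encode via the payload_t interface, then release ... */
    (void)data; (void)group;
    ke->destroy(ke);
}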
/***************************************************************************
 * file:        ChannelAccess.cc
 *
 * author:      Marc Loebbers
 *
 * copyright:   (C) 2004 Telecommunication Networks Group (TKN) at
 *              Technische Universitaet Berlin, Germany.
 *
 *              This program is free software; you can redistribute it
 *              and/or modify it under the terms of the GNU General Public
 *              License as published by the Free Software Foundation; either
 *              version 2 of the License, or (at your option) any later
 *              version.
 *              For further information see file COPYING
 *              in the top level directory
 ***************************************************************************
 * part of:     framework implementation developed by tkn
 * description: - Base class for physical layers
 *              - if you create your own physical layer, please subclass
 *                from this class and use the sendToChannel() function!!
 ***************************************************************************
 * changelog:   $Revision: 284 $
 *              last modified:   $Date: 2006-06-07 16:55:24 +0200 (Mi, 07 Jun 2006) $
 *              by:              $Author: willkomm $
 **************************************************************************/

#include "veins/base/connectionManager/ChannelAccess.h"

#include <cassert>

#include "veins/base/utils/FindModule.h"
#include "veins/base/modules/BaseWorldUtility.h"
#include "veins/base/connectionManager/BaseConnectionManager.h"

using std::endl;

const simsignalwrap_t ChannelAccess::mobilityStateChangedSignal = simsignalwrap_t(MIXIM_SIGNAL_MOBILITY_CHANGE_NAME);

BaseConnectionManager* ChannelAccess::getConnectionManager(cModule* nic)
{
    std::string cmName = nic->hasPar("connectionManagerName")
                         ? nic->par("connectionManagerName").stringValue()
                         : "";
    if (cmName != "") {
        cModule* ccModule = simulation.getModuleByPath(cmName.c_str());
        return dynamic_cast<BaseConnectionManager *>(ccModule);
    }
    else {
        return FindModule<BaseConnectionManager *>::findGlobalModule();
    }
}

void ChannelAccess::initialize(int stage)
{
    BatteryAccess::initialize(stage);

    if (stage == 0) {
        hasPar("coreDebug") ? coreDebug = par("coreDebug").boolValue() : coreDebug = false;

        findHost()->subscribe(mobilityStateChangedSignal, this);

        cModule* nic = getParentModule();
        cc = getConnectionManager(nic);
        if (cc == NULL)
            error("Could not find connectionmanager module");
        isRegistered = false;
    }

    usePropagationDelay = par("usePropagationDelay");
}

void ChannelAccess::sendToChannel(cPacket *msg)
{
    const NicEntry::GateList& gateList = cc->getGateList(getParentModule()->getId());
    NicEntry::GateList::const_iterator i = gateList.begin();

    if (useSendDirect) {
        // use Andras stuff
        if (i != gateList.end()) {
            simtime_t delay = SIMTIME_ZERO;
            for (; i != --gateList.end(); ++i) {
                //calculate delay (Propagation) to this receiving nic
                delay = calculatePropagationDelay(i->first);

                int radioStart = i->second->getId();
                int radioEnd = radioStart + i->second->size();
                for (int g = radioStart; g != radioEnd; ++g)
                    sendDirect(static_cast<cPacket*>(msg->dup()), delay, msg->getDuration(),
                               i->second->getOwnerModule(), g);
            }
            //calculate delay (Propagation) to this receiving nic
            delay = calculatePropagationDelay(i->first);

            int radioStart = i->second->getId();
            int radioEnd = radioStart + i->second->size();
            for (int g = radioStart; g != --radioEnd; ++g)
                sendDirect(static_cast<cPacket*>(msg->dup()), delay, msg->getDuration(),
                           i->second->getOwnerModule(), g);

            sendDirect(msg, delay, msg->getDuration(), i->second->getOwnerModule(), radioEnd);
        }
        else {
            coreEV << "Nic is not connected to any gates!" << endl;
            delete msg;
        }
    }
    else {
        // use our stuff
        coreEV << "sendToChannel: sending to gates\n";
        if (i != gateList.end()) {
            simtime_t delay = SIMTIME_ZERO;
            for (; i != --gateList.end(); ++i) {
                //calculate delay (Propagation) to this receiving nic
                delay = calculatePropagationDelay(i->first);

                sendDelayed(static_cast<cPacket*>(msg->dup()), delay, i->second);
            }
            //calculate delay (Propagation) to this receiving nic
            delay = calculatePropagationDelay(i->first);

            sendDelayed(msg, delay, i->second);
        }
        else {
            coreEV << "Nic is not connected to any gates!" << endl;
            delete msg;
        }
    }
}

simtime_t ChannelAccess::calculatePropagationDelay(const NicEntry* nic)
{
    if (!usePropagationDelay)
        return 0;

    ChannelAccess *const senderModule   = this;
    ChannelAccess *const receiverModule = nic->chAccess;
    //const simtime_t_cref sStart = simTime();

    assert(senderModule);
    assert(receiverModule);

    /** claim the Move pattern of the sender from the Signal */
    Coord sendersPos  = senderModule->getMobilityModule()->getCurrentPosition(/*sStart*/);
    Coord receiverPos = receiverModule->getMobilityModule()->getCurrentPosition(/*sStart*/);

    // this time-point is used to calculate the distance between sending and receiving host
    return receiverPos.distance(sendersPos) / BaseWorldUtility::speedOfLight;
}

void ChannelAccess::receiveSignal(cComponent *source, simsignal_t signalID, cObject *obj)
{
    if (signalID == mobilityStateChangedSignal) {
        ChannelMobilityPtrType const mobility = check_and_cast<ChannelMobilityPtrType>(obj);
        Coord pos = mobility->getCurrentPosition();

        if (isRegistered) {
            cc->updateNicPos(getParentModule()->getId(), &pos);
        }
        else {
            // register the nic with ConnectionManager
            // returns true, if sendDirect is used
            useSendDirect = cc->registerNic(getParentModule(), this, &pos);
            isRegistered  = true;
        }
    }
}
DankilltheShadow/veins-4a1
src/veins/base/connectionManager/ChannelAccess.cc
C++
gpl-2.0
6,333
36.473373
117
0.585347
false
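
calculatePropagationDelay() above reduces to Euclidean distance divided by the speed of light. A standalone C sketch of the same computation (the coordinates are made up; the literal constant stands in for BaseWorldUtility::speedOfLight):

#include <math.h>
#include <stdio.h>

#define SPEED_OF_LIGHT 299792458.0  /* m/s */

struct coord { double x, y, z; };

/* Distance between sender and receiver divided by c, as in
 * ChannelAccess::calculatePropagationDelay(). */
static double propagation_delay(struct coord a, struct coord b)
{
    double dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;

    return sqrt(dx * dx + dy * dy + dz * dz) / SPEED_OF_LIGHT;
}

int main(void)
{
    struct coord tx = { 0, 0, 0 }, rx = { 300, 400, 0 };  /* 500 m apart */

    printf("delay = %.3g s\n", propagation_delay(tx, rx)); /* ~1.67e-06 */
    return 0;
}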
<?php
/**
 *
 *
 * @version $Id$
 * @copyright 2003
 **/

class righthere_service {

    function righthere_service(){
    }

    function rh_service($url){
        $request = wp_remote_post($url, array('timeout' => 10));

        if (is_wp_error($request)) {
            $this->last_error_str = $request->get_error_message();
            return false;
        } else {
            $r = json_decode($request['body']);
            if (is_object($r) && property_exists($r, 'R')) {
                return $r;
            } else {
                $this->last_error_str = $request['body'];
                return false;
            }
        }

        return false;
    }
}
?>
wsalazar/nj
wp-content/plugins/white-label-branding/options-panel/class.righthere_service.php
PHP
gpl-2.0
537
15.8125
59
0.569832
false
/* * linux/drivers/cpufreq/cpufreq.c * * Copyright (C) 2001 Russell King * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> * * Oct 2005 - Ashok Raj <ashok.raj@intel.com> * Added handling for CPU hotplug * Feb 2006 - Jacob Shin <jacob.shin@amd.com> * Fix handling for CPU hotplug -- affected CPUs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/notifier.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/syscore_ops.h> #include <trace/events/power.h> /** * The "cpufreq driver" - the arch- or hardware-dependent low * level driver of CPUFreq support, and its spinlock. This lock * also protects the cpufreq_cpu_data array. */ static struct cpufreq_driver *cpufreq_driver; static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); #ifdef CONFIG_HOTPLUG_CPU /* This one keeps track of the previously set governor of a removed CPU */ struct cpufreq_cpu_save_data { char gov[CPUFREQ_NAME_LEN]; unsigned int max, min; }; static DEFINE_PER_CPU(struct cpufreq_cpu_save_data, cpufreq_policy_save); #endif static DEFINE_SPINLOCK(cpufreq_driver_lock); /* * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure * all cpufreq/hotplug/workqueue/etc related lock issues. * * The rules for this semaphore: * - Any routine that wants to read from the policy structure will * do a down_read on this semaphore. * - Any routine that will write to the policy structure and/or may take away * the policy altogether (eg. CPU hotplug), will hold this lock in write * mode before doing so. * * Additional rules: * - All holders of the lock should check to make sure that the CPU they * are concerned with are online after they get the lock. * - Governor routines that can be called in cpufreq hotplug path should not * take this sem as top level hotplug notifier handler takes this. 
* - Lock should not be held across * __cpufreq_governor(data, CPUFREQ_GOV_STOP); */ static DEFINE_PER_CPU(int, cpufreq_policy_cpu); static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); #define lock_policy_rwsem(mode, cpu) \ int lock_policy_rwsem_##mode \ (int cpu) \ { \ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ BUG_ON(policy_cpu == -1); \ down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ if (unlikely(!cpu_online(cpu))) { \ up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ return -1; \ } \ \ return 0; \ } lock_policy_rwsem(read, cpu); lock_policy_rwsem(write, cpu); static void unlock_policy_rwsem_read(int cpu) { int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); BUG_ON(policy_cpu == -1); up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); } void unlock_policy_rwsem_write(int cpu) { int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); BUG_ON(policy_cpu == -1); up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); } /* internal prototypes */ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); static unsigned int __cpufreq_get(unsigned int cpu); static void handle_update(struct work_struct *work); /** * Two notifier lists: the "policy" list is involved in the * validation process for a new CPU frequency policy; the * "transition" list for kernel code that needs to handle * changes to devices when the CPU clock speed changes. * The mutex locks both lists. */ static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); static struct srcu_notifier_head cpufreq_transition_notifier_list; static bool init_cpufreq_transition_notifier_list_called; static int __init init_cpufreq_transition_notifier_list(void) { srcu_init_notifier_head(&cpufreq_transition_notifier_list); init_cpufreq_transition_notifier_list_called = true; return 0; } pure_initcall(init_cpufreq_transition_notifier_list); static int off __read_mostly; int cpufreq_disabled(void) { return off; } void disable_cpufreq(void) { off = 1; } static LIST_HEAD(cpufreq_governor_list); static DEFINE_MUTEX(cpufreq_governor_mutex); static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, int sysfs) { struct cpufreq_policy *data; unsigned long flags; if (cpu >= nr_cpu_ids) goto err_out; /* get the cpufreq driver */ spin_lock_irqsave(&cpufreq_driver_lock, flags); if (!cpufreq_driver) goto err_out_unlock; if (!try_module_get(cpufreq_driver->owner)) goto err_out_unlock; /* get the CPU */ data = per_cpu(cpufreq_cpu_data, cpu); if (!data) goto err_out_put_module; if (!sysfs && !kobject_get(&data->kobj)) goto err_out_put_module; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); return data; err_out_put_module: module_put(cpufreq_driver->owner); err_out_unlock: spin_unlock_irqrestore(&cpufreq_driver_lock, flags); err_out: return NULL; } struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { return __cpufreq_cpu_get(cpu, 0); } EXPORT_SYMBOL_GPL(cpufreq_cpu_get); static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu) { return __cpufreq_cpu_get(cpu, 1); } static void __cpufreq_cpu_put(struct cpufreq_policy *data, int sysfs) { if (!sysfs) kobject_put(&data->kobj); module_put(cpufreq_driver->owner); } void cpufreq_cpu_put(struct cpufreq_policy *data) { __cpufreq_cpu_put(data, 0); } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data) { __cpufreq_cpu_put(data, 1); } /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * 
*********************************************************************/ /** * adjust_jiffies - adjust the system "loops_per_jiffy" * * This function alters the system "loops_per_jiffy" for the clock * speed change. Note that loops_per_jiffy cannot be updated on SMP * systems as each CPU might be scaled differently. So, use the arch * per-CPU loops_per_jiffy value wherever possible. */ #ifndef CONFIG_SMP static unsigned long l_p_j_ref; static unsigned int l_p_j_ref_freq; static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { if (ci->flags & CPUFREQ_CONST_LOOPS) return; if (!l_p_j_ref_freq) { l_p_j_ref = loops_per_jiffy; l_p_j_ref_freq = ci->old; pr_debug("saving %lu as reference value for loops_per_jiffy; " "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); } if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) || (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); pr_debug("scaling loops_per_jiffy to %lu " "for frequency %u kHz\n", loops_per_jiffy, ci->new); } } #else static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; } #endif /** * cpufreq_notify_transition - call notifier chain and adjust_jiffies * on frequency transition. * * This function calls the transition notifiers and the "adjust_jiffies" * function. It is called twice on all CPU frequency changes that have * external effects. */ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) { struct cpufreq_policy *policy; BUG_ON(irqs_disabled()); freqs->flags = cpufreq_driver->flags; pr_debug("notification %u of frequency transition to %u kHz\n", state, freqs->new); policy = per_cpu(cpufreq_cpu_data, freqs->cpu); switch (state) { case CPUFREQ_PRECHANGE: /* detect if the driver reported a value as "old frequency" * which is not equal to what the cpufreq core thinks is * "old frequency". */ if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { if ((policy) && (policy->cpu == freqs->cpu) && (policy->cur) && (policy->cur != freqs->old)) { pr_debug("Warning: CPU frequency is" " %u, cpufreq assumed %u kHz.\n", freqs->old, policy->cur); freqs->old = policy->cur; } } srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs); adjust_jiffies(CPUFREQ_PRECHANGE, freqs); break; case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); if (likely(policy) && likely(policy->cpu == freqs->cpu)) { policy->cur = freqs->new; sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq"); } break; } } EXPORT_SYMBOL_GPL(cpufreq_notify_transition); /** * cpufreq_notify_utilization - notify CPU userspace about CPU utilization * change * * This function is called everytime the CPU load is evaluated by the * ondemand governor. It notifies userspace of cpu load changes via sysfs. 
*/ void cpufreq_notify_utilization(struct cpufreq_policy *policy, unsigned int util) { if (policy) policy->util = util; if (policy->util >= MIN_CPU_UTIL_NOTIFY) sysfs_notify(&policy->kobj, NULL, "cpu_utilization"); } /********************************************************************* * SYSFS INTERFACE * *********************************************************************/ static struct cpufreq_governor *__find_governor(const char *str_governor) { struct cpufreq_governor *t; list_for_each_entry(t, &cpufreq_governor_list, governor_list) if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN)) return t; return NULL; } /** * cpufreq_parse_governor - parse a governor string */ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, struct cpufreq_governor **governor) { int err = -EINVAL; if (!cpufreq_driver) goto out; if (cpufreq_driver->setpolicy) { if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { *policy = CPUFREQ_POLICY_PERFORMANCE; err = 0; } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { *policy = CPUFREQ_POLICY_POWERSAVE; err = 0; } } else if (cpufreq_driver->target) { struct cpufreq_governor *t; mutex_lock(&cpufreq_governor_mutex); t = __find_governor(str_governor); if (t == NULL) { int ret; mutex_unlock(&cpufreq_governor_mutex); ret = request_module("cpufreq_%s", str_governor); mutex_lock(&cpufreq_governor_mutex); if (ret == 0) t = __find_governor(str_governor); } if (t != NULL) { *governor = t; err = 0; } mutex_unlock(&cpufreq_governor_mutex); } out: return err; } /** * cpufreq_per_cpu_attr_read() / show_##file_name() - * print out cpufreq information * * Write out information from cpufreq_driver->policy[cpu]; object must be * "unsigned int". */ #define show_one(file_name, object) \ static ssize_t show_##file_name \ (struct cpufreq_policy *policy, char *buf) \ { \ return sprintf(buf, "%u\n", policy->object); \ } show_one(cpuinfo_min_freq, cpuinfo.min_freq); show_one(cpuinfo_max_freq, cpuinfo.max_freq); show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); show_one(scaling_cur_freq, cur); show_one(cpu_utilization, util); static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy); /** * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access */ #define store_one(file_name, object) \ static ssize_t store_##file_name \ (struct cpufreq_policy *policy, const char *buf, size_t count) \ { \ unsigned int ret = -EINVAL; \ struct cpufreq_policy new_policy; \ \ ret = cpufreq_get_policy(&new_policy, policy->cpu); \ if (ret) \ return -EINVAL; \ \ ret = sscanf(buf, "%u", &new_policy.object); \ if (ret != 1) \ return -EINVAL; \ \ ret = cpufreq_driver->verify(&new_policy); \ if (ret) \ pr_err("cpufreq: Frequency verification failed\n"); \ \ policy->user_policy.object = new_policy.object; \ ret = __cpufreq_set_policy(policy, &new_policy); \ \ return ret ? 
ret : count; \ } store_one(scaling_min_freq, min); store_one(scaling_max_freq, max); /** * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware */ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, char *buf) { unsigned int cur_freq = __cpufreq_get(policy->cpu); if (!cur_freq) return sprintf(buf, "<unknown>"); return sprintf(buf, "%u\n", cur_freq); } /** * show_scaling_governor - show the current policy for the specified CPU */ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) { if (policy->policy == CPUFREQ_POLICY_POWERSAVE) return sprintf(buf, "powersave\n"); else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) return sprintf(buf, "performance\n"); else if (policy->governor) return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name); return -EINVAL; } /** * store_scaling_governor - store policy for the specified CPU */ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, const char *buf, size_t count) { unsigned int ret = -EINVAL; char str_governor[16]; struct cpufreq_policy new_policy; ret = cpufreq_get_policy(&new_policy, policy->cpu); if (ret) return ret; ret = sscanf(buf, "%15s", str_governor); if (ret != 1) return -EINVAL; if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor)) return -EINVAL; /* Do not use cpufreq_set_policy here or the user_policy.max will be wrongly overridden */ ret = __cpufreq_set_policy(policy, &new_policy); policy->user_policy.policy = policy->policy; policy->user_policy.governor = policy->governor; sysfs_notify(&policy->kobj, NULL, "scaling_governor"); if (ret) return ret; else return count; } /** * show_scaling_driver - show the cpufreq driver currently loaded */ static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) { return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name); } /** * show_scaling_available_governors - show the available CPUfreq governors */ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, char *buf) { ssize_t i = 0; struct cpufreq_governor *t; if (!cpufreq_driver->target) { i += sprintf(buf, "performance powersave"); goto out; } list_for_each_entry(t, &cpufreq_governor_list, governor_list) { if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) goto out; i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name); } out: i += sprintf(&buf[i], "\n"); return i; } static ssize_t show_cpus(const struct cpumask *mask, char *buf) { ssize_t i = 0; unsigned int cpu; for_each_cpu(cpu, mask) { if (i) i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); if (i >= (PAGE_SIZE - 5)) break; } i += sprintf(&buf[i], "\n"); return i; } /** * show_related_cpus - show the CPUs affected by each transition even if * hw coordination is in use */ static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) { if (cpumask_empty(policy->related_cpus)) return show_cpus(policy->cpus, buf); return show_cpus(policy->related_cpus, buf); } /** * show_affected_cpus - show the CPUs affected by each transition */ static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) { return show_cpus(policy->cpus, buf); } static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, const char *buf, size_t count) { unsigned int freq = 0; unsigned int ret; if (!policy->governor || !policy->governor->store_setspeed) return -EINVAL; ret = sscanf(buf, "%u", &freq); if (ret != 1) return -EINVAL; 
policy->governor->store_setspeed(policy, freq); return count; } static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) { if (!policy->governor || !policy->governor->show_setspeed) return sprintf(buf, "<unsupported>\n"); return policy->governor->show_setspeed(policy, buf); } /** * show_scaling_driver - show the current cpufreq HW/BIOS limitation */ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) { unsigned int limit; int ret; if (cpufreq_driver->bios_limit) { ret = cpufreq_driver->bios_limit(policy->cpu, &limit); if (!ret) return sprintf(buf, "%u\n", limit); } return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); } cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); cpufreq_freq_attr_ro(cpuinfo_min_freq); cpufreq_freq_attr_ro(cpuinfo_max_freq); cpufreq_freq_attr_ro(cpuinfo_transition_latency); cpufreq_freq_attr_ro(scaling_available_governors); cpufreq_freq_attr_ro(scaling_driver); cpufreq_freq_attr_ro(scaling_cur_freq); cpufreq_freq_attr_ro(bios_limit); cpufreq_freq_attr_ro(related_cpus); cpufreq_freq_attr_ro(affected_cpus); cpufreq_freq_attr_ro(cpu_utilization); cpufreq_freq_attr_rw(scaling_min_freq); cpufreq_freq_attr_rw(scaling_max_freq); cpufreq_freq_attr_rw(scaling_governor); cpufreq_freq_attr_rw(scaling_setspeed); static struct attribute *default_attrs[] = { &cpuinfo_min_freq.attr, &cpuinfo_max_freq.attr, &cpuinfo_transition_latency.attr, &scaling_min_freq.attr, &scaling_max_freq.attr, &affected_cpus.attr, &cpu_utilization.attr, &related_cpus.attr, &scaling_governor.attr, &scaling_driver.attr, &scaling_available_governors.attr, &scaling_setspeed.attr, NULL }; struct kobject *cpufreq_global_kobject; EXPORT_SYMBOL(cpufreq_global_kobject); #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) #define to_attr(a) container_of(a, struct freq_attr, attr) static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; policy = cpufreq_cpu_get_sysfs(policy->cpu); if (!policy) goto no_policy; if (lock_policy_rwsem_read(policy->cpu) < 0) goto fail; if (fattr->show) ret = fattr->show(policy, buf); else ret = -EIO; unlock_policy_rwsem_read(policy->cpu); fail: cpufreq_cpu_put_sysfs(policy); no_policy: return ret; } static ssize_t store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; policy = cpufreq_cpu_get_sysfs(policy->cpu); if (!policy) goto no_policy; if (lock_policy_rwsem_write(policy->cpu) < 0) goto fail; if (fattr->store) ret = fattr->store(policy, buf, count); else ret = -EIO; unlock_policy_rwsem_write(policy->cpu); fail: cpufreq_cpu_put_sysfs(policy); no_policy: return ret; } static void cpufreq_sysfs_release(struct kobject *kobj) { struct cpufreq_policy *policy = to_policy(kobj); pr_debug("last reference is dropped\n"); complete(&policy->kobj_unregister); } static const struct sysfs_ops sysfs_ops = { .show = show, .store = store, }; static struct kobj_type ktype_cpufreq = { .sysfs_ops = &sysfs_ops, .default_attrs = default_attrs, .release = cpufreq_sysfs_release, }; /* * Returns: * Negative: Failure * 0: Success * Positive: When we have a managed CPU and the sysfs got symlinked */ static int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy, struct device *dev) { int ret = 0; #ifdef CONFIG_SMP unsigned long flags; unsigned int j; #ifdef 
CONFIG_HOTPLUG_CPU struct cpufreq_governor *gov; gov = __find_governor(per_cpu(cpufreq_policy_save, cpu).gov); if (gov) { policy->governor = gov; pr_debug("Restoring governor %s for cpu %d\n", policy->governor->name, cpu); } if (per_cpu(cpufreq_policy_save, cpu).min) { policy->min = per_cpu(cpufreq_policy_save, cpu).min; policy->user_policy.min = policy->min; } if (per_cpu(cpufreq_policy_save, cpu).max) { policy->max = per_cpu(cpufreq_policy_save, cpu).max; policy->user_policy.max = policy->max; } pr_debug("Restoring CPU%d min %d and max %d\n", cpu, policy->min, policy->max); #endif for_each_cpu(j, policy->cpus) { struct cpufreq_policy *managed_policy; if (cpu == j) continue; /* Check for existing affected CPUs. * They may not be aware of it due to CPU Hotplug. * cpufreq_cpu_put is called when the device is removed * in __cpufreq_remove_dev() */ managed_policy = cpufreq_cpu_get(j); if (unlikely(managed_policy)) { /* Set proper policy_cpu */ unlock_policy_rwsem_write(cpu); per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu; if (lock_policy_rwsem_write(cpu) < 0) { /* Should not go through policy unlock path */ if (cpufreq_driver->exit) cpufreq_driver->exit(policy); cpufreq_cpu_put(managed_policy); return -EBUSY; } spin_lock_irqsave(&cpufreq_driver_lock, flags); cpumask_copy(managed_policy->cpus, policy->cpus); cpumask_and(managed_policy->cpus, managed_policy->cpus, cpu_online_mask); per_cpu(cpufreq_cpu_data, cpu) = managed_policy; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); pr_debug("CPU already managed, adding link\n"); ret = sysfs_create_link(&dev->kobj, &managed_policy->kobj, "cpufreq"); if (ret) cpufreq_cpu_put(managed_policy); /* * Success. We only needed to be added to the mask. * Call driver->exit() because only the cpu parent of * the kobj needed to call init(). 
*/ if (cpufreq_driver->exit) cpufreq_driver->exit(policy); if (!ret) return 1; else return ret; } } #endif return ret; } /* symlink affected CPUs */ static int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy) { unsigned int j; int ret = 0; for_each_cpu(j, policy->cpus) { struct cpufreq_policy *managed_policy; struct device *cpu_dev; if (j == cpu) continue; if (!cpu_online(j)) continue; pr_debug("CPU %u already managed, adding link\n", j); managed_policy = cpufreq_cpu_get(cpu); cpu_dev = get_cpu_device(j); ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); if (ret) { cpufreq_cpu_put(managed_policy); return ret; } } return ret; } static int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy, struct device *dev) { struct cpufreq_policy new_policy; struct freq_attr **drv_attr; unsigned long flags; int ret = 0; unsigned int j; /* prepare interface data */ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, "cpufreq"); if (ret) return ret; /* set up files for this cpu device */ drv_attr = cpufreq_driver->attr; while ((drv_attr) && (*drv_attr)) { ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); if (ret) goto err_out_kobj_put; drv_attr++; } if (cpufreq_driver->get) { ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); if (ret) goto err_out_kobj_put; } if (cpufreq_driver->target) { ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); if (ret) goto err_out_kobj_put; } if (cpufreq_driver->bios_limit) { ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); if (ret) goto err_out_kobj_put; } spin_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) { if (!cpu_online(j)) continue; per_cpu(cpufreq_cpu_data, j) = policy; per_cpu(cpufreq_policy_cpu, j) = policy->cpu; } spin_unlock_irqrestore(&cpufreq_driver_lock, flags); ret = cpufreq_add_dev_symlink(cpu, policy); if (ret) goto err_out_kobj_put; memcpy(&new_policy, policy, sizeof(struct cpufreq_policy)); /* assure that the starting sequence is run in __cpufreq_set_policy */ policy->governor = NULL; /* set default policy */ ret = __cpufreq_set_policy(policy, &new_policy); policy->user_policy.policy = policy->policy; policy->user_policy.governor = policy->governor; if (ret) { pr_debug("setting policy failed\n"); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); } return ret; err_out_kobj_put: kobject_put(&policy->kobj); wait_for_completion(&policy->kobj_unregister); return ret; } /** * cpufreq_add_dev - add a CPU device * * Adds the cpufreq interface for a CPU device. * * The Oracle says: try running cpufreq registration/unregistration concurrently * with with cpu hotplugging and all hell will break loose. Tried to clean this * mess up, but more thorough testing is needed. - Mathieu */ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; int ret = 0, found = 0; struct cpufreq_policy *policy; unsigned long flags; unsigned int j; #ifdef CONFIG_HOTPLUG_CPU int sibling; #endif if (cpu_is_offline(cpu)) return 0; pr_debug("adding CPU %u\n", cpu); #ifdef CONFIG_SMP /* check whether a different CPU already registered this * CPU because it is in the same boat. 
*/ policy = cpufreq_cpu_get(cpu); if (unlikely(policy)) { cpufreq_cpu_put(policy); return 0; } #endif if (!try_module_get(cpufreq_driver->owner)) { ret = -EINVAL; goto module_out; } ret = -ENOMEM; policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); if (!policy) goto nomem_out; if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) goto err_free_policy; if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) goto err_free_cpumask; policy->cpu = cpu; cpumask_copy(policy->cpus, cpumask_of(cpu)); /* Initially set CPU itself as the policy_cpu */ per_cpu(cpufreq_policy_cpu, cpu) = cpu; ret = (lock_policy_rwsem_write(cpu) < 0); WARN_ON(ret); init_completion(&policy->kobj_unregister); INIT_WORK(&policy->update, handle_update); /* Set governor before ->init, so that driver could check it */ #ifdef CONFIG_HOTPLUG_CPU for_each_online_cpu(sibling) { struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); if (cp && cp->governor) { policy->governor = cp->governor; policy->min = cp->min; policy->max = cp->max; policy->user_policy.min = cp->user_policy.min; policy->user_policy.max = cp->user_policy.max; found = 1; //pr_info("sibling: found sibling!\n"); break; } } #endif if (!found) policy->governor = CPUFREQ_DEFAULT_GOVERNOR; /* call driver. From then on the cpufreq must be able * to accept all calls to ->verify and ->setpolicy for this CPU */ ret = cpufreq_driver->init(policy); if (ret) { pr_debug("initialization failed\n"); goto err_unlock_policy; } /* * affected cpus must always be the one, which are online. We aren't * managing offline cpus here. */ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); policy->user_policy.min = policy->min; policy->user_policy.max = policy->max; blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_START, policy); ret = cpufreq_add_dev_policy(cpu, policy, dev); if (ret) { if (ret > 0) /* This is a managed cpu, symlink created, exit with 0 */ ret = 0; goto err_unlock_policy; } ret = cpufreq_add_dev_interface(cpu, policy, dev); if (ret) goto err_out_unregister; unlock_policy_rwsem_write(cpu); kobject_uevent(&policy->kobj, KOBJ_ADD); module_put(cpufreq_driver->owner); pr_debug("initialization complete\n"); return 0; err_out_unregister: spin_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) per_cpu(cpufreq_cpu_data, j) = NULL; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); kobject_put(&policy->kobj); wait_for_completion(&policy->kobj_unregister); err_unlock_policy: unlock_policy_rwsem_write(cpu); free_cpumask_var(policy->related_cpus); err_free_cpumask: free_cpumask_var(policy->cpus); err_free_policy: kfree(policy); nomem_out: module_put(cpufreq_driver->owner); module_out: return ret; } /** * __cpufreq_remove_dev - remove a CPU device * * Removes the cpufreq interface for a CPU device. * Caller should already have policy_rwsem in write mode for this CPU. * This routine frees the rwsem before returning. 
*/ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; unsigned long flags; struct cpufreq_policy *data; struct kobject *kobj; struct completion *cmp; #ifdef CONFIG_SMP struct device *cpu_dev; unsigned int j; #endif pr_debug("unregistering CPU %u\n", cpu); spin_lock_irqsave(&cpufreq_driver_lock, flags); data = per_cpu(cpufreq_cpu_data, cpu); if (!data) { spin_unlock_irqrestore(&cpufreq_driver_lock, flags); unlock_policy_rwsem_write(cpu); return -EINVAL; } per_cpu(cpufreq_cpu_data, cpu) = NULL; #ifdef CONFIG_SMP /* if this isn't the CPU which is the parent of the kobj, we * only need to unlink, put and exit */ if (unlikely(cpu != data->cpu)) { pr_debug("removing link\n"); cpumask_clear_cpu(cpu, data->cpus); spin_unlock_irqrestore(&cpufreq_driver_lock, flags); kobj = &dev->kobj; cpufreq_cpu_put(data); unlock_policy_rwsem_write(cpu); sysfs_remove_link(kobj, "cpufreq"); return 0; } #endif #ifdef CONFIG_SMP #ifdef CONFIG_HOTPLUG_CPU strncpy(per_cpu(cpufreq_policy_save, cpu).gov, data->governor->name, CPUFREQ_NAME_LEN); per_cpu(cpufreq_policy_save, cpu).min = data->user_policy.min; per_cpu(cpufreq_policy_save, cpu).max = data->user_policy.max; pr_debug("Saving CPU%d user policy min %d and max %d\n", cpu, data->user_policy.min, data->user_policy.max); #endif /* if we have other CPUs still registered, we need to unlink them, * or else wait_for_completion below will lock up. Clean the * per_cpu(cpufreq_cpu_data) while holding the lock, and remove * the sysfs links afterwards. */ if (unlikely(cpumask_weight(data->cpus) > 1)) { for_each_cpu(j, data->cpus) { if (j == cpu) continue; per_cpu(cpufreq_cpu_data, j) = NULL; } } spin_unlock_irqrestore(&cpufreq_driver_lock, flags); if (unlikely(cpumask_weight(data->cpus) > 1)) { for_each_cpu(j, data->cpus) { if (j == cpu) continue; pr_debug("removing link for cpu %u\n", j); #ifdef CONFIG_HOTPLUG_CPU strncpy(per_cpu(cpufreq_policy_save, j).gov, data->governor->name, CPUFREQ_NAME_LEN); per_cpu(cpufreq_policy_save, j).min = data->user_policy.min; per_cpu(cpufreq_policy_save, j).max = data->user_policy.max; pr_debug("Saving CPU%d user policy min %d and max %d\n", j, data->min, data->max); #endif cpu_dev = get_cpu_device(j); kobj = &cpu_dev->kobj; unlock_policy_rwsem_write(cpu); sysfs_remove_link(kobj, "cpufreq"); lock_policy_rwsem_write(cpu); cpufreq_cpu_put(data); } } #else spin_unlock_irqrestore(&cpufreq_driver_lock, flags); #endif if (cpufreq_driver->target) __cpufreq_governor(data, CPUFREQ_GOV_STOP); kobj = &data->kobj; cmp = &data->kobj_unregister; unlock_policy_rwsem_write(cpu); kobject_put(kobj); /* we need to make sure that the underlying kobj is actually * not referenced anymore by anybody before we proceed with * unloading. 
*/ pr_debug("waiting for dropping of refcount\n"); wait_for_completion(cmp); pr_debug("wait complete\n"); lock_policy_rwsem_write(cpu); if (cpufreq_driver->exit) cpufreq_driver->exit(data); unlock_policy_rwsem_write(cpu); #ifdef CONFIG_HOTPLUG_CPU /* when the CPU which is the parent of the kobj is hotplugged * offline, check for siblings, and create cpufreq sysfs interface * and symlinks */ if (unlikely(cpumask_weight(data->cpus) > 1)) { /* first sibling now owns the new sysfs dir */ cpumask_clear_cpu(cpu, data->cpus); cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL); /* finally remove our own symlink */ lock_policy_rwsem_write(cpu); __cpufreq_remove_dev(dev, sif); } #endif free_cpumask_var(data->related_cpus); free_cpumask_var(data->cpus); kfree(data); return 0; } static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; int retval; if (cpu_is_offline(cpu)) return 0; if (unlikely(lock_policy_rwsem_write(cpu))) BUG(); retval = __cpufreq_remove_dev(dev, sif); return retval; } static void handle_update(struct work_struct *work) { struct cpufreq_policy *policy = container_of(work, struct cpufreq_policy, update); unsigned int cpu = policy->cpu; pr_debug("handle_update for cpu %u called\n", cpu); cpufreq_update_policy(cpu); } /** * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble. * @cpu: cpu number * @old_freq: CPU frequency the kernel thinks the CPU runs at * @new_freq: CPU frequency the CPU actually runs at * * We adjust to current frequency first, and need to clean up later. * So either call to cpufreq_update_policy() or schedule handle_update()). */ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq) { struct cpufreq_freqs freqs; pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " "core thinks of %u, is %u kHz.\n", old_freq, new_freq); freqs.cpu = cpu; freqs.old = old_freq; freqs.new = new_freq; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } /** * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur * @cpu: CPU number * * This is the last known freq, without actually getting it from the driver. * Return value will be same as what is shown in scaling_cur_freq in sysfs. */ unsigned int cpufreq_quick_get(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); unsigned int ret_freq = 0; if (policy) { ret_freq = policy->cur; cpufreq_cpu_put(policy); } return ret_freq; } EXPORT_SYMBOL(cpufreq_quick_get); /** * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU * @cpu: CPU number * * Just return the max possible frequency for a given CPU. 
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
    unsigned int ret_freq = 0;

    if (policy) {
        ret_freq = policy->max;
        cpufreq_cpu_put(policy);
    }

    return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(unsigned int cpu)
{
    struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
    unsigned int ret_freq = 0;

    if (!cpufreq_driver->get)
        return ret_freq;

    ret_freq = cpufreq_driver->get(cpu);

    if (ret_freq && policy->cur &&
        !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
        /* verify no discrepancy between actual and saved value exists */
        if (unlikely(ret_freq != policy->cur)) {
            cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
            schedule_work(&policy->update);
        }
    }

    return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Return the current frequency of CPU @cpu.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
    unsigned int ret_freq = 0;
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

    if (!policy)
        goto out;

    if (unlikely(lock_policy_rwsem_read(cpu)))
        goto out_policy;

    ret_freq = __cpufreq_get(cpu);

    unlock_policy_rwsem_read(cpu);

out_policy:
    cpufreq_cpu_put(policy);
out:
    return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
    .name       = "cpufreq",
    .subsys     = &cpu_subsys,
    .add_dev    = cpufreq_add_dev,
    .remove_dev = cpufreq_remove_dev,
};

/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
    int ret = 0;

    int cpu = smp_processor_id();
    struct cpufreq_policy *cpu_policy;

    pr_debug("suspending cpu %u\n", cpu);

    /* If there's no policy for the boot CPU, we have nothing to do. */
    cpu_policy = cpufreq_cpu_get(cpu);
    if (!cpu_policy)
        return 0;

    if (cpufreq_driver->suspend) {
        ret = cpufreq_driver->suspend(cpu_policy);
        if (ret)
            printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                    "step on CPU %u\n", cpu_policy->cpu);
    }

    cpufreq_cpu_put(cpu_policy);
    return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *  1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *  2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
 *      restored. It will verify that the current freq is in sync with
 *      what we believe it to be. This is a bit later than when it
 *      should be, but nonetheless it's better than calling
 *      cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
    int ret = 0;

    int cpu = smp_processor_id();
    struct cpufreq_policy *cpu_policy;

    pr_debug("resuming cpu %u\n", cpu);

    /* If there's no policy for the boot CPU, we have nothing to do.
     */
    cpu_policy = cpufreq_cpu_get(cpu);
    if (!cpu_policy)
        return;

    if (cpufreq_driver->resume) {
        ret = cpufreq_driver->resume(cpu_policy);
        if (ret) {
            printk(KERN_ERR "cpufreq: resume failed in ->resume "
                    "step on CPU %u\n", cpu_policy->cpu);
            goto fail;
        }
    }

    schedule_work(&cpu_policy->update);

fail:
    cpufreq_cpu_put(cpu_policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
    .suspend    = cpufreq_bp_suspend,
    .resume     = cpufreq_bp_resume,
};

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
    int ret;

    WARN_ON(!init_cpufreq_transition_notifier_list_called);

    switch (list) {
    case CPUFREQ_TRANSITION_NOTIFIER:
        ret = srcu_notifier_chain_register(
                &cpufreq_transition_notifier_list, nb);
        break;
    case CPUFREQ_POLICY_NOTIFIER:
        ret = blocking_notifier_chain_register(
                &cpufreq_policy_notifier_list, nb);
        break;
    default:
        ret = -EINVAL;
    }

    return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
    int ret;

    switch (list) {
    case CPUFREQ_TRANSITION_NOTIFIER:
        ret = srcu_notifier_chain_unregister(
                &cpufreq_transition_notifier_list, nb);
        break;
    case CPUFREQ_POLICY_NOTIFIER:
        ret = blocking_notifier_chain_unregister(
                &cpufreq_policy_notifier_list, nb);
        break;
    default:
        ret = -EINVAL;
    }

    return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

int __cpufreq_driver_target(struct cpufreq_policy *policy,
                unsigned int target_freq,
                unsigned int relation)
{
    int retval = -EINVAL;

    if (cpufreq_disabled())
        return -ENODEV;

    pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
        target_freq, relation);
    if (cpu_online(policy->cpu) && cpufreq_driver->target)
        retval = cpufreq_driver->target(policy, target_freq, relation);

    return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
              unsigned int target_freq,
              unsigned int relation)
{
    int ret = -EINVAL;

    policy = cpufreq_cpu_get(policy->cpu);
    if (!policy)
        goto no_policy;

    if (unlikely(lock_policy_rwsem_write(policy->cpu)))
        goto fail;

    ret = __cpufreq_driver_target(policy, target_freq, relation);

    unlock_policy_rwsem_write(policy->cpu);

fail:
    cpufreq_cpu_put(policy);
no_policy:
    return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
    int ret = 0;

    policy = cpufreq_cpu_get(policy->cpu);
    if (!policy)
        return -EINVAL;

    if (cpu_online(cpu) && cpufreq_driver->getavg)
        ret = cpufreq_driver->getavg(policy, cpu);

    cpufreq_cpu_put(policy);
    return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);

/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                    unsigned int event)
{
    int ret;

    /* This must only be defined when the default governor is known to
       have latency restrictions, like e.g. conservative or ondemand.
       That this is the case is already ensured in Kconfig
    */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
    struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
    struct cpufreq_governor *gov = NULL;
#endif
    if (policy->governor->max_transition_latency &&
        policy->cpuinfo.transition_latency >
        policy->governor->max_transition_latency) {
        if (!gov)
            return -EINVAL;
        else {
            printk(KERN_WARNING "%s governor failed, too long"
                   " transition latency of HW, fallback"
                   " to %s governor\n",
                   policy->governor->name,
                   gov->name);
            policy->governor = gov;
        }
    }

    if (!try_module_get(policy->governor->owner))
        return -EINVAL;

    pr_debug("__cpufreq_governor for CPU %u, event %u\n",
                        policy->cpu, event);
    ret = policy->governor->governor(policy, event);

    /* we keep one module reference alive for
       each CPU governed by this CPU */
    if ((event != CPUFREQ_GOV_START) || ret)
        module_put(policy->governor->owner);
    if ((event == CPUFREQ_GOV_STOP) && !ret)
        module_put(policy->governor->owner);

    return ret;
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
    int err;

    if (!governor)
        return -EINVAL;

    if (cpufreq_disabled())
        return -ENODEV;

    mutex_lock(&cpufreq_governor_mutex);

    err = -EBUSY;
    if (__find_governor(governor->name) == NULL) {
        err = 0;
        list_add(&governor->governor_list, &cpufreq_governor_list);
    }

    mutex_unlock(&cpufreq_governor_mutex);
    return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
    int cpu;
#endif

    if (!governor)
        return;

    if (cpufreq_disabled())
        return;

#ifdef CONFIG_HOTPLUG_CPU
    for_each_present_cpu(cpu) {
        if (cpu_online(cpu))
            continue;
        if (!strcmp(per_cpu(cpufreq_policy_save, cpu).gov,
                    governor->name))
            strcpy(per_cpu(cpufreq_policy_save, cpu).gov, "\0");
        per_cpu(cpufreq_policy_save, cpu).min = 0;
        per_cpu(cpufreq_policy_save, cpu).max = 0;
    }
#endif

    mutex_lock(&cpufreq_governor_mutex);
    list_del(&governor->governor_list);
    mutex_unlock(&cpufreq_governor_mutex);
    return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);

/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *  is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
    struct cpufreq_policy *cpu_policy;
    if (!policy)
        return -EINVAL;

    cpu_policy = cpufreq_cpu_get(cpu);
    if (!cpu_policy)
        return -EINVAL;

    memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

    cpufreq_cpu_put(cpu_policy);
    return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);

/*
 * data   : current policy.
 * policy : policy to be set.
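 *
 * Note: the callers visible in this file take lock_policy_rwsem_write()
 * before calling in here.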
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
                struct cpufreq_policy *policy)
{
    int ret = 0;

    pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
        policy->min, policy->max);

    memcpy(&policy->cpuinfo, &data->cpuinfo,
                sizeof(struct cpufreq_cpuinfo));

    if (policy->min > data->user_policy.max ||
        policy->max < data->user_policy.min) {
        ret = -EINVAL;
        goto error_out;
    }

    /* verify the cpu speed can be set within this limit */
    ret = cpufreq_driver->verify(policy);
    if (ret)
        goto error_out;

    /* adjust if necessary - all reasons */
    blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
            CPUFREQ_ADJUST, policy);

    /* adjust if necessary - hardware incompatibility */
    blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
            CPUFREQ_INCOMPATIBLE, policy);

    /* verify the cpu speed can be set within this limit,
       which might be different to the first one */
    ret = cpufreq_driver->verify(policy);
    if (ret)
        goto error_out;

    /* notification of the new policy */
    blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
            CPUFREQ_NOTIFY, policy);

    data->min = policy->min;
    data->max = policy->max;

    pr_debug("new min and max freqs are %u - %u kHz\n",
                    data->min, data->max);

    if (cpufreq_driver->setpolicy) {
        data->policy = policy->policy;
        pr_debug("setting range\n");
        ret = cpufreq_driver->setpolicy(policy);
    } else {
        if (policy->governor != data->governor) {
            /* save old, working values */
            struct cpufreq_governor *old_gov = data->governor;

            pr_debug("governor switch\n");

            /* end old governor */
            if (data->governor)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);

            /* start new governor */
            data->governor = policy->governor;
            if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
                /* new governor failed, so re-start old one */
                pr_debug("starting governor %s failed\n",
                            data->governor->name);
                if (old_gov) {
                    data->governor = old_gov;
                    __cpufreq_governor(data,
                                CPUFREQ_GOV_START);
                }
                ret = -EINVAL;
                goto error_out;
            }
            /* might be a policy change, too, so fall through */
        }
        pr_debug("governor: change or update limits\n");
        __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
    }

error_out:
    return ret;
}

#ifdef CONFIG_CPUFREQ_LIMIT_MAX_FREQ    /* limit max freq */
enum {
    SET_MIN = 0,
    SET_MAX
};

int cpufreq_set_limits_off(int cpu, unsigned int limit, unsigned int value)
{
    int ret = -EINVAL;
    unsigned long flags;

    if (!(limit == SET_MIN || limit == SET_MAX))
        goto out;

    if (!cpu_is_offline(cpu))
        goto out;

    spin_lock_irqsave(&cpufreq_driver_lock, flags);

    if (!cpufreq_driver)
        goto out_unlock;

    if (!try_module_get(cpufreq_driver->owner))
        goto out_unlock;

    if (limit == SET_MAX) {
        if (per_cpu(cpufreq_policy_save, cpu).max)
            per_cpu(cpufreq_policy_save, cpu).max = value;
        else
            goto out_put_module;
    } else {
        if (per_cpu(cpufreq_policy_save, cpu).min)
            per_cpu(cpufreq_policy_save, cpu).min = value;
        else
            goto out_put_module;
    }

    ret = 0;
    pr_info("%s: Setting [min/max:0/1] = %d frequency of cpu[%d] to %d\n",
            __func__, limit, cpu, value);

out_put_module:
    module_put(cpufreq_driver->owner);
out_unlock:
    spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
    return ret;
}

int cpufreq_set_limits(int cpu, unsigned int limit, unsigned int value)
{
    struct cpufreq_policy new_policy;
    struct cpufreq_policy *cur_policy;
    int ret = -EINVAL;

    if (!(limit == SET_MIN || limit == SET_MAX))
        goto out;

    if (cpu_is_offline(cpu))
        goto out;

    cur_policy = cpufreq_cpu_get(cpu);
    if (!cur_policy)
        goto out;

    if (lock_policy_rwsem_write(cpu) < 0)
        goto out_put_freq;

    memcpy(&new_policy, cur_policy, sizeof(struct cpufreq_policy));

    if (limit == SET_MAX) {
        /* for app boost = DVFS lock */
        if (cur_policy->min > value) {
            new_policy.min = value;
            ret = __cpufreq_set_policy(cur_policy, &new_policy);
            if (ret < 0)
                goto out_unlock;

            cur_policy->user_policy.min = cur_policy->min;
        }
        new_policy.max = value;
    } else {
        /* no other cases to change min value, for now */
        if (cur_policy->max < value)
            value = cur_policy->max;
        new_policy.min = value;
    }

    ret = __cpufreq_set_policy(cur_policy, &new_policy);
    if (ret < 0)
        goto out_unlock;

    if (limit == SET_MAX)
        cur_policy->user_policy.max = cur_policy->max;
    else
        cur_policy->user_policy.min = cur_policy->min;

    ret = 0;
    pr_info("%s: Setting [min/max:0/1] = %d frequency of cpu[%d] to %d\n",
            __func__, limit, cpu, value);

out_unlock:
    unlock_policy_rwsem_write(cpu);
out_put_freq:
    cpufreq_cpu_put(cur_policy);
out:
    return ret;
}

int cpufreq_get_limits(int cpu, unsigned int limit)
{
    struct cpufreq_policy *cur_policy;
    int ret = -EINVAL;
    unsigned int value = 0;

    if (!(limit == SET_MIN || limit == SET_MAX))
        goto out;

    if (cpu_is_offline(cpu))
        goto out;

    cur_policy = cpufreq_cpu_get(cpu);
    if (!cur_policy)
        goto out;

    if (lock_policy_rwsem_write(cpu) < 0)
        goto out_put_freq;

    if (limit == SET_MAX)
        value = cur_policy->max;
    else
        value = cur_policy->min;

    ret = value;
    unlock_policy_rwsem_write(cpu);

    pr_info("%s: [min/max:0/1] = %d frequency of cpu[%d]: %d\n",
            __func__, limit, cpu, value);

out_put_freq:
    cpufreq_cpu_put(cur_policy);
out:
    return ret;
}
#endif

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
    struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
    struct cpufreq_policy policy;
    int ret;

    if (!data) {
        ret = -ENODEV;
        goto no_policy;
    }

    if (unlikely(lock_policy_rwsem_write(cpu))) {
        ret = -EINVAL;
        goto fail;
    }

    pr_debug("updating policy for CPU %u\n", cpu);
    memcpy(&policy, data, sizeof(struct cpufreq_policy));
    policy.min = data->user_policy.min;
    policy.max = data->user_policy.max;
    policy.policy = data->user_policy.policy;
    policy.governor = data->user_policy.governor;

    /* BIOS might change freq behind our back
       -> ask driver for current freq and notify governors about a change */
    if (cpufreq_driver->get) {
        policy.cur = cpufreq_driver->get(cpu);
        if (!data->cur) {
            pr_debug("Driver did not initialize current freq");
            data->cur = policy.cur;
        } else {
            if (data->cur != policy.cur)
                cpufreq_out_of_sync(cpu, data->cur,
                                policy.cur);
        }
    }

    ret = __cpufreq_set_policy(data, &policy);

    unlock_policy_rwsem_write(cpu);

fail:
    cpufreq_cpu_put(data);
no_policy:
    return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
                    unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    struct device *dev;

    dev = get_cpu_device(cpu);
    if (dev) {
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
            cpufreq_add_dev(dev, NULL);
            break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
            if (unlikely(lock_policy_rwsem_write(cpu)))
                BUG();

            __cpufreq_remove_dev(dev, NULL);
            break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
            cpufreq_add_dev(dev, NULL);
            break;
        }
    }
    return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
    .notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A
 *  struct cpufreq_driver containing the values
 *  submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
    unsigned long flags;
    int ret;

    if (cpufreq_disabled())
        return -ENODEV;

    if (!driver_data || !driver_data->verify || !driver_data->init ||
        ((!driver_data->setpolicy) && (!driver_data->target)))
        return -EINVAL;

    pr_debug("trying to register driver %s\n", driver_data->name);

    if (driver_data->setpolicy)
        driver_data->flags |= CPUFREQ_CONST_LOOPS;

    spin_lock_irqsave(&cpufreq_driver_lock, flags);
    if (cpufreq_driver) {
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return -EBUSY;
    }
    cpufreq_driver = driver_data;
    spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

    ret = subsys_interface_register(&cpufreq_interface);
    if (ret)
        goto err_null_driver;

    if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
        int i;
        ret = -ENODEV;

        /* check for at least one working CPU */
        for (i = 0; i < nr_cpu_ids; i++)
            if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
                ret = 0;
                break;
            }

        /* if all ->init() calls failed, unregister */
        if (ret) {
            pr_debug("no CPU initialized for driver %s\n",
                            driver_data->name);
            goto err_if_unreg;
        }
    }

    register_hotcpu_notifier(&cpufreq_cpu_notifier);
    pr_debug("driver %s up and running\n", driver_data->name);

    return 0;
err_if_unreg:
    subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
    spin_lock_irqsave(&cpufreq_driver_lock, flags);
    cpufreq_driver = NULL;
    spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
    return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
    unsigned long flags;

    if (!cpufreq_driver || (driver != cpufreq_driver))
        return -EINVAL;

    pr_debug("unregistering driver %s\n", driver->name);

    subsys_interface_unregister(&cpufreq_interface);

    unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

    spin_lock_irqsave(&cpufreq_driver_lock, flags);
    cpufreq_driver = NULL;
    spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

    return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
    int cpu;

    if (cpufreq_disabled())
        return -ENODEV;

    for_each_possible_cpu(cpu) {
        per_cpu(cpufreq_policy_cpu, cpu) = -1;
        init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
    }

    cpufreq_global_kobject = kobject_create_and_add("cpufreq",
                        &cpu_subsys.dev_root->kobj);
    BUG_ON(!cpufreq_global_kobject);
    register_syscore_ops(&cpufreq_syscore_ops);

    return 0;
}
core_initcall(cpufreq_core_init);
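/*
 * A minimal sketch of a client of the registration API above (illustrative
 * only, not part of this file): a hypothetical driver providing the
 * verify/init and target callbacks that cpufreq_register_driver() checks
 * for. All "demo_*" names and the frequency numbers are invented.
 */
#if 0   /* illustrative sketch, not built */
static int demo_verify(struct cpufreq_policy *policy)
{
    /* clamp the requested limits to the hardware range */
    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                     policy->cpuinfo.max_freq);
    return 0;
}

static int demo_target(struct cpufreq_policy *policy,
               unsigned int target_freq, unsigned int relation)
{
    return 0;   /* a real driver would program the hardware here */
}

static int demo_init(struct cpufreq_policy *policy)
{
    policy->cpuinfo.min_freq = 200000;  /* kHz, made-up values */
    policy->cpuinfo.max_freq = 1000000;
    policy->cpuinfo.transition_latency = 100000;    /* ns */
    policy->min = policy->cpuinfo.min_freq;
    policy->max = policy->cpuinfo.max_freq;
    policy->cur = policy->max;
    return 0;
}

static struct cpufreq_driver demo_driver = {
    .name   = "demo",
    .owner  = THIS_MODULE,
    .verify = demo_verify,
    .target = demo_target,
    .init   = demo_init,
};

static int __init demo_cpufreq_init(void)
{
    /* fails with -EINVAL unless verify, init and target (or setpolicy)
       are all provided, as checked in cpufreq_register_driver() above */
    return cpufreq_register_driver(&demo_driver);
}
module_init(demo_cpufreq_init);
#endif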
MuddyPlump/android_kernel_motorola_msm8226
drivers/cpufreq/cpufreq.c
C
gpl-2.0
55,424
24.6
103
0.670395
false
/* -*- c++ -*- ----------------------------------------------------------
   LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
   https://www.lammps.org/, Sandia National Laboratories
   Steve Plimpton, sjplimp@sandia.gov

   Copyright (2003) Sandia Corporation.  Under the terms of Contract
   DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
   certain rights in this software.  This software is distributed under
   the GNU General Public License.

   See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */

#ifdef MINIMIZE_CLASS
// clang-format off
MinimizeStyle(fire/old,MinFireOld);
// clang-format on
#else

#ifndef LMP_MIN_FIRE_OLD_H
#define LMP_MIN_FIRE_OLD_H

#include "min.h"

namespace LAMMPS_NS {

class MinFireOld : public Min {
 public:
  MinFireOld(class LAMMPS *);
  void init() override;
  void setup_style() override;
  void reset_vectors() override;
  int iterate(int) override;

 private:
  double dt, dtmax;
  double alpha;
  bigint last_negative;
};

}    // namespace LAMMPS_NS

#endif
#endif
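// Usage sketch (illustrative, not part of the original header): the
// MinimizeStyle macro above registers this class under the style name
// "fire/old", so it would be selected from a LAMMPS input script with
// something like (the tolerances below are made-up values):
//
//   min_style fire/old
//   minimize 1.0e-6 1.0e-8 1000 10000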
akohlmey/lammps
src/min_fire_old.h
C
gpl-2.0
1,134
24.2
76
0.63933
false
/******************************************************************************
*
*   Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
*
*   Unless you and Broadcom execute a separate written software license
*   agreement governing use of this software, this software is licensed to you
*   under the terms of the GNU General Public License version 2, available at
*   http://www.gnu.org/copyleft/gpl.html (the "GPL").
*
*   Notwithstanding the above, under no circumstances may you combine this
*   software in any way with any other Broadcom software provided under a
*   license other than the GPL, without Broadcom's express prior written
*   consent.
*
******************************************************************************/
/**
*
*   @file   chal_keypad.c
*
*   @brief  RHEA Keypad Controller cHAL source code file.
*
*   @note
*
******************************************************************************/

#ifdef tempINTERFACE_OSDAL_KEYPAD

#include <plat/chal/chal_types.h>
#include <plat/chal/chal_common.h>
#include <mach/rdb/brcm_rdb_keypad.h>
#include <mach/rdb/brcm_rdb_util.h>
#include <plat/chal/chal_keypad.h>

/******************************************************************************/
/*                           T Y P E D E F S                                  */
/******************************************************************************/

typedef struct {
    void __iomem *baseAddr;
    Boolean pullUpMode;
    cUInt32 imr0Val;
    cUInt32 imr1Val;
} CHAL_KEYPAD_DEVICE_t;

/******************************************************************************/
/*                           G L O B A L S                                    */
/******************************************************************************/

/******************************************************************************/
/*                           LOCALS                                           */
/******************************************************************************/
static CHAL_KEYPAD_DEVICE_t KeypadDev;

/******************************************************************************/
/*                           P R O T O T Y P E S                              */
/******************************************************************************/
static cUInt8 chal_keypad_calculate_pin_mask(cUInt8 pin);

/******************************************************************************/
/*                           F U N C T I O N S                                */
/******************************************************************************/

/******************************************************************************
*
*  Function Name: bitCount()
*
*  Description: takes in a 32-bit value and returns the number of bits set
*  to '1' in that value. This method of counting ones was chosen because it
*  is optimised for inputs that have a sparse number of ones.
*
*  Parameters: n (in) The 32-bit value to examine
*  Return:     cUInt8 The number of '1' bits in n.
*
******************************************************************************/
static cUInt8 bitCount(cUInt32 n)
{
    cUInt8 count = 0;

    while (n) {        /* loop until we have cleared all '1' bits */
        count++;
        n &= (n - 1);  /* this sets the rightmost '1' bit to 0 */
    }
    return count;
}                      /* bitCount() */

/******************************************************************************
*
*  Function Name: bitNumber()
*
*  Description: takes in a 32-bit value and returns the index of the
*  rightmost '1' bit. It uses a DeBruijn sequence to calculate the index
*  faster than iterating through the value to find it.
*
*  Parameters: val (in) The 32-bit value to examine.
*  Return:     cUInt8 The index of the rightmost '1' bit in val.
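*
*  Note: this computes the same result as a count-trailing-zeros operation
*  (e.g. GCC's __builtin_ctz), implemented portably with a 32-entry lookup
*  table.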
*
******************************************************************************/
static cUInt8 bitNumber(cUInt32 val)
{
    static const cUInt8 MultiplyDeBruijnBitPosition[32] = {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };

    /* The constant 0x077CB531 is a DeBruijn sequence that produces a
     * unique pattern of bits in the high 5 bits for each possible bit
     * position that it is multiplied against.
     * The usage of (val & -val) isolates only the rightmost '1' bit from
     * val. This ensures that the multiplier only has 1 bit set to '1'.
     */
    return MultiplyDeBruijnBitPosition[(cUInt8)
        (((val & -val) * 0x077CB531U) >> 27)];
}                      /* bitNumber() */

/******************************************************************************
*
*  Function Name: chal_keypad_calculate_pin_mask()
*
*  Description: creates a mask that has the first n bits set.
*
*  Parameters: n (in) The number of bits to set
*  Return:     cUInt8 A mask with the first n bits set.
*
******************************************************************************/
cUInt8 chal_keypad_calculate_pin_mask(cUInt8 n)
{
    cUInt16 tempVal;

    if ((n > 8) || (n < 1)) {
        /* n should never be bigger than 8 as the hardware supports
           an 8x8 matrix maximum. */
        return 0;
    }

    tempVal = (1 << n) - 1;

    return (cUInt8)tempVal;
}                      /* chal_keypad_calculate_pin_mask() */

/******************************************************************************
*
*  Function Name: chal_keypad_init
*
*  Description: initialize the keypad csl driver
*
*  Parameters: baseAddr (in) hardware cfg. of keypad to initialize
*
*  Note: When this function completes, the keypad hardware is configured
*  as requested and key interrupts are enabled.
*
******************************************************************************/
CHAL_HANDLE chal_keypad_init(void __iomem *baseAddr)
{
    KeypadDev.baseAddr = baseAddr;

    return (CHAL_HANDLE)&KeypadDev;
}

/******************************************************************************
*
*  Function Name: chal_keypad_shutdown
*
*  Description: turn off the keypad csl driver.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_shutdown(CHAL_HANDLE handle)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    chal_keypad_disable_interrupts(handle);

    /* disable the hardware block */
    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, ENABLE, 0);
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_enable
*
*  Description: Enable or Disable the keypad ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_enable(CHAL_HANDLE handle, Boolean enable)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, ENABLE,
                 ((enable == TRUE) ? 1 : 0));
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_pullup_mode
*
*  Description: Set Pull-up or pull-down mode for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_pullup_mode(CHAL_HANDLE handle, Boolean pullUp)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    pKpdDev->pullUpMode = pullUp;

    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, MODE,
                 ((pullUp == TRUE) ?
                  1 : 0));
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_column_filter
*
*  Description: Set column filter mode for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_column_filter(CHAL_HANDLE handle, Boolean enable,
                   CHAL_KEYPAD_DEBOUNCE_TIME_t debounce)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, COLFILTERENABLE,
                 ((enable == TRUE) ? 1 : 0));
    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, COLUMNFILTERTYPE,
                 debounce);
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_status_filter
*
*  Description: Set status filter mode for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_status_filter(CHAL_HANDLE handle, Boolean enable,
                   CHAL_KEYPAD_DEBOUNCE_TIME_t debounce)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR,
                 STATUSFILTERENABLE, ((enable == TRUE) ? 1 : 0));
    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, STATUSFILTERTYPE,
                 debounce);
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_column_width
*
*  Description: Set column width for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_column_width(CHAL_HANDLE handle, cUInt32 columns)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, COLUMNWIDTH,
                 (columns - 1));
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_row_width
*
*  Description: Set row width for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_row_width(CHAL_HANDLE handle, cUInt32 rows)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, ROWWIDTH,
                 (rows - 1));
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_row_output_control
*
*  Description: Set row output control for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_row_output_control(CHAL_HANDLE handle, cUInt32 rows)
{
    cUInt32 rowMask;
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    rowMask = chal_keypad_calculate_pin_mask(rows);

    /* use rows as output */
    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPIOR, ROWOCONTRL,
                 rowMask);
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_column_output_control
*
*  Description: Set column output control for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_column_output_control(CHAL_HANDLE handle, cUInt32 columns)
{
    cUInt32 columnMask;
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    columnMask = chal_keypad_calculate_pin_mask(columns);

    /* use columns as output */
    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPIOR, COLUMNOCONTRL,
                 columnMask);
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_interrupt_edge
*
*  Description: Set interrupt edge control for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_interrupt_edge(CHAL_HANDLE handle,
                    CHAL_KEYPAD_INTERRUPT_EDGE_t edge)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;
    cUInt32 tempReg, i;

    /* configure the individual key interrupt controls */
    tempReg = 0;
    for (i = 0; i <= 30; i = i + 2)
        tempReg |= (edge << i);

    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPEMR0, tempReg);
    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPEMR1, tempReg);
    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPEMR2, tempReg);
    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPEMR3, tempReg);
}

/******************************************************************************
*
*  Function Name: chal_keypad_set_interrupt_mask
*
*  Description: Set interrupt mask control for the ASIC block.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_set_interrupt_mask(CHAL_HANDLE handle, cUInt32 rows,
                    cUInt32 columns)
{
    cUInt32 tempReg, columnMask;
    cUInt32 imr0RowCount, imr1RowCount, i;
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    columnMask = chal_keypad_calculate_pin_mask(columns);

    /* enable the appropriate interrupts. */
    if (rows >= 4) {
        imr0RowCount = 4;
        imr1RowCount = rows - 4;
    } else {
        imr0RowCount = rows;
        imr1RowCount = 0;
    }

    tempReg = 0;
    for (i = 0; i < imr0RowCount; i++)
        tempReg |= (columnMask << (i * 8));

    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPIMR0, tempReg);
    /* save the imr0 value as we need it when enabling/disabling
       interrupts */
    pKpdDev->imr0Val = tempReg;

    tempReg = 0;
    for (i = 0; i < imr1RowCount; i++)
        tempReg |= (columnMask << (i * 8));

    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPIMR1, tempReg);
    /* save the imr1 value as we need it when enabling/disabling
       interrupts */
    pKpdDev->imr1Val = tempReg;
}

/******************************************************************************
*
*  Function Name: chal_keypad_get_pullup_status
*
*  Description: Return the value of the Keypad Pullup mode.
*
*  Parameters: return (out) TRUE  = Pullup mode
*                           FALSE = Pull down mode
*
******************************************************************************/
Boolean chal_keypad_get_pullup_status(CHAL_HANDLE handle)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    return pKpdDev->pullUpMode;
}

/******************************************************************************
*
*  Function Name: chal_keypad_swap_row_and_column
*
*  Description: Set the Swap row and column feature
*
*  Parameters: swap (in) TRUE  = turn swap row and column ON
*                        FALSE = set swap row and column OFF
*
******************************************************************************/
void chal_keypad_swap_row_and_column(CHAL_HANDLE handle, Boolean swap)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG_FIELD(pKpdDev->baseAddr, KEYPAD_KPCR, SWAPROWCOLUMN,
                 ((swap == TRUE) ? 1 : 0));
}

/******************************************************************************
*
*  Function Name: chal_keypad_handle_interrupt
*
*  Description: A keypad interrupt has occurred. Save off all information
*  relating to the event for later processing
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_retrieve_key_event_registers(CHAL_HANDLE handle,
                CHAL_KEYPAD_REGISTER_SET_t *regState)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    regState->ssr0 = BRCM_READ_REG(pKpdDev->baseAddr, KEYPAD_KPSSR0);
    regState->ssr1 = BRCM_READ_REG(pKpdDev->baseAddr, KEYPAD_KPSSR1);
    regState->isr0 = BRCM_READ_REG(pKpdDev->baseAddr, KEYPAD_KPISR0);
    regState->isr1 = BRCM_READ_REG(pKpdDev->baseAddr, KEYPAD_KPISR1);
}

/******************************************************************************
*
*  Function Name: chal_keypad_clear_interrupts
*
*  Description: Clear all outstanding interrupts
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_clear_interrupts(CHAL_HANDLE handle)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPICR0, 0xFFFFFFFF);
    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPICR1, 0xFFFFFFFF);
}

/******************************************************************************
*
*  Function Name: chal_keypad_disable_interrupts
*
*  Description: Disable key event interrupts
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_disable_interrupts(CHAL_HANDLE handle)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPIMR0, 0);
    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPIMR1, 0);
}

/******************************************************************************
*
*  Function Name: chal_keypad_enable_interrupts
*
*  Description: Enable key event interrupts
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_enable_interrupts(CHAL_HANDLE handle)
{
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPIMR0, pKpdDev->imr0Val);
    BRCM_WRITE_REG(pKpdDev->baseAddr, KEYPAD_KPIMR1, pKpdDev->imr1Val);
}

/******************************************************************************
*
*  Function Name: chal_keypad_decode_interrupt_event_registers
*
*  Description: Take a set of keypad registers recorded when an
*  interrupt occurred and decode the key event contained in them.
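*
*  Each set bit in the ISR identifies one key event; the corresponding SSR
*  bit gives its state (press or release, subject to the pull-up/pull-down
*  polarity handled below).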
*
*  Parameters:
*     regSet   (in) Is this the first or second register set?
*                   This determines if the events are for
*                   columns 0-3 or 4-7
*                   0 = ISR0 and SSR0 (col 0-3)
*                   1 = ISR1 and SSR1 (col 4-7)
*     bitCount (in) The number of bits set in ISR
*     ssr      (in) The value of SSR
*     isr      (in) The value of ISR
*
******************************************************************************/
void chal_keypad_decode_key_event_registers(CHAL_HANDLE *handle,
                    cUInt32 regSet,
                    cUInt32 bitCount,
                    cUInt32 ssr,
                    cUInt32 isr,
                    cUInt32 *numKeyEvents,
                    CHAL_KEYPAD_KEY_EVENT_LIST_t keyEvents)
{
    cUInt32 mask, i, firstbit;
    CHAL_KEYPAD_EVENT_t *keyEvent;
    CHAL_KEYPAD_DEVICE_t *pKpdDev = (CHAL_KEYPAD_DEVICE_t *) handle;

    mask = isr;

    /* if pull-down mode, the SSR bits logic is negated */
    if (!pKpdDev->pullUpMode)
        ssr = ~ssr;

    /* loop through all set bits, as each indicates a key event */
    for (i = 0; i < bitCount; i++) {
        keyEvent = &keyEvents[*numKeyEvents];

        /* this isolates the rightmost '1' bit from mask */
        firstbit = mask & (-mask);

        if ((ssr & firstbit) == 0)
            keyEvent->keyAction = CHAL_KEYPAD_KEY_PRESS;
        else
            keyEvent->keyAction = CHAL_KEYPAD_KEY_RELEASE;

        /* raw KeyId is of the form (C*8)+R: */
        /*    C = column number              */
        /*    R = row number                 */
        keyEvent->keyId = bitNumber(firstbit) + (regSet * 32);

        /* mask out the bit we just processed the event for */
        mask &= (~firstbit);

        (*numKeyEvents)++;
    }
}

/******************************************************************************
*
*  Function Name: chal_keypad_process_interrupt_events
*
*  Description: Take one data set off the interrupt event FIFO and decode all
*  key events contained in it and add them to the Key Event FIFO
*
*  Parameters: none
*
******************************************************************************/
cUInt32 chal_keypad_process_key_event_registers(CHAL_HANDLE *handle,
                CHAL_KEYPAD_REGISTER_SET_t *regState,
                CHAL_KEYPAD_KEY_EVENT_LIST_t keyEvents)
{
    cUInt32 bc1, bc2;
    cUInt32 numKeyEvents = 0;

    /* chal_dprintf( CDBG_INFO, "Key Event Regs: %lx %lx %lx %lx",
       regState->ssr0,regState->ssr1, regState->isr0,regState->isr1); */

    bc1 = bitCount(regState->isr0);
    bc2 = bitCount(regState->isr1);

    if ((bc1 + bc2) <= MAX_SIMULTANEOUS_KEY_EVENTS) {
        if (bc1) {
            chal_keypad_decode_key_event_registers(handle, 0, bc1,
                                   regState->ssr0,
                                   regState->isr0,
                                   &numKeyEvents,
                                   keyEvents);
        }
        if (bc2) {
            chal_keypad_decode_key_event_registers(handle, 1, bc2,
                                   regState->ssr1,
                                   regState->isr1,
                                   &numKeyEvents,
                                   keyEvents);
        }
    }

    return numKeyEvents;
}                      /* chal_keypad_process_key_event_registers() */

/******************************************************************************
*
*  Function Name: chal_keypad_config_read_status1
*
*  Description: Return the register value of the Keypad Status 1 register.
*
*  Parameters: return (out) SSR1 value
*
******************************************************************************/
cUInt32 chal_keypad_config_read_status1(void)
{
    return BRCM_READ_REG(KeypadDev.baseAddr, KEYPAD_KPSSR0);
}

/******************************************************************************
*
*  Function Name: chal_keypad_config_read_status2
*
*  Description: Return the register value of the Keypad Status 2 register.
*
*  Parameters: return (out) SSR2 value
*
******************************************************************************/
cUInt32 chal_keypad_config_read_status2(void)
{
    return BRCM_READ_REG(KeypadDev.baseAddr, KEYPAD_KPSSR1);
}

#else /* tempINTERFACE_OSDAL_KEYPAD */

#include <stdio.h>
#include <string.h>
#include <stdarg.h>
/*#define "memmap.h" */
#include "chal_common.h"
#include "dbg.h"
#include "brcm_rdb_sysmap.h"
/*#include "brcm_rdb_syscfg.h" */
#include "brcm_rdb_keypad.h"
#include "chal_keypad.h"

#if (defined(_HERA_) || defined(_RHEA_))
#define SWAP_ROW_COL
#endif

/******************************************************************************/
/*                           T Y P E D E F S                                  */
/******************************************************************************/

#define IOCR1_REG 0x08880004

#define KEYPAD_KPCR_Enable             KEYPAD_KPCR_ENABLE_MASK
#define KEYPAD_KPCR_Mode               KEYPAD_KPCR_MODE_MASK
#define KEYPAD_KPCR_ColFilterEnable    KEYPAD_KPCR_COLFILTERENABLE_MASK
#define KEYPAD_KPCR_StatusFilterEnable KEYPAD_KPCR_STATUSFILTERENABLE_MASK
#define KEYPAD_KPCR_SwapRowColumn      KEYPAD_KPCR_SWAPROWCOLUMN_MASK

/* max number of interrupt events that can be stored;
   must be a power of 2 for fifo operations to work */
#define CHAL_INTERRUPT_EVENT_FIFO_LENGTH 4

/* max number of key events that can be stored;
   since each interrupt register set can have a max of 4 simultaneous events
   it must be at least 4;
   must be a power of 2 for fifo operations to work */
#define CHAL_KEYPAD_KEY_ACTION_FIFO_LENGTH 4

/* max valid key events in one interrupt (hardware defined) */
#define MAX_KEY_EVENT_PER_INTERRUPT 4

/* register set data for 1 interrupt event. */
typedef struct {
    cUInt32 ssr0;
    cUInt32 ssr1;
    cUInt32 isr0;
    cUInt32 isr1;
} CHAL_KEYPAD_INTERRUPT_EVENT_t;

/* interrupt event Q - register data from an interrupt to be processed
   later. */
typedef struct {
    cUInt8 head;
    cUInt8 tail;
    cUInt8 length;
    CHAL_KEYPAD_INTERRUPT_EVENT_t eventQ[CHAL_INTERRUPT_EVENT_FIFO_LENGTH];
} CHAL_KEYPAD_INTERRUPT_FIFO_t;

/* key event Q - key events decoded from interrupt events. */
typedef struct {
    cUInt8 head;
    cUInt8 tail;
    cUInt8 length;
    CHAL_KEYPAD_EVENT_t actionQ[CHAL_KEYPAD_KEY_ACTION_FIFO_LENGTH];
} CHAL_KEYPAD_KEY_ACTION_FIFO_t;

#define FIFO_FULL(fifo)           (((fifo.head - fifo.tail) & \
                                   (fifo.length-1)) >= fifo.length)
#define FIFO_EMPTY(fifo)          (fifo.head == fifo.tail)
#define FIFO_INCREMENT_HEAD(fifo) (fifo.head = ((fifo.head+1) & \
                                   (fifo.length-1)))
#define FIFO_INCREMENT_TAIL(fifo) (fifo.tail = ((fifo.tail+1) & \
                                   (fifo.length-1)))

/******************************************************************************/
/*                           G L O B A L S                                    */
/******************************************************************************/

/******************************************************************************/
/*                           LOCALS                                           */
/******************************************************************************/

CHAL_KEYPAD_CONFIG_t keypadConfig;    /* keypad hardware configuration */
cUInt8 keypadRowMask;                 /* mask of pins used for rows */
cUInt8 keypadColumnMask;              /* mask of pins used for columns */

CHAL_KEYPAD_INTERRUPT_FIFO_t interruptEventQ;    /* interrupt Event Q */
CHAL_KEYPAD_KEY_ACTION_FIFO_t keyActionQ;        /* key Event Q */

cUInt32 imr0Val, imr1Val;    /* interrupt mask values.
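                                Saved at init time so that
                                chal_keypad_enable_interrupts() can restore
                                the same row/column mask after
                                chal_keypad_disable_interrupts() has
                                cleared it.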
                             */

/******************************************************************************/
/*                           P R O T O T Y P E S                              */
/******************************************************************************/
static void chal_keypad_store_event(void);
static cUInt8 chal_keypad_calculate_pin_mask(cUInt8 pin);

/******************************************************************************/
/*                           F U N C T I O N S                                */
/******************************************************************************/

/******************************************************************************
*
*  Function Name: bitCount()
*
*  Description: takes in a 32-bit value and returns the number of bits set
*  to '1' in that value. This method of counting ones was chosen because
*  it is optimised for inputs that have a sparse number of ones.
*
*  Parameters: n (in) The 32-bit value to examine
*  Return:     cUInt8 The number of '1' bits in n.
*
******************************************************************************/
static cUInt8 bitCount(cUInt32 n)
{
    cUInt8 count = 0;

    while (n) {        /* loop until we have cleared all '1' bits */
        count++;
        n &= (n - 1);  /* this sets the rightmost '1' bit to 0 */
    }
    return count;
}                      /* bitCount() */

/******************************************************************************
*
*  Function Name: bitNumber()
*
*  Description: takes in a 32-bit value and returns the index of the rightmost
*  '1' bit. It uses a DeBruijn sequence to calculate the index faster than
*  iterating through the value to find it.
*
*  Parameters: val (in) The 32-bit value to examine
*  Return:     cUInt8 The index of the rightmost '1' bit in val
*
******************************************************************************/
static cUInt8 bitNumber(cUInt32 val)
{
    static const cUInt8 MultiplyDeBruijnBitPosition[32] = {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };

    /* The constant 0x077CB531 is a DeBruijn sequence that produces a
     * unique pattern of bits in the high 5 bits for each possible bit
     * position that it is multiplied against.
     * The usage of (val & -val) isolates only the rightmost '1' bit from
     * val. This ensures that the multiplier only has 1 bit set to '1'.
     */
    return MultiplyDeBruijnBitPosition[(cUInt8)
        (((val & -val) * 0x077CB531U) >> 27)];
}                      /* bitNumber() */

/******************************************************************************
*
*  Function Name: chal_keypad_calculate_pin_mask()
*
*  Description: creates a mask that has the first n bits set.
*
*  Parameters: n (in) The number of bits to set
*  Return:     cUInt8 A mask with the first n bits set.
*
******************************************************************************/
cUInt8 chal_keypad_calculate_pin_mask(cUInt8 n)
{
    if ((n > 8) || (n < 1)) {
        /* n should never be bigger than 8 as the hardware supports
           an 8x8 matrix maximum. */
        return 0;
    }

    return (1 << n) - 1;
}                      /* chal_keypad_calculate_pin_mask() */

/******************************************************************************
*
*  Function Name: chal_keypad_init
*
*  Description: initialize the keypad csl driver
*
*  Parameters: config (in) hardware cfg. of keypad to initialize
*
*  Note: When this function completes, the keypad hardware is configured as
*  requested and key interrupts are enabled.
*
******************************************************************************/
void chal_keypad_init(CHAL_KEYPAD_CONFIG_t config)
{
    cUInt32 tempReg, kpcrReg;
    cUInt8 imr0RowCount, imr1RowCount, i;

    /* Initialize the 2 interrupt event FIFOs */
    interruptEventQ.head = 0;
    interruptEventQ.tail = 0;
    interruptEventQ.length = CHAL_INTERRUPT_EVENT_FIFO_LENGTH;

    keyActionQ.head = 0;
    keyActionQ.tail = 0;
    keyActionQ.length = CHAL_KEYPAD_KEY_ACTION_FIFO_LENGTH;

    /* store the keypad config locally */
    keypadConfig = config;

    /* create the row and col masks needed to set various register
       values */
    keypadRowMask = chal_keypad_calculate_pin_mask(keypadConfig.rows);
    keypadColumnMask = chal_keypad_calculate_pin_mask(keypadConfig.columns);

    /* disable all key interrupts */
    CHAL_REG_WRITE32(KEYPAD_KPIMR0, 0);
    CHAL_REG_WRITE32(KEYPAD_KPIMR1, 0);

    /* clear any old interrupts */
    CHAL_REG_WRITE32(KEYPAD_KPICR0, 0xFFFFFFFF);
    CHAL_REG_WRITE32(KEYPAD_KPICR1, 0xFFFFFFFF);

    /* disable the keypad hardware block */
    kpcrReg = CHAL_REG_READ32(KEYPAD_KPCR);
    CHAL_REG_WRITE32(KEYPAD_KPCR, (kpcrReg & ~(KEYPAD_KPCR_Enable)));

    /* set the pin configuration for the row and column signals */
    /*#if !(defined(_HERA_) || defined(_RHEA_)) */
    CHAL_REG_WRITE32(IOCR1_REG, ((keypadColumnMask << 8) | keypadRowMask));
    /*#endif */
#ifdef SWAP_ROW_COL
#else
#endif

    /* setup the hardware configuration register */
    kpcrReg = 0;
    if (keypadConfig.pullUpMode)
        kpcrReg |= KEYPAD_KPCR_Mode;
    kpcrReg |= KEYPAD_KPCR_ColFilterEnable;
    kpcrReg |= (keypadConfig.debounceTime << 8);
    kpcrReg |= KEYPAD_KPCR_StatusFilterEnable;
    kpcrReg |= (keypadConfig.debounceTime << 12);
    kpcrReg |= ((keypadConfig.columns - 1) << 16);
    kpcrReg |= ((keypadConfig.rows - 1) << 20);
#ifdef SWAP_ROW_COL
    kpcrReg |= KEYPAD_KPCR_SwapRowColumn;
#endif
    CHAL_REG_WRITE32(KEYPAD_KPCR, kpcrReg);

    /*#ifdef SWAP_ROW_COL */
    /*    // use cols as output */
    /*    CHAL_REG_WRITE32( KEYPAD_KPIOR, (keypadColumnMask << 16) ); */
    /*#else */
    /* use rows as output */
    CHAL_REG_WRITE32(KEYPAD_KPIOR, (keypadColumnMask << 24));
    /*#endif */

    /* configure the individual key interrupt controls */
    tempReg = 0;
    for (i = 0; i <= 30; i = i + 2)
        tempReg |= (keypadConfig.interruptEdge << i);

    CHAL_REG_WRITE32(KEYPAD_KPEMR0, tempReg);
    CHAL_REG_WRITE32(KEYPAD_KPEMR1, tempReg);
    CHAL_REG_WRITE32(KEYPAD_KPEMR2, tempReg);
    CHAL_REG_WRITE32(KEYPAD_KPEMR3, tempReg);

    /* enable the appropriate interrupts.
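       Each row occupies eight column bits in the mask registers:
       KPIMR0 covers rows 0-3 and KPIMR1 covers rows 4-7, which is why
       the row count is split below.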
     */
    if (keypadConfig.rows >= 4) {
        imr0RowCount = 4;
        imr1RowCount = keypadConfig.rows - 4;
    } else {
        imr0RowCount = keypadConfig.rows;
        imr1RowCount = 0;
    }

    CHAL_REG_WRITE32(KEYPAD_KPICR0, 0xFFFFFFFF);
    CHAL_REG_WRITE32(KEYPAD_KPICR1, 0xFFFFFFFF);

    tempReg = 0;
    for (i = 0; i < imr0RowCount; i++)
        tempReg |= (keypadColumnMask << (i * 8));

    CHAL_REG_WRITE32(KEYPAD_KPIMR0, tempReg);
    /* save the imr0 value as we need it when enabling/disabling
       interrupts */
    imr0Val = tempReg;

    tempReg = 0;
    for (i = 0; i < imr1RowCount; i++)
        tempReg |= (keypadColumnMask << (i * 8));

    CHAL_REG_WRITE32(KEYPAD_KPIMR1, tempReg);
    /* save the imr1 value as we need it when enabling/disabling
       interrupts */
    imr1Val = tempReg;

    /* clear any outstanding interrupts */
    CHAL_REG_WRITE32(KEYPAD_KPICR0, 0xFFFFFFFF);
    CHAL_REG_WRITE32(KEYPAD_KPICR1, 0xFFFFFFFF);

    /* enable the keypad hardware block */
    CHAL_REG_WRITE32(KEYPAD_KPCR, (kpcrReg | KEYPAD_KPCR_Enable));

    return;
}                      /* chal_keypad_init() */

/******************************************************************************
*
*  Function Name: chal_keypad_shutdown
*
*  Description: turn off the keypad csl driver
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_shutdown()
{
    cUInt32 kpcrReg;

    chal_keypad_disable_interrupts();

    /* disable the hardware block */
    kpcrReg = CHAL_REG_READ32(KEYPAD_KPCR);
    CHAL_REG_WRITE32(KEYPAD_KPCR, (kpcrReg & ~(KEYPAD_KPCR_Enable)));
}

/******************************************************************************
*
*  Function Name: chal_keypad_handle_interrupt
*
*  Description: A keypad interrupt has occurred. Save off all information
*  relating to the event for later processing
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_handle_interrupt()
{
    CHAL_KEYPAD_INTERRUPT_EVENT_t *event;

    if (!FIFO_FULL(interruptEventQ)) {
        event = &interruptEventQ.eventQ[interruptEventQ.head];
        event->ssr0 = *(volatile cUInt32 *)KEYPAD_KPSSR0;
        event->ssr1 = *(volatile cUInt32 *)KEYPAD_KPSSR1;
        event->isr0 = *(volatile cUInt32 *)KEYPAD_KPISR0;
        event->isr1 = *(volatile cUInt32 *)KEYPAD_KPISR1;
        FIFO_INCREMENT_HEAD(interruptEventQ);
    }

    /* event information is saved, clear the interrupt */
    CHAL_REG_WRITE32(KEYPAD_KPICR0, 0xFFFFFFFF);
    CHAL_REG_WRITE32(KEYPAD_KPICR1, 0xFFFFFFFF);
}

/******************************************************************************
*
*  Function Name: chal_keypad_clear_interrupts
*
*  Description: Clear all outstanding interrupts
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_clear_interrupts()
{
    CHAL_REG_WRITE32(KEYPAD_KPICR0, 0xFFFFFFFF);
    CHAL_REG_WRITE32(KEYPAD_KPICR1, 0xFFFFFFFF);
}

/******************************************************************************
*
*  Function Name: chal_keypad_disable_interrupts
*
*  Description: Disable key event interrupts
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_disable_interrupts()
{
    CHAL_REG_WRITE32(KEYPAD_KPIMR0, 0);
    CHAL_REG_WRITE32(KEYPAD_KPIMR1, 0);
}

/******************************************************************************
*
*  Function Name: chal_keypad_enable_interrupts
*
*  Description: Enable key event interrupts
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_enable_interrupts()
{
    CHAL_REG_WRITE32(KEYPAD_KPIMR0, imr0Val);
    CHAL_REG_WRITE32(KEYPAD_KPIMR1,
                     imr1Val);
}

/******************************************************************************
*
*  Function Name: chal_keypad_decode_interrupt_event_registers
*
*  Description: Take a set of keypad registers recorded when an interrupt
*  occurred and decode the key event contained in them.
*
*  Parameters:
*     regSet   (in) Is this the first or second register set?
*                   This determines if the events are for
*                   columns 0-3 or 4-7
*                   0 = ISR0 and SSR0 (col 0-3)
*                   1 = ISR1 and SSR1 (col 4-7)
*     bitCount (in) The number of bits set in ISR
*     ssr      (in) The value of SSR
*     isr      (in) The value of ISR
*
******************************************************************************/
void chal_keypad_decode_interrupt_event_registers(cUInt32 regSet,
                          cUInt32 bitCount,
                          cUInt32 ssr,
                          cUInt32 isr)
{
    cUInt32 mask, i, firstbit;
    CHAL_KEYPAD_EVENT_t *keyEvent;

    mask = isr;

    /* if pull-down mode, the SSR bits logic is negated */
    if (!keypadConfig.pullUpMode)
        ssr = ~ssr;

    /* loop through all set bits, as each indicates a key event */
    for (i = 0; i < bitCount; i++) {
        keyEvent = &keyActionQ.actionQ[keyActionQ.tail];

        /* this isolates the rightmost '1' bit from mask */
        firstbit = mask & (-mask);

        if ((ssr & firstbit) == 0)
            keyEvent->keyAction = CHAL_KEYPAD_KEY_PRESS;
        else
            keyEvent->keyAction = CHAL_KEYPAD_KEY_RELEASE;

        /* raw KeyId is of the form 0xCR where: */
        /*    C = column number                 */
        /*    R = row number                    */
        keyEvent->keyId = bitNumber(firstbit) + (regSet * 32);

        /* mask out the bit we just processed the event for */
        mask &= (~firstbit);

        FIFO_INCREMENT_TAIL(keyActionQ);
    }
}

/******************************************************************************
*
*  Function Name: chal_keypad_process_interrupt_events
*
*  Description: Take one data set off the interrupt event FIFO and decode all
*  of the key events contained in it and add them to the Key Event FIFO.
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_process_interrupt_events()
{
    cUInt32 bc1, bc2;
    CHAL_KEYPAD_INTERRUPT_EVENT_t *interruptEvent;

    if ((!FIFO_EMPTY(interruptEventQ)) && (!FIFO_FULL(keyActionQ))) {
        interruptEvent = &interruptEventQ.eventQ[interruptEventQ.tail];

        bc1 = bitCount(interruptEvent->isr0);
        bc2 = bitCount(interruptEvent->isr1);

        if ((bc1 + bc2) <= MAX_KEY_EVENT_PER_INTERRUPT) {
            if (bc1) {
                chal_keypad_decode_interrupt_event_registers(0,
                        bc1,
                        interruptEvent->ssr0,
                        interruptEvent->isr0);
            }
            if (bc2) {
                chal_keypad_decode_interrupt_event_registers(1,
                        bc2,
                        interruptEvent->ssr1,
                        interruptEvent->isr1);
            }
        } else {
            /* appears to be an error (too many bits set in ISR);
               clear the interrupts to reset and hopefully
               continue */
            chal_keypad_clear_interrupts();
        }

        FIFO_INCREMENT_TAIL(interruptEventQ);
    }
}                      /* chal_keypad_process_interrupt_events() */

/******************************************************************************
*
*  Function Name: chal_keypad_get_action_from_Q
*
*  Description: Pull the first set of hardware register off of the interrupt
*  event Q.
*
*  Parameters: event (out) A key event
*
*  Return: Boolean TRUE  = key event found and info set in event parameter
*                  FALSE = no key event exists in queue.
*
******************************************************************************/
Boolean chal_keypad_get_action_from_Q(CHAL_KEYPAD_EVENT_t *event)
{
    Boolean retVal = FALSE;

    if (!FIFO_EMPTY(keyActionQ)) {
        event->keyId = keyActionQ.actionQ[keyActionQ.head].keyId;
        event->keyAction =
            keyActionQ.actionQ[keyActionQ.head].keyAction;
        FIFO_INCREMENT_HEAD(keyActionQ);
        retVal = TRUE;
    }

    return retVal;
}                      /* chal_keypad_get_action_from_Q() */

/******************************************************************************
*
*  Function Name: chal_keypad_retrieve_event
*
*  Description: Find a saved key event. First check the key event Q. This Q
*  contains key events from previously processed interrupt events. If
*  nothing is in the key event Q, then check to see if interrupt events
*  exist in that Q. If so, process the first one and then take the
*  resulting first key event and return it.
*
*  Parameters: event (out) A key event
*
******************************************************************************/
void chal_keypad_retrieve_event(CHAL_KEYPAD_EVENT_t *event)
{
    if (!chal_keypad_get_action_from_Q(event)) {
        chal_keypad_process_interrupt_events();
        if (!chal_keypad_get_action_from_Q(event))
            event->keyAction = CHAL_KEYPAD_KEY_NO_ACTION;
    }
}                      /* chal_keypad_retrieve_event() */

/******************************************************************************
*
*  Function Name: chal_keypad_config_reset
*
*  Description: Reset the keypad ASIC block
*
*  Parameters: none
*
******************************************************************************/
void chal_keypad_config_reset()
{
    CHAL_REG_WRITE32(KEYPAD_KPIMR0, 0);
}

/******************************************************************************
*
*  Function Name: chal_keypad_config_read_status1
*
*  Description: Return the register value of the Keypad Status 1 register.
*
*  Parameters: return (out) SSR1 value
*
******************************************************************************/
cUInt32 chal_keypad_config_read_status1()
{
    return CHAL_REG_READ32(KEYPAD_KPSSR0);
}

/******************************************************************************
*
*  Function Name: chal_keypad_config_read_status2
*
*  Description: Return the register value of the Keypad Status 2 register.
*
*  Parameters: return (out) SSR2 value
*
******************************************************************************/
cUInt32 chal_keypad_config_read_status2()
{
    return CHAL_REG_READ32(KEYPAD_KPSSR1);
}

/******************************************************************************
*
*  Function Name: chal_keypad_update_interrupt_clear_register0
*
*  Description: Update the Keypad Interrupt Clear register 0.
*
*  Parameters: value (in) new value for ICR0
*
******************************************************************************/
void chal_keypad_update_interrupt_clear_register0(cUInt32 value)
{
    CHAL_REG_WRITE32(KEYPAD_KPICR0, value);
}

/******************************************************************************
*
*  Function Name: chal_keypad_update_interrupt_clear_register1
*
*  Description: Update the Keypad Interrupt Clear register 1.
*
*  Parameters: value (in) new value for ICR1
*
******************************************************************************/
void chal_keypad_update_interrupt_clear_register1(cUInt32 value)
{
    CHAL_REG_WRITE32(KEYPAD_KPICR1, value);
}

/******************************************************************************
*
*  Function Name: chal_keypad_read_pullup_status
*
*  Description: Return the value of the Keypad Pullup mode.
* * Parameters: return (out) Pullup Mode bit * ******************************************************************************/ cUInt32 chal_keypad_read_pullup_status() { return (CHAL_REG_READ32(KEYPAD_KPCR)) & KEYPAD_KPCR_Mode; } /****************************************************************************** * * Function Name: chal_keypad_swap_row_and_column * * Description: Set the Swap row and column feature * * Parameters: swap (in) TRUE = turn swap row and column ON * FALSE = set swap row and column OFF * ******************************************************************************/ void chal_keypad_swap_row_and_column(Boolean swap) { cUInt32 regVal; regVal = CHAL_REG_READ32(KEYPAD_KPCR); if (swap) regVal |= KEYPAD_KPCR_SWAPROWCOLUMN_MASK; else regVal &= ~KEYPAD_KPCR_SWAPROWCOLUMN_MASK; CHAL_REG_WRITE32(KEYPAD_KPCR, regVal); } #endif /* tempINTERFACE_OSDAL_KEYPAD */
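/*
 * A standalone sketch (not part of the driver above) of the bit-isolation
 * idiom used in chal_keypad_decode_interrupt_event_registers(): mask & (-mask)
 * extracts the lowest set '1' bit, which is then cleared before the next
 * pass, so each key event bit is visited exactly once.  The function name
 * is illustrative only.
 */
static unsigned int sketch_count_events(unsigned int isr)
{
	unsigned int mask = isr;
	unsigned int events = 0;

	while (mask) {
		unsigned int firstbit = mask & (-mask);	/* rightmost '1' bit */
		events++;				/* one key event per set bit */
		mask &= ~firstbit;			/* drop the processed bit */
	}
	return events;
}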
alexey6600/kernel_sony_tetra_2
arch/arm/plat-kona/chal/chal_keypad.c
C
gpl-2.0
42,918
31.293454
80
0.515565
false
package org.jsoup.parser;

import org.jsoup.helper.StringUtil;
import org.jsoup.helper.Validate;
import org.jsoup.nodes.Entities;

import java.util.Arrays;

/**
 * Reads the input stream into tokens.
 */
final class Tokeniser {
    static final char replacementChar = '\uFFFD'; // replaces null character
    private static final char[] notCharRefCharsSorted = new char[]{'\t', '\n', '\r', '\f', ' ', '<', '&'};

    static {
        Arrays.sort(notCharRefCharsSorted);
    }

    private final CharacterReader reader; // html input
    private final ParseErrorList errors; // errors found while tokenising

    private TokeniserState state = TokeniserState.Data; // current tokenisation state
    private Token emitPending; // the token we are about to emit on next read
    private boolean isEmitPending = false;
    private String charsString = null; // characters pending an emit. Will fall to charsBuilder if more than one
    private StringBuilder charsBuilder = new StringBuilder(1024); // buffers characters to output as one token, if more than one emit per read
    StringBuilder dataBuffer = new StringBuilder(1024); // buffers data looking for </script>

    Token.Tag tagPending; // tag we are building up
    Token.StartTag startPending = new Token.StartTag();
    Token.EndTag endPending = new Token.EndTag();
    Token.Character charPending = new Token.Character();
    Token.Doctype doctypePending = new Token.Doctype(); // doctype building up
    Token.Comment commentPending = new Token.Comment(); // comment building up
    private String lastStartTag; // the last start tag emitted, to test appropriate end tag

    Tokeniser(CharacterReader reader, ParseErrorList errors) {
        this.reader = reader;
        this.errors = errors;
    }

    Token read() {
        while (!isEmitPending)
            state.read(this, reader);

        // if emit is pending, a non-character token was found: return any chars in buffer, and leave token for next read:
        if (charsBuilder.length() > 0) {
            String str = charsBuilder.toString();
            charsBuilder.delete(0, charsBuilder.length());
            charsString = null;
            return charPending.data(str);
        } else if (charsString != null) {
            Token token = charPending.data(charsString);
            charsString = null;
            return token;
        } else {
            isEmitPending = false;
            return emitPending;
        }
    }

    void emit(Token token) {
        Validate.isFalse(isEmitPending, "There is an unread token pending!");

        emitPending = token;
        isEmitPending = true;

        if (token.type == Token.TokenType.StartTag) {
            Token.StartTag startTag = (Token.StartTag) token;
            lastStartTag = startTag.tagName;
        } else if (token.type == Token.TokenType.EndTag) {
            Token.EndTag endTag = (Token.EndTag) token;
            if (endTag.attributes != null)
                error("Attributes incorrectly present on end tag");
        }
    }

    void emit(final String str) {
        // buffer strings up until last string token found, to emit only one token for a run of character refs etc.
        // does not set isEmitPending; read checks that
        if (charsString == null) {
            charsString = str;
        }
        else {
            if (charsBuilder.length() == 0) { // switching to string builder as more than one emit before read
                charsBuilder.append(charsString);
            }
            charsBuilder.append(str);
        }
    }

    void emit(char[] chars) {
        emit(String.valueOf(chars));
    }

    void emit(int[] codepoints) {
        emit(new String(codepoints, 0, codepoints.length));
    }

    void emit(char c) {
        emit(String.valueOf(c));
    }

    TokeniserState getState() {
        return state;
    }

    void transition(TokeniserState state) {
        this.state = state;
    }

    void advanceTransition(TokeniserState state) {
        reader.advance();
        this.state = state;
    }

    final private int[] codepointHolder = new int[1]; // holder to not have to keep creating arrays
    final private int[] multipointHolder = new int[2];
    int[] consumeCharacterReference(Character additionalAllowedCharacter, boolean inAttribute) {
        if (reader.isEmpty())
            return null;
        if (additionalAllowedCharacter != null && additionalAllowedCharacter == reader.current())
            return null;
        if (reader.matchesAnySorted(notCharRefCharsSorted))
            return null;

        final int[] codeRef = codepointHolder;
        reader.mark();
        if (reader.matchConsume("#")) { // numbered
            boolean isHexMode = reader.matchConsumeIgnoreCase("X");
            String numRef = isHexMode ? reader.consumeHexSequence() : reader.consumeDigitSequence();
            if (numRef.length() == 0) { // didn't match anything
                characterReferenceError("numeric reference with no numerals");
                reader.rewindToMark();
                return null;
            }
            if (!reader.matchConsume(";"))
                characterReferenceError("missing semicolon"); // missing semi
            int charval = -1;
            try {
                int base = isHexMode ? 16 : 10;
                charval = Integer.valueOf(numRef, base);
            } catch (NumberFormatException ignored) {
            } // skip
            if (charval == -1 || (charval >= 0xD800 && charval <= 0xDFFF) || charval > 0x10FFFF) {
                characterReferenceError("character outside of valid range");
                codeRef[0] = replacementChar;
                return codeRef;
            } else {
                // todo: implement number replacement table
                // todo: check for extra illegal unicode points as parse errors
                codeRef[0] = charval;
                return codeRef;
            }
        } else { // named
            // get as many letters as possible, and look for matching entities.
            String nameRef = reader.consumeLetterThenDigitSequence();
            boolean looksLegit = reader.matches(';');
            // found if a base named entity without a ;, or an extended entity with the ;.
            boolean found = (Entities.isBaseNamedEntity(nameRef) || (Entities.isNamedEntity(nameRef) && looksLegit));

            if (!found) {
                reader.rewindToMark();
                if (looksLegit) // named with semicolon
                    characterReferenceError(String.format("invalid named reference '%s'", nameRef));
                return null;
            }
            if (inAttribute && (reader.matchesLetter() || reader.matchesDigit() || reader.matchesAny('=', '-', '_'))) {
                // don't want that to match
                reader.rewindToMark();
                return null;
            }

            if (!reader.matchConsume(";"))
                characterReferenceError("missing semicolon"); // missing semi
            int numChars = Entities.codepointsForName(nameRef, multipointHolder);
            if (numChars == 1) {
                codeRef[0] = multipointHolder[0];
                return codeRef;
            } else if (numChars == 2) {
                return multipointHolder;
            } else {
                Validate.fail("Unexpected characters returned for " + nameRef);
                return multipointHolder;
            }
        }
    }

    Token.Tag createTagPending(boolean start) {
        tagPending = start ?
startPending.reset() : endPending.reset(); return tagPending; } void emitTagPending() { tagPending.finaliseTag(); emit(tagPending); } void createCommentPending() { commentPending.reset(); } void emitCommentPending() { emit(commentPending); } void createDoctypePending() { doctypePending.reset(); } void emitDoctypePending() { emit(doctypePending); } void createTempBuffer() { Token.reset(dataBuffer); } boolean isAppropriateEndTagToken() { return lastStartTag != null && tagPending.name().equalsIgnoreCase(lastStartTag); } String appropriateEndTagName() { if (lastStartTag == null) return null; return lastStartTag; } void error(TokeniserState state) { if (errors.canAddError()) errors.add(new ParseError(reader.pos(), "Unexpected character '%s' in input state [%s]", reader.current(), state)); } void eofError(TokeniserState state) { if (errors.canAddError()) errors.add(new ParseError(reader.pos(), "Unexpectedly reached end of file (EOF) in input state [%s]", state)); } private void characterReferenceError(String message) { if (errors.canAddError()) errors.add(new ParseError(reader.pos(), "Invalid character reference: %s", message)); } void error(String errorMsg) { if (errors.canAddError()) errors.add(new ParseError(reader.pos(), errorMsg)); } boolean currentNodeInHtmlNS() { // todo: implement namespaces correctly return true; // Element currentNode = currentNode(); // return currentNode != null && currentNode.namespace().equals("HTML"); } /** * Utility method to consume reader and unescape entities found within. * @param inAttribute * @return unescaped string from reader */ String unescapeEntities(boolean inAttribute) { StringBuilder builder = StringUtil.stringBuilder(); while (!reader.isEmpty()) { builder.append(reader.consumeTo('&')); if (reader.matches('&')) { reader.consume(); int[] c = consumeCharacterReference(null, inAttribute); if (c == null || c.length==0) builder.append('&'); else { builder.appendCodePoint(c[0]); if (c.length == 2) builder.appendCodePoint(c[1]); } } } return builder.toString(); } }
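/*
 * A standalone sketch of the numeric character-reference validation rule
 * implemented in consumeCharacterReference() above: a value that fails to
 * parse, falls in the surrogate range, or exceeds U+10FFFF is mapped to the
 * replacement character.  The class and method names are illustrative only.
 */
class NumericRefSketch {
    static int decode(String digits, boolean hex) {
        int charval = -1;
        try {
            charval = Integer.valueOf(digits, hex ? 16 : 10);
        } catch (NumberFormatException ignored) {
        } // overflow or garbage falls through as -1
        if (charval == -1 || (charval >= 0xD800 && charval <= 0xDFFF) || charval > 0x10FFFF)
            return Tokeniser.replacementChar; // out of range: substitute U+FFFD
        return charval;
    }
    // e.g. decode("41", true) == 'A'; decode("D800", true) == '\uFFFD'
}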
SpoonLabs/astor
examples/librepair-experiments-jhy-jsoup-285353482-20171009-062400_bugonly_with_package_info/src/main/java/org/jsoup/parser/Tokeniser.java
Java
gpl-2.0
10,163
35.296429
142
0.591656
false
/*
    This file is part of libdvbcsa.

    libdvbcsa is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published
    by the Free Software Foundation; either version 2 of the License,
    or (at your option) any later version.

    libdvbcsa is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with libdvbcsa; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

    Parallel bitslice implementation based on FFdecsa,
    Copyright (C) 2003-2004 fatih89r

    (c) 2006-2008 Alexandre Becoulet <alexandre.becoulet@free.fr>
*/

#ifndef LIBDVBCSA_H_
#define LIBDVBCSA_H_

/* csa control word */
typedef unsigned char dvbcsa_cw_t[8];

/***********************************************************************
	Single packet CSA implementation API
*/

/* single packet implementation key context */
typedef struct dvbcsa_key_s dvbcsa_key_t;

/** allocate a new csa key context */
struct dvbcsa_key_s * dvbcsa_key_alloc(void);

/** free a csa key context */
void dvbcsa_key_free(struct dvbcsa_key_s *key);

/** setup a csa key context to use the given control word */
void dvbcsa_key_set (const dvbcsa_cw_t cw, struct dvbcsa_key_s *key);

/** decrypt a packet payload */
void dvbcsa_decrypt (const struct dvbcsa_key_s *key, unsigned char *data, unsigned int len);

/** encrypt a packet payload */
void dvbcsa_encrypt (const struct dvbcsa_key_s *key, unsigned char *data, unsigned int len);

/***********************************************************************
	Parallel bitslice CSA implementation API
*/

/** packets batch structure, describes each data packet payload to process */
struct dvbcsa_bs_batch_s
{
	unsigned char	*data;	/* pointer to payload */
	unsigned int	len;	/* payload bytes length */
};

/** parallel bitslice implementation key context */
typedef struct dvbcsa_bs_key_s dvbcsa_bs_key_t;

/** allocate a new csa bitslice key context */
struct dvbcsa_bs_key_s * dvbcsa_bs_key_alloc(void);

/** free a csa bitslice key context */
void dvbcsa_bs_key_free(struct dvbcsa_bs_key_s *key);

/** setup a csa bitslice key context to use the given control word */
void dvbcsa_bs_key_set(const dvbcsa_cw_t cw, struct dvbcsa_bs_key_s *key);

/** get maximum number of packets per batch */
unsigned int dvbcsa_bs_batch_size(void);

/** decrypt a packet batch. batch is an array of struct
    dvbcsa_bs_batch_s with an extra NULL data termination entry.
    maxlen is the maximum data bytes length to process, must be a
    multiple of 8, should be 184 for TS packets. */
void dvbcsa_bs_decrypt(const struct dvbcsa_bs_key_s *key, const struct dvbcsa_bs_batch_s *pcks, unsigned int maxlen);

/** encrypt a packet batch. batch is an array of struct
    dvbcsa_bs_batch_s with an extra NULL data termination entry.
    maxlen is the maximum data bytes length to process, must be a
    multiple of 8, should be 184 for TS packets. */
void dvbcsa_bs_encrypt(const struct dvbcsa_bs_key_s *key, const struct dvbcsa_bs_batch_s *pcks, unsigned int maxlen);

#endif
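/*
 * A minimal usage sketch for the bitslice batch API declared above, based
 * only on the comments in this header: the batch array is terminated by an
 * entry whose data pointer is NULL, and maxlen should be 184 for TS packet
 * payloads.  The function name and payload variables are illustrative only.
 */
#include <stddef.h>

static void sketch_decrypt_two_packets(struct dvbcsa_bs_key_s *key,
				       unsigned char *pl0, unsigned char *pl1)
{
	struct dvbcsa_bs_batch_s batch[3];

	batch[0].data = pl0;
	batch[0].len  = 184;
	batch[1].data = pl1;
	batch[1].len  = 184;
	batch[2].data = NULL;	/* NULL data terminates the batch */

	dvbcsa_bs_decrypt(key, batch, 184);
}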
pingflood/libdvbcsa
src/dvbcsa/dvbcsa.h
C
gpl-2.0
3,418
29.517857
76
0.683733
false
/**************************************************************************** ** ** This file is part of the Qt Extended Opensource Package. ** ** Copyright (C) 2009 Trolltech ASA. ** ** Contact: Qt Extended Information (info@qtextended.org) ** ** This file may be used under the terms of the GNU General Public License ** version 2.0 as published by the Free Software Foundation and appearing ** in the file LICENSE.GPL included in the packaging of this file. ** ** Please review the following information to ensure GNU General Public ** Licensing requirements will be met: ** http://www.fsf.org/licensing/licenses/info/GPLv2.html. ** ** ****************************************************************************/ // // W A R N I N G // ------------- // // This file is part of QtUiTest and is released as a Technology Preview. // This file and/or the complete System testing solution may change from version to // version without notice, or even be removed. // #include "testabstractitemview.h" #include "testwidgetslog.h" #include <QListView> #include <QTimer> #include <QScrollBar> #include <qtuitestnamespace.h> #ifdef QTOPIA_TARGET # include <Qtopia> # include <QContentSetModel> #endif /*****************************************************************************/ /*! If \a model is an asynchronously updating model in the middle of an update, * wait for the update to complete. */ void TestAbstractItemView::waitForModelUpdate(QAbstractItemModel* model) { #ifdef QTOPIA_TARGET if (QContentSetModel *cmodel = qobject_cast<QContentSetModel*>(model)) { /* If an update is _likely_ to occur soon, wait for a little while */ if (0 == cmodel->rowCount() && !cmodel->updateInProgress()) { QtUiTest::waitForSignal(cmodel, SIGNAL(updateStarted()), 200); } if (cmodel->updateInProgress()) { QtUiTest::waitForSignal(cmodel, SIGNAL(updateFinished())); } QtUiTest::setErrorString(QString()); } #else Q_UNUSED(model); #endif } TestAbstractItemView::TestAbstractItemView(QObject *_q) : TestWidget(_q), q(qobject_cast<QAbstractItemView*>(_q)) { QtUiTest::connectFirst(q, SIGNAL(activated(QModelIndex)), this, SLOT(on_activated(QModelIndex))); // Unfortunately, some people connect to "pressed" rather than "activated", // so we have to as well, because not all selections go via "activated". QtUiTest::connectFirst(q, SIGNAL(pressed(QModelIndex)), this, SLOT(on_activated(QModelIndex))); } void TestAbstractItemView::on_activated(QModelIndex const& ind) { // Timer discourages duplicate signal emission. 
if (m_lastActivatedTimer.elapsed() > 500 || m_lastActivatedTimer.elapsed() < 0) { TestWidgetsLog() << "emit selected" << ind.data().toString(); emit selected(printable(ind.data().toString())); m_lastActivatedTimer.start(); } else { TestWidgetsLog() << "Would emit selected" << ind.data().toString() << "except we have already done that recently."; } } QString TestAbstractItemView::selectedText() const { TestWidgetsLog(); waitForModelUpdate(q->model()); return printable(q->currentIndex().data().toString()); } QString TestAbstractItemView::text() const { TestWidgetsLog(); return list().join("\n"); } QStringList TestAbstractItemView::list() const { using namespace QtUiTest; TestWidgetsLog() << q; waitForModelUpdate(q->model()); QStringList ret; /* FIXME get rid of this special case */ { static bool pass_through = false; ListWidget *parent; if (!pass_through && q->inherits("QCalendarView") && (parent = qtuitest_cast<ListWidget*>(q->parent()))) { pass_through = true; ret = parent->list(); pass_through = false; return ret; } } // Allow testwidgets to make decisions based on the view associated with this item. QVariant view = QVariant::fromValue((QObject*)q); q->model()->setProperty("_q_qtuitest_itemview", view); TestWidgetsLog() << "_q_qtuitest_itemview is" << q->model()->property("_q_qtuitest_itemview").value<QObject*>(); ListWidget* lw = qtuitest_cast<ListWidget*>(q->model()); if (!lw) { QString model; QDebug(&model) << q->model(); setErrorString("Could not find a ListWidget interface for model " + model); return QStringList(); } return lw->list(); } QRect TestAbstractItemView::visualRect(QString const &item) const { using namespace QtUiTest; TestWidgetsLog(); waitForModelUpdate(q->model()); QRect ret; /* FIXME get rid of this special case */ { static bool pass_through = false; ListWidget *parent; if (!pass_through && q->inherits("QCalendarView") && (parent = qtuitest_cast<ListWidget*>(q->parent()))) { pass_through = true; ret = parent->visualRect(item); pass_through = false; ret.moveTopLeft( q->mapFromGlobal( q->parentWidget()->mapToGlobal( ret.topLeft() ) ) ); return ret; } } // Allow testwidgets to make decisions based on the view associated with this item. 
QVariant view = QVariant::fromValue((QObject*)q); q->model()->setProperty("_q_qtuitest_itemview", view); ListWidget* lw = qtuitest_cast<ListWidget*>(q->model()); if (!lw) { QString model; QDebug(&model) << q->model(); setErrorString("Could not find a ListWidget interface for model " + model); } else { ret = lw->visualRect(item); } return ret; } bool TestAbstractItemView::isMultiSelection() const { return (q->selectionMode() > QAbstractItemView::SingleSelection); } bool TestAbstractItemView::canSelect(QString const &item) const { if (q->selectionMode() == QAbstractItemView::NoSelection) return false; return list().contains(item); } bool TestAbstractItemView::canSelectMulti(QStringList const &items) const { if (!isMultiSelection()) return false; QSet<QString> itemSet = items.toSet(); return ((itemSet & list().toSet()) == itemSet); } bool TestAbstractItemView::select(QString const &item) { /* FIXME fix calendar widget and remove this code */ if (q->inherits("QCalendarView")) return false; TestWidgetsLog() << item; if (!canSelect(item)) { TestWidgetsLog() << "can't select" << item; return false; } if (!setFocus() || !hasFocus()) { QtUiTest::setErrorString("Couldn't give focus to item view"); return false; } if (QtUiTest::mousePreferred()) { if (!ensureVisible(item)) { TestWidgetsLog() << "couldn't make" << item << "visible"; return false; } QPoint pos = visualRect(item).center(); TestWidgetsLog() << "after ensureVisible, item is at" << pos; QtUiTest::mouseClick(mapToGlobal(pos), Qt::LeftButton); return true; } else { // Consume pending key events, if any. while (QtUiTest::waitForEvent(q, QEvent::KeyRelease, 200, Qt::QueuedConnection)) {} QStringList allItems = list(); const int maxtries = 100; int desiredIndex = allItems.indexOf(item); int currentIndex = allItems.indexOf(selectedText()); TestWidgetsLog() << "desiredIndex=" << desiredIndex << ", currentIndex=" << currentIndex; // Move horizontally (if necessary) int desiredPos = visualRect(item).center().x(); int currentPos = visualRect(selectedText()).center().x(); for (int i = 0; i < maxtries && desiredPos != currentPos; ++i) { Qt::Key key; if (desiredPos < currentPos) { key = Qt::Key_Left; TestWidgetsLog() << "Left (desired=" << desiredPos << ", current=" << currentPos << ")"; } else { key = Qt::Key_Right; TestWidgetsLog() << "Right (desired=" << desiredPos << ", current=" << currentPos << ")"; } if (!QtUiTest::keyClick(q, key)) return false; currentPos = visualRect(selectedText()).center().x(); } if (desiredPos != currentPos) { QtUiTest::setErrorString(QString( "Left/right keys failed to move highlight horizontally; desired position %1, " "current position %2").arg(desiredPos).arg(currentPos)); return false; } // Move vertically Qt::Key key; if (desiredIndex > currentIndex) key = Qt::Key_Down; else key = Qt::Key_Up; for (int i = 0; i < maxtries && selectedText() != item; ++i) { TestWidgetsLog() << "keyClick" << (key==Qt::Key_Down ? 
"Down" : "Up"); if (!QtUiTest::keyClick(q, key)) return false; } QString selected = selectedText(); TestWidgetsLog() << "selectedText() now" << selected; if (selected != item) { QtUiTest::setErrorString(QString( "Up/down keys should have caused item %1 to become selected, but item %2 " "is selected instead.").arg(item).arg(selected)); return false; } TestWidgetsLog() << "hit activate key"; if (!QtUiTest::keyClick(q, QtUiTest::Key_Activate)) return false; return true; } return false; } bool TestAbstractItemView::selectMulti(QStringList const &items) { if (!canSelectMulti(items)) return false; TestWidgetsLog() << items; return false; } bool TestAbstractItemView::ensureVisible(QString const &item) { QPoint p = visualRect(item).center(); if (rect().contains(p)) { TestWidgetsLog() << item << "is already visible"; return true; } if (!QtUiTest::mousePreferred()) return false; /* Figure out the points to click for scrolling in each direction */ QScrollBar *vbar = q->verticalScrollBar(); QScrollBar *hbar = q->horizontalScrollBar(); QPoint up = vbar->mapToGlobal(QPoint(vbar->width()/2,5)); QPoint down = vbar->mapToGlobal(QPoint(vbar->width()/2,vbar->height()-5)); QPoint left = hbar->mapToGlobal(QPoint(5, hbar->height()/2)); QPoint right = hbar->mapToGlobal(QPoint(hbar->width()-5,hbar->height()/2)); /* While p is above rect... */ while (p.y() < rect().top()) { if (!vbar->isVisible()) return false; TestWidgetsLog() << item << "up" << "\nrect:" << rect() << "p:" << p; QtUiTest::mouseClick(up); p = visualRect(item).center(); } /* While p is below rect... */ while (p.y() > rect().bottom()) { if (!vbar->isVisible()) return false; TestWidgetsLog() << item << "down" << "\nrect:" << rect() << "p:" << p; QtUiTest::mouseClick(down); p = visualRect(item).center(); } /* While p is left of rect... */ while (p.x() < rect().left()) { if (!hbar->isVisible()) return false; TestWidgetsLog() << item << "left" << "\nrect:" << rect() << "p:" << p; QtUiTest::mouseClick(left); p = visualRect(item).center(); } /* While p is right of rect... */ while (p.x() > rect().right()) { if (!hbar->isVisible()) return false; TestWidgetsLog() << item << "right" << "\nrect:" << rect() << "p:" << p; QtUiTest::mouseClick(right); p = visualRect(item).center(); } if (!rect().contains(p)) { TestWidgetsLog() << item << "failed" << "\nrect:" << rect() << "p:" << p; return false; } return true; } bool TestAbstractItemView::canWrap(QObject* o) { return qobject_cast<QAbstractItemView*>(o); }
Trim/qtmoko
src/plugins/qtuitest/widgets_qt/testabstractitemview.cpp
C++
gpl-2.0
11,924
31.668493
116
0.582942
false
/**************************************************************************** ** ** Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). ** Contact: Qt Software Information (qt-info@nokia.com) ** ** This file is part of the QtCore module of the Qt Toolkit. ** ** Commercial Usage ** Licensees holding valid Qt Commercial licenses may use this file in ** accordance with the Qt Commercial License Agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and Nokia. ** ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License versions 2.0 or 3.0 as published by the Free ** Software Foundation and appearing in the file LICENSE.GPL included in ** the packaging of this file. Please review the following information ** to ensure GNU General Public Licensing requirements will be met: ** http://www.fsf.org/licensing/licenses/info/GPLv2.html and ** http://www.gnu.org/copyleft/gpl.html. In addition, as a special ** exception, Nokia gives you certain additional rights. These rights ** are described in the Nokia Qt GPL Exception version 1.3, included in ** the file GPL_EXCEPTION.txt in this package. ** ** Qt for Windows(R) Licensees ** As a special exception, Nokia, as the sole copyright holder for Qt ** Designer, grants users of the Qt/Eclipse Integration plug-in the ** right for the Qt/Eclipse Integration to link to functionality ** provided by Qt Designer and its related libraries. ** ** If you are unsure which license is appropriate for your use, please ** contact the sales department at qt-sales@nokia.com. ** ****************************************************************************/ #ifndef QMATH_H #define QMATH_H #include <math.h> #include <QtCore/qglobal.h> QT_BEGIN_HEADER QT_BEGIN_NAMESPACE QT_MODULE(Core) inline int qCeil(qreal v) { #ifdef QT_USE_MATH_H_FLOATS if (sizeof(qreal) == sizeof(float)) return int(ceilf(v)); else #endif return int(ceil(v)); } inline int qFloor(qreal v) { #ifdef QT_USE_MATH_H_FLOATS if (sizeof(qreal) == sizeof(float)) return int(floorf(v)); else #endif return int(floor(v)); } inline qreal qSin(qreal v) { #ifdef QT_USE_MATH_H_FLOATS if (sizeof(qreal) == sizeof(float)) return sinf(v); else #endif return sin(v); } inline qreal qCos(qreal v) { #ifdef QT_USE_MATH_H_FLOATS if (sizeof(qreal) == sizeof(float)) return cosf(v); else #endif return cos(v); } inline qreal qSqrt(qreal v) { #ifdef QT_USE_MATH_H_FLOATS if (sizeof(qreal) == sizeof(float)) return sqrtf(v); else #endif return sqrt(v); } inline qreal qLn(qreal v) { #ifdef QT_USE_MATH_H_FLOATS if (sizeof(qreal) == sizeof(float)) return logf(v); else #endif return log(v); } inline qreal qPow(qreal x, qreal y) { #ifdef QT_USE_MATH_H_FLOATS if (sizeof(qreal) == sizeof(float)) return powf(x, y); else #endif return pow(x, y); } QT_END_NAMESPACE QT_END_HEADER #endif // QMATH_H
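/*
 * A small usage sketch (not part of the header): qCeil()/qFloor() return int
 * while the other helpers return qreal, dispatching to the single-precision
 * C functions when QT_USE_MATH_H_FLOATS is defined and qreal is a float.
 * The function names and values below are illustrative only.
 */
static inline qreal qmath_sketch_hypotenuse(qreal w, qreal h)
{
    return qSqrt(qPow(w, 2) + qPow(h, 2)); /* e.g. w=3, h=4 -> 5 */
}

static inline int qmath_sketch_pages(qreal items, qreal perPage)
{
    return qCeil(items / perPage); /* e.g. 97 items, 16 per page -> 7 */
}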
liuyanghejerry/qtextended
qtopiacore/qt/src/corelib/kernel/qmath.h
C
gpl-2.0
3,138
24.104
77
0.655832
false
/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/percpu.h>

/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
	unsigned long a,b;
};

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;
	char	rfu;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;	/* in KB - valid for CPUS which support this call */
	int	x86_cache_alignment;	/* In bytes */
	int	fdiv_bug;
	int	f00f_bug;
	int	coma_bug;
	unsigned long loops_per_jiffy;
	unsigned char x86_num_cores;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];
extern char ignore_fpu_irq;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c"(0));
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c" (count));
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx" );
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx" );
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}

#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
*/ extern unsigned long mmu_cr4_features; static inline void set_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features |= mask; cr4 = read_cr4(); cr4 |= mask; write_cr4(cr4); } static inline void clear_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features &= ~mask; cr4 = read_cr4(); cr4 &= ~mask; write_cr4(cr4); } /* * NSC/Cyrix CPU configuration register indexes */ #define CX86_PCR0 0x20 #define CX86_GCR 0xb8 #define CX86_CCR0 0xc0 #define CX86_CCR1 0xc1 #define CX86_CCR2 0xc2 #define CX86_CCR3 0xc3 #define CX86_CCR4 0xe8 #define CX86_CCR5 0xe9 #define CX86_CCR6 0xea #define CX86_CCR7 0xeb #define CX86_PCR1 0xf0 #define CX86_DIR0 0xfe #define CX86_DIR1 0xff #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc /* * NSC/Cyrix CPU indexed register access macros */ #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) #define setCx86(reg, data) do { \ outb((reg), 0x22); \ outb((data), 0x23); \ } while (0) static inline void serialize_cpu(void) { __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); } static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { /* "monitor %eax,%ecx,%edx;" */ asm volatile( ".byte 0x0f,0x01,0xc8;" : :"a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax,%ecx;" */ asm volatile( ".byte 0x0f,0x01,0xc9;" : :"a" (eax), "c" (ecx)); } /* from system description table in BIOS. Mostly for MCA use, but others may find it useful. */ extern unsigned int machine_id; extern unsigned int machine_submodel_id; extern unsigned int BIOS_revision; extern unsigned int mca_pentium_flag; /* Boot loader type from the setup header */ extern int bootloader_type; /* * User space process size: 3GB (default). */ #define TASK_SIZE (PAGE_OFFSET) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) #define HAVE_ARCH_PICK_MMAP_LAYOUT /* * Size of io_bitmap. 
*/ #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) #define INVALID_IO_BITMAP_OFFSET 0x8000 #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 struct i387_fsave_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ long status; /* software status information */ }; struct i387_fxsave_struct { unsigned short cwd; unsigned short swd; unsigned short twd; unsigned short fop; long fip; long fcs; long foo; long fos; long mxcsr; long mxcsr_mask; long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ long padding[56]; } __attribute__ ((aligned (16))); struct i387_soft_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ unsigned char ftop, changed, lookahead, no_update, rm, alimit; struct info *info; unsigned long entry_eip; }; union i387_union { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; }; typedef struct { unsigned long seg; } mm_segment_t; struct thread_struct; struct tss_struct { unsigned short back_link,__blh; unsigned long esp0; unsigned short ss0,__ss0h; unsigned long esp1; unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ unsigned long esp2; unsigned short ss2,__ss2h; unsigned long __cr3; unsigned long eip; unsigned long eflags; unsigned long eax,ecx,edx,ebx; unsigned long esp; unsigned long ebp; unsigned long esi; unsigned long edi; unsigned short es, __esh; unsigned short cs, __csh; unsigned short ss, __ssh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; unsigned short ldt, __ldth; unsigned short trace, io_bitmap_base; /* * The extra 1 is there because the CPU will access an * additional byte beyond the end of the IO permission * bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; /* * Cache the current maximum and the last task that used the bitmap: */ unsigned long io_bitmap_max; struct thread_struct *io_bitmap_owner; /* * pads the TSS to be cacheline-aligned (size is 0x100) */ unsigned long __cacheline_filler[35]; /* * .. and then another 0x100 bytes for emergency kernel stack */ unsigned long stack[64]; } __attribute__((packed)); #define ARCH_MIN_TASKALIGN 16 struct thread_struct { /* cached TLS descriptors. */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; unsigned long esp0; unsigned long sysenter_cs; unsigned long eip; unsigned long esp; unsigned long fs; unsigned long gs; /* Hardware debugging registers */ unsigned long debugreg[8]; /* %%db0-7 debug registers */ /* fault info */ unsigned long cr2, trap_no, error_code; /* floating point info */ union i387_union i387; /* virtual 86 mode info */ struct vm86_struct __user * vm86_info; unsigned long screen_bitmap; unsigned long v86flags, v86mask, saved_esp0; unsigned int saved_fs, saved_gs; /* IO permissions */ unsigned long *io_bitmap_ptr; unsigned long iopl; /* max allowed port in the bitmap, in bytes: */ unsigned long io_bitmap_max; }; #define INIT_THREAD { \ .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ } /* * Note that the .io_bitmap member must be extra-big. 
This is because * the CPU will access an additional byte beyond the end of the IO * permission bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ #define INIT_TSS { \ .esp0 = sizeof(init_stack) + (long)&init_stack, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ } static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) { tss->esp0 = thread->esp0; /* This can only happen when SEP is enabled, no need to test "SEP"arately */ if (unlikely(tss->ss1 != thread->sysenter_cs)) { tss->ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); } } #define start_thread(regs, new_eip, new_esp) do { \ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ set_fs(USER_DS); \ regs->xds = __USER_DS; \ regs->xes = __USER_DS; \ regs->xss = __USER_DS; \ regs->xcs = __USER_CS; \ regs->eip = new_eip; \ regs->esp = new_esp; \ } while (0) /* * These special macros can be used to get or set a debugging register */ #define get_debugreg(var, register) \ __asm__("movl %%db" #register ", %0" \ :"=r" (var)) #define set_debugreg(value, register) \ __asm__("movl %0,%%db" #register \ : /* no output */ \ :"r" (value)) /* * Set IOPL bits in EFLAGS from given mask */ static inline void set_iopl_mask(unsigned mask) { unsigned int reg; __asm__ __volatile__ ("pushfl;" "popl %0;" "andl %1, %0;" "orl %2, %0;" "pushl %0;" "popfl" : "=&r" (reg) : "i" (~X86_EFLAGS_IOPL), "r" (mask)); } /* Forward declaration, a strange C thing */ struct task_struct; struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ extern void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists */ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern unsigned long thread_saved_pc(struct task_struct *tsk); void show_trace(struct task_struct *task, unsigned long *stack); unsigned long get_wchan(struct task_struct *p); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) #define KSTK_TOP(info) \ ({ \ unsigned long *__ptr = (unsigned long *)(info); \ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ }) #define task_pt_regs(task) \ ({ \ struct pt_regs *__regs__; \ __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \ __regs__ - 1; \ }) #define KSTK_EIP(task) (task_pt_regs(task)->eip) #define KSTK_ESP(task) (task_pt_regs(task)->esp) struct microcode_header { unsigned int hdrver; unsigned int rev; unsigned int date; unsigned int sig; unsigned int cksum; unsigned int ldrver; unsigned int pf; unsigned int datasize; unsigned int totalsize; unsigned int reserved[3]; }; struct microcode { struct microcode_header hdr; unsigned int bits[0]; }; typedef struct microcode microcode_t; typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; /* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ #define MICROCODE_IOCFREE _IO('6',0) /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
 */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()

/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7

/* Opteron nops */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't do prefetches for pre XP Athlons currently.
   That should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

#ifdef CONFIG_MTRR
extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);
#else
#define mtrr_ap_init() do {} while (0)
#define mtrr_bp_init() do {} while (0)
#endif

#endif /* __ASM_I386_PROCESSOR_H */
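/*
 * A minimal usage sketch (not part of the header) for the cpuid() helper
 * above: CPUID leaf 0 returns the 12-byte vendor string in EBX, EDX, ECX,
 * in that order.  The function name and buffer are illustrative, and
 * memcpy() is assumed to be available (e.g. via <linux/string.h>).
 */
static inline void sketch_read_cpu_vendor(char vendor[13])
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* e.g. "Genu" */
	memcpy(vendor + 4, &edx, 4);	/* "ineI" */
	memcpy(vendor + 8, &ecx, 4);	/* "ntel" */
	vendor[12] = '\0';
}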
ipwndev/DSLinux-Mirror
linux-2.6.x/include/asm-i386/processor.h
C
gpl-2.0
19,159
25.572816
117
0.666058
false
<?php
// $Id: LdapAuthenticationConf.class.php,v 1.4.2.2 2011/02/08 20:05:41 johnbarclay Exp $

/**
 * @file
 * This class represents an ldap_authentication module's configuration
 * It is extended by LdapAuthenticationConfAdmin for configuration and other admin functions
 */

class LdapAuthenticationConf {

  // no need for LdapAuthenticationConf id as only one instance will exist per drupal install

  public $sids = array(); // server configuration ids being used for authentication
  public $servers = array(); // ldap server object
  public $inDatabase = FALSE;
  public $authenticationMode = LDAP_AUTHENTICATION_MODE_DEFAULT;
  public $ldapUserHelpLinkUrl;
  public $ldapUserHelpLinkText = LDAP_AUTHENTICATION_HELP_LINK_TEXT_DEFAULT;
  public $loginConflictResolve = LDAP_AUTHENTICATION_CONFLICT_RESOLVE_DEFAULT;
  public $acctCreation = LDAP_AUTHENTICATION_ACCT_CREATION_DEFAULT;
  public $emailOption = LDAP_AUTHENTICATION_EMAIL_FIELD_DEFAULT;
  public $emailUpdate = LDAP_AUTHENTICATION_EMAIL_UPDATE_ON_LDAP_CHANGE_DEFAULT;
  public $ssoEnabled = FALSE;
  public $seamlessLogin = FALSE;
  public $ldapImplementation = FALSE;
  public $cookieExpire = LDAP_AUTHENTICATION_COOKIE_EXPIRE;
  public $apiPrefs = array();
  public $createLDAPAccounts; // should a drupal account be created when an ldap user authenticates
  public $createLDAPAccountsAdminApproval; // create them, but as blocked accounts

  /**
   * Advanced options. whitelist / blacklist options
   *
   * these are on the fuzzy line between authentication and authorization
   * and determine if a user is allowed to authenticate with ldap
   *
   */

  public $allowOnlyIfTextInDn = array(); // eg ou=education that must be met to allow ldap authentication
  public $excludeIfTextInDn = array();
  public $allowTestPhp = NULL; // code that returns boolean TRUE || FALSE for allowing ldap authentication
  public $excludeIfNoAuthorizations = LDAP_AUTHENTICATION_EXCL_IF_NO_AUTHZ_DEFAULT;

  public $saveable = array(
    'sids',
    'authenticationMode',
    'loginConflictResolve',
    'acctCreation',
    'ldapUserHelpLinkUrl',
    'ldapUserHelpLinkText',
    'emailOption',
    'emailUpdate',
    'allowOnlyIfTextInDn',
    'excludeIfTextInDn',
    'allowTestPhp',
    'excludeIfNoAuthorizations',
    'ssoEnabled',
    'seamlessLogin',
    'ldapImplementation',
    'cookieExpire',
  );

  /** are any ldap servers that are enabled associated with ldap authentication **/
  public function enabled_servers() {
    return !(count(array_filter(array_values($this->sids))) == 0);
  }
  function __construct() {
    $this->load();
  }

  function load() {

    if ($saved = variable_get("ldap_authentication_conf", FALSE)) {
      $this->inDatabase = TRUE;
      foreach ($this->saveable as $property) {
        if (isset($saved[$property])) {
          $this->{$property} = $saved[$property];
        }
      }
      foreach ($this->sids as $sid => $is_enabled) {
        if ($is_enabled) {
          $this->servers[$sid] = ldap_servers_get_servers($sid, 'enabled', TRUE);
        }
      }

    }
    else {
      $this->inDatabase = FALSE;
    }

    $this->apiPrefs['requireHttps'] = variable_get('ldap_servers_require_ssl_for_credentails', 1);
    $this->apiPrefs['encryption'] = variable_get('ldap_servers_encryption', LDAP_SERVERS_ENC_TYPE_CLEARTEXT);

    // determine account creation configuration
    $user_register = variable_get('user_register', USER_REGISTER_VISITORS_ADMINISTRATIVE_APPROVAL);
    if ($this->acctCreation == LDAP_AUTHENTICATION_ACCT_CREATION_DEFAULT || $user_register == USER_REGISTER_VISITORS) {
      $this->createLDAPAccounts = TRUE;
      $this->createLDAPAccountsAdminApproval = FALSE;
    }
    elseif ($user_register == USER_REGISTER_VISITORS_ADMINISTRATIVE_APPROVAL) {
      $this->createLDAPAccounts = FALSE;
      $this->createLDAPAccountsAdminApproval = TRUE;
    }
else { $this->createLDAPAccounts = FALSE; $this->createLDAPAccountsAdminApproval = FALSE; } } /** * Destructor Method */ function __destruct() { } /** * decide if a username is excluded or not * * return boolean */ public function allowUser($name, $ldap_user_entry) { /** * do one of the exclude attribute pairs match */ $exclude = FALSE; foreach ($this->excludeIfTextInDn as $test) { if (strpos(drupal_strtolower($ldap_user_entry['dn']), drupal_strtolower($test)) !== FALSE) { return FALSE;// if a match, return FALSE; } } /** * evaluate php if it exists */ if ($this->allowTestPhp) { if (module_exists('php')) { global $_name, $_ldap_user_entry; $_name = $name; $_ldap_user_entry = $ldap_user_entry; $code = '<?php ' . "global \$_name; \n global \$_ldap_user_entry; \n" . $this->allowTestPhp . ' ?>'; $code_result = php_eval($code); $_name = NULL; $_ldap_user_entry = NULL; if ((boolean)($code_result) == FALSE) { return FALSE; } } else { drupal_set_message(t(LDAP_AUTHENTICATION_DISABLED_FOR_BAD_CONF_MSG), 'warning'); $tokens = array('!ldap_authentication_config' => l(t('LDAP Authentication Configuration'), 'admin/config/people/ldap/authentication')); watchdog('warning', 'LDAP Authentication is configured to deny users based on php execution with php_eval function, but php module is not enabled. Please enable php module or remove php code at !ldap_authentication_config .', $tokens); return FALSE; } } /** * do one of the allow attribute pairs match */ if (count($this->allowOnlyIfTextInDn)) { $fail = TRUE; foreach ($this->allowOnlyIfTextInDn as $test) { if (strpos(drupal_strtolower($ldap_user_entry['dn']), drupal_strtolower($test)) !== FALSE) { $fail = FALSE; } } if ($fail) { return FALSE; } } /** * is excludeIfNoAuthorizations option enabled and user not granted any groups */ if ($this->excludeIfNoAuthorizations) { if (!module_exists('ldap_authorization')) { drupal_set_message(t(LDAP_AUTHENTICATION_DISABLED_FOR_BAD_CONF_MSG), 'warning'); $tokens = array('!ldap_authentication_config' => l(t('LDAP Authentication Configuration'), 'admin/config/people/ldap/authentication')); watchdog('warning', 'LDAP Authentication is configured to deny users without LDAP Authorization mappings, but LDAP Authorization module is not enabled. Please enable and configure LDAP Authorization or disable this option at !ldap_authentication_config .', $tokens); return FALSE; } $user = new stdClass(); $user->name = $name; $user->ldap_authenticated = TRUE; // fake user property added for query $consumers = ldap_authorization_get_consumers(); $has_enabled_consumers = FALSE; foreach ($consumers as $consumer_type => $consumer_config) { $consumer_obj = ldap_authorization_get_consumer_object($consumer_type); if ($consumer_obj->consumerConf->status) { $has_enabled_consumers = TRUE; list($authorizations, $notifications) = ldap_authorizations_user_authorizations($user, 'query', $consumer_type, 'test_if_authorizations_granted'); if (count(array_filter(array_values($authorizations))) > 0) { return TRUE; } } } if (!$has_enabled_consumers) { drupal_set_message(t(LDAP_AUTHENTICATION_DISABLED_FOR_BAD_CONF_MSG), 'warning'); $tokens = array('!ldap_consumer_config' => l(t('LDAP Authorization Configuration'), 'admin/config/people/ldap/authorization')); watchdog('warning', 'LDAP Authentication is configured to deny users without LDAP Authorization mappings, but 0 LDAP Authorization consumers are configured: !ldap_consumer_config .', $tokens); return FALSE; } return FALSE; } /** * default to allowed */ return TRUE; } }
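/**
 * A standalone sketch of the DN blacklist test inside allowUser() above;
 * plain strtolower() stands in here for drupal_strtolower(), and the
 * function name and sample values are illustrative only.
 */
function ldap_authentication_sketch_dn_excluded($dn, array $exclude_patterns) {
  foreach ($exclude_patterns as $test) {
    if (strpos(strtolower($dn), strtolower($test)) !== FALSE) {
      return TRUE; // any pattern match excludes the user, mirroring allowUser()
    }
  }
  return FALSE;
}
// e.g. ldap_authentication_sketch_dn_excluded('cn=jdoe,OU=Students,dc=example,dc=edu', array('ou=students')) returns TRUE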
mcwalinski/NewspaperInEducation
sites/all/modules/ldap/ldap_authentication/LdapAuthenticationConf.class.php
PHP
gpl-2.0
8,028
34.522124
275
0.657698
false
/*
 * storage_common.c -- Common definitions for mass storage functionality
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * This file requires the following identifiers used in USB strings to
 * be defined (each of type pointer to char):
 *  - fsg_string_manufacturer -- name of the manufacturer
 *  - fsg_string_product      -- name of the product
 *  - fsg_string_config       -- name of the configuration
 *  - fsg_string_interface    -- name of the interface
 * The first four are only needed when FSG_DESCRIPTORS_DEVICE_STRINGS
 * macro is defined prior to including this file.
 */

/*
 * When FSG_NO_INTR_EP is defined fsg_fs_intr_in_desc and
 * fsg_hs_intr_in_desc objects as well as
 * FSG_FS_FUNCTION_PRE_EP_ENTRIES and FSG_HS_FUNCTION_PRE_EP_ENTRIES
 * macros are not defined.
 *
 * When FSG_NO_DEVICE_STRINGS is defined FSG_STRING_MANUFACTURER,
 * FSG_STRING_PRODUCT, FSG_STRING_SERIAL and FSG_STRING_CONFIG are not
 * defined (as well as corresponding entries in string tables are
 * missing) and FSG_STRING_INTERFACE has value of zero.
 *
 * When FSG_NO_OTG is defined fsg_otg_desc won't be defined.
 */

/*
 * When FSG_BUFFHD_STATIC_BUFFER is defined when this file is included
 * the fsg_buffhd structure's buf field will be an array of FSG_BUFLEN
 * characters rather than a pointer to void.
 */

#include <linux/usb/storage.h>
#include <scsi/scsi.h>
#include <asm/unaligned.h>

/*
 * Thanks to NetChip Technologies for donating this product ID.
 *
 * DO NOT REUSE THESE IDs with any other driver!!  Ever!!
 * Instead:  allocate your own, using normal USB-IF procedures.
 */
#define FSG_VENDOR_ID	0x0525	/* NetChip */
#define FSG_PRODUCT_ID	0xa4a5	/* Linux-USB File-backed Storage Gadget */

/*-------------------------------------------------------------------------*/

#ifndef DEBUG
#undef VERBOSE_DEBUG
#undef DUMP_MSGS
#endif /* !DEBUG */

#ifdef VERBOSE_DEBUG
#define VLDBG	LDBG
#else
#define VLDBG(lun, fmt, args...) do { } while (0)
#endif /* VERBOSE_DEBUG */

#define LDBG(lun, fmt, args...)   dev_dbg (&(lun)->dev, fmt, ## args)
#define LERROR(lun, fmt, args...) dev_err (&(lun)->dev, fmt, ## args)
#define LWARN(lun, fmt, args...)  dev_warn(&(lun)->dev, fmt, ## args)
#define LINFO(lun, fmt, args...)  dev_info(&(lun)->dev, fmt, ## args)

/*
 * Keep those macros in sync with those in
 * include/linux/usb/composite.h or else GCC will complain.  If they
 * are identical (the same names of arguments, white spaces in the
 * same places) GCC will allow redefinition otherwise (even if some
 * white space is removed or added) warning will be issued.
 *
 * Those macros are needed here because File Storage Gadget does not
 * include the composite.h header.  For composite gadgets those macros
 * are redundant since composite.h is included anyway.
* * One could check whether those macros are already defined (which * would indicate composite.h had been included) or not (which would * indicate we were in FSG) but this is not done because a warning is * desired if definitions here differ from the ones in composite.h. * * We want the definitions to match and be the same in File Storage * Gadget as well as Mass Storage Function (and so composite gadgets * using MSF). If someone changes them in composite.h it will produce * a warning in this file when building MSF. */ #define DBG(d, fmt, args...) dev_dbg(&(d)->gadget->dev , fmt , ## args) #define VDBG(d, fmt, args...) dev_vdbg(&(d)->gadget->dev , fmt , ## args) #define ERROR(d, fmt, args...) dev_err(&(d)->gadget->dev , fmt , ## args) #define WARNING(d, fmt, args...) dev_warn(&(d)->gadget->dev , fmt , ## args) #define INFO(d, fmt, args...) dev_info(&(d)->gadget->dev , fmt , ## args) #ifdef DUMP_MSGS # define dump_msg(fsg, /* const char * */ label, \ /* const u8 * */ buf, /* unsigned */ length) do { \ if (length < 512) { \ DBG(fsg, "%s, length %u:\n", label, length); \ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \ 16, 1, buf, length, 0); \ } \ } while (0) # define dump_cdb(fsg) do { } while (0) #else # define dump_msg(fsg, /* const char * */ label, \ /* const u8 * */ buf, /* unsigned */ length) do { } while (0) # ifdef VERBOSE_DEBUG # define dump_cdb(fsg) \ print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE, \ 16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0) \ # else # define dump_cdb(fsg) do { } while (0) # endif /* VERBOSE_DEBUG */ #endif /* DUMP_MSGS */ /*-------------------------------------------------------------------------*/ /* Bulk-only data structures */ /* Command Block Wrapper */ struct fsg_bulk_cb_wrap { __le32 Signature; /* Contains 'USBC' */ u32 Tag; /* Unique per command id */ __le32 DataTransferLength; /* Size of the data */ u8 Flags; /* Direction in bit 7 */ u8 Lun; /* LUN (normally 0) */ u8 Length; /* Of the CDB, <= MAX_COMMAND_SIZE */ u8 CDB[16]; /* Command Data Block */ }; #define USB_BULK_CB_WRAP_LEN 31 #define USB_BULK_CB_SIG 0x43425355 /* Spells out USBC */ #define USB_BULK_IN_FLAG 0x80 /* Command Status Wrapper */ struct bulk_cs_wrap { __le32 Signature; /* Should = 'USBS' */ u32 Tag; /* Same as original command */ __le32 Residue; /* Amount not transferred */ u8 Status; /* See below */ }; #define USB_BULK_CS_WRAP_LEN 13 #define USB_BULK_CS_SIG 0x53425355 /* Spells out 'USBS' */ #define USB_STATUS_PASS 0 #define USB_STATUS_FAIL 1 #define USB_STATUS_PHASE_ERROR 2 /* Bulk-only class specific requests */ #define USB_BULK_RESET_REQUEST 0xff #define USB_BULK_GET_MAX_LUN_REQUEST 0xfe /* CBI Interrupt data structure */ struct interrupt_data { u8 bType; u8 bValue; }; #define CBI_INTERRUPT_DATA_LEN 2 /* CBI Accept Device-Specific Command request */ #define USB_CBI_ADSC_REQUEST 0x00 /* Length of a SCSI Command Data Block */ #define MAX_COMMAND_SIZE 16 #if defined(CONFIG_USB_CDFS_SUPPORT) /* SCSI commands that we recognize */ #define READ_CD 0xbe #endif /* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */ #define SS_NO_SENSE 0 #define SS_COMMUNICATION_FAILURE 0x040800 #define SS_INVALID_COMMAND 0x052000 #define SS_INVALID_FIELD_IN_CDB 0x052400 #define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100 #define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500 #define SS_MEDIUM_NOT_PRESENT 0x023a00 #define SS_MEDIUM_REMOVAL_PREVENTED 0x055302 #define SS_NOT_READY_TO_READY_TRANSITION 0x062800 #define SS_RESET_OCCURRED 0x062900 #define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900 
#define SS_UNRECOVERED_READ_ERROR 0x031100 #define SS_WRITE_ERROR 0x030c02 #define SS_WRITE_PROTECTED 0x072700 #define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. */ #define ASC(x) ((u8) ((x) >> 8)) #define ASCQ(x) ((u8) (x)) /*-------------------------------------------------------------------------*/ struct fsg_lun { struct file *filp; loff_t file_length; loff_t num_sectors; unsigned int initially_ro:1; unsigned int ro:1; unsigned int removable:1; unsigned int cdrom:1; unsigned int prevent_medium_removal:1; unsigned int registered:1; unsigned int info_valid:1; unsigned int nofua:1; u32 sense_data; u32 sense_data_info; u32 unit_attention_data; struct device dev; }; #define fsg_lun_is_open(curlun) ((curlun)->filp != NULL) static struct fsg_lun *fsg_lun_from_dev(struct device *dev) { return container_of(dev, struct fsg_lun, dev); } /* Big enough to hold our biggest descriptor */ #define EP0_BUFSIZE 256 #define EP0_BUFSIZE_SS 512 #define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */ /* Number of buffers we will use. 2 is enough for double-buffering */ #define FSG_NUM_BUFFERS 2 /* Default size of buffer length. */ #define FSG_BUFLEN ((u32)16384) /* Maximal number of LUNs supported in mass storage function */ #define FSG_MAX_LUNS 8 enum fsg_buffer_state { BUF_STATE_EMPTY = 0, BUF_STATE_FULL, BUF_STATE_BUSY }; struct fsg_buffhd { #ifdef FSG_BUFFHD_STATIC_BUFFER char buf[FSG_BUFLEN]; #else void *buf; #endif enum fsg_buffer_state state; struct fsg_buffhd *next; /* * The NetChip 2280 is faster, and handles some protocol faults * better, if we don't submit any short bulk-out read requests. * So we will record the intended request length here. */ unsigned int bulk_out_intended_length; struct usb_request *inreq; int inreq_busy; struct usb_request *outreq; int outreq_busy; }; enum fsg_state { /* This one isn't used anywhere */ FSG_STATE_COMMAND_PHASE = -10, FSG_STATE_DATA_PHASE, FSG_STATE_STATUS_PHASE, FSG_STATE_IDLE = 0, FSG_STATE_ABORT_BULK_OUT, FSG_STATE_RESET, FSG_STATE_INTERFACE_CHANGE, FSG_STATE_CONFIG_CHANGE, FSG_STATE_DISCONNECT, FSG_STATE_EXIT, FSG_STATE_TERMINATED }; enum data_direction { DATA_DIR_UNKNOWN = 0, DATA_DIR_FROM_HOST, DATA_DIR_TO_HOST, DATA_DIR_NONE }; /*-------------------------------------------------------------------------*/ static inline u32 get_unaligned_be24(u8 *buf) { return 0xffffff & (u32) get_unaligned_be32(buf - 1); } /*-------------------------------------------------------------------------*/ enum { #ifndef FSG_NO_DEVICE_STRINGS FSG_STRING_MANUFACTURER = 1, FSG_STRING_PRODUCT, FSG_STRING_SERIAL, FSG_STRING_CONFIG, #endif FSG_STRING_INTERFACE }; #ifndef FSG_NO_OTG static struct usb_otg_descriptor fsg_otg_desc = { .bLength = sizeof fsg_otg_desc, .bDescriptorType = USB_DT_OTG, .bmAttributes = USB_OTG_SRP, }; #endif /* There is only one interface. */ static struct usb_interface_descriptor fsg_intf_desc = { .bLength = sizeof fsg_intf_desc, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 2, /* Adjusted during fsg_bind() */ .bInterfaceClass = USB_CLASS_MASS_STORAGE, .bInterfaceSubClass = USB_SC_SCSI, /* Adjusted during fsg_bind() */ .bInterfaceProtocol = USB_PR_BULK, /* Adjusted during fsg_bind() */ .iInterface = FSG_STRING_INTERFACE, }; /* * Three full-speed endpoint descriptors: bulk-in, bulk-out, and * interrupt-in. 
*/ static struct usb_endpoint_descriptor fsg_fs_bulk_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, /* wMaxPacketSize set by autoconfiguration */ }; static struct usb_endpoint_descriptor fsg_fs_bulk_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, /* wMaxPacketSize set by autoconfiguration */ }; #ifndef FSG_NO_INTR_EP static struct usb_endpoint_descriptor fsg_fs_intr_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = cpu_to_le16(2), .bInterval = 32, /* frames -> 32 ms */ }; #ifndef FSG_NO_OTG # define FSG_FS_FUNCTION_PRE_EP_ENTRIES 2 #else # define FSG_FS_FUNCTION_PRE_EP_ENTRIES 1 #endif #endif static struct usb_descriptor_header *fsg_fs_function[] = { #ifndef FSG_NO_OTG (struct usb_descriptor_header *) &fsg_otg_desc, #endif (struct usb_descriptor_header *) &fsg_intf_desc, (struct usb_descriptor_header *) &fsg_fs_bulk_in_desc, (struct usb_descriptor_header *) &fsg_fs_bulk_out_desc, #ifndef FSG_NO_INTR_EP (struct usb_descriptor_header *) &fsg_fs_intr_in_desc, #endif NULL, }; static struct usb_endpoint_descriptor fsg_ss_bulk_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */ .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(1024), }; static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = { .bLength = sizeof(fsg_ss_bulk_in_comp_desc), .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /*.bMaxBurst = DYNAMIC, */ }; static struct usb_endpoint_descriptor fsg_ss_bulk_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */ .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(1024), }; static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = { .bLength = sizeof(fsg_ss_bulk_in_comp_desc), .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /*.bMaxBurst = DYNAMIC, */ }; #ifndef FSG_NO_INTR_EP static struct usb_endpoint_descriptor fsg_ss_intr_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */ .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = cpu_to_le16(2), .bInterval = 9, /* 2**(9-1) = 256 uframes -> 32 ms */ }; static struct usb_ss_ep_comp_descriptor fsg_ss_intr_in_comp_desc = { .bLength = sizeof(fsg_ss_bulk_in_comp_desc), .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, .wBytesPerInterval = cpu_to_le16(1024), }; #ifndef FSG_NO_OTG # define FSG_SS_FUNCTION_PRE_EP_ENTRIES 4 #else # define FSG_SS_FUNCTION_PRE_EP_ENTRIES 3 #endif #endif static __maybe_unused struct usb_ext_cap_descriptor fsg_ext_cap_desc = { .bLength = USB_DT_USB_EXT_CAP_SIZE, .bDescriptorType = USB_DT_DEVICE_CAPABILITY, .bDevCapabilityType = USB_CAP_TYPE_EXT, .bmAttributes = cpu_to_le32(USB_LPM_SUPPORT), }; static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = { .bLength = USB_DT_USB_SS_CAP_SIZE, .bDescriptorType = USB_DT_DEVICE_CAPABILITY, .bDevCapabilityType = USB_SS_CAP_TYPE, /* .bmAttributes = LTM is not supported yet */ .wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION | USB_FULL_SPEED_OPERATION | USB_HIGH_SPEED_OPERATION | USB_5GBPS_OPERATION), 
.bFunctionalitySupport = USB_LOW_SPEED_OPERATION, .bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT, .bU2DevExitLat = USB_DEFAULT_U2_DEV_EXIT_LAT, }; static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = { .bLength = USB_DT_BOS_SIZE, .bDescriptorType = USB_DT_BOS, .wTotalLength = USB_DT_BOS_SIZE + USB_DT_USB_EXT_CAP_SIZE + USB_DT_USB_SS_CAP_SIZE, .bNumDeviceCaps = 2, }; static struct usb_descriptor_header *fsg_ss_function[] = { #ifndef FSG_NO_OTG (struct usb_descriptor_header *) &fsg_otg_desc, #endif (struct usb_descriptor_header *) &fsg_intf_desc, (struct usb_descriptor_header *) &fsg_ss_bulk_in_desc, (struct usb_descriptor_header *) &fsg_ss_bulk_in_comp_desc, (struct usb_descriptor_header *) &fsg_ss_bulk_out_desc, (struct usb_descriptor_header *) &fsg_ss_bulk_out_comp_desc, #ifndef FSG_NO_INTR_EP (struct usb_descriptor_header *) &fsg_ss_intr_in_desc, (struct usb_descriptor_header *) &fsg_ss_intr_in_comp_desc, #endif NULL, }; /* * USB 2.0 devices need to expose both high speed and full speed * descriptors, unless they only run at full speed. * * That means alternate endpoint descriptors (bigger packets) * and a "device qualifier" ... plus more construction options * for the configuration descriptor. */ static struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */ .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor fsg_hs_bulk_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */ .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), .bInterval = 1, /* NAK every 1 uframe */ }; #ifndef FSG_NO_INTR_EP static struct usb_endpoint_descriptor fsg_hs_intr_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */ .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = cpu_to_le16(2), .bInterval = 9, /* 2**(9-1) = 256 uframes -> 32 ms */ }; #ifndef FSG_NO_OTG # define FSG_HS_FUNCTION_PRE_EP_ENTRIES 2 #else # define FSG_HS_FUNCTION_PRE_EP_ENTRIES 1 #endif #endif static struct usb_descriptor_header *fsg_hs_function[] = { #ifndef FSG_NO_OTG (struct usb_descriptor_header *) &fsg_otg_desc, #endif (struct usb_descriptor_header *) &fsg_intf_desc, (struct usb_descriptor_header *) &fsg_hs_bulk_in_desc, (struct usb_descriptor_header *) &fsg_hs_bulk_out_desc, #ifndef FSG_NO_INTR_EP (struct usb_descriptor_header *) &fsg_hs_intr_in_desc, #endif NULL, }; /* Maxpacket and other transfer characteristics vary by speed. 
*/ static struct usb_endpoint_descriptor * fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs, struct usb_endpoint_descriptor *hs) { if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return hs; return fs; } /* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */ static struct usb_string fsg_strings[] = { #ifndef FSG_NO_DEVICE_STRINGS {FSG_STRING_MANUFACTURER, fsg_string_manufacturer}, {FSG_STRING_PRODUCT, fsg_string_product}, {FSG_STRING_SERIAL, ""}, {FSG_STRING_CONFIG, fsg_string_config}, #endif {FSG_STRING_INTERFACE, fsg_string_interface}, {} }; static struct usb_gadget_strings fsg_stringtab = { .language = 0x0409, /* en-us */ .strings = fsg_strings, }; /*-------------------------------------------------------------------------*/ /* * If the next two routines are called while the gadget is registered, * the caller must own fsg->filesem for writing. */ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) { int ro; struct file *filp = NULL; int rc = -EINVAL; struct inode *inode = NULL; loff_t size; loff_t num_sectors; loff_t min_sectors; /* R/W if we can, R/O if we must */ ro = curlun->initially_ro; if (!ro) { filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0); if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES) ro = 1; } if (ro) filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(filp)) { LINFO(curlun, "unable to open backing file: %s\n", filename); return PTR_ERR(filp); } if (!(filp->f_mode & FMODE_WRITE)) ro = 1; if (filp->f_path.dentry) inode = filp->f_path.dentry->d_inode; if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { LINFO(curlun, "invalid file type: %s\n", filename); goto out; } /* * If we can't read the file, it's no good. * If we can't write the file, use it read-only. */ if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) { LINFO(curlun, "file not readable: %s\n", filename); goto out; } if (!(filp->f_op->write || filp->f_op->aio_write)) ro = 1; size = i_size_read(inode->i_mapping->host); if (size < 0) { LINFO(curlun, "unable to find file size: %s\n", filename); rc = (int) size; goto out; } num_sectors = size >> 9; /* File size in 512-byte blocks */ min_sectors = 1; if (curlun->cdrom) { num_sectors >>= 2; /* Reduce to a multiple of 2048 */ min_sectors = 300; /* Smallest track is 300 frames */ if (num_sectors >= 256*60*75) { num_sectors = (256*60*75 - 1); LINFO(curlun, "file too big: %s\n", filename); LINFO(curlun, "using only first %d blocks\n", (int) num_sectors); } } if (num_sectors < min_sectors) { LINFO(curlun, "file too small: %s\n", filename); rc = -ETOOSMALL; goto out; } get_file(filp); curlun->ro = ro; curlun->filp = filp; curlun->file_length = size; curlun->num_sectors = num_sectors; LDBG(curlun, "open backing file: %s\n", filename); rc = 0; out: filp_close(filp, current->files); return rc; } static void fsg_lun_close(struct fsg_lun *curlun) { if (curlun->filp) { LDBG(curlun, "close backing file\n"); fput(curlun->filp); curlun->filp = NULL; } } /*-------------------------------------------------------------------------*/ /* * Sync the file data, don't bother with the metadata. * This code was copied from fs/buffer.c:sys_fdatasync(). 
*/ static int fsg_lun_fsync_sub(struct fsg_lun *curlun) { struct file *filp = curlun->filp; if (curlun->ro || !filp) return 0; return vfs_fsync(filp, 1); } static void store_cdrom_address(u8 *dest, int msf, u32 addr) { if (msf) { /* Convert to Minutes-Seconds-Frames */ addr += 2*75; /* Lead-in occupies 2 seconds */ dest[3] = addr % 75; /* Frames */ addr /= 75; dest[2] = addr % 60; /* Seconds */ addr /= 60; dest[1] = addr; /* Minutes */ dest[0] = 0; /* Reserved */ } else { /* Absolute sector */ put_unaligned_be32(addr, dest); } } /*-------------------------------------------------------------------------*/ static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr, char *buf) { struct fsg_lun *curlun = fsg_lun_from_dev(dev); return sprintf(buf, "%d\n", fsg_lun_is_open(curlun) ? curlun->ro : curlun->initially_ro); } static ssize_t fsg_show_nofua(struct device *dev, struct device_attribute *attr, char *buf) { struct fsg_lun *curlun = fsg_lun_from_dev(dev); return sprintf(buf, "%u\n", curlun->nofua); } static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr, char *buf) { struct fsg_lun *curlun = fsg_lun_from_dev(dev); struct rw_semaphore *filesem = dev_get_drvdata(dev); char *p; ssize_t rc; down_read(filesem); if (fsg_lun_is_open(curlun)) { /* Get the complete pathname */ p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1); if (IS_ERR(p)) rc = PTR_ERR(p); else { rc = strlen(p); memmove(buf, p, rc); buf[rc] = '\n'; /* Add a newline */ buf[++rc] = 0; } } else { /* No file, return 0 bytes */ *buf = 0; rc = 0; } up_read(filesem); return rc; } static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t rc; struct fsg_lun *curlun = fsg_lun_from_dev(dev); struct rw_semaphore *filesem = dev_get_drvdata(dev); unsigned ro; rc = kstrtouint(buf, 2, &ro); if (rc) return rc; /* * Allow the write-enable status to change only while the * backing file is closed. */ down_read(filesem); if (fsg_lun_is_open(curlun)) { LDBG(curlun, "read-only status change prevented\n"); rc = -EBUSY; } else { curlun->ro = ro; curlun->initially_ro = ro; LDBG(curlun, "read-only status set to %d\n", curlun->ro); rc = count; } up_read(filesem); return rc; } static ssize_t fsg_store_nofua(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fsg_lun *curlun = fsg_lun_from_dev(dev); unsigned nofua; int ret; ret = kstrtouint(buf, 2, &nofua); if (ret) return ret; /* Sync data when switching from async mode to sync */ if (!nofua && curlun->nofua) fsg_lun_fsync_sub(curlun); curlun->nofua = nofua; return count; } static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fsg_lun *curlun = fsg_lun_from_dev(dev); struct rw_semaphore *filesem = dev_get_drvdata(dev); int rc = 0; #if !defined(CONFIG_USB_ANDROID_MASS_STORAGE) && !defined(CONFIG_USB_G_ANDROID) /* disabled in android because we need to allow closing the backing file * if the media was removed */ if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) { LDBG(curlun, "eject attempt prevented\n"); return -EBUSY; /* "Door is locked" */ } #endif /* Remove a trailing newline */ if (count > 0 && buf[count-1] == '\n') ((char *) buf)[count-1] = 0; /* Ugh! 
*/ /* Eject current medium */ down_write(filesem); if (fsg_lun_is_open(curlun)) { fsg_lun_close(curlun); curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; } /* Load new medium */ if (count > 0 && buf[0]) { rc = fsg_lun_open(curlun, buf); if (rc == 0) curlun->unit_attention_data = SS_NOT_READY_TO_READY_TRANSITION; } up_write(filesem); return (rc < 0 ? rc : count); } static ssize_t fsg_show_cdrom (struct device *dev, struct device_attribute *attr, char *buf) { struct fsg_lun *curlun = fsg_lun_from_dev(dev); return sprintf(buf, "%u\n", curlun->cdrom); } static ssize_t fsg_store_cdrom(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t rc; struct fsg_lun *curlun = fsg_lun_from_dev(dev); struct rw_semaphore *filesem = dev_get_drvdata(dev); unsigned cdrom; rc = kstrtouint(buf, 2, &cdrom); if (rc) return rc; /* * Allow the cdrom status to change only while the * backing file is closed. */ down_read(filesem); if (fsg_lun_is_open(curlun)) { LDBG(curlun, "cdrom status change prevented\n"); rc = -EBUSY; } else { curlun->cdrom = !!cdrom; LDBG(curlun, "cdrom status set to %d\n", curlun->cdrom); rc = count; } up_read(filesem); return rc; }
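/*
 * Editor's note: the helper below is an illustrative sketch appended to
 * this copy of the file, not part of the original driver. It shows how
 * the Bulk-only structures and constants defined above fit together when
 * validating a received Command Block Wrapper; the function name is
 * hypothetical, and the real checks live in the driver's command-receive
 * path with additional state handling.
 */
static inline int fsg_example_cbw_valid(const struct fsg_bulk_cb_wrap *cbw,
					unsigned int req_length)
{
	/* A CBW is exactly 31 bytes and starts with the 'USBC' signature. */
	if (req_length != USB_BULK_CB_WRAP_LEN)
		return 0;
	if (le32_to_cpu(cbw->Signature) != USB_BULK_CB_SIG)
		return 0;
	/* The embedded CDB must fit in a SCSI Command Data Block. */
	if (cbw->Length == 0 || cbw->Length > MAX_COMMAND_SIZE)
		return 0;
	/* Bit 7 of Flags (USB_BULK_IN_FLAG) gives the data direction. */
	return 1;
}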
ffolkes/android_kernel_samsung_smdk4412
drivers/usb/gadget/storage_common.c
C
gpl-2.0
26,281
26.635121
81
0.643012
false
/* * R : A Computer Language for Statistical Data Analysis * Copyright (C) 2002-2017 The R Core Team. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, a copy is available at * https://www.R-project.org/Licenses/ */ /* ********************************************************************** * === This was 'sort()' in gamfit's mysort.f [or sortdi() in sortdi.f ] : * was at end of modreg/src/ppr.f * Translated by f2c (version 20010821) and f2c-clean,v 1.9 2000/01/13 13:46:53 * then manually by Martin Maechler */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <Defn.h> /* => Utils.h with the protos from here */ #include <Internal.h> #include <Rmath.h> #include <R_ext/RS.h> #ifdef LONG_VECTOR_SUPPORT static void R_qsort_R(double *v, double *I, size_t i, size_t j); static void R_qsort_int_R(int *v, double *I, size_t i, size_t j); #endif /* R function qsort(x, index.return) */ SEXP attribute_hidden do_qsort(SEXP call, SEXP op, SEXP args, SEXP rho) { SEXP x, sx; int indx_ret; double *vx = NULL; int *ivx = NULL; Rboolean x_real, x_int; checkArity(op, args); x = CAR(args); if (!isNumeric(x)) error(_("argument is not a numeric vector")); x_real= TYPEOF(x) == REALSXP; x_int = !x_real && (TYPEOF(x) == INTSXP || TYPEOF(x) == LGLSXP); PROTECT(sx = (x_real || x_int) ? 
duplicate(x) : coerceVector(x, REALSXP)); SET_ATTRIB(sx, R_NilValue); SET_OBJECT(sx, 0); indx_ret = asLogical(CADR(args)); R_xlen_t n = XLENGTH(x); #ifdef LONG_VECTOR_SUPPORT Rboolean isLong = n > INT_MAX; #endif if(x_int) ivx = INTEGER(sx); else vx = REAL(sx); if(indx_ret) { SEXP ans, ansnames, indx; /* answer will have x = sorted x , ix = index :*/ PROTECT(ans = allocVector(VECSXP, 2)); PROTECT(ansnames = allocVector(STRSXP, 2)); #ifdef LONG_VECTOR_SUPPORT if (isLong) { PROTECT(indx = allocVector(REALSXP, n)); double *ix = REAL(indx); for(R_xlen_t i = 0; i < n; i++) ix[i] = (double) (i+1); if(x_int) R_qsort_int_R(ivx, ix, 1, n); else R_qsort_R(vx, ix, 1, n); } else #endif { PROTECT(indx = allocVector(INTSXP, n)); int *ix = INTEGER(indx); int nn = (int) n; for(int i = 0; i < nn; i++) ix[i] = i+1; if(x_int) R_qsort_int_I(ivx, ix, 1, nn); else R_qsort_I(vx, ix, 1, nn); } SET_VECTOR_ELT(ans, 0, sx); SET_VECTOR_ELT(ans, 1, indx); SET_STRING_ELT(ansnames, 0, mkChar("x")); SET_STRING_ELT(ansnames, 1, mkChar("ix")); setAttrib(ans, R_NamesSymbol, ansnames); UNPROTECT(4); return ans; } else { if(x_int) R_qsort_int(ivx, 1, n); else R_qsort(vx, 1, n); UNPROTECT(1); return sx; } } /* These are exposed in Utils.h and are misguidedly in the API */ void F77_SUB(qsort4)(double *v, int *indx, int *ii, int *jj) { R_qsort_I(v, indx, *ii, *jj); } void F77_SUB(qsort3)(double *v, int *ii, int *jj) { R_qsort(v, *ii, *jj); } // sort with index : -------------------------- #define qsort_Index #define INTt int #define INDt int #define NUMERIC double void R_qsort_I(double *v, int *I, int i, int j) #include "qsort-body.c" #undef NUMERIC #define NUMERIC int void R_qsort_int_I(int *v, int *I, int i, int j) #include "qsort-body.c" #undef NUMERIC #undef INTt #undef INDt #ifdef LONG_VECTOR_SUPPORT #define INDt double #define NUMERIC double static void R_qsort_R(double *v, double *I, size_t i, size_t j) #include "qsort-body.c" #undef NUMERIC #define NUMERIC int static void R_qsort_int_R(int *v, double *I, size_t i, size_t j) #include "qsort-body.c" #undef NUMERIC #undef INDt #endif // LONG_VECTOR_SUPPORT // sort withOUT index : ----------------------- #undef qsort_Index #define NUMERIC double void R_qsort(double *v, size_t i, size_t j) #include "qsort-body.c" #undef NUMERIC #define NUMERIC int void R_qsort_int(int *v, size_t i, size_t j) #include "qsort-body.c" #undef NUMERIC
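/*
 * Editor's note: illustrative sketch, not part of the original file.
 * It demonstrates the calling convention of the index-returning sorter
 * generated above: bounds are 1-based (Fortran heritage) and the index
 * vector is permuted alongside the values. The function name is
 * hypothetical.
 */
static void R_qsort_I_example(void)
{
    double v[5] = {3., 1., 2., 5., 4.};
    int    I[5] = {1, 2, 3, 4, 5};	/* 1-based positions, as do_qsort sets up */

    R_qsort_I(v, I, 1, 5);	/* sorts v[0..4] in place */
    /* now v == {1,2,3,4,5} and I == {2,3,1,5,4}:
     * I[k] is the original 1-based position of the k-th smallest value. */
}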
jeroenooms/r-source
src/main/qsort.c
C
gpl-2.0
4,453
26.319018
79
0.630137
false
/** * @Project NUKEVIET 3.x * @Author VINADES.,JSC (contact@vinades.vn) * @Copyright (C) 2012 VINADES.,JSC. All rights reserved * @Createdate 31/05/2010, 9:36 */ function nv_is_del_cron(cronid) { if (confirm(nv_is_del_confirm[0])) { nv_ajax('get', window.location.href, nv_fc_variable + '=cronjobs_del&id=' + cronid, '', 'nv_is_del_cron_result'); } return false; } function nv_is_del_cron_result(res) { if (res == 1) { alert(nv_is_del_confirm[1]); window.location.href = window.location.href; } else { alert(nv_is_del_confirm[2]); } return false; }
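/*
 * Editor's note: illustrative sketch, not part of the original file.
 * A delete link in the cronjobs table would typically invoke the helper
 * above like this (the cron id 7 and the markup are hypothetical):
 *
 *   <a href="#" onclick="return nv_is_del_cron(7);">Delete</a>
 *
 * Both functions return false so the link's default navigation is
 * suppressed while the confirm dialog, AJAX request and result alert run.
 */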
anhnthpu/nukevietCAS
js/admin_settings.js
JavaScript
gpl-2.0
570
23.826087
115
0.650877
false
/****************************************************************************** * Wormux is a convivial mass murder game. * Copyright (C) 2001-2010 Wormux Team. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA ****************************************************************************** * Weapon's crosshair *****************************************************************************/ #ifndef CROSSHAIR_H #define CROSSHAIR_H #include "graphic/surface.h" #include "include/base.h" #include <WORMUX_point.h> class CrossHair { private: bool enable; bool hidden; private: Surface image; Point2i crosshair_position; public: CrossHair(); void Reset() const; // Refresh crosshair angle void Refresh(Double angle); // Draw crosshair void Draw() const; void SetActive(bool enable); bool IsActive() const { return enable; }; // Use it only for a few seconds (character walking, jumping, ...) void Hide(); void Show(); }; #endif /* CROSSHAIR_H */
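/*
 * Editor's note: illustrative sketch, not part of the original header.
 * A typical per-frame use of the class declared above by a weapon might
 * look like this (weapon_angle is a hypothetical variable):
 *
 *   CrossHair cross_hair;
 *   cross_hair.SetActive(true);        // weapon selected, show crosshair
 *   cross_hair.Refresh(weapon_angle);  // recompute crosshair_position
 *   cross_hair.Draw();                 // blit unless Hide() was called
 *   cross_hair.Hide();                 // e.g. while the character jumps
 */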
yeKcim/warmux
old/wormux-0.9.2/src/weapon/crosshair.h
C
gpl-2.0
1,666
27.724138
79
0.610444
false
# # kbuild file for firmware/ # # Create $(fwabs) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a # leading /, it's relative to $(srctree). fwdir := $(subst ",,$(CONFIG_EXTRA_FIRMWARE_DIR)) fwabs := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir)) fw-external-y := $(subst ",,$(CONFIG_EXTRA_FIRMWARE)) # There are three cases to care about: # 1. Building kernel with CONFIG_FIRMWARE_IN_KERNEL=y -- $(fw-shipped-y) should # include the firmware files to include, according to .config # 2. 'make modules_install', which will install firmware for modules, and # _also_ for the in-kernel drivers when CONFIG_FIRMWARE_IN_KERNEL=n # 3. 'make firmware_install', which installs all firmware, unconditionally. # For the former two cases we want $(fw-shipped-y) and $(fw-shipped-m) to be # accurate. In the latter case it doesn't matter -- it'll use $(fw-shipped-all). # But be aware that the config file might not be included at all. ifdef CONFIG_ACENIC_OMIT_TIGON_I acenic-objs := acenic/tg2.bin fw-shipped- += acenic/tg1.bin else acenic-objs := acenic/tg1.bin acenic/tg2.bin endif fw-shipped-$(CONFIG_ACENIC) += $(acenic-objs) fw-shipped-$(CONFIG_ADAPTEC_STARFIRE) += adaptec/starfire_rx.bin \ adaptec/starfire_tx.bin fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.2.9.0.fw \ bnx2x/bnx2x-e1h-6.2.9.0.fw \ bnx2x/bnx2x-e2-6.2.9.0.fw fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \ bnx2/bnx2-rv2p-09-6.0.17.fw \ bnx2/bnx2-rv2p-09ax-6.0.17.fw \ bnx2/bnx2-mips-06-6.2.1.fw \ bnx2/bnx2-rv2p-06-6.0.15.fw fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \ cxgb3/t3c_psram-1.1.0.bin \ cxgb3/ael2005_opt_edc.bin \ cxgb3/ael2005_twx_edc.bin \ cxgb3/ael2020_twx_edc.bin fw-shipped-$(CONFIG_DRM_MGA) += matrox/g200_warp.fw matrox/g400_warp.fw fw-shipped-$(CONFIG_DRM_R128) += r128/r128_cce.bin fw-shipped-$(CONFIG_DRM_RADEON) += radeon/R100_cp.bin radeon/R200_cp.bin \ radeon/R300_cp.bin radeon/R420_cp.bin \ radeon/RS690_cp.bin radeon/RS600_cp.bin \ radeon/R520_cp.bin \ radeon/R600_pfp.bin radeon/R600_me.bin \ radeon/RV610_pfp.bin radeon/RV610_me.bin \ radeon/RV630_pfp.bin radeon/RV630_me.bin \ radeon/RV620_pfp.bin radeon/RV620_me.bin \ radeon/RV635_pfp.bin radeon/RV635_me.bin \ radeon/RV670_pfp.bin radeon/RV670_me.bin \ radeon/RS780_pfp.bin radeon/RS780_me.bin \ radeon/RV770_pfp.bin radeon/RV770_me.bin \ radeon/RV730_pfp.bin radeon/RV730_me.bin \ radeon/RV710_pfp.bin radeon/RV710_me.bin fw-shipped-$(CONFIG_DVB_AV7110) += av7110/bootcode.bin fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \ e100/d102e_ucode.bin fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \ cis/DP83903.cis cis/NE2K.cis \ cis/tamarack.cis cis/PE-200.cis \ cis/PE520.cis fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \ cis/COMpad2.cis cis/COMpad4.cis \ cis/SW_555_SER.cis cis/SW_7xx_SER.cis \ cis/SW_8xx_SER.cis fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ advansys/3550.bin advansys/38C0800.bin fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \ 
qlogic/12160.bin fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \ ess/maestro3_assp_minisrc.fw fw-shipped-$(CONFIG_SND_SB16_CSP) += sb16/mulaw_main.csp sb16/alaw_main.csp \ sb16/ima_adpcm_init.csp \ sb16/ima_adpcm_playback.csp \ sb16/ima_adpcm_capture.csp fw-shipped-$(CONFIG_SND_YMFPCI) += yamaha/ds1_ctrl.fw yamaha/ds1_dsp.fw \ yamaha/ds1e_ctrl.fw fw-shipped-$(CONFIG_SND_WAVEFRONT) += yamaha/yss225_registers.bin fw-shipped-$(CONFIG_TEHUTI) += tehuti/bdx.bin fw-shipped-$(CONFIG_TIGON3) += tigon/tg3.bin tigon/tg3_tso.bin \ tigon/tg3_tso5.bin fw-shipped-$(CONFIG_TYPHOON) += 3com/typhoon.bin fw-shipped-$(CONFIG_USB_EMI26) += emi26/loader.fw emi26/firmware.fw \ emi26/bitstream.fw fw-shipped-$(CONFIG_USB_EMI62) += emi62/loader.fw emi62/bitstream.fw \ emi62/spdif.fw emi62/midi.fw fw-shipped-$(CONFIG_USB_KAWETH) += kaweth/new_code.bin kaweth/trigger_code.bin \ kaweth/new_code_fix.bin \ kaweth/trigger_code_fix.bin ifdef CONFIG_FIRMWARE_IN_KERNEL fw-shipped-$(CONFIG_EXYNOS_MBOX) += apm_0116_fw_v0.h fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_MPR) += keyspan/mpr.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA18X) += keyspan/usa18x.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA19) += keyspan/usa19.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA19QI) += keyspan/usa19qi.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA19QW) += keyspan/usa19qw.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA19W) += keyspan/usa19w.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA28) += keyspan/usa28.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA28XA) += keyspan/usa28xa.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA28XB) += keyspan/usa28xb.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA28X) += keyspan/usa28x.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA49W) += keyspan/usa49w.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_USA49WLC) += keyspan/usa49wlc.fw else fw-shipped- += keyspan/mpr.fw keyspan/usa18x.fw keyspan/usa19.fw \ keyspan/usa19qi.fw keyspan/usa19qw.fw keyspan/usa19w.fw \ keyspan/usa28.fw keyspan/usa28xa.fw keyspan/usa28xb.fw \ keyspan/usa28x.fw keyspan/usa49w.fw keyspan/usa49wlc.fw endif fw-shipped-$(CONFIG_USB_SERIAL_TI) += ti_3410.fw ti_5052.fw \ mts_cdma.fw mts_gsm.fw mts_edge.fw fw-shipped-$(CONFIG_USB_SERIAL_EDGEPORT) += edgeport/boot.fw edgeport/boot2.fw \ edgeport/down.fw edgeport/down2.fw fw-shipped-$(CONFIG_USB_SERIAL_EDGEPORT_TI) += edgeport/down3.bin fw-shipped-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat_loader.fw whiteheat.fw \ # whiteheat_loader_debug.fw fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_PDA) += keyspan_pda/keyspan_pda.fw fw-shipped-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda/xircom_pgs.fw fw-shipped-$(CONFIG_USB_VICAM) += vicam/firmware.fw fw-shipped-$(CONFIG_VIDEO_CPIA2) += cpia2/stv0672_vp4.bin fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin TSPFIRMWARE_DIRECTORY = firmware/tsp_synaptics ifeq ($(shell test -d $(TSPFIRMWARE_DIRECTORY) && echo yes),yes) fw-shipped-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI) += tsp_synaptics/synaptics_b0_h.fw \ tsp_synaptics/synaptics_b0_fac.fw else $(warning '$(TSPFIRMWARE_DIRECTORY)' directory does not exist) endif TSPFIRMWARE_DIRECTORY = firmware/tsp_stm ifeq ($(shell test -d $(TSPFIRMWARE_DIRECTORY) && echo yes),yes) fw-shipped-$(CONFIG_TOUCHSCREEN_FTS) += tsp_stm/stm_tb_integ.fw tsp_stm/stm_z2a.fw \ tsp_stm/stm_z2i.fw tsp_stm/stm_z1.fw else $(warning '$(TSPFIRMWARE_DIRECTORY)' directory does not exist) 
endif fw-shipped-$(CONFIG_KEYBOARD_CYPRESS_TOUCH_MBR31X5) += cypress/cypress_zerof.fw fw-shipped-$(CONFIG_KEYBOARD_CYPRESS_TOUCH_20075) += cypress/cypress_k.fw fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-) # Sensorhub fw-shipped-$(CONFIG_SENSORS_SSP_STM) += ssp_stm.fw ssp_crashed.fw # Directories which we _might_ need to create, so we have a rule for them. firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all)))) quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@) cmd_mkdir = mkdir -p $@ quiet_cmd_ihex = IHEX $@ cmd_ihex = $(OBJCOPY) -Iihex -Obinary $< $@ quiet_cmd_ihex2fw = IHEX2FW $@ cmd_ihex2fw = $(objtree)/$(obj)/ihex2fw $< $@ quiet_cmd_h16tofw = H16TOFW $@ cmd_h16tofw = $(objtree)/$(obj)/ihex2fw -w $< $@ quiet_cmd_fwbin = MK_FW $@ cmd_fwbin = FWNAME="$(patsubst firmware/%.gen.S,%,$@)"; \ FWSTR="$(subst /,_,$(subst .,_,$(subst -,_,$(patsubst \ firmware/%.gen.S,%,$@))))"; \ ASM_WORD=$(if $(CONFIG_64BIT),.quad,.long); \ ASM_ALIGN=$(if $(CONFIG_64BIT),3,2); \ PROGBITS=$(if $(CONFIG_ARM),%,@)progbits; \ echo "/* Generated by firmware/Makefile */" > $@;\ echo " .section .rodata" >>$@;\ echo " .p2align $${ASM_ALIGN}" >>$@;\ echo "_fw_$${FWSTR}_bin:" >>$@;\ echo " .incbin \"$(2)\"" >>$@;\ echo "_fw_end:" >>$@;\ echo " .section .rodata.str,\"aMS\",$${PROGBITS},1" >>$@;\ echo " .p2align $${ASM_ALIGN}" >>$@;\ echo "_fw_$${FWSTR}_name:" >>$@;\ echo " .string \"$$FWNAME\"" >>$@;\ echo " .section .builtin_fw,\"a\",$${PROGBITS}" >>$@;\ echo " .p2align $${ASM_ALIGN}" >>$@;\ echo " $${ASM_WORD} _fw_$${FWSTR}_name" >>$@;\ echo " $${ASM_WORD} _fw_$${FWSTR}_bin" >>$@;\ echo " $${ASM_WORD} _fw_end - _fw_$${FWSTR}_bin" >>$@; # One of these files will change, or come into existence, whenever # the configuration changes between 32-bit and 64-bit. The .S files # need to change when that happens. wordsize_deps := $(wildcard include/config/64bit.h include/config/32bit.h \ include/config/ppc32.h include/config/ppc64.h \ include/config/superh32.h include/config/superh64.h \ include/config/x86_32.h include/config/x86_64.h) # Workaround for make < 3.81, where .SECONDEXPANSION doesn't work. # It'll end up depending on these targets, so make them a PHONY rule which # depends on _all_ the directories in $(firmware-dirs), and it'll work out OK. PHONY += $(objtree)/$$(%) $(objtree)/$(obj)/$$(%) $(objtree)/$$(%) $(objtree)/$(obj)/$$(%): $(firmware-dirs) @true # For the $$(dir %) trick, where we need % to be expanded first. .SECONDEXPANSION: $(patsubst %,$(obj)/%.gen.S, $(fw-shipped-y)): %: $(wordsize_deps) \ | $(objtree)/$$(dir %) $(call cmd,fwbin,$(patsubst %.gen.S,%,$@)) $(patsubst %,$(obj)/%.gen.S, $(fw-external-y)): %: $(wordsize_deps) \ include/config/extra/firmware/dir.h | $(objtree)/$$(dir %) $(call cmd,fwbin,$(fwabs)/$(patsubst $(obj)/%.gen.S,%,$@)) # The .o files depend on the binaries directly; the .S files don't. $(patsubst %,$(obj)/%.gen.o, $(fw-shipped-y)): %.gen.o: % $(patsubst %,$(obj)/%.gen.o, $(fw-external-y)): $(obj)/%.gen.o: $(fwdir)/% # .ihex is used just as a simple way to hold binary files in a source tree # where binaries are frowned upon. They are directly converted with objcopy. $(obj)/%: $(obj)/%.ihex | $(objtree)/$(obj)/$$(dir %) $(call cmd,ihex) # Don't depend on ihex2fw if we're installing and it already exists. 
# Putting it after | in the dependencies doesn't seem sufficient when # we're installing after a cross-compile, because ihex2fw has dependencies # on stuff like /usr/lib/gcc/ppc64-redhat-linux/4.3.0/include/stddef.h and # thus wants to be rebuilt. Which it can't be, if the prebuilt kernel tree # is exported read-only for someone to run 'make install'. ifeq ($(INSTALL):$(wildcard $(obj)/ihex2fw),install:$(obj)/ihex2fw) ihex2fw_dep := else ihex2fw_dep := $(obj)/ihex2fw endif # .HEX is also Intel HEX, but where the offset and length in each record # is actually meaningful, because the firmware has to be loaded in a certain # order rather than as a single binary blob. Thus, we convert them into our # more compact binary representation of ihex records (<linux/ihex.h>) $(obj)/%.fw: $(obj)/%.HEX $(ihex2fw_dep) | $(objtree)/$(obj)/$$(dir %) $(call cmd,ihex2fw) # .H16 is our own modified form of Intel HEX, with 16-bit length for records. $(obj)/%.fw: $(obj)/%.H16 $(ihex2fw_dep) | $(objtree)/$(obj)/$$(dir %) $(call cmd,h16tofw) $(firmware-dirs): $(call cmd,mkdir) obj-y += $(patsubst %,%.gen.o, $(fw-external-y)) obj-$(CONFIG_FIRMWARE_IN_KERNEL) += $(patsubst %,%.gen.o, $(fw-shipped-y)) # Remove .S files and binaries created from ihex # (during 'make clean' .config isn't included so they're all in $(fw-shipped-)) targets := $(fw-shipped-) $(patsubst $(obj)/%,%, \ $(shell find $(obj) -name \*.gen.S 2>/dev/null)) # Without this, built-in.o won't be created when it's empty, and the # final vmlinux link will fail. obj-n := dummy hostprogs-y := ihex2fw
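# Editor's note: illustrative sketch, not part of the original file.
# Wiring one more blob into the build follows the pattern used above:
# list the file under its driver's config symbol and place the binary
# (or its .ihex form) in this directory. The config symbol and path
# below are hypothetical.
#
#   fw-shipped-$(CONFIG_EXAMPLE_NIC) += example/example-nic-1.0.bin
#
# With CONFIG_FIRMWARE_IN_KERNEL=y, the cmd_fwbin rule then emits an
# example/example-nic-1.0.bin.gen.S wrapper whose .builtin_fw entry lets
# request_firmware() resolve the blob without touching the filesystem.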
bigbiff/android_kernel_samsung_zeroflte
firmware/Makefile
Makefile
gpl-2.0
12,644
45.315018
98
0.667431
false
using WCell.RealmServer.Misc; namespace WCell.RealmServer.Spells.Auras.Misc { public abstract class AttackEventEffectHandler : AuraEffectHandler, IAttackEventHandler { protected override void Apply() { Owner.AttackEventHandlers.Add(this); } protected override void Remove(bool cancelled) { Owner.AttackEventHandlers.Remove(this); } public virtual void OnBeforeAttack(DamageAction action) { } public virtual void OnAttack(DamageAction action) { } public virtual void OnDefend(DamageAction action) { } } }
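// Editor's note: illustrative sketch, not part of the original file.
// A concrete aura effect would derive from the handler above and
// override only the hooks it cares about; the class name and behaviour
// here are hypothetical.
public class ExampleRetaliationHandler : WCell.RealmServer.Spells.Auras.Misc.AttackEventEffectHandler
{
	public override void OnDefend(WCell.RealmServer.Misc.DamageAction action)
	{
		// Called for every attack against the aura's owner while the
		// aura is active; registration and unregistration in
		// Apply()/Remove() are inherited from the base class unchanged.
	}
}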
WCell/WCell
Services/WCell.RealmServer/Spells/Auras/Misc/AttackModifierHandler.cs
C#
gpl-2.0
549
17.965517
88
0.746812
false
/* * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/hyperv.h> #include <linux/version.h> #include <linux/interrupt.h> #include <asm/hyperv.h> #include "hyperv_vmbus.h" /* The one and only */ struct hv_context hv_context = { .synic_initialized = false, .hypercall_page = NULL, }; /* * query_hypervisor_info - Get version info of the Windows hypervisor */ unsigned int host_info_eax; unsigned int host_info_ebx; unsigned int host_info_ecx; unsigned int host_info_edx; static int query_hypervisor_info(void) { unsigned int eax; unsigned int ebx; unsigned int ecx; unsigned int edx; unsigned int max_leaf; unsigned int op; /* * It's assumed that this is called after confirming that Viridian * is present. Query id and revision. */ eax = 0; ebx = 0; ecx = 0; edx = 0; op = HVCPUID_VENDOR_MAXFUNCTION; cpuid(op, &eax, &ebx, &ecx, &edx); max_leaf = eax; if (max_leaf >= HVCPUID_VERSION) { eax = 0; ebx = 0; ecx = 0; edx = 0; op = HVCPUID_VERSION; cpuid(op, &eax, &ebx, &ecx, &edx); host_info_eax = eax; host_info_ebx = ebx; host_info_ecx = ecx; host_info_edx = edx; } return max_leaf; } /* * do_hypercall - Invoke the specified hypercall */ static u64 do_hypercall(u64 control, void *input, void *output) { #ifdef CONFIG_X86_64 u64 hv_status = 0; u64 input_address = (input) ? virt_to_phys(input) : 0; u64 output_address = (output) ? virt_to_phys(output) : 0; void *hypercall_page = hv_context.hypercall_page; __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8"); __asm__ __volatile__("call *%3" : "=a" (hv_status) : "c" (control), "d" (input_address), "m" (hypercall_page)); return hv_status; #else u32 control_hi = control >> 32; u32 control_lo = control & 0xFFFFFFFF; u32 hv_status_hi = 1; u32 hv_status_lo = 1; u64 input_address = (input) ? virt_to_phys(input) : 0; u32 input_address_hi = input_address >> 32; u32 input_address_lo = input_address & 0xFFFFFFFF; u64 output_address = (output) ? virt_to_phys(output) : 0; u32 output_address_hi = output_address >> 32; u32 output_address_lo = output_address & 0xFFFFFFFF; void *hypercall_page = ktva_ktla(hv_context.hypercall_page); __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi), "=a"(hv_status_lo) : "d" (control_hi), "a" (control_lo), "b" (input_address_hi), "c" (input_address_lo), "D"(output_address_hi), "S"(output_address_lo), "m" (hypercall_page)); return hv_status_lo | ((u64)hv_status_hi << 32); #endif /* !x86_64 */ } /* * hv_init - Main initialization routine. 
* * This routine must be called before any other routines in here are called */ int hv_init(void) { int max_leaf; union hv_x64_msr_hypercall_contents hypercall_msr; void *virtaddr = NULL; memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); memset(hv_context.synic_message_page, 0, sizeof(void *) * NR_CPUS); memset(hv_context.vp_index, 0, sizeof(int) * NR_CPUS); memset(hv_context.event_dpc, 0, sizeof(void *) * NR_CPUS); max_leaf = query_hypervisor_info(); /* * Write our OS ID. */ hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0); wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid); /* See if the hypercall page is already set */ rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX); if (!virtaddr) goto cleanup; hypercall_msr.enable = 1; hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr); wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); /* Confirm that hypercall page did get setup. */ hypercall_msr.as_uint64 = 0; rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); if (!hypercall_msr.enable) goto cleanup; hv_context.hypercall_page = virtaddr; return 0; cleanup: if (virtaddr) { if (hypercall_msr.enable) { hypercall_msr.as_uint64 = 0; wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); } vfree(virtaddr); } return -ENOTSUPP; } /* * hv_cleanup - Cleanup routine. * * This routine is called normally during driver unloading or exiting. */ void hv_cleanup(void) { union hv_x64_msr_hypercall_contents hypercall_msr; /* Reset our OS id */ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); if (hv_context.hypercall_page) { hypercall_msr.as_uint64 = 0; wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); vfree(hv_context.hypercall_page); hv_context.hypercall_page = NULL; } } /* * hv_post_message - Post a message using the hypervisor message IPC. * * This involves a hypercall. */ int hv_post_message(union hv_connection_id connection_id, enum hv_message_type message_type, void *payload, size_t payload_size) { struct aligned_input { u64 alignment8; struct hv_input_post_message msg; }; struct hv_input_post_message *aligned_msg; u16 status; unsigned long addr; if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) return -EMSGSIZE; addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC); if (!addr) return -ENOMEM; aligned_msg = (struct hv_input_post_message *) (ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN)); aligned_msg->connectionid = connection_id; aligned_msg->message_type = message_type; aligned_msg->payload_size = payload_size; memcpy((void *)aligned_msg->payload, payload, payload_size); status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL) & 0xFFFF; kfree((void *)addr); return status; } /* * hv_signal_event - * Signal an event on the specified connection using the hypervisor event IPC. * * This involves a hypercall. 
 */ u16 hv_signal_event(void *con_id) { u16 status; status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF); return status; } int hv_synic_alloc(void) { size_t size = sizeof(struct tasklet_struct); int cpu; for_each_online_cpu(cpu) { hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); if (hv_context.event_dpc[cpu] == NULL) { pr_err("Unable to allocate event dpc\n"); goto err; } tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu); hv_context.synic_message_page[cpu] = (void *)get_zeroed_page(GFP_ATOMIC); if (hv_context.synic_message_page[cpu] == NULL) { pr_err("Unable to allocate SYNIC message page\n"); goto err; } hv_context.synic_event_page[cpu] = (void *)get_zeroed_page(GFP_ATOMIC); if (hv_context.synic_event_page[cpu] == NULL) { pr_err("Unable to allocate SYNIC event page\n"); goto err; } } return 0; err: return -ENOMEM; } static void hv_synic_free_cpu(int cpu) { kfree(hv_context.event_dpc[cpu]); if (hv_context.synic_event_page[cpu]) free_page((unsigned long)hv_context.synic_event_page[cpu]); if (hv_context.synic_message_page[cpu]) free_page((unsigned long)hv_context.synic_message_page[cpu]); } void hv_synic_free(void) { int cpu; for_each_online_cpu(cpu) hv_synic_free_cpu(cpu); } /* * hv_synic_init - Initialize the Synthetic Interrupt Controller. * * If it is already initialized by another entity (i.e. the x2v shim), we need to * retrieve the initialized message and event pages. Otherwise, we create and * initialize the message and event pages. */ void hv_synic_init(void *arg) { u64 version; union hv_synic_simp simp; union hv_synic_siefp siefp; union hv_synic_sint shared_sint; union hv_synic_scontrol sctrl; u64 vp_index; int cpu = smp_processor_id(); if (!hv_context.hypercall_page) return; /* Check the version */ rdmsrl(HV_X64_MSR_SVERSION, version); /* Setup the Synic's message page */ rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64); simp.simp_enabled = 1; simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu]) >> PAGE_SHIFT; wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64); /* Setup the Synic's event page */ rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); siefp.siefp_enabled = 1; siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu]) >> PAGE_SHIFT; wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); /* Setup the shared SINT. */ rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); shared_sint.as_uint64 = 0; shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR; shared_sint.masked = false; shared_sint.auto_eoi = true; wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); /* Enable the global synic bit */ rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); sctrl.enable = 1; wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); hv_context.synic_initialized = true; /* * Setup the mapping between Hyper-V's notion * of cpuid and Linux' notion of cpuid. * This array will be indexed using Linux cpuid. */ rdmsrl(HV_X64_MSR_VP_INDEX, vp_index); hv_context.vp_index[cpu] = (u32)vp_index; return; } /* * hv_synic_cleanup - Cleanup routine for hv_synic_init(). */ void hv_synic_cleanup(void *arg) { union hv_synic_sint shared_sint; union hv_synic_simp simp; union hv_synic_siefp siefp; int cpu = smp_processor_id(); if (!hv_context.synic_initialized) return; rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); shared_sint.masked = 1; /* Need to correctly cleanup in the case of SMP!!! 
*/ /* Disable the interrupt */ wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64); simp.simp_enabled = 0; simp.base_simp_gpa = 0; wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64); rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); siefp.siefp_enabled = 0; siefp.base_siefp_gpa = 0; wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); free_page((unsigned long)hv_context.synic_message_page[cpu]); free_page((unsigned long)hv_context.synic_event_page[cpu]); }
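/*
 * Editor's note: illustrative sketch, not part of the original file.
 * It shows how a caller such as the VMBus connection code uses
 * hv_post_message() defined above; the connection id value, the union
 * field names (taken from this era's hyperv headers) and the payload
 * are assumptions for illustration.
 */
static int example_post_message(void *buffer, size_t buflen)
{
	union hv_connection_id conn_id;

	conn_id.asu32 = 0;
	conn_id.u.id = 1;	/* hypothetical message connection id */

	/* Message type 1 mirrors what the VMBus layer passes. */
	return hv_post_message(conn_id, (enum hv_message_type)1,
			       buffer, buflen);
}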
AdaLovelance/lxcGrsecKernels
linux-3.14.37/drivers/hv/hv.c
C
gpl-2.0
10,575
23.941038
79
0.684728
false
<html> <head> <title>Test of DynamicCSS plugin</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <script type="text/javascript"> _editor_url = "../"; _editor_lang = "en"; </script> <!-- load the main HTMLArea files --> <script type="text/javascript" src="../htmlarea.js"></script> <script type="text/javascript"> HTMLArea.loadPlugin("DynamicCSS"); function initDocument() { var editor = new HTMLArea("editor"); editor.config.pageStyle = "@import url(dynamic.css);"; editor.registerPlugin(DynamicCSS); editor.generate(); } </script> </head> <body onload="HTMLArea.init(); HTMLArea.onload = initDocument"> <h1>Test of DynamicCSS plugin</h1> <textarea id="editor" style="height: 30em; width: 100%;"> <p>p with default p style</p><br> <p class="p1">p with p.class="p1"</p><br> <p class="p2">p with p.class="p2"</p><br> <br> <div>div with default div class</div><br> <div class="div1">div with div.class="div1"</div><br> <div class="div2">div with div.class="div2"</div><br> <br> <span class="highlight">testtext with common class="highlight"</span> </textarea> </body> </html>
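<!-- Editor's note: illustrative sketch, not part of the original page.
     A second bundled plugin would be loaded and registered the same way
     as DynamicCSS above; "ContextMenu" here is only an example name:

       HTMLArea.loadPlugin("ContextMenu");
       ...
       editor.registerPlugin(ContextMenu);
-->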
jwlynch/openacs.core
packages/acs-templating/www/resources/htmlarea/examples/dynamic_css.html
HTML
gpl-2.0
1,333
28.295455
77
0.564141
false
/* PKCS#7 parser * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) "PKCS7: "fmt #include <linux/kernel.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/oid_registry.h> #include <crypto/public_key.h> #include "pkcs7_parser.h" #include "pkcs7-asn1.h" struct pkcs7_parse_context { struct pkcs7_message *msg; /* Message being constructed */ struct pkcs7_signed_info *sinfo; /* SignedInfo being constructed */ struct pkcs7_signed_info **ppsinfo; struct x509_certificate *certs; /* Certificate cache */ struct x509_certificate **ppcerts; unsigned long data; /* Start of data */ enum OID last_oid; /* Last OID encountered */ unsigned x509_index; unsigned sinfo_index; const void *raw_serial; unsigned raw_serial_size; unsigned raw_issuer_size; const void *raw_issuer; const void *raw_skid; unsigned raw_skid_size; bool expect_skid; }; /* * Free a signed information block. */ static void pkcs7_free_signed_info(struct pkcs7_signed_info *sinfo) { if (sinfo) { kfree(sinfo->sig.s); kfree(sinfo->sig.digest); kfree(sinfo->signing_cert_id); kfree(sinfo); } } /** * pkcs7_free_message - Free a PKCS#7 message * @pkcs7: The PKCS#7 message to free */ void pkcs7_free_message(struct pkcs7_message *pkcs7) { struct x509_certificate *cert; struct pkcs7_signed_info *sinfo; if (pkcs7) { while (pkcs7->certs) { cert = pkcs7->certs; pkcs7->certs = cert->next; x509_free_certificate(cert); } while (pkcs7->crl) { cert = pkcs7->crl; pkcs7->crl = cert->next; x509_free_certificate(cert); } while (pkcs7->signed_infos) { sinfo = pkcs7->signed_infos; pkcs7->signed_infos = sinfo->next; pkcs7_free_signed_info(sinfo); } kfree(pkcs7); } } EXPORT_SYMBOL_GPL(pkcs7_free_message); /* * Check authenticatedAttributes are provided or not provided consistently. 
*/ static int pkcs7_check_authattrs(struct pkcs7_message *msg) { struct pkcs7_signed_info *sinfo; bool want; sinfo = msg->signed_infos; if (sinfo->authattrs) { want = true; msg->have_authattrs = true; } for (sinfo = sinfo->next; sinfo; sinfo = sinfo->next) if (!!sinfo->authattrs != want) goto inconsistent; return 0; inconsistent: pr_warn("Inconsistently supplied authAttrs\n"); return -EINVAL; } /** * pkcs7_parse_message - Parse a PKCS#7 message * @data: The raw binary ASN.1 encoded message to be parsed * @datalen: The size of the encoded message */ struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen) { struct pkcs7_parse_context *ctx; struct pkcs7_message *msg = ERR_PTR(-ENOMEM); int ret; ctx = kzalloc(sizeof(struct pkcs7_parse_context), GFP_KERNEL); if (!ctx) goto out_no_ctx; ctx->msg = kzalloc(sizeof(struct pkcs7_message), GFP_KERNEL); if (!ctx->msg) goto out_no_msg; ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL); if (!ctx->sinfo) goto out_no_sinfo; ctx->data = (unsigned long)data; ctx->ppcerts = &ctx->certs; ctx->ppsinfo = &ctx->msg->signed_infos; /* Attempt to decode the signature */ ret = asn1_ber_decoder(&pkcs7_decoder, ctx, data, datalen); if (ret < 0) { msg = ERR_PTR(ret); goto out; } ret = pkcs7_check_authattrs(ctx->msg); if (ret < 0) goto out; msg = ctx->msg; ctx->msg = NULL; out: while (ctx->certs) { struct x509_certificate *cert = ctx->certs; ctx->certs = cert->next; x509_free_certificate(cert); } pkcs7_free_signed_info(ctx->sinfo); out_no_sinfo: pkcs7_free_message(ctx->msg); out_no_msg: kfree(ctx); out_no_ctx: return msg; } EXPORT_SYMBOL_GPL(pkcs7_parse_message); /** * pkcs7_get_content_data - Get access to the PKCS#7 content * @pkcs7: The preparsed PKCS#7 message to access * @_data: Place to return a pointer to the data * @_data_len: Place to return the data length * @want_wrapper: True if the ASN.1 object header should be included in the data * * Get access to the data content of the PKCS#7 message, including, optionally, * the header of the ASN.1 object that contains it. Returns -ENODATA if the * data object was missing from the message. */ int pkcs7_get_content_data(const struct pkcs7_message *pkcs7, const void **_data, size_t *_data_len, bool want_wrapper) { size_t wrapper; if (!pkcs7->data) return -ENODATA; wrapper = want_wrapper ? pkcs7->data_hdrlen : 0; *_data = pkcs7->data - wrapper; *_data_len = pkcs7->data_len + wrapper; return 0; } EXPORT_SYMBOL_GPL(pkcs7_get_content_data); /* * Note an OID when we find one for later processing when we know how * to interpret it. */ int pkcs7_note_OID(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; ctx->last_oid = look_up_OID(value, vlen); if (ctx->last_oid == OID__NR) { char buffer[50]; sprint_oid(value, vlen, buffer, sizeof(buffer)); printk("PKCS7: Unknown OID: [%lu] %s\n", (unsigned long)value - ctx->data, buffer); } return 0; } /* * Note the digest algorithm for the signature. 
*/ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { case OID_md4: ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_MD4; break; case OID_md5: ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_MD5; break; case OID_sha1: ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA1; break; case OID_sha256: ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA256; break; case OID_sha384: ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA384; break; case OID_sha512: ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA512; break; case OID_sha224: ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA224; default: printk("Unsupported digest algo: %u\n", ctx->last_oid); return -ENOPKG; } return 0; } /* * Note the public key algorithm for the signature. */ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { case OID_rsaEncryption: ctx->sinfo->sig.pkey_algo = PKEY_ALGO_RSA; break; default: printk("Unsupported pkey algo: %u\n", ctx->last_oid); return -ENOPKG; } return 0; } /* * We only support signed data [RFC2315 sec 9]. */ int pkcs7_check_content_type(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; if (ctx->last_oid != OID_signed_data) { pr_warn("Only support pkcs7_signedData type\n"); return -EINVAL; } return 0; } /* * Note the SignedData version */ int pkcs7_note_signeddata_version(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; unsigned version; if (vlen != 1) goto unsupported; ctx->msg->version = version = *(const u8 *)value; switch (version) { case 1: /* PKCS#7 SignedData [RFC2315 sec 9.1] * CMS ver 1 SignedData [RFC5652 sec 5.1] */ break; case 3: /* CMS ver 3 SignedData [RFC2315 sec 5.1] */ break; default: goto unsupported; } return 0; unsupported: pr_warn("Unsupported SignedData version\n"); return -EINVAL; } /* * Note the SignerInfo version */ int pkcs7_note_signerinfo_version(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; unsigned version; if (vlen != 1) goto unsupported; version = *(const u8 *)value; switch (version) { case 1: /* PKCS#7 SignerInfo [RFC2315 sec 9.2] * CMS ver 1 SignerInfo [RFC5652 sec 5.3] */ if (ctx->msg->version != 1) goto version_mismatch; ctx->expect_skid = false; break; case 3: /* CMS ver 3 SignerInfo [RFC2315 sec 5.3] */ if (ctx->msg->version == 1) goto version_mismatch; ctx->expect_skid = true; break; default: goto unsupported; } return 0; unsupported: pr_warn("Unsupported SignerInfo version\n"); return -EINVAL; version_mismatch: pr_warn("SignedData-SignerInfo version mismatch\n"); return -EBADMSG; } /* * Extract a certificate and store it in the context. */ int pkcs7_extract_cert(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct pkcs7_parse_context *ctx = context; struct x509_certificate *x509; if (tag != ((ASN1_UNIV << 6) | ASN1_CONS_BIT | ASN1_SEQ)) { pr_debug("Cert began with tag %02x at %lu\n", tag, (unsigned long)ctx - ctx->data); return -EBADMSG; } /* We have to correct for the header so that the X.509 parser can start * from the beginning. Note that since X.509 stipulates DER, there * probably shouldn't be an EOC trailer - but it is in PKCS#7 (which * stipulates BER). 
/*
 * Save the certificate list
 */
int pkcs7_note_certificate_list(void *context, size_t hdrlen,
				unsigned char tag,
				const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;

	pr_devel("Got cert list (%02x)\n", tag);

	*ctx->ppcerts = ctx->msg->certs;
	ctx->msg->certs = ctx->certs;
	ctx->certs = NULL;
	ctx->ppcerts = &ctx->certs;
	return 0;
}

/*
 * Note the content type.
 */
int pkcs7_note_content(void *context, size_t hdrlen,
		       unsigned char tag,
		       const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;

	if (ctx->last_oid != OID_data &&
	    ctx->last_oid != OID_msIndirectData) {
		pr_warn("Unsupported data type %d\n", ctx->last_oid);
		return -EINVAL;
	}

	ctx->msg->data_type = ctx->last_oid;
	return 0;
}

/*
 * Extract the data from the message and store that and its content type OID
 * in the context.
 */
int pkcs7_note_data(void *context, size_t hdrlen,
		    unsigned char tag,
		    const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;

	pr_debug("Got data\n");

	ctx->msg->data = value;
	ctx->msg->data_len = vlen;
	ctx->msg->data_hdrlen = hdrlen;
	return 0;
}
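/*
 * Illustrative worked example (not part of the original file):
 * pkcs7_note_data() above records the content pointer past the ASN.1
 * header, plus the header length, so the two views that
 * pkcs7_get_content_data() can return relate as
 *
 *	wrapped     = msg->data - msg->data_hdrlen
 *	wrapped_len = msg->data_len + msg->data_hdrlen
 *
 * with want_wrapper selecting between them.
 */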
/*
 * Parse authenticated attributes.
 */
int pkcs7_sig_note_authenticated_attr(void *context, size_t hdrlen,
				      unsigned char tag,
				      const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;
	struct pkcs7_signed_info *sinfo = ctx->sinfo;
	enum OID content_type;

	pr_devel("AuthAttr: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value);

	switch (ctx->last_oid) {
	case OID_contentType:
		if (__test_and_set_bit(sinfo_has_content_type, &sinfo->aa_set))
			goto repeated;
		content_type = look_up_OID(value, vlen);
		if (content_type != ctx->msg->data_type) {
			pr_warn("Mismatch between global data type (%d) and sinfo %u (%d)\n",
				ctx->msg->data_type, sinfo->index,
				content_type);
			return -EBADMSG;
		}
		return 0;

	case OID_signingTime:
		if (__test_and_set_bit(sinfo_has_signing_time, &sinfo->aa_set))
			goto repeated;
		/* Should we check that the signing time is consistent
		 * with the signer's X.509 cert?
		 */
		return x509_decode_time(&sinfo->signing_time,
					hdrlen, tag, value, vlen);

	case OID_messageDigest:
		if (__test_and_set_bit(sinfo_has_message_digest, &sinfo->aa_set))
			goto repeated;
		if (tag != ASN1_OTS)
			return -EBADMSG;
		sinfo->msgdigest = value;
		sinfo->msgdigest_len = vlen;
		return 0;

	case OID_smimeCapabilites:
		if (__test_and_set_bit(sinfo_has_smime_caps, &sinfo->aa_set))
			goto repeated;
		if (ctx->msg->data_type != OID_msIndirectData) {
			pr_warn("S/MIME Caps only allowed with Authenticode\n");
			return -EKEYREJECTED;
		}
		return 0;

		/* Microsoft SpOpusInfo seems to contain cont[0] 16-bit BE
		 * char URLs and cont[1] 8-bit char URLs.
		 *
		 * Microsoft StatementType seems to contain a list of OIDs that
		 * are also used as extendedKeyUsage types in X.509 certs.
		 */
	case OID_msSpOpusInfo:
		if (__test_and_set_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))
			goto repeated;
		goto authenticode_check;
	case OID_msStatementType:
		if (__test_and_set_bit(sinfo_has_ms_statement_type, &sinfo->aa_set))
			goto repeated;
	authenticode_check:
		if (ctx->msg->data_type != OID_msIndirectData) {
			pr_warn("Authenticode AuthAttrs only allowed with Authenticode\n");
			return -EKEYREJECTED;
		}
		/* I'm not sure how to validate these */
		return 0;
	default:
		return 0;
	}

repeated:
	/* We permit max one item per AuthenticatedAttribute and no repeats */
	pr_warn("Repeated/multivalue AuthAttrs not permitted\n");
	return -EKEYREJECTED;
}

/*
 * Note the set of auth attributes for digestion purposes [RFC2315 sec 9.3]
 */
int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen,
				    unsigned char tag,
				    const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;
	struct pkcs7_signed_info *sinfo = ctx->sinfo;

	if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) ||
	    !test_bit(sinfo_has_message_digest, &sinfo->aa_set) ||
	    (ctx->msg->data_type == OID_msIndirectData &&
	     !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) {
		pr_warn("Missing required AuthAttr\n");
		return -EBADMSG;
	}

	if (ctx->msg->data_type != OID_msIndirectData &&
	    test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set)) {
		pr_warn("Unexpected Authenticode AuthAttr\n");
		return -EBADMSG;
	}

	/* We need to switch the 'CONT 0' to a 'SET OF' when we digest */
	sinfo->authattrs = value - (hdrlen - 1);
	sinfo->authattrs_len = vlen + (hdrlen - 1);
	return 0;
}

/*
 * Note the issuing certificate serial number
 */
int pkcs7_sig_note_serial(void *context, size_t hdrlen,
			  unsigned char tag,
			  const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;
	ctx->raw_serial = value;
	ctx->raw_serial_size = vlen;
	return 0;
}

/*
 * Note the issuer's name
 */
int pkcs7_sig_note_issuer(void *context, size_t hdrlen,
			  unsigned char tag,
			  const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;
	ctx->raw_issuer = value;
	ctx->raw_issuer_size = vlen;
	return 0;
}

/*
 * Note the issuing cert's subjectKeyIdentifier
 */
int pkcs7_sig_note_skid(void *context, size_t hdrlen,
			unsigned char tag,
			const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;

	pr_devel("SKID: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value);

	ctx->raw_skid = value;
	ctx->raw_skid_size = vlen;
	return 0;
}

/*
 * Note the signature data
 */
int pkcs7_sig_note_signature(void *context, size_t hdrlen,
			     unsigned char tag,
			     const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;

	BUG_ON(ctx->sinfo->sig.pkey_algo != PKEY_ALGO_RSA);

	ctx->sinfo->sig.s = kmemdup(value, vlen, GFP_KERNEL);
	if (!ctx->sinfo->sig.s)
		return -ENOMEM;

	ctx->sinfo->sig.s_size = vlen;
	return 0;
}
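/*
 * Illustrative sketch only (not part of the original file): the 'CONT 0'
 * to 'SET OF' switch noted in pkcs7_sig_note_set_of_authattrs() above
 * works because sinfo->authattrs points one byte past the IMPLICIT [0]
 * tag octet while still covering the length octets.  At verification time
 * the set is therefore hashed with a universal SET OF tag (0x31)
 * prepended, roughly:
 *
 *	static const u8 tag = 0x31;
 *	crypto_shash_update(desc, &tag, 1);
 *	crypto_shash_finup(desc, sinfo->authattrs, sinfo->authattrs_len,
 *			   sig->digest);
 */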
/*
 * Note a signature information block
 */
int pkcs7_note_signed_info(void *context, size_t hdrlen,
			   unsigned char tag,
			   const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;
	struct pkcs7_signed_info *sinfo = ctx->sinfo;
	struct asymmetric_key_id *kid;

	if (ctx->msg->data_type == OID_msIndirectData && !sinfo->authattrs) {
		pr_warn("Authenticode requires AuthAttrs\n");
		return -EBADMSG;
	}

	/* Generate cert issuer + serial number key ID */
	if (!ctx->expect_skid) {
		kid = asymmetric_key_generate_id(ctx->raw_serial,
						 ctx->raw_serial_size,
						 ctx->raw_issuer,
						 ctx->raw_issuer_size);
	} else {
		kid = asymmetric_key_generate_id(ctx->raw_skid,
						 ctx->raw_skid_size,
						 "", 0);
	}
	if (IS_ERR(kid))
		return PTR_ERR(kid);

	pr_devel("SINFO KID: %u [%*phN]\n", kid->len, kid->len, kid->data);

	sinfo->signing_cert_id = kid;
	sinfo->index = ++ctx->sinfo_index;
	*ctx->ppsinfo = sinfo;
	ctx->ppsinfo = &sinfo->next;
	ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL);
	if (!ctx->sinfo)
		return -ENOMEM;
	return 0;
}
linuxium/ubuntu-xenial
crypto/asymmetric_keys/pkcs7_parser.c
C
gpl-2.0
16,623
23.810448
80
0.66775
false
/*
 * Hamlib Drake backend - main header
 * Copyright (c) 2001-2004 by Stephane Fillod
 *
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef _DRAKE_H
#define _DRAKE_H 1

#include <hamlib/rig.h>

#define BACKEND_VER "0.5"

struct drake_priv_data {
	int curr_ch;
};

int drake_set_freq(RIG *rig, vfo_t vfo, freq_t freq);
int drake_get_freq(RIG *rig, vfo_t vfo, freq_t *freq);
int drake_set_vfo(RIG *rig, vfo_t vfo);
int drake_get_vfo(RIG *rig, vfo_t *vfo);
int drake_set_mode(RIG *rig, vfo_t vfo, rmode_t mode, pbwidth_t width);
int drake_get_mode(RIG *rig, vfo_t vfo, rmode_t *mode, pbwidth_t *width);
int drake_init(RIG *rig);
int drake_cleanup(RIG *rig);
int drake_set_ant(RIG *rig, vfo_t vfo, ant_t ant);
int drake_get_ant(RIG *rig, vfo_t vfo, ant_t *ant);
int drake_set_mem(RIG *rig, vfo_t vfo, int ch);
int drake_get_mem(RIG *rig, vfo_t vfo, int *ch);
int drake_set_chan(RIG *rig, const channel_t *chan);
int drake_get_chan(RIG *rig, channel_t *chan);
int drake_vfo_op(RIG *rig, vfo_t vfo, vfo_op_t op);
int drake_set_func(RIG *rig, vfo_t vfo, setting_t func, int status);
int drake_get_func(RIG *rig, vfo_t vfo, setting_t func, int *status);
int drake_set_level(RIG *rig, vfo_t vfo, setting_t level, value_t val);
int drake_get_level(RIG *rig, vfo_t vfo, setting_t level, value_t *val);
int drake_set_powerstat(RIG *rig, powerstat_t status);
int drake_get_powerstat(RIG *rig, powerstat_t *status);
const char *drake_get_info(RIG *rig);

extern const struct rig_caps r8a_caps;
extern const struct rig_caps r8b_caps;

#endif /* _DRAKE_H */
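/*
 * Illustrative sketch only (not part of the original header): the
 * functions declared above are wired into Hamlib through a rig_caps
 * table in the model files (r8a.c / r8b.c); a hypothetical excerpt:
 *
 *	const struct rig_caps r8b_caps = {
 *		...
 *		.rig_init    = drake_init,
 *		.rig_cleanup = drake_cleanup,
 *		.set_freq    = drake_set_freq,
 *		.get_freq    = drake_get_freq,
 *		.set_mode    = drake_set_mode,
 *		.get_mode    = drake_get_mode,
 *	};
 */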
thatchristoph/hamlib
drake/drake.h
C
gpl-2.0
2,281
37.016667
83
0.702324
false
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
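/*
 * Illustrative sketch only (not part of the original file): the policies
 * described above are what user space selects via the set_mempolicy() and
 * mbind() syscalls defined later in this file, e.g. (using the <numaif.h>
 * wrappers, with a hypothetical mapping "buf" of "len" bytes):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// interleave all future allocations of this task over the mask
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 *	// bind one existing mapping to the same nodes
 *	mbind(buf, len, MPOL_BIND, &mask, 8 * sizeof(mask), MPOL_MF_STRICT);
 */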
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If read-side task has no lock to protect task->mempolicy, write-side
	 * task will rebind the task->mempolicy by two step. The first step is
	 * setting all the newly nodes, and the second step is cleaning all the
	 * disallowed nodes. In this way, we can avoid finding no node to alloc
	 * page.
	 * If we have a lock to protect task->mempolicy in read-side, we do
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
But, we need to * handle an empty nodemask with MPOL_PREFERRED here. * * Must be called holding task's alloc_lock to protect task's mems_allowed * and mempolicy. May also be called holding the mmap_semaphore for write. */ static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes, struct nodemask_scratch *nsc) { int ret; /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */ if (pol == NULL) return 0; /* Check N_HIGH_MEMORY */ nodes_and(nsc->mask1, cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]); VM_BUG_ON(!nodes); if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes)) nodes = NULL; /* explicit local allocation */ else { if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1); else nodes_and(nsc->mask2, *nodes, nsc->mask1); if (mpol_store_user_nodemask(pol)) pol->w.user_nodemask = *nodes; else pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; } if (nodes) ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); else ret = mpol_ops[pol->mode].create(pol, NULL); return ret; } /* * This function just creates a new policy, does some check and simple * initialization. You must invoke mpol_set_nodemask() to set nodes. */ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *policy; pr_debug("setting mode %d flags %d nodes[0] %lx\n", mode, flags, nodes ? nodes_addr(*nodes)[0] : -1); if (mode == MPOL_DEFAULT) { if (nodes && !nodes_empty(*nodes)) return ERR_PTR(-EINVAL); return NULL; /* simply delete any existing policy */ } VM_BUG_ON(!nodes); /* * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). * All other modes require a valid pointer to a non-empty nodemask. */ if (mode == MPOL_PREFERRED) { if (nodes_empty(*nodes)) { if (((flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES))) return ERR_PTR(-EINVAL); } } else if (nodes_empty(*nodes)) return ERR_PTR(-EINVAL); policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!policy) return ERR_PTR(-ENOMEM); atomic_set(&policy->refcnt, 1); policy->mode = mode; policy->flags = flags; return policy; } /* Slow path of a mpol destructor. */ void __mpol_put(struct mempolicy *p) { if (!atomic_dec_and_test(&p->refcnt)) return; kmem_cache_free(policy_cache, p); } static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { } /* * step: * MPOL_REBIND_ONCE - do rebind work at once * MPOL_REBIND_STEP1 - set all the newly nodes * MPOL_REBIND_STEP2 - clean all the disallowed nodes */ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) nodes_and(tmp, pol->w.user_nodemask, *nodes); else if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); else { /* * if step == 1, we use ->w.cpuset_mems_allowed to cache the * result */ if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) { nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = step ? 
tmp : *nodes; } else if (step == MPOL_REBIND_STEP2) { tmp = pol->w.cpuset_mems_allowed; pol->w.cpuset_mems_allowed = *nodes; } else BUG(); } if (nodes_empty(tmp)) tmp = *nodes; if (step == MPOL_REBIND_STEP1) nodes_or(pol->v.nodes, pol->v.nodes, tmp); else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2) pol->v.nodes = tmp; else BUG(); if (!node_isset(current->il_next, tmp)) { current->il_next = next_node(current->il_next, tmp); if (current->il_next >= MAX_NUMNODES) current->il_next = first_node(tmp); if (current->il_next >= MAX_NUMNODES) current->il_next = numa_node_id(); } } static void mpol_rebind_preferred(struct mempolicy *pol, const nodemask_t *nodes, enum mpol_rebind_step step) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) { int node = first_node(pol->w.user_nodemask); if (node_isset(node, *nodes)) { pol->v.preferred_node = node; pol->flags &= ~MPOL_F_LOCAL; } else pol->flags |= MPOL_F_LOCAL; } else if (pol->flags & MPOL_F_RELATIVE_NODES) { mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); pol->v.preferred_node = first_node(tmp); } else if (!(pol->flags & MPOL_F_LOCAL)) { pol->v.preferred_node = node_remap(pol->v.preferred_node, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } } /* * mpol_rebind_policy - Migrate a policy to a different set of nodes * * If read-side task has no lock to protect task->mempolicy, write-side * task will rebind the task->mempolicy by two step. The first step is * setting all the newly nodes, and the second step is cleaning all the * disallowed nodes. In this way, we can avoid finding no node to alloc * page. * If we have a lock to protect task->mempolicy in read-side, we do * rebind directly. * * step: * MPOL_REBIND_ONCE - do rebind work at once * MPOL_REBIND_STEP1 - set all the newly nodes * MPOL_REBIND_STEP2 - clean all the disallowed nodes */ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask, enum mpol_rebind_step step) { if (!pol) return; if (!mpol_store_user_nodemask(pol) && step == 0 && nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) return; if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING)) return; if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING)) BUG(); if (step == MPOL_REBIND_STEP1) pol->flags |= MPOL_F_REBINDING; else if (step == MPOL_REBIND_STEP2) pol->flags &= ~MPOL_F_REBINDING; else if (step >= MPOL_REBIND_NSTEP) BUG(); mpol_ops[pol->mode].rebind(pol, newmask, step); } /* * Wrapper for mpol_rebind_policy() that just requires task * pointer, and updates task mempolicy. * * Called with task's alloc_lock held. */ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, enum mpol_rebind_step step) { mpol_rebind_policy(tsk->mempolicy, new, step); } /* * Rebind each vma in mm to new nodemask. * * Call holding a reference to mm. Takes mm->mmap_sem during call. 
*/ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { struct vm_area_struct *vma; down_write(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); up_write(&mm->mmap_sem); } static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { [MPOL_DEFAULT] = { .rebind = mpol_rebind_default, }, [MPOL_INTERLEAVE] = { .create = mpol_new_interleave, .rebind = mpol_rebind_nodemask, }, [MPOL_PREFERRED] = { .create = mpol_new_preferred, .rebind = mpol_rebind_preferred, }, [MPOL_BIND] = { .create = mpol_new_bind, .rebind = mpol_rebind_nodemask, }, }; static void gather_stats(struct page *, void *, int pte_dirty); static void migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags); /* Scan through pages checking if pages follow certain conditions. */ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pte_t *orig_pte; pte_t *pte; spinlock_t *ptl; orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); do { struct page *page; int nid; if (!pte_present(*pte)) continue; page = vm_normal_page(vma, addr, *pte); if (!page) continue; /* * vm_normal_page() filters out zero pages, but there might * still be PageReserved pages to skip, perhaps in a VDSO. * And we cannot move PageKsm pages sensibly or safely yet. */ if (PageReserved(page) || PageKsm(page)) continue; nid = page_to_nid(page); if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) continue; if (flags & MPOL_MF_STATS) gather_stats(page, private, pte_dirty(*pte)); else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) migrate_page_add(page, private, flags); else break; } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); return addr != end; } static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) continue; if (check_pte_range(vma, pmd, addr, next, nodes, flags, private)) return -EIO; } while (pmd++, addr = next, addr != end); return 0; } static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pud_t *pud; unsigned long next; pud = pud_offset(pgd, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; if (check_pmd_range(vma, pud, addr, next, nodes, flags, private)) return -EIO; } while (pud++, addr = next, addr != end); return 0; } static inline int check_pgd_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { pgd_t *pgd; unsigned long next; pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; if (check_pud_range(vma, pgd, addr, next, nodes, flags, private)) return -EIO; } while (pgd++, addr = next, addr != end); return 0; } /* * Check if all pages in a range are on a set of nodes. * If pagelist != NULL then isolate pages from the LRU and * put them on the pagelist. 
*/ static struct vm_area_struct * check_range(struct mm_struct *mm, unsigned long start, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { int err; struct vm_area_struct *first, *vma, *prev; first = find_vma(mm, start); if (!first) return ERR_PTR(-EFAULT); prev = NULL; for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { if (!(flags & MPOL_MF_DISCONTIG_OK)) { if (!vma->vm_next && vma->vm_end < end) return ERR_PTR(-EFAULT); if (prev && prev->vm_end < vma->vm_start) return ERR_PTR(-EFAULT); } if (!is_vm_hugetlb_page(vma) && ((flags & MPOL_MF_STRICT) || ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) && vma_migratable(vma)))) { unsigned long endvma = vma->vm_end; if (endvma > end) endvma = end; if (vma->vm_start > start) start = vma->vm_start; err = check_pgd_range(vma, start, endvma, nodes, flags, private); if (err) { first = ERR_PTR(err); break; } } prev = vma; } return first; } /* Apply policy to a single VMA */ static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new) { int err = 0; struct mempolicy *old = vma->vm_policy; pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_ops, vma->vm_file, vma->vm_ops ? vma->vm_ops->set_policy : NULL); if (vma->vm_ops && vma->vm_ops->set_policy) err = vma->vm_ops->set_policy(vma, new); if (!err) { mpol_get(new); vma->vm_policy = new; mpol_put(old); } return err; } /* Step 2: apply policy to a range and do splits. */ static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { struct vm_area_struct *next; struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; vma = find_vma_prev(mm, start, &prev); if (!vma || vma->vm_start > start) return -EFAULT; for (; vma && vma->vm_start < end; prev = vma, vma = next) { next = vma->vm_next; vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, new_pol); if (prev) { vma = prev; next = vma->vm_next; continue; } if (vma->vm_start != vmstart) { err = split_vma(vma->vm_mm, vma, vmstart, 1); if (err) goto out; } if (vma->vm_end != vmend) { err = split_vma(vma->vm_mm, vma, vmend, 0); if (err) goto out; } err = policy_vma(vma, new_pol); if (err) goto out; } out: return err; } /* * Update task->flags PF_MEMPOLICY bit: set iff non-default * mempolicy. Allows more rapid checking of this (combined perhaps * with other PF_* flag bits) on memory allocation hot code paths. * * If called from outside this file, the task 'p' should -only- be * a newly forked child not yet visible on the task list, because * manipulating the task flags of a visible task is not safe. * * The above limitation is why this routine has the funny name * mpol_fix_fork_child_flag(). * * It is also safe to call this with a task pointer of current, * which the static wrapper mpol_set_task_struct_flag() does, * for use within this file. 
*/ void mpol_fix_fork_child_flag(struct task_struct *p) { if (p->mempolicy) p->flags |= PF_MEMPOLICY; else p->flags &= ~PF_MEMPOLICY; } static void mpol_set_task_struct_flag(void) { mpol_fix_fork_child_flag(current); } /* Set the process memory policy */ static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *new, *old; struct mm_struct *mm = current->mm; NODEMASK_SCRATCH(scratch); int ret; if (!scratch) return -ENOMEM; new = mpol_new(mode, flags, nodes); if (IS_ERR(new)) { ret = PTR_ERR(new); goto out; } /* * prevent changing our mempolicy while show_numa_maps() * is using it. * Note: do_set_mempolicy() can be called at init time * with no 'mm'. */ if (mm) down_write(&mm->mmap_sem); task_lock(current); ret = mpol_set_nodemask(new, nodes, scratch); if (ret) { task_unlock(current); if (mm) up_write(&mm->mmap_sem); mpol_put(new); goto out; } old = current->mempolicy; current->mempolicy = new; mpol_set_task_struct_flag(); if (new && new->mode == MPOL_INTERLEAVE && nodes_weight(new->v.nodes)) current->il_next = first_node(new->v.nodes); task_unlock(current); if (mm) up_write(&mm->mmap_sem); mpol_put(old); ret = 0; out: NODEMASK_SCRATCH_FREE(scratch); return ret; } /* * Return nodemask for policy for get_mempolicy() query * * Called with task's alloc_lock held */ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) { nodes_clear(*nodes); if (p == &default_policy) return; switch (p->mode) { case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: *nodes = p->v.nodes; break; case MPOL_PREFERRED: if (!(p->flags & MPOL_F_LOCAL)) node_set(p->v.preferred_node, *nodes); /* else return empty node mask for local allocation */ break; default: BUG(); } } static int lookup_node(struct mm_struct *mm, unsigned long addr) { struct page *p; int err; err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL); if (err >= 0) { err = page_to_nid(p); put_page(p); } return err; } /* Retrieve NUMA policy */ static long do_get_mempolicy(int *policy, nodemask_t *nmask, unsigned long addr, unsigned long flags) { int err; struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; struct mempolicy *pol = current->mempolicy; if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) return -EINVAL; if (flags & MPOL_F_MEMS_ALLOWED) { if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) return -EINVAL; *policy = 0; /* just so it's initialized */ task_lock(current); *nmask = cpuset_current_mems_allowed; task_unlock(current); return 0; } if (flags & MPOL_F_ADDR) { /* * Do NOT fall back to task policy if the * vma/shared policy at addr is NULL. We * want to return MPOL_DEFAULT in this case. */ down_read(&mm->mmap_sem); vma = find_vma_intersection(mm, addr, addr+1); if (!vma) { up_read(&mm->mmap_sem); return -EFAULT; } if (vma->vm_ops && vma->vm_ops->get_policy) pol = vma->vm_ops->get_policy(vma, addr); else pol = vma->vm_policy; } else if (addr) return -EINVAL; if (!pol) pol = &default_policy; /* indicates default behavior */ if (flags & MPOL_F_NODE) { if (flags & MPOL_F_ADDR) { err = lookup_node(mm, addr); if (err < 0) goto out; *policy = err; } else if (pol == current->mempolicy && pol->mode == MPOL_INTERLEAVE) { *policy = current->il_next; } else { err = -EINVAL; goto out; } } else { *policy = pol == &default_policy ? MPOL_DEFAULT : pol->mode; /* * Internal mempolicy flags must be masked off before exposing * the policy to userspace. 
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);
	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest, 0);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
*/ tmp = *from_nodes; while (!nodes_empty(tmp)) { int s,d; int source = -1; int dest = 0; for_each_node_mask(s, tmp) { d = node_remap(s, *from_nodes, *to_nodes); if (s == d) continue; source = s; /* Node moved. Memorize */ dest = d; /* dest not in remaining from nodes? */ if (!node_isset(dest, tmp)) break; } if (source == -1) break; node_clear(source, tmp); err = migrate_to_node(mm, source, dest, flags); if (err > 0) busy += err; if (err < 0) break; } out: up_read(&mm->mmap_sem); if (err < 0) return err; return busy; } /* * Allocate a new page for page migration based on vma policy. * Start assuming that page is mapped by vma pointed to by @private. * Search forward from there, if not. N.B., this assumes that the * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. */ static struct page *new_vma_page(struct page *page, unsigned long private, int **x) { struct vm_area_struct *vma = (struct vm_area_struct *)private; unsigned long uninitialized_var(address); while (vma) { address = page_address_in_vma(page, vma); if (address != -EFAULT) break; vma = vma->vm_next; } /* * if !vma, alloc_page_vma() will use task or system default policy */ return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); } #else static void migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) { return -ENOSYS; } static struct page *new_vma_page(struct page *page, unsigned long private, int **x) { return NULL; } #endif static long do_mbind(unsigned long start, unsigned long len, unsigned short mode, unsigned short mode_flags, nodemask_t *nmask, unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; struct mempolicy *new; unsigned long end; int err; LIST_HEAD(pagelist); if (flags & ~(unsigned long)(MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) return -EINVAL; if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; if (start & ~PAGE_MASK) return -EINVAL; if (mode == MPOL_DEFAULT) flags &= ~MPOL_MF_STRICT; len = (len + PAGE_SIZE - 1) & PAGE_MASK; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; new = mpol_new(mode, mode_flags, nmask); if (IS_ERR(new)) return PTR_ERR(new); /* * If we are using the default policy then operation * on discontinuous address spaces is okay after all */ if (!new) flags |= MPOL_MF_DISCONTIG_OK; pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", start, start + len, mode, mode_flags, nmask ? nodes_addr(*nmask)[0] : -1); if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { err = migrate_prep(); if (err) goto mpol_out; } { NODEMASK_SCRATCH(scratch); if (scratch) { down_write(&mm->mmap_sem); task_lock(current); err = mpol_set_nodemask(new, nmask, scratch); task_unlock(current); if (err) up_write(&mm->mmap_sem); } else err = -ENOMEM; NODEMASK_SCRATCH_FREE(scratch); } if (err) goto mpol_out; vma = check_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); err = PTR_ERR(vma); if (!IS_ERR(vma)) { int nr_failed = 0; err = mbind_range(mm, start, end, new); if (!list_empty(&pagelist)) nr_failed = migrate_pages(&pagelist, new_vma_page, (unsigned long)vma, 0); if (!err && nr_failed && (flags & MPOL_MF_STRICT)) err = -EIO; } else putback_lru_pages(&pagelist); up_write(&mm->mmap_sem); mpol_out: mpol_put(new); return err; } /* * User space interface with variable sized bitmaps for nodelists. 
*/ /* Copy a node mask from user space. */ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, unsigned long maxnode) { unsigned long k; unsigned long nlongs; unsigned long endmask; --maxnode; nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; if (maxnode > PAGE_SIZE*BITS_PER_BYTE) return -EINVAL; nlongs = BITS_TO_LONGS(maxnode); if ((maxnode % BITS_PER_LONG) == 0) endmask = ~0UL; else endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; /* When the user specified more nodes than supported just check if the non supported part is all zero. */ if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { if (nlongs > PAGE_SIZE/sizeof(long)) return -EINVAL; for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { unsigned long t; if (get_user(t, nmask + k)) return -EFAULT; if (k == nlongs - 1) { if (t & endmask) return -EINVAL; } else if (t) return -EINVAL; } nlongs = BITS_TO_LONGS(MAX_NUMNODES); endmask = ~0UL; } if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) return -EFAULT; nodes_addr(*nodes)[nlongs-1] &= endmask; return 0; } /* Copy a kernel node mask to user space */ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, nodemask_t *nodes) { unsigned long copy = ALIGN(maxnode-1, 64) / 8; const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); if (copy > nbytes) { if (copy > PAGE_SIZE) return -EINVAL; if (clear_user((char __user *)mask + nbytes, copy - nbytes)) return -EFAULT; copy = nbytes; } return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; } SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, unsigned long, mode, unsigned long __user *, nmask, unsigned long, maxnode, unsigned, flags) { nodemask_t nodes; int err; unsigned short mode_flags; mode_flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if (mode >= MPOL_MAX) return -EINVAL; if ((mode_flags & MPOL_F_STATIC_NODES) && (mode_flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_mbind(start, len, mode, mode_flags, &nodes, flags); } /* Set the process memory policy */ SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask, unsigned long, maxnode) { int err; nodemask_t nodes; unsigned short flags; flags = mode & MPOL_MODE_FLAGS; mode &= ~MPOL_MODE_FLAGS; if ((unsigned int)mode >= MPOL_MAX) return -EINVAL; if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_set_mempolicy(mode, flags, &nodes); } SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, const unsigned long __user *, old_nodes, const unsigned long __user *, new_nodes) { const struct cred *cred = current_cred(), *tcred; struct mm_struct *mm = NULL; struct task_struct *task; nodemask_t task_nodes; int err; nodemask_t *old; nodemask_t *new; NODEMASK_SCRATCH(scratch); if (!scratch) return -ENOMEM; old = &scratch->mask1; new = &scratch->mask2; err = get_nodes(old, old_nodes, maxnode); if (err) goto out; err = get_nodes(new, new_nodes, maxnode); if (err) goto out; /* Find the mm_struct */ rcu_read_lock(); task = pid ? find_task_by_vpid(pid) : current; if (!task) { rcu_read_unlock(); err = -ESRCH; goto out; } mm = get_task_mm(task); rcu_read_unlock(); err = -EINVAL; if (!mm) goto out; /* * Check if this process has the right to modify the specified * process. 
The right exists if the process has administrative * capabilities, superuser privileges or the same * userid as the target process. */ rcu_read_lock(); tcred = __task_cred(task); if (cred->euid != tcred->suid && cred->euid != tcred->uid && cred->uid != tcred->suid && cred->uid != tcred->uid && !capable(CAP_SYS_NICE)) { rcu_read_unlock(); err = -EPERM; goto out; } rcu_read_unlock(); task_nodes = cpuset_mems_allowed(task); /* Is the user allowed to access the target nodes? */ if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { err = -EPERM; goto out; } if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) { err = -EINVAL; goto out; } err = security_task_movememory(task); if (err) goto out; err = do_migrate_pages(mm, old, new, capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); out: if (mm) mmput(mm); NODEMASK_SCRATCH_FREE(scratch); return err; } /* Retrieve NUMA policy */ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, unsigned long __user *, nmask, unsigned long, maxnode, unsigned long, addr, unsigned long, flags) { int err; int uninitialized_var(pval); nodemask_t nodes; if (nmask != NULL && maxnode < MAX_NUMNODES) return -EINVAL; err = do_get_mempolicy(&pval, &nodes, addr, flags); if (err) return err; if (policy && put_user(pval, policy)) return -EFAULT; if (nmask) err = copy_nodes_to_user(nmask, maxnode, &nodes); return err; } #ifdef CONFIG_COMPAT asmlinkage long compat_sys_get_mempolicy(int __user *policy, compat_ulong_t __user *nmask, compat_ulong_t maxnode, compat_ulong_t addr, compat_ulong_t flags) { long err; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) nm = compat_alloc_user_space(alloc_size); err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); if (!err && nmask) { err = copy_from_user(bm, nm, alloc_size); /* ensure entire bitmap is zeroed */ err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); err |= compat_put_bitmap(nmask, bm, nr_bits); } return err; } asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, compat_ulong_t maxnode) { long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { err = compat_get_bitmap(bm, nmask, nr_bits); nm = compat_alloc_user_space(alloc_size); err |= copy_to_user(nm, bm, alloc_size); } if (err) return -EFAULT; return sys_set_mempolicy(mode, nm, nr_bits+1); } asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, compat_ulong_t mode, compat_ulong_t __user *nmask, compat_ulong_t maxnode, compat_ulong_t flags) { long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); nm = compat_alloc_user_space(alloc_size); err |= copy_to_user(nm, nodes_addr(bm), alloc_size); } if (err) return -EFAULT; return sys_mbind(start, len, mode, nm, nr_bits+1, flags); } #endif /* * get_vma_policy(@task, @vma, @addr) * @task - task for fallback if vma policy == default * @vma - virtual memory area whose policy is sought * @addr - address in @vma for shared policy lookup * * Returns effective policy for a VMA at specified address. 
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
									addr);
			if (vpol)
				pol = vpol;
		} else if (vma->vm_policy)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
{
	int nd = numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node isn't part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		 */
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	default:
		BUG();
	}
	return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected from freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change its policy.  The system default policy requires no
 * such protection.
 */
unsigned slab_node(struct mempolicy *policy)
{
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone ? zone->node : numa_node_id();
	}

	default:
		BUG();
	}
}
/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by get_mems_allowed()
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct zonelist *zl;

	*mpol = get_vma_policy(current, vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
	} else {
		zl = policy_zonelist(gfp_flags, *mpol);
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return zl;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif
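/*
 * Illustrative worked example (not part of the original file): the static
 * interleave above is a modulo walk over the set bits of the nodemask.
 * For an interleave mask of nodes {0, 2, 5} (weight 3), page offsets map
 * as
 *
 *	off % 3 == 0  ->  node 0
 *	off % 3 == 1  ->  node 2
 *	off % 3 == 2  ->  node 5
 *
 * so a file mapped by several tasks lands on the same nodes regardless of
 * where each task happened to map it.
 */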
/*
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy.  Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	bool ret = true;

	if (!mask)
		return ret;
	task_lock(tsk);
	mempolicy = tsk->mempolicy;
	if (!mempolicy)
		goto out;

	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		/*
		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
		 * allocate from, they may fallback to other nodes when oom.
		 * Thus, it's possible for tsk to have allocated memory from
		 * nodes in mask.
		 */
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		ret = nodes_intersects(mempolicy->v.nodes, *mask);
		break;
	default:
		BUG();
	}
out:
	task_unlock(tsk);
	return ret;
}

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *      %GFP_USER    user allocation.
 *      %GFP_KERNEL  kernel allocations,
 *      %GFP_HIGHMEM highmem/user allocations,
 *      %GFP_FS      allocation should not call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away.  Should be used for
 * all allocations for pages that will be mapped into user space.  Returns
 * NULL when no page can be allocated.
 *
 * Should be called with the mm_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;
	struct page *page;

	get_mems_allowed();
	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		mpol_cond_put(pol);
		page = alloc_page_interleave(gfp, 0, nid);
		put_mems_allowed();
		return page;
	}
	zl = policy_zonelist(gfp, pol);
	if (unlikely(mpol_needs_cond_ref(pol))) {
		/*
		 * slow path: ref counted shared policy
		 */
		struct page *page = __alloc_pages_nodemask(gfp, 0,
						zl, policy_nodemask(gfp, pol));
		__mpol_put(pol);
		put_mems_allowed();
		return page;
	}
	/*
	 * fast path: default or task policy
	 */
	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
	put_mems_allowed();
	return page;
}
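/*
 * Illustrative sketch only (not part of the original file): a typical
 * fault-path caller applies the VMA policy simply by routing its
 * allocation through alloc_page_vma(); "vma" and "address" come from the
 * fault, and the mmap_sem is already held for read at that point.
 */
static inline struct page *example_fault_alloc(struct vm_area_struct *vma,
					       unsigned long address)
{
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}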
/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *      %GFP_USER    user allocation,
 *      %GFP_KERNEL  kernel allocation,
 *      %GFP_HIGHMEM highmem allocation,
 *      %GFP_FS      don't call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool, applying the current process'
 * NUMA policy when not in interrupt context.  Returns NULL when no page
 * can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;
	struct page *page;

	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;

	get_mems_allowed();
	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages_nodemask(gfp, order,
				policy_zonelist(gfp, pol),
				policy_nodemask(gfp, pol));
	put_mems_allowed();
	return page;
}
EXPORT_SYMBOL(alloc_pages_current);

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we needn't do rebind work for the current task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	rcu_read_lock();
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		if (new->flags & MPOL_F_REBINDING)
			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
		else
			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
	}
	rcu_read_unlock();
	atomic_set(&new->refcnt, 1);
	return new;
}

/*
 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
 * eliminating the MPOL_F_* flags that require conditional ref and
 * [NOTE!!!] dropping the extra ref.  Not safe to reference *frompol
 * directly after return.  Use the returned value.
 *
 * Allows use of a mempolicy for, e.g., multiple allocations with a single
 * policy lookup, even if the policy needs/has extra ref on lookup.
 * shmem_readahead needs this.
 */
struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
{
	if (!mpol_needs_cond_ref(frompol))
		return frompol;

	*tompol = *frompol;
	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
	__mpol_put(frompol);
	return tompol;
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->mode != b->mode)
		return 0;
	if (a->flags != b->flags)
		return 0;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return 0;

	switch (a->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node &&
			a->flags == b->flags;
	default:
		BUG();
		return 0;
	}
}
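/*
 * Illustrative sketch only (not part of the original file): the fast-path
 * wrapper in <linux/mempolicy.h> only drops into the slow path above when
 * the two pointers are non-NULL and differ, roughly:
 *
 *	static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 *	{
 *		if (a == b)
 *			return 1;
 *		return __mpol_equal(a, b);
 *	}
 */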
*/ /* lookup first element intersecting start-end */ /* Caller holds sp->lock */ static struct sp_node * sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) { struct rb_node *n = sp->root.rb_node; while (n) { struct sp_node *p = rb_entry(n, struct sp_node, nd); if (start >= p->end) n = n->rb_right; else if (end <= p->start) n = n->rb_left; else break; } if (!n) return NULL; for (;;) { struct sp_node *w = NULL; struct rb_node *prev = rb_prev(n); if (!prev) break; w = rb_entry(prev, struct sp_node, nd); if (w->end <= start) break; n = prev; } return rb_entry(n, struct sp_node, nd); } /* Insert a new shared policy into the list. */ /* Caller holds sp->lock */ static void sp_insert(struct shared_policy *sp, struct sp_node *new) { struct rb_node **p = &sp->root.rb_node; struct rb_node *parent = NULL; struct sp_node *nd; while (*p) { parent = *p; nd = rb_entry(parent, struct sp_node, nd); if (new->start < nd->start) p = &(*p)->rb_left; else if (new->end > nd->end) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->nd, parent, p); rb_insert_color(&new->nd, &sp->root); pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, new->policy ? new->policy->mode : 0); } /* Find shared policy intersecting idx */ struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) { struct mempolicy *pol = NULL; struct sp_node *sn; if (!sp->root.rb_node) return NULL; spin_lock(&sp->lock); sn = sp_lookup(sp, idx, idx+1); if (sn) { mpol_get(sn->policy); pol = sn->policy; } spin_unlock(&sp->lock); return pol; } static void sp_delete(struct shared_policy *sp, struct sp_node *n) { pr_debug("deleting %lx-l%lx\n", n->start, n->end); rb_erase(&n->nd, &sp->root); mpol_put(n->policy); kmem_cache_free(sn_cache, n); } static struct sp_node *sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) { struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n) return NULL; n->start = start; n->end = end; mpol_get(pol); pol->flags |= MPOL_F_SHARED; /* for unref */ n->policy = pol; return n; } /* Replace a policy range. */ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) { struct sp_node *n, *new2 = NULL; restart: spin_lock(&sp->lock); n = sp_lookup(sp, start, end); /* Take care of old policies in the same range. */ while (n && n->start < end) { struct rb_node *next = rb_next(&n->nd); if (n->start >= start) { if (n->end <= end) sp_delete(sp, n); else n->start = end; } else { /* Old policy spanning whole new range. */ if (n->end > end) { if (!new2) { spin_unlock(&sp->lock); new2 = sp_alloc(end, n->end, n->policy); if (!new2) return -ENOMEM; goto restart; } n->end = start; sp_insert(sp, new2); new2 = NULL; break; } else n->end = start; } if (!next) break; n = rb_entry(next, struct sp_node, nd); } if (new) sp_insert(sp, new); spin_unlock(&sp->lock); if (new2) { mpol_put(new2->policy); kmem_cache_free(sn_cache, new2); } return 0; } /** * mpol_shared_policy_init - initialize shared policy for inode * @sp: pointer to inode shared policy * @mpol: struct mempolicy to install * * Install non-NULL @mpol in inode's shared policy rb-tree. * On entry, the current task has a reference on a non-NULL @mpol. * This must be released on exit. * This is called at get_inode() calls and we can use GFP_KERNEL. 
*/ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { int ret; sp->root = RB_ROOT; /* empty tree == default mempolicy */ spin_lock_init(&sp->lock); if (mpol) { struct vm_area_struct pvma; struct mempolicy *new; NODEMASK_SCRATCH(scratch); if (!scratch) goto put_mpol; /* contextualize the tmpfs mount point mempolicy */ new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); if (IS_ERR(new)) goto free_scratch; /* no valid nodemask intersection */ task_lock(current); ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); task_unlock(current); if (ret) goto put_new; /* Create pseudo-vma that contains just the policy */ memset(&pvma, 0, sizeof(struct vm_area_struct)); pvma.vm_end = TASK_SIZE; /* policy covers entire file */ mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ put_new: mpol_put(new); /* drop initial ref */ free_scratch: NODEMASK_SCRATCH_FREE(scratch); put_mpol: mpol_put(mpol); /* drop our incoming ref on sb mpol */ } } int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *npol) { int err; struct sp_node *new = NULL; unsigned long sz = vma_pages(vma); pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", vma->vm_pgoff, sz, npol ? npol->mode : -1, npol ? npol->flags : -1, npol ? nodes_addr(npol->v.nodes)[0] : -1); if (npol) { new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); if (!new) return -ENOMEM; } err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); if (err && new) kmem_cache_free(sn_cache, new); return err; } /* Free a backing policy store on inode delete. */ void mpol_free_shared_policy(struct shared_policy *p) { struct sp_node *n; struct rb_node *next; if (!p->root.rb_node) return; spin_lock(&p->lock); next = rb_first(&p->root); while (next) { n = rb_entry(next, struct sp_node, nd); next = rb_next(&n->nd); rb_erase(&n->nd, &p->root); mpol_put(n->policy); kmem_cache_free(sn_cache, n); } spin_unlock(&p->lock); } /* assumes fs == KERNEL_DS */ void __init numa_policy_init(void) { nodemask_t interleave_nodes; unsigned long largest = 0; int nid, prefer = 0; policy_cache = kmem_cache_create("numa_policy", sizeof(struct mempolicy), 0, SLAB_PANIC, NULL); sn_cache = kmem_cache_create("shared_policy_node", sizeof(struct sp_node), 0, SLAB_PANIC, NULL); /* * Set interleaving policy for system init. Interleaving is only * enabled across suitably sized nodes (default is >= 16MB), or * fall back to the largest node if they're all smaller. */ nodes_clear(interleave_nodes); for_each_node_state(nid, N_HIGH_MEMORY) { unsigned long total_pages = node_present_pages(nid); /* Preserve the largest node */ if (largest < total_pages) { largest = total_pages; prefer = nid; } /* Interleave this node? 
*/ if ((total_pages << PAGE_SHIFT) >= (16 << 20)) node_set(nid, interleave_nodes); } /* All too small, use the largest */ if (unlikely(nodes_empty(interleave_nodes))) node_set(prefer, interleave_nodes); if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) printk("numa_policy_init: interleaving failed\n"); } /* Reset policy of current process to default */ void numa_default_policy(void) { do_set_mempolicy(MPOL_DEFAULT, 0, NULL); } /* * Parse and format mempolicy from/to strings */ /* * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag * Used only for mpol_parse_str() and mpol_to_str() */ #define MPOL_LOCAL MPOL_MAX static const char * const policy_modes[] = { [MPOL_DEFAULT] = "default", [MPOL_PREFERRED] = "prefer", [MPOL_BIND] = "bind", [MPOL_INTERLEAVE] = "interleave", [MPOL_LOCAL] = "local" }; #ifdef CONFIG_TMPFS /** * mpol_parse_str - parse string to mempolicy * @str: string containing mempolicy to parse * @mpol: pointer to struct mempolicy pointer, returned on success. * @no_context: flag whether to "contextualize" the mempolicy * * Format of input: * <mode>[=<flags>][:<nodelist>] * * if @no_context is true, save the input nodemask in w.user_nodemask in * the returned mempolicy. This will be used to "clone" the mempolicy in * a specific context [cpuset] at a later time. Used to parse tmpfs mpol * mount option. Note that if 'static' or 'relative' mode flags were * specified, the input nodemask will already have been saved. Saving * it again is redundant, but safe. * * On success, returns 0, else 1 */ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) { struct mempolicy *new = NULL; unsigned short mode; unsigned short uninitialized_var(mode_flags); nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); int err = 1; if (nodelist) { /* NUL-terminate mode or flags string */ *nodelist++ = '\0'; if (nodelist_parse(nodelist, nodes)) goto out; if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY])) goto out; } else nodes_clear(nodes); if (flags) *flags++ = '\0'; /* terminate mode string */ for (mode = 0; mode <= MPOL_LOCAL; mode++) { if (!strcmp(str, policy_modes[mode])) { break; } } if (mode > MPOL_LOCAL) goto out; switch (mode) { case MPOL_PREFERRED: /* * Insist on a nodelist of one node only */ if (nodelist) { char *rest = nodelist; while (isdigit(*rest)) rest++; if (*rest) goto out; } break; case MPOL_INTERLEAVE: /* * Default to online nodes with memory if no nodelist */ if (!nodelist) nodes = node_states[N_HIGH_MEMORY]; break; case MPOL_LOCAL: /* * Don't allow a nodelist; mpol_new() checks flags */ if (nodelist) goto out; mode = MPOL_PREFERRED; break; case MPOL_DEFAULT: /* * Insist on a empty nodelist */ if (!nodelist) err = 0; goto out; case MPOL_BIND: /* * Insist on a nodelist */ if (!nodelist) goto out; } mode_flags = 0; if (flags) { /* * Currently, we only support two mutually exclusive * mode flags. 
*/ if (!strcmp(flags, "static")) mode_flags |= MPOL_F_STATIC_NODES; else if (!strcmp(flags, "relative")) mode_flags |= MPOL_F_RELATIVE_NODES; else goto out; } new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) goto out; if (no_context) { /* save for contextualization */ new->w.user_nodemask = nodes; } else { int ret; NODEMASK_SCRATCH(scratch); if (scratch) { task_lock(current); ret = mpol_set_nodemask(new, &nodes, scratch); task_unlock(current); } else ret = -ENOMEM; NODEMASK_SCRATCH_FREE(scratch); if (ret) { mpol_put(new); goto out; } } err = 0; out: /* Restore string for error message */ if (nodelist) *--nodelist = ':'; if (flags) *--flags = '='; if (!err) *mpol = new; return err; } #endif /* CONFIG_TMPFS */ /** * mpol_to_str - format a mempolicy structure for printing * @buffer: to contain formatted mempolicy string * @maxlen: length of @buffer * @pol: pointer to mempolicy to be formatted * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask * * Convert a mempolicy into a string. * Returns the number of characters in buffer (if positive) * or an error (negative) */ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) { char *p = buffer; int l; nodemask_t nodes; unsigned short mode; unsigned short flags = pol ? pol->flags : 0; /* * Sanity check: room for longest mode, flag and some nodes */ VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16); if (!pol || pol == &default_policy) mode = MPOL_DEFAULT; else mode = pol->mode; switch (mode) { case MPOL_DEFAULT: nodes_clear(nodes); break; case MPOL_PREFERRED: nodes_clear(nodes); if (flags & MPOL_F_LOCAL) mode = MPOL_LOCAL; /* pseudo-policy */ else node_set(pol->v.preferred_node, nodes); break; case MPOL_BIND: /* Fall through */ case MPOL_INTERLEAVE: if (no_context) nodes = pol->w.user_nodemask; else nodes = pol->v.nodes; break; default: BUG(); } l = strlen(policy_modes[mode]); if (buffer + maxlen < p + l + 1) return -ENOSPC; strcpy(p, policy_modes[mode]); p += l; if (flags & MPOL_MODE_FLAGS) { if (buffer + maxlen < p + 2) return -ENOSPC; *p++ = '='; /* * Currently, the only defined flags are mutually exclusive */ if (flags & MPOL_F_STATIC_NODES) p += snprintf(p, buffer + maxlen - p, "static"); else if (flags & MPOL_F_RELATIVE_NODES) p += snprintf(p, buffer + maxlen - p, "relative"); } if (!nodes_empty(nodes)) { if (buffer + maxlen < p + 2) return -ENOSPC; *p++ = ':'; p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); } return p - buffer; } struct numa_maps { unsigned long pages; unsigned long anon; unsigned long active; unsigned long writeback; unsigned long mapcount_max; unsigned long dirty; unsigned long swapcache; unsigned long node[MAX_NUMNODES]; }; static void gather_stats(struct page *page, void *private, int pte_dirty) { struct numa_maps *md = private; int count = page_mapcount(page); md->pages++; if (pte_dirty || PageDirty(page)) md->dirty++; if (PageSwapCache(page)) md->swapcache++; if (PageActive(page) || PageUnevictable(page)) md->active++; if (PageWriteback(page)) md->writeback++; if (PageAnon(page)) md->anon++; if (count > md->mapcount_max) md->mapcount_max = count; md->node[page_to_nid(page)]++; } #ifdef CONFIG_HUGETLB_PAGE static void check_huge_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct numa_maps *md) { unsigned long addr; struct page *page; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); for (addr = start; addr < end; addr += sz) { pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & 
huge_page_mask(h)); pte_t pte; if (!ptep) continue; pte = *ptep; if (pte_none(pte)) continue; page = pte_page(pte); if (!page) continue; gather_stats(page, md, pte_dirty(*ptep)); } } #else static inline void check_huge_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct numa_maps *md) { } #endif /* * Display pages allocated per node and memory policy via /proc. */ int show_numa_map(struct seq_file *m, void *v) { struct proc_maps_private *priv = m->private; struct vm_area_struct *vma = v; struct numa_maps *md; struct file *file = vma->vm_file; struct mm_struct *mm = vma->vm_mm; struct mempolicy *pol; int n; char buffer[50]; if (!mm) return 0; md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL); if (!md) return 0; pol = get_vma_policy(priv->task, vma, vma->vm_start); mpol_to_str(buffer, sizeof(buffer), pol, 0); mpol_cond_put(pol); seq_printf(m, "%08lx %s", vma->vm_start, buffer); if (file) { seq_printf(m, " file="); seq_path(m, &file->f_path, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_printf(m, " heap"); } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { seq_printf(m, " stack"); } if (is_vm_hugetlb_page(vma)) { check_huge_range(vma, vma->vm_start, vma->vm_end, md); seq_printf(m, " huge"); } else { check_pgd_range(vma, vma->vm_start, vma->vm_end, &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md); } if (!md->pages) goto out; if (md->anon) seq_printf(m," anon=%lu",md->anon); if (md->dirty) seq_printf(m," dirty=%lu",md->dirty); if (md->pages != md->anon && md->pages != md->dirty) seq_printf(m, " mapped=%lu", md->pages); if (md->mapcount_max > 1) seq_printf(m, " mapmax=%lu", md->mapcount_max); if (md->swapcache) seq_printf(m," swapcache=%lu", md->swapcache); if (md->active < md->pages && !is_vm_hugetlb_page(vma)) seq_printf(m," active=%lu", md->active); if (md->writeback) seq_printf(m," writeback=%lu", md->writeback); for_each_node_state(n, N_HIGH_MEMORY) if (md->node[n]) seq_printf(m, " N%d=%lu", n, md->node[n]); out: seq_putc(m, '\n'); kfree(md); if (m->count < m->size) m->version = (vma != priv->tail_vma) ? vma->vm_start : 0; return 0; }
funky81/galaxy-2636
mm/mempolicy.c
C
gpl-2.0
68,789
24.734755
90
0.65714
false
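The mempolicy.c record above ends with mpol_parse_str()/mpol_to_str(), which document the policy string format <mode>[=<flags>][:<nodelist>], and with the allocation paths that honor MPOL_INTERLEAVE. As a hedged illustration of the user-space side of do_set_mempolicy() — not part of the record itself — here is a minimal sketch using the set_mempolicy(2) wrapper from libnuma's <numaif.h>. It assumes a NUMA kernel with libnuma installed (link with -lnuma); the node numbers are hypothetical and presume a two-node machine.

/* Hedged sketch: interleave this task's future allocations across nodes 0 and 1.
 * Assumes libnuma's <numaif.h>; node IDs here are hypothetical. */
#include <numaif.h>    /* set_mempolicy(), MPOL_INTERLEAVE */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    unsigned long nodemask = 0;

    nodemask |= 1UL << 0;   /* node 0 */
    nodemask |= 1UL << 1;   /* node 1 */

    /* maxnode bounds the bits the kernel reads from the mask; the kernel
     * side is do_set_mempolicy() -> mpol_new() as seen in the record above. */
    if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8) != 0) {
        perror("set_mempolicy");
        return EXIT_FAILURE;
    }

    /* Pages faulted in from now on are spread round-robin, since
     * alloc_pages_current() routes MPOL_INTERLEAVE through
     * alloc_page_interleave(). */
    char *buf = malloc(4 << 20);
    if (buf)
        memset(buf, 0, 4 << 20);   /* touch the pages so they are allocated */
    free(buf);
    return EXIT_SUCCESS;
}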
/* * PROJECT: ReactOS Framebuffer Display Driver * LICENSE: Microsoft NT4 DDK Sample Code License * FILE: boot/drivers/video/displays/framebuf/enable.c * PURPOSE: Main Driver Initialization and PDEV Enabling * PROGRAMMERS: Copyright (c) 1992-1995 Microsoft Corporation * ReactOS Portable Systems Group */ #include "driver.h" // The driver function table with all function index/address pairs static DRVFN gadrvfn[] = { { INDEX_DrvEnablePDEV, (PFN) DrvEnablePDEV }, { INDEX_DrvCompletePDEV, (PFN) DrvCompletePDEV }, { INDEX_DrvDisablePDEV, (PFN) DrvDisablePDEV }, { INDEX_DrvEnableSurface, (PFN) DrvEnableSurface }, { INDEX_DrvDisableSurface, (PFN) DrvDisableSurface }, { INDEX_DrvAssertMode, (PFN) DrvAssertMode }, { INDEX_DrvSetPalette, (PFN) DrvSetPalette }, { INDEX_DrvMovePointer, (PFN) DrvMovePointer }, { INDEX_DrvSetPointerShape, (PFN) DrvSetPointerShape }, { INDEX_DrvGetModes, (PFN) DrvGetModes } }; // Define the functions you want to hook for 8/16/24/32 pel formats #define HOOKS_BMF8BPP 0 #define HOOKS_BMF16BPP 0 #define HOOKS_BMF24BPP 0 #define HOOKS_BMF32BPP 0 /******************************Public*Routine******************************\ * DrvEnableDriver * * Enables the driver by retrieving the drivers function table and version. * \**************************************************************************/ BOOL NTAPI DrvEnableDriver( ULONG iEngineVersion, ULONG cj, PDRVENABLEDATA pded) { // Engine Version is passed down so future drivers can support previous // engine versions. A next generation driver can support both the old // and new engine conventions if told what version of engine it is // working with. For the first version the driver does nothing with it. // eVb: 1.1 [DDK Change] - Remove bogus statement //iEngineVersion; // eVb: 1.1 [END] // Fill in as much as we can. if (cj >= sizeof(DRVENABLEDATA)) pded->pdrvfn = gadrvfn; if (cj >= (sizeof(ULONG) * 2)) pded->c = sizeof(gadrvfn) / sizeof(DRVFN); // DDI version this driver was targeted for is passed back to engine. // Future graphic's engine may break calls down to old driver format. if (cj >= sizeof(ULONG)) // eVb: 1.2 [DDK Change] - Use DDI_DRIVER_VERSION_NT4 instead of DDI_DRIVER_VERSION pded->iDriverVersion = DDI_DRIVER_VERSION_NT4; // eVb: 1.2 [END] return(TRUE); } /******************************Public*Routine******************************\ * DrvEnablePDEV * * DDI function, Enables the Physical Device. * * Return Value: device handle to pdev. * \**************************************************************************/ DHPDEV NTAPI DrvEnablePDEV( DEVMODEW *pDevmode, // Pointer to DEVMODE PWSTR pwszLogAddress, // Logical address ULONG cPatterns, // number of patterns HSURF *ahsurfPatterns, // return standard patterns ULONG cjGdiInfo, // Length of memory pointed to by pGdiInfo ULONG *pGdiInfo, // Pointer to GdiInfo structure ULONG cjDevInfo, // Length of following PDEVINFO structure DEVINFO *pDevInfo, // physical device information structure HDEV hdev, // HDEV, used for callbacks PWSTR pwszDeviceName, // DeviceName - not used HANDLE hDriver) // Handle to base driver { GDIINFO GdiInfo; DEVINFO DevInfo; PPDEV ppdev = (PPDEV) NULL; UNREFERENCED_PARAMETER(pwszLogAddress); UNREFERENCED_PARAMETER(pwszDeviceName); // Allocate a physical device structure. ppdev = (PPDEV) EngAllocMem(0, sizeof(PDEV), ALLOC_TAG); if (ppdev == (PPDEV) NULL) { RIP("DISP DrvEnablePDEV failed EngAllocMem\n"); return((DHPDEV) 0); } memset(ppdev, 0, sizeof(PDEV)); // Save the screen handle in the PDEV. 
ppdev->hDriver = hDriver; // Get the current screen mode information. Set up device caps and devinfo. if (!bInitPDEV(ppdev, pDevmode, &GdiInfo, &DevInfo)) { DISPDBG((0,"DISP DrvEnablePDEV failed\n")); goto error_free; } // Initialize the cursor information. if (!bInitPointer(ppdev, &DevInfo)) { // Not a fatal error... DISPDBG((0, "DrvEnablePDEV failed bInitPointer\n")); } // Initialize palette information. if (!bInitPaletteInfo(ppdev, &DevInfo)) { RIP("DrvEnablePDEV failed bInitPalette\n"); goto error_free; } // Copy the devinfo into the engine buffer. memcpy(pDevInfo, &DevInfo, min(sizeof(DEVINFO), cjDevInfo)); // Set the pdevCaps with GdiInfo we have prepared to the list of caps for this // pdev. memcpy(pGdiInfo, &GdiInfo, min(cjGdiInfo, sizeof(GDIINFO))); return((DHPDEV) ppdev); // Error case for failure. error_free: EngFreeMem(ppdev); return((DHPDEV) 0); } /******************************Public*Routine******************************\ * DrvCompletePDEV * * Store the HPDEV, the engines handle for this PDEV, in the DHPDEV. * \**************************************************************************/ VOID NTAPI DrvCompletePDEV( DHPDEV dhpdev, HDEV hdev) { ((PPDEV) dhpdev)->hdevEng = hdev; } /******************************Public*Routine******************************\ * DrvDisablePDEV * * Release the resources allocated in DrvEnablePDEV. If a surface has been * enabled DrvDisableSurface will have already been called. * \**************************************************************************/ VOID NTAPI DrvDisablePDEV( DHPDEV dhpdev) { vDisablePalette((PPDEV) dhpdev); EngFreeMem(dhpdev); } /******************************Public*Routine******************************\ * DrvEnableSurface * * Enable the surface for the device. Hook the calls this driver supports. * * Return: Handle to the surface if successful, 0 for failure. * \**************************************************************************/ HSURF NTAPI DrvEnableSurface( DHPDEV dhpdev) { PPDEV ppdev; HSURF hsurf; SIZEL sizl; ULONG ulBitmapType; FLONG flHooks; // Create engine bitmap around frame buffer. 
ppdev = (PPDEV) dhpdev; if (!bInitSURF(ppdev, TRUE)) { RIP("DISP DrvEnableSurface failed bInitSURF\n"); return(FALSE); } sizl.cx = ppdev->cxScreen; sizl.cy = ppdev->cyScreen; if (ppdev->ulBitCount == 8) { if (!bInit256ColorPalette(ppdev)) { RIP("DISP DrvEnableSurface failed to init the 8bpp palette\n"); return(FALSE); } ulBitmapType = BMF_8BPP; flHooks = HOOKS_BMF8BPP; } else if (ppdev->ulBitCount == 16) { ulBitmapType = BMF_16BPP; flHooks = HOOKS_BMF16BPP; } else if (ppdev->ulBitCount == 24) { ulBitmapType = BMF_24BPP; flHooks = HOOKS_BMF24BPP; } else { ulBitmapType = BMF_32BPP; flHooks = HOOKS_BMF32BPP; } // eVb: 1.3 [DDK Change] - Support new VGA Miniport behavior w.r.t updated framebuffer remapping ppdev->flHooks = flHooks; // eVb: 1.3 [END] // eVb: 1.4 [DDK Change] - Use EngCreateDeviceSurface instead of EngCreateBitmap hsurf = (HSURF)EngCreateDeviceSurface((DHSURF)ppdev, sizl, ulBitmapType); if (hsurf == (HSURF) 0) { RIP("DISP DrvEnableSurface failed EngCreateDeviceSurface\n"); return(FALSE); } // eVb: 1.4 [END] // eVb: 1.5 [DDK Change] - Use EngModifySurface instead of EngAssociateSurface if ( !EngModifySurface(hsurf, ppdev->hdevEng, ppdev->flHooks | HOOK_SYNCHRONIZE, MS_NOTSYSTEMMEMORY, (DHSURF)ppdev, ppdev->pjScreen, ppdev->lDeltaScreen, NULL)) { RIP("DISP DrvEnableSurface failed EngModifySurface\n"); return(FALSE); } // eVb: 1.5 [END] ppdev->hsurfEng = hsurf; return(hsurf); } /******************************Public*Routine******************************\ * DrvDisableSurface * * Free resources allocated by DrvEnableSurface. Release the surface. * \**************************************************************************/ VOID NTAPI DrvDisableSurface( DHPDEV dhpdev) { EngDeleteSurface(((PPDEV) dhpdev)->hsurfEng); vDisableSURF((PPDEV) dhpdev); ((PPDEV) dhpdev)->hsurfEng = (HSURF) 0; } /******************************Public*Routine******************************\ * DrvAssertMode * * This asks the device to reset itself to the mode of the pdev passed in. * \**************************************************************************/ BOOL NTAPI DrvAssertMode( DHPDEV dhpdev, BOOL bEnable) { PPDEV ppdev = (PPDEV) dhpdev; ULONG ulReturn; PBYTE pjScreen; if (bEnable) { // // The screen must be reenabled, reinitialize the device to clean state. // // eVb: 1.6 [DDK Change] - Support new VGA Miniport behavior w.r.t updated framebuffer remapping pjScreen = ppdev->pjScreen; if (!bInitSURF(ppdev, FALSE)) { DISPDBG((0, "DISP DrvAssertMode failed bInitSURF\n")); return (FALSE); } if (pjScreen != ppdev->pjScreen) { if ( !EngModifySurface(ppdev->hsurfEng, ppdev->hdevEng, ppdev->flHooks | HOOK_SYNCHRONIZE, MS_NOTSYSTEMMEMORY, (DHSURF)ppdev, ppdev->pjScreen, ppdev->lDeltaScreen, NULL)) { DISPDBG((0, "DISP DrvAssertMode failed EngModifySurface\n")); return (FALSE); } } // eVb: 1.6 [END] return (TRUE); } else { // // We must give up the display. // Call the kernel driver to reset the device to a known state. // if (EngDeviceIoControl(ppdev->hDriver, IOCTL_VIDEO_RESET_DEVICE, NULL, 0, NULL, 0, &ulReturn)) { RIP("DISP DrvAssertMode failed IOCTL"); return FALSE; } else { return TRUE; } } } /******************************Public*Routine******************************\ * DrvGetModes * * Returns the list of available modes for the device. 
* \**************************************************************************/ ULONG NTAPI DrvGetModes( HANDLE hDriver, ULONG cjSize, DEVMODEW *pdm) { DWORD cModes; DWORD cbOutputSize; PVIDEO_MODE_INFORMATION pVideoModeInformation, pVideoTemp; DWORD cOutputModes = cjSize / (sizeof(DEVMODEW) + DRIVER_EXTRA_SIZE); DWORD cbModeSize; DISPDBG((3, "DrvGetModes\n")); cModes = getAvailableModes(hDriver, (PVIDEO_MODE_INFORMATION *) &pVideoModeInformation, &cbModeSize); if (cModes == 0) { DISPDBG((0, "DrvGetModes failed to get mode information")); return 0; } if (pdm == NULL) { cbOutputSize = cModes * (sizeof(DEVMODEW) + DRIVER_EXTRA_SIZE); } else { // // Now copy the information for the supported modes back into the output // buffer // cbOutputSize = 0; pVideoTemp = pVideoModeInformation; do { if (pVideoTemp->Length != 0) { if (cOutputModes == 0) { break; } // // Zero the entire structure to start off with. // memset(pdm, 0, sizeof(DEVMODEW)); // // Set the name of the device to the name of the DLL. // memcpy(pdm->dmDeviceName, DLL_NAME, sizeof(DLL_NAME)); pdm->dmSpecVersion = DM_SPECVERSION; pdm->dmDriverVersion = DM_SPECVERSION; pdm->dmSize = sizeof(DEVMODEW); pdm->dmDriverExtra = DRIVER_EXTRA_SIZE; pdm->dmBitsPerPel = pVideoTemp->NumberOfPlanes * pVideoTemp->BitsPerPlane; pdm->dmPelsWidth = pVideoTemp->VisScreenWidth; pdm->dmPelsHeight = pVideoTemp->VisScreenHeight; pdm->dmDisplayFrequency = pVideoTemp->Frequency; pdm->dmDisplayFlags = 0; pdm->dmFields = DM_BITSPERPEL | DM_PELSWIDTH | DM_PELSHEIGHT | DM_DISPLAYFREQUENCY | DM_DISPLAYFLAGS ; // // Go to the next DEVMODE entry in the buffer. // cOutputModes--; pdm = (LPDEVMODEW) ( ((ULONG)pdm) + sizeof(DEVMODEW) + DRIVER_EXTRA_SIZE); cbOutputSize += (sizeof(DEVMODEW) + DRIVER_EXTRA_SIZE); } pVideoTemp = (PVIDEO_MODE_INFORMATION) (((PUCHAR)pVideoTemp) + cbModeSize); } while (--cModes); } EngFreeMem(pVideoModeInformation); return cbOutputSize; }
rickerliang/reactos-mirror2
win32ss/drivers/displays/framebuf_new/enable.c
C
gpl-2.0
13,951
28.308824
96
0.515519
false
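DrvGetModes() in the record above implements the usual two-pass DDI contract: called with pdm == NULL it reports the buffer size needed, then called again with a real buffer it fills one DEVMODEW (plus DRIVER_EXTRA_SIZE bytes) per mode. A hedged caller-side sketch of that protocol follows; the function name is invented, it assumes the driver's own driver.h context, and in reality the GDI engine — not driver code — performs these calls.

/* Hypothetical illustration of the size-then-fill protocol above; the real
 * caller is the GDI engine, and hDriver is the handle DrvEnablePDEV received. */
VOID ExampleEnumerateModes(HANDLE hDriver)
{
    ULONG cjNeeded = DrvGetModes(hDriver, 0, NULL);        /* pass 1: size */
    DEVMODEW *pdmList;

    if (cjNeeded == 0)
        return;                                            /* no modes */

    pdmList = (DEVMODEW *) EngAllocMem(0, cjNeeded, ALLOC_TAG);
    if (pdmList == NULL)
        return;

    DrvGetModes(hDriver, cjNeeded, pdmList);               /* pass 2: fill */

    /* Entries are packed every sizeof(DEVMODEW) + DRIVER_EXTRA_SIZE bytes,
     * exactly as the fill loop in DrvGetModes() advances its pointer. */

    EngFreeMem(pdmList);
}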
<?php
/**
 * @ingroup Maintenance
 */
if ( getenv( 'MW_INSTALL_PATH' ) ) {
	$IP = getenv( 'MW_INSTALL_PATH' );
} else {
	$IP = dirname(__FILE__).'/../../..';
}
require_once( "$IP/maintenance/Maintenance.php" );

class PruneFRIncludeData extends Maintenance {
	public function __construct() {
		parent::__construct();
		$this->mDescription = "This script clears template/image data for reviewed versions " .
			"that are 1+ month old and have 50+ newer versions in page. By default, " .
			"it will just output how many rows can be deleted. Use the 'prune' option " .
			"to actually delete them.";
		$this->addOption( 'prune', 'Actually do a live run', false );
		$this->addOption( 'start', 'The ID of the starting rev', false, true );
		$this->setBatchSize( 500 );
	}

	public function execute() {
		$start = $this->getOption( 'start' );
		$prune = $this->getOption( 'prune' );
		$this->prune_flaggedrevs( $start, $prune );
	}

	protected function prune_flaggedrevs( $start = null, $prune = false ) {
		if ( $prune ) {
			$this->output( "Pruning old flagged revision inclusion data...\n" );
		} else {
			$this->output( "Running dry-run of old flagged revision inclusion data pruning...\n" );
		}

		$db = wfGetDB( DB_MASTER );

		if ( $start === null ) {
			$start = $db->selectField( 'flaggedpages', 'MIN(fp_page_id)', false, __METHOD__ );
		}
		$end = $db->selectField( 'flaggedpages', 'MAX(fp_page_id)', false, __METHOD__ );
		if ( is_null( $start ) || is_null( $end ) ) {
			$this->output( "...flaggedpages table seems to be empty.\n" );
			return;
		}
		$end += $this->mBatchSize - 1; # Do remaining chunk
		$blockStart = $start;
		$blockEnd = $start + $this->mBatchSize - 1;

		$tDeleted = $fDeleted = 0; // tallies

		$newerRevs = 50;
		$cutoff = $db->timestamp( time() - 3600 );
		while ( $blockEnd <= $end ) {
			$this->output( "...doing fp_page_id from $blockStart to $blockEnd\n" );
			$cond = "fp_page_id BETWEEN $blockStart AND $blockEnd";
			$res = $db->select( 'flaggedpages', 'fp_page_id', $cond, __METHOD__ );
			$batchCount = 0; // rows deleted without slave lag check
			// Go through a chunk of flagged pages...
			foreach ( $res as $row ) {
				// Get the newest X ($newerRevs) flagged revs for this page
				$sres = $db->select( 'flaggedrevs', 'fr_rev_id',
					array( 'fr_page_id' => $row->fp_page_id ),
					__METHOD__,
					array( 'ORDER BY' => 'fr_rev_id DESC', 'LIMIT' => $newerRevs )
				);
				// See if there are older revs that can be pruned...
				if ( $db->numRows( $sres ) == $newerRevs ) {
					// Get the oldest of the top X revisions
					$sres->seek( $newerRevs - 1 );
					$lrow = $db->fetchObject( $sres );
					$oldestId = (int)$lrow->fr_rev_id; // oldest revision Id
					// Get revs not in the top X that were not reviewed recently
					$db->freeResult( $sres );
					$sres = $db->select( 'flaggedrevs', 'fr_rev_id',
						array( 'fr_page_id' => $row->fp_page_id,
							'fr_rev_id < '.$oldestId, // not in the newest X
							'fr_timestamp < '.$db->addQuotes( $cutoff ) // not reviewed recently
						),
						__METHOD__,
						// Sanity check (start with the oldest)
						array( 'ORDER BY' => 'fr_rev_id ASC', 'LIMIT' => 5000 )
					);
					// Build an array of these rev Ids
					$revsClearIncludes = array();
					foreach ( $sres as $srow ) {
						$revsClearIncludes[] = $srow->fr_rev_id;
					}
					$batchCount += count( $revsClearIncludes ); // # of revs to prune
					$db->freeResult( $sres );
					// Write run: clear the include data for these old revs
					if ( $prune ) {
						$db->begin();
						$db->delete( 'flaggedtemplates',
							array('ft_rev_id' => $revsClearIncludes), __METHOD__ );
						$tDeleted += $db->affectedRows();
						$db->delete( 'flaggedimages',
							array('fi_rev_id' => $revsClearIncludes), __METHOD__ );
						$fDeleted += $db->affectedRows();
						$db->commit();
					// Dry run: say how many includes rows would have been cleared
					} elseif ( count( $revsClearIncludes ) ) {
						$tDeleted += $db->selectField( 'flaggedtemplates', 'COUNT(*)',
							array('ft_rev_id' => $revsClearIncludes), __METHOD__ );
						$fDeleted += $db->selectField( 'flaggedimages', 'COUNT(*)',
							array('fi_rev_id' => $revsClearIncludes), __METHOD__ );
					}
					// Check slave lag...
					if ( $batchCount >= $this->mBatchSize ) {
						$batchCount = 0;
						wfWaitForSlaves( 5 );
					}
				} else {
					$db->freeResult( $sres );
				}
			}
			$db->freeResult( $res );
			$blockStart += $this->mBatchSize;
			$blockEnd += $this->mBatchSize;
		}
		if ( $prune ) {
			$this->output( "Flagged revision inclusion pruning complete ...\n" );
		} else {
			$this->output( "Flagged revision inclusion prune test complete ...\n" );
		}
		$this->output( "Rows: \tflaggedtemplates:$tDeleted\t\tflaggedimages:$fDeleted\n" );
	}
}

$maintClass = "PruneFRIncludeData";
require_once( RUN_MAINTENANCE_IF_MAIN );
SuriyaaKudoIsc/wikia-app-test
extensions/FlaggedRevs/maintenance/pruneRevData.php
PHP
gpl-2.0
4,969
32.802721
90
0.583015
false
<?php
/**
 * Frameworks Module Admin
 *
 * You may not change or alter any portion of this comment or credits
 * of supporting developers from this source code or any supporting source code
 * which is considered copyrighted (c) material of the original comment or credit authors.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @copyright Grégory Mage (Aka Mage)
 * @copyright (c) 2000-2016 XOOPS Project (www.xoops.org)
 * @license   GNU GPL 2 (https://www.gnu.org/licenses/gpl-2.0.html)
 * @author    Grégory Mage (Aka Mage)
 */

define('XOOPS_FRAMEWORKS_MODULEADMIN_VERSION', '1.2');
define('XOOPS_FRAMEWORKS_MODULEADMIN_RELEASEDATE', '2014/02/25');
mambax7/XoopsCore25
htdocs/Frameworks/moduleclasses/moduleadmin/xoops_version.php
PHP
gpl-2.0
811
41.578947
90
0.731768
false
////////////////////////////////////////////////////////////////////// // // FILE: naglatex.h // Translationtable for NAG values to Tex. // // Part of: Scid (Shane's Chess Information Database) // Version: 3.6 // // Notice: Copyright (c) 2000-2003 Shane Hudson. All rights reserved. // // Author: Shane Hudson (sgh@users.sourceforge.net) // Updated: W. van den Akker // ////////////////////////////////////////////////////////////////////// const char * evalNagsLatex [] = { "", // one for the offset "!", // $1 "?", // $2 "!!", // $3 "??", // $4 "!?", // $5 "?!", // $6 "forced", // $7 "{\\onlymove}", // $8 "worst", // $9 "{\\equal}", // $10 "", // $11 "{$\\leftrightarrows$}", // $12 "{\\unclear}", // $13 "{\\wbetter}", // $14 "{\\bbetter}", // $15 "{\\wupperhand}", // $16 "{\\bupperhand}", // $17 "{\\wdecisive}", // $18 "{\\bdecisive}", // $19 "", // $20 "", // $21 "{\\zugzwang}", // $22 "{\\zugzwang}", // $23 "", // $24 "", // $25 "{\\moreroom}", // $26 "", // $27 "", // $28 "", // $29 "{$\\circlearrowleft$}", // $30 "{$\\circlearrowright$}", // $31 "", // $32 "", // $33 "", // $34 "{\\devadvantage}", // $35 "{\\withinit}", // $36 "", // $37 "", // $38 "", // $39 "{\\withattack}", // $40 "", // $41 "", // $42 "", // $43 "{\\compensation}", // $44 "", // $45 "", // $46 "", // $47 "{$$\\boxplus$$}", // $48 "{$$\\boxplus$$}", // $49 "{\\centre}", // $50 "{\\centre}", // $51 "", // $52 "", // $53 "", // $54 "", // $55 "", // $56 "", // $57 "{\\kside}", // $58 "", // $59 "", // $60 "", // $61 "{\\qside}", // $62 "", // $63 "", // $64 "", // $65 "", // $66 "", // $67 "", // $68 "", // $69 "", // $70 "", // $71 "", // $72 "", // $73 "", // $74 "", // $75 "", // $76 "", // $77 "", // $78 "", // $79 "", // $80 "", // $81 "", // $82 "", // $83 "", // $84 "", // $85 "", // $86 "", // $87 "", // $88 "", // $89 "", // $90 "", // $91 "", // $92 "", // $93 "", // $94 "", // $95 "", // $96 "", // $97 "", // $98 "", // $99 "", // $100 "", // $101 "", // $102 "", // $103 "", // $104 "", // $105 "", // $106 "", // $107 "", // $108 "", // $109 "", // $110 "", // $111 "", // $112 "", // $113 "", // $114 "", // $115 "", // $116 "", // $117 "", // $118 "", // $119 "", // $120 "", // $121 "", // $122 "", // $123 "", // $124 "", // $125 "", // $126 "", // $127 "", // $128 "", // $129 "", // $130 "", // $131 "{\\counterplay}", // $132 "", // $133 "", // $134 "", // $135 "{\\timelimt}", // $136 "", // $137 "", // $138 "", // $139 "{\\withidea}", // $140 "", // $141 "{\\betteris}", // $142 "", // $143 "{\\various}", // $144 "{\\comment}", // $145 "{\\novelty}", // $146 "{\\weakpt}", // $147 "{\\ending}", // $148 "{\\file}", // $149 "{\\diagonal}", // $150 "{\\bishoppair}", // $151 "", // $152 "{\\opposbishops}", // $153 "{\\samebishops}", // $154 "", // $155 "", // $156 "", // $157 "", // $158 "", // $159 "", // $160 "", // $161 "", // $162 "", // $163 "", // $164 "", // $165 "", // $166 "", // $167 "", // $168 "", // $169 "", // $170 "", // $171 "", // $172 "", // $173 "", // $174 "", // $175 "", // $176 "", // $177 "", // $178 "", // $179 "", // $180 "", // $181 "", // $182 "", // $183 "", // $184 "", // $185 "", // $186 "", // $187 "", // $188 "", // $189 "{\\etc}", // $190 "{\\doublepawns}", // $191 "{\\seppawns}", // $192 "{\\unitedpawns}", // $193 "", // $194 "", // $195 "", // $196 "", // $197 "", // $198 "", // $199 "", // $200 "", // $200 "", // $201 "", // $202 "", // $203 "", // $204 "", // $205 "", // $206 "", // $207 "", // $208 "", // $209 "{\\see}", // $210 "{\\mate}", // $211 "{\\passedpawn}", // $212 
"{\\morepawns}", // $213 "{\\with}", // $214 "{\\without}", // $215 "", // $216 "", // $217 "", // $218 "", // $219 "", // $220 "", // $221 "", // $222 "", // $223 "" // $224 };
Raimondi/scid
src/naglatex.h
C
gpl-2.0
4,289
16.506122
75
0.285148
false
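evalNagsLatex[] in the record above is a plain lookup table indexed by PGN NAG number ($1 through $224), with index 0 reserved as an offset placeholder so NAG numbers map directly to array indices. A small hedged sketch of how such a table is typically consumed when emitting LaTeX — the bounds check and function name are illustrative, not taken from Scid itself, and the sketch assumes naglatex.h is on the include path:

#include <stdio.h>
#include "naglatex.h"   /* defines evalNagsLatex[] as in the record above */

/* Illustrative only: translate a NAG number to its LaTeX form using the
 * table above; returns "" for index 0 and out-of-range NAGs. */
static const char *nagToLatex(unsigned nag)
{
    const unsigned maxNag = 224;   /* the table's comments run $1 .. $224 */
    if (nag == 0 || nag > maxNag)
        return "";
    return evalNagsLatex[nag];
}

int main(void)
{
    printf("$1  -> %s\n", nagToLatex(1));    /* "!"             */
    printf("$14 -> %s\n", nagToLatex(14));   /* "{\\wbetter}"   */
    printf("$22 -> %s\n", nagToLatex(22));   /* "{\\zugzwang}"  */
    return 0;
}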
// { dg-options "-std=gnu++11" }
// { dg-do compile }

// Copyright (C) 2009-2014 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3.  If not see
// <http://www.gnu.org/licenses/>.

#include <ext/rope>

// libstdc++/40299
void test01()
{
  __gnu_cxx::crope asdf;
}
mageec/mageec-gcc
libstdc++-v3/testsuite/ext/rope/40299.cc
C++
gpl-2.0
906
32.555556
74
0.714128
false
/* * Copyright (C) 2008-2011 The Paparazzi Team * * This file is part of paparazzi. * * paparazzi is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * paparazzi is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with paparazzi; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Initial author: C. De Wagter */ /** * @file modules/gps/gps_ubx_ucenter.c * @brief Configure Ublox GPS * */ #include "gps_ubx_ucenter.h" #include "subsystems/gps/gps_ubx.h" #include "subsystems/datalink/downlink.h" #include <stdio.h> #if PRINT_DEBUG_GPS_UBX_UCENTER #define DEBUG_PRINT(...) printf(__VA_ARGS__) #else #define DEBUG_PRINT(...) {} #endif ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// // // UCENTER: init, periodic and event static bool_t gps_ubx_ucenter_autobaud(uint8_t nr); static bool_t gps_ubx_ucenter_configure(uint8_t nr); #define GPS_UBX_UCENTER_STATUS_STOPPED 0 #define GPS_UBX_UCENTER_STATUS_AUTOBAUD 1 #define GPS_UBX_UCENTER_STATUS_CONFIG 2 #define GPS_UBX_UCENTER_REPLY_NONE 0 #define GPS_UBX_UCENTER_REPLY_ACK 1 #define GPS_UBX_UCENTER_REPLY_NACK 2 #define GPS_UBX_UCENTER_REPLY_VERSION 3 #define GPS_UBX_UCENTER_REPLY_CFG_PRT 4 // All U-Center data struct gps_ubx_ucenter_struct gps_ubx_ucenter; ///////////////////////////// // Init Function void gps_ubx_ucenter_init(void) { // Start UCenter gps_ubx_ucenter.status = GPS_UBX_UCENTER_STATUS_AUTOBAUD; gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_NONE; gps_ubx_ucenter.cnt = 0; gps_ubx_ucenter.baud_init = 0; gps_ubx_ucenter.baud_run = 0; gps_ubx_ucenter.sw_ver_h = 0; gps_ubx_ucenter.sw_ver_l = 0; gps_ubx_ucenter.hw_ver_h = 0; gps_ubx_ucenter.hw_ver_l = 0; for (int i=0; i<GPS_UBX_UCENTER_CONFIG_STEPS; i++) { gps_ubx_ucenter.replies[i] = 0; } } ///////////////////////////// // Periodic Function // -time-based configuration void gps_ubx_ucenter_periodic(void) { switch (gps_ubx_ucenter.status) { // Save processing time inflight case GPS_UBX_UCENTER_STATUS_STOPPED: return; // Automatically Determine Current Baudrate case GPS_UBX_UCENTER_STATUS_AUTOBAUD: if (gps_ubx_ucenter_autobaud(gps_ubx_ucenter.cnt) == FALSE) { gps_ubx_ucenter.status = GPS_UBX_UCENTER_STATUS_CONFIG; gps_ubx_ucenter.cnt = 0; #if PRINT_DEBUG_GPS_UBX_UCENTER if (gps_ubx_ucenter.baud_init > 0) { DEBUG_PRINT("Initial ublox baudrate found: %u\n", gps_ubx_ucenter.baud_init); } else { DEBUG_PRINT("WARNING: Unable to determine the ublox baudrate. Autoconfiguration is unlikely to work.\n"); } #endif } else { gps_ubx_ucenter.cnt++; } break; // Send Configuration case GPS_UBX_UCENTER_STATUS_CONFIG: if (gps_ubx_ucenter_configure(gps_ubx_ucenter.cnt) == FALSE) { gps_ubx_ucenter.status = GPS_UBX_UCENTER_STATUS_STOPPED; gps_ubx_ucenter.cnt = 0; } else { gps_ubx_ucenter.cnt++; } break; default: // stop this module now... 
// todo break; } } ///////////////////////////// // Event Function // -fetch replies: ACK and VERSION info void gps_ubx_ucenter_event(void) { // Save processing time inflight if (gps_ubx_ucenter.status == GPS_UBX_UCENTER_STATUS_STOPPED) return; // Read Configuration Reply's switch (gps_ubx.msg_class) { case UBX_ACK_ID: if (gps_ubx.msg_id == UBX_ACK_ACK_ID) { gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_ACK; DEBUG_PRINT("ACK\n"); } else { gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_NACK; DEBUG_PRINT("NACK\n"); } break; case UBX_MON_ID: if (gps_ubx.msg_id == UBX_MON_VER_ID ) { gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_VERSION; gps_ubx_ucenter.sw_ver_h = UBX_MON_VER_c(gps_ubx.msg_buf,0) - '0'; gps_ubx_ucenter.sw_ver_l = 10*(UBX_MON_VER_c(gps_ubx.msg_buf,2) - '0'); gps_ubx_ucenter.sw_ver_l += UBX_MON_VER_c(gps_ubx.msg_buf,3) - '0'; gps_ubx_ucenter.hw_ver_h = UBX_MON_VER_c(gps_ubx.msg_buf,33) - '0'; gps_ubx_ucenter.hw_ver_h += 10*(UBX_MON_VER_c(gps_ubx.msg_buf,32) - '0'); gps_ubx_ucenter.hw_ver_l = UBX_MON_VER_c(gps_ubx.msg_buf,37) - '0'; gps_ubx_ucenter.hw_ver_l += 10*(UBX_MON_VER_c(gps_ubx.msg_buf,36) - '0'); DEBUG_PRINT("ublox sw_ver: %u.%u\n", gps_ubx_ucenter.sw_ver_h, gps_ubx_ucenter.sw_ver_l); DEBUG_PRINT("ublox hw_ver: %u.%u\n", gps_ubx_ucenter.hw_ver_h, gps_ubx_ucenter.hw_ver_l); } break; case UBX_CFG_ID: if (gps_ubx.msg_id == UBX_CFG_PRT_ID) { gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_CFG_PRT; gps_ubx_ucenter.port_id = UBX_CFG_PRT_PortId(gps_ubx.msg_buf,0); gps_ubx_ucenter.baud_run = UBX_CFG_PRT_Baudrate(gps_ubx.msg_buf,0); DEBUG_PRINT("gps_ubx_ucenter.baud_run: %u\n", gps_ubx_ucenter.baud_run); DEBUG_PRINT("gps_ubx_ucenter.port_id: %u\n", gps_ubx_ucenter.port_id); } break; default: break; } } ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// // // UCENTER Configuration Functions /** * Polls the u-blox port configuration * When the payload is omitted (zero length), the configuration for the incoming * (currently used) port is reported. * */ static inline void gps_ubx_ucenter_config_port_poll(void) { UbxSend_CFG_PRT_POLL(); } /** * Enable u-blox message at desired period. Will enable the message on the port * that this command is received on. For example, sending this configuration message * over UART1 will cause the desired message to be published on UART1. * * For more information on u-blox messages, see the protocol specification. * http://www.ublox.com/en/download/documents-a-resources.html * * @param class u-blox message class * @param id u-blox message ID * @param rate Desired period to send message. Example: Setting 3 would send the message on every 3rd navigation solution. */ static inline void gps_ubx_ucenter_enable_msg(uint8_t class, uint8_t id, uint8_t rate) { UbxSend_CFG_MSG(class, id, rate); } /** * Automatically determine the baudrate of the u-blox module. * Only needed when connecting to a UART port on the u-blox. * The discovered baudrate is copied to gps_ubx_ucenter.baud_init. * * @param nr Autobaud step number to perform * @return FALSE when completed */ static bool_t gps_ubx_ucenter_autobaud(uint8_t nr) { switch (nr) { case 0: case 1: // Very important for some modules: // Give the GPS some time to boot (up to 0.75 second) break; case 2: gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_NONE; GpsUartSetBaudrate(B38400); // Try the most common first? 
gps_ubx_ucenter_config_port_poll(); break; case 3: if (gps_ubx_ucenter.reply == GPS_UBX_UCENTER_REPLY_ACK) { gps_ubx_ucenter.baud_init = gps_ubx_ucenter.baud_run; return FALSE; } gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_NONE; GpsUartSetBaudrate(B9600); // Maybe the factory default? gps_ubx_ucenter_config_port_poll(); break; case 4: if (gps_ubx_ucenter.reply == GPS_UBX_UCENTER_REPLY_ACK) { gps_ubx_ucenter.baud_init = gps_ubx_ucenter.baud_run; return FALSE; } gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_NONE; GpsUartSetBaudrate(B57600); // The high-rate default? gps_ubx_ucenter_config_port_poll(); break; case 5: if (gps_ubx_ucenter.reply == GPS_UBX_UCENTER_REPLY_ACK) { gps_ubx_ucenter.baud_init = gps_ubx_ucenter.baud_run; return FALSE; } gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_NONE; GpsUartSetBaudrate(B4800); // Default NMEA baudrate? gps_ubx_ucenter_config_port_poll(); break; case 6: if (gps_ubx_ucenter.reply == GPS_UBX_UCENTER_REPLY_ACK) { gps_ubx_ucenter.baud_init = gps_ubx_ucenter.baud_run; return FALSE; } gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_NONE; GpsUartSetBaudrate(B115200); // Last possible option for ublox gps_ubx_ucenter_config_port_poll(); break; case 7: if (gps_ubx_ucenter.reply == GPS_UBX_UCENTER_REPLY_ACK) { gps_ubx_ucenter.baud_init = gps_ubx_ucenter.baud_run; return FALSE; } // Autoconfig Failed... let's setup the failsafe baudrate // Should we try even a different baudrate? gps_ubx_ucenter.baud_init = 0; // Set as zero to indicate that we couldn't verify the baudrate GpsUartSetBaudrate(B9600); return FALSE; default: break; } return TRUE; } ///////////////////////////////////// // UBlox internal Navigation Solution #define NAV_DYN_STATIONARY 1 #define NAV_DYN_PEDESTRIAN 2 #define NAV_DYN_AUTOMOTIVE 3 #define NAV_DYN_SEA 4 #define NAV_DYN_AIRBORNE_1G 5 #define NAV_DYN_AIRBORNE_2G 6 // paparazzi default #define NAV_DYN_AIRBORNE_4G 7 #define NAV5_DYN_PORTABLE 0 // ublox default #define NAV5_DYN_FIXED 1 #define NAV5_DYN_STATIONARY 2 #define NAV5_DYN_PEDESTRIAN 3 #define NAV5_DYN_AUTOMOTIVE 4 #define NAV5_DYN_SEA 5 #define NAV5_DYN_AIRBORNE_1G 6 #define NAV5_DYN_AIRBORNE_2G 7 // paparazzi default #define NAV5_DYN_AIRBORNE_4G 8 #ifndef GPS_UBX_NAV5_DYNAMICS #define GPS_UBX_NAV5_DYNAMICS NAV5_DYN_AIRBORNE_2G #endif #define NAV5_MASK 0x05 // Apply dynamic model and position fix mode settings #define NAV5_2D_ONLY 1 #define NAV5_3D_ONLY 2 // paparazzi default #define NAV5_AUTO 3 // ublox default #define NAV5_DEFAULT_MIN_ELEV 5 // deg #define NAV5_DEFAULT_PDOP_MASK 25 // no units #define NAV5_DEFAULT_TDOP_MASK 25 // no units #define NAV5_DEFAULT_P_ACC 100 // m #define NAV5_DEFAULT_T_ACC 300 // m #define NAV5_DEFAULT_STATIC_HOLD_THRES 0 // cm/s #define IGNORED 0 #define RESERVED 0 static inline void gps_ubx_ucenter_config_nav(void) { //New ublox firmware v5 or higher uses CFG_NAV5 message, CFG_NAV is no longer available if (gps_ubx_ucenter.sw_ver_h < 5 && gps_ubx_ucenter.hw_ver_h < 6) { UbxSend_CFG_NAV(NAV_DYN_AIRBORNE_2G, 3, 16, 24, 20, 5, 0, 0x3C, 0x3C, 0x14, 0x03E8 ,0x0000, 0x0, 0x17, 0x00FA, 0x00FA, 0x0064, 0x012C, 0x000F, 0x00, 0x00); } else { UbxSend_CFG_NAV5(NAV5_MASK, GPS_UBX_NAV5_DYNAMICS, NAV5_3D_ONLY, IGNORED, IGNORED, NAV5_DEFAULT_MIN_ELEV, RESERVED, NAV5_DEFAULT_PDOP_MASK, NAV5_DEFAULT_TDOP_MASK, NAV5_DEFAULT_P_ACC, NAV5_DEFAULT_T_ACC, NAV5_DEFAULT_STATIC_HOLD_THRES, RESERVED, RESERVED, RESERVED, RESERVED); } } ///////////////////////////////////// // UBlox port and protocol GPS configuration #ifdef GPS_PORT_ID #warning "GPS_PORT_ID is no longer needed by 
the ublox ucenter for automatically configuration. Please remove this defined variable and double check that autoconfig is working as expected." #endif // UART mode: 8N1 with reserved1 set for compatability with A4 #define UBX_UART_MODE_MASK 0x000008D0 #define UBX_PROTO_MASK 0x0001 #define NMEA_PROTO_MASK 0x0002 #define RTCM_PROTO_MASK 0x0004 #define GPS_PORT_DDC 0x00 #define GPS_PORT_UART1 0x01 #define GPS_PORT_UART2 0x02 #define GPS_PORT_USB 0x03 #define GPS_PORT_SPI 0x04 #define GPS_PORT_RESERVED 0x05 #define __UBX_GPS_BAUD(_u) _u##_BAUD #define _UBX_GPS_BAUD(_u) __UBX_GPS_BAUD(_u) #define UBX_GPS_BAUD _UBX_GPS_BAUD(GPS_LINK) #ifndef GPS_UBX_UCENTER_RATE #define GPS_UBX_UCENTER_RATE 0x00FA // In milliseconds. 0x00FA = 250ms = 4Hz #endif static inline void gps_ubx_ucenter_config_port(void) { switch(gps_ubx_ucenter.port_id) { // I2C Interface case GPS_PORT_DDC: #ifdef GPS_I2C UbxSend_CFG_PRT(gps_ubx_ucenter.port_id, 0x0, 0x0, GPS_I2C_SLAVE_ADDR, 0x0, UBX_PROTO_MASK, UBX_PROTO_MASK, 0x0, 0x0); #else DEBUG_PRINT("WARNING: Please include the gps_i2c module.\n"); #endif break; // UART Interface case GPS_PORT_UART1: case GPS_PORT_UART2: UbxSend_CFG_PRT(gps_ubx_ucenter.port_id, 0x0, 0x0, UBX_UART_MODE_MASK, UART_SPEED(UBX_GPS_BAUD), UBX_PROTO_MASK, UBX_PROTO_MASK, 0x0, 0x0); break; // USB Interface case GPS_PORT_USB: UbxSend_CFG_PRT(gps_ubx_ucenter.port_id, 0x0, 0x0, 0x0, 0x0, UBX_PROTO_MASK, UBX_PROTO_MASK, 0x0, 0x0); break; case GPS_PORT_SPI: DEBUG_PRINT("WARNING: ublox SPI port is currently not supported.\n"); break; default: DEBUG_PRINT("WARNING: Unknown ublox port id: %u\n", gps_ubx_ucenter.port_id); break; } } #define GPS_SBAS_ENABLED 0x01 #define GPS_SBAS_RANGING 0x01 #define GPS_SBAS_CORRECTIONS 0x02 #define GPS_SBAS_INTEGRITY 0x04 #define GPS_SBAS_MAX_SBAS 1 // Default ublox setting uses 3 SBAS channels(?) #define GPS_SBAS_AUTOSCAN 0x00 static inline void gps_ubx_ucenter_config_sbas(void) { // Since March 2nd 2011 EGNOS is released for aviation purposes UbxSend_CFG_SBAS(GPS_SBAS_ENABLED, GPS_SBAS_RANGING | GPS_SBAS_CORRECTIONS | GPS_SBAS_INTEGRITY, GPS_SBAS_MAX_SBAS, GPS_SBAS_AUTOSCAN, GPS_SBAS_AUTOSCAN); //UbxSend_CFG_SBAS(0x00, 0x00, 0x00, 0x00, 0x00); } // Text Telemetry for Debugging #undef GOT_PAYLOAD static bool_t gps_ubx_ucenter_configure(uint8_t nr) { DEBUG_PRINT("gps_ubx_ucenter_configure nr: %u\n",nr); // Store the reply of the last configuration step and reset if (nr < GPS_UBX_UCENTER_CONFIG_STEPS) gps_ubx_ucenter.replies[nr] = gps_ubx_ucenter.reply; switch (nr) { case 0: // Use old baudrate to issue a baudrate change command gps_ubx_ucenter_config_port(); break; case 1: #if PRINT_DEBUG_GPS_UBX_UCENTER if (gps_ubx_ucenter.reply != GPS_UBX_UCENTER_REPLY_ACK) { DEBUG_PRINT("ublox did not acknowledge port configuration.\n"); } else { DEBUG_PRINT("Changed ublox baudrate to: %u\n", UART_SPEED(UBX_GPS_BAUD)); } #endif // Now the GPS baudrate should have changed GpsUartSetBaudrate(UBX_GPS_BAUD); gps_ubx_ucenter.baud_run = UART_SPEED(UBX_GPS_BAUD); UbxSend_MON_GET_VER(); break; case 2: case 3: case 4: case 5: // UBX_G5010 takes 0.7 seconds to answer a firmware request // Version info is important for proper configuration as different firmwares have different settings break; case 6: // Send some debugging info: detected baudrate, software version etc... 
gps_ubx_ucenter.replies[0] = (gps_ubx_ucenter.baud_init/1000); gps_ubx_ucenter.replies[1] = (gps_ubx_ucenter.baud_init - 1000 * gps_ubx_ucenter.replies[0]) / 100; gps_ubx_ucenter.replies[2] = gps_ubx_ucenter.sw_ver_h; gps_ubx_ucenter.replies[3] = gps_ubx_ucenter.sw_ver_l; gps_ubx_ucenter.replies[4] = gps_ubx_ucenter.hw_ver_h; gps_ubx_ucenter.replies[5] = gps_ubx_ucenter.hw_ver_l; #if DEBUG_GPS_UBX_UCENTER DOWNLINK_SEND_DEBUG(DefaultChannel, DefaultDevice,6,gps_ubx_ucenter.replies); #endif // Configure CFG-NAV(5) message gps_ubx_ucenter_config_nav(); break; case 7: // Geodetic Position Solution gps_ubx_ucenter_enable_msg(UBX_NAV_ID, UBX_NAV_POSLLH_ID,1); break; case 8: // Velocity Solution in NED gps_ubx_ucenter_enable_msg(UBX_NAV_ID, UBX_NAV_VELNED_ID, 1); break; case 9: // Receiver Navigation Status gps_ubx_ucenter_enable_msg(UBX_NAV_ID, UBX_NAV_STATUS_ID, 1); break; case 10: // Space Vehicle Information gps_ubx_ucenter_enable_msg(UBX_NAV_ID, UBX_NAV_SVINFO_ID, 4); break; case 11: // Navigation Solution Information #if GPS_UBX_UCENTER_SLOW_NAV_SOL gps_ubx_ucenter_enable_msg(UBX_NAV_ID, UBX_NAV_SOL_ID, 8); #else gps_ubx_ucenter_enable_msg(UBX_NAV_ID, UBX_NAV_SOL_ID, 1); #endif break; case 12: // Disable UTM on old Lea4P gps_ubx_ucenter_enable_msg(UBX_NAV_ID, UBX_NAV_POSUTM_ID, 0); break; case 13: // SBAS Configuration gps_ubx_ucenter_config_sbas(); break; case 14: // Poll Navigation/Measurement Rate Settings UbxSend_CFG_RATE(GPS_UBX_UCENTER_RATE, 0x0001, 0x0000); break; case 15: // Raw Measurement Data #if USE_GPS_UBX_RXM_RAW gps_ubx_ucenter_enable_msg(UBX_RXM_ID, UBX_RXM_RAW_ID, 1); #endif break; case 16: // Subframe Buffer #if USE_GPS_UBX_RXM_SFRB gps_ubx_ucenter_enable_msg(UBX_RXM_ID, UBX_RXM_SFRB_ID, 1); #endif break; case 17: // Try to save on non-ROM devices... UbxSend_CFG_CFG(0x00000000,0xffffffff,0x00000000); break; case 18: #if DEBUG_GPS_UBX_UCENTER // Debug Downlink the result of all configuration steps: see messages // To view, enable DEBUG message in your telemetry configuration .xml DOWNLINK_SEND_DEBUG(DefaultChannel, DefaultDevice,GPS_UBX_UCENTER_CONFIG_STEPS,gps_ubx_ucenter.replies); for (int i = 0; i < GPS_UBX_UCENTER_CONFIG_STEPS; i++) { DEBUG_PRINT("%u\n", gps_ubx_ucenter.replies[i]); } #endif return FALSE; default: break; } gps_ubx_ucenter.reply = GPS_UBX_UCENTER_REPLY_NONE; return TRUE; // Continue, except for the last case }
lplp8899/paparazzi
sw/airborne/modules/gps/gps_ubx_ucenter.c
C
gpl-2.0
17,413
30.833638
189
0.661977
false
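gps_ubx_ucenter_autobaud() in the record above probes a fixed list of candidate baudrates, sending a CFG-PRT poll at each one and advancing only when an ACK arrives. The following hedged sketch reduces that state machine to a synchronous loop for clarity; the real driver spreads the probes across periodic ticks and reads the ACK flag in gps_ubx_ucenter_event(). The probe() stub and its pretend module baudrate are hypothetical stand-ins for GpsUartSetBaudrate() plus the GPS_UBX_UCENTER_REPLY_ACK bookkeeping, added only so the sketch compiles and runs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for GpsUartSetBaudrate() + CFG-PRT poll + ACK check. */
static uint32_t module_baud = 57600;   /* pretend the module runs at 57600 */

static bool probe(uint32_t baud)
{
    /* Real code: switch the UART, send a CFG-PRT poll, wait a periodic tick,
     * then test for GPS_UBX_UCENTER_REPLY_ACK set by the event handler. */
    return baud == module_baud;
}

/* Same candidate order as gps_ubx_ucenter_autobaud(): common rates first. */
static uint32_t autobaud(void)
{
    static const uint32_t candidates[] = { 38400, 9600, 57600, 4800, 115200 };
    for (unsigned i = 0; i < sizeof candidates / sizeof candidates[0]; i++) {
        if (probe(candidates[i]))
            return candidates[i];      /* found the module's baudrate */
    }
    return 0;   /* undetermined, mirroring baud_init = 0 in the module */
}

int main(void)
{
    printf("detected baudrate: %u\n", (unsigned) autobaud());
    return 0;
}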
<?xml version="1.0" encoding="iso-8859-1"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <!-- /home/qt/mkdist-qt-4.4.3-1222864207/qt-embedded-linux-opensource-src-4.4.3/src/gui/embedded/qdecoration_qws.cpp --> <head> <title>Qt 4.4: List of All Members for QDecoration</title> <link href="classic.css" rel="stylesheet" type="text/css" /> </head> <body> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr> <td align="left" valign="top" width="32"><a href="http://www.trolltech.com/products/qt"><img src="images/qt-logo.png" align="left" border="0" /></a></td> <td width="1">&nbsp;&nbsp;</td><td class="postheader" valign="center"><a href="index.html"><font color="#004faf">Home</font></a>&nbsp;&middot; <a href="namespaces.html"><font color="#004faf">All&nbsp;Namespaces</font></a>&nbsp;&middot; <a href="classes.html"><font color="#004faf">All&nbsp;Classes</font></a>&nbsp;&middot; <a href="mainclasses.html"><font color="#004faf">Main&nbsp;Classes</font></a>&nbsp;&middot; <a href="groups.html"><font color="#004faf">Grouped&nbsp;Classes</font></a>&nbsp;&middot; <a href="modules.html"><font color="#004faf">Modules</font></a>&nbsp;&middot; <a href="functions.html"><font color="#004faf">Functions</font></a></td> <td align="right" valign="top" width="230"></td></tr></table><h1 class="title">List of All Members for QDecoration</h1> <p>This is the complete list of members for <a href="qdecoration.html">QDecoration</a>, including inherited members.</p> <ul> <li><div class="fn"/>enum <a href="qdecoration.html#DecorationRegion-enum">DecorationRegion</a></li> <li><div class="fn"/>enum <a href="qdecoration.html#DecorationState-enum">DecorationState</a></li> <li><div class="fn"/><a href="qdecoration.html#QDecoration">QDecoration</a> ()</li> <li><div class="fn"/><a href="qdecoration.html#dtor.QDecoration">~QDecoration</a> ()</li> <li><div class="fn"/><a href="qdecoration.html#buildSysMenu">buildSysMenu</a> ( QWidget *, QMenu * )</li> <li><div class="fn"/><a href="qdecoration.html#menuTriggered">menuTriggered</a> ( QWidget *, QAction * )</li> <li><div class="fn"/><a href="qdecoration.html#paint">paint</a> ( QPainter *, const QWidget *, int, DecorationState ) : bool</li> <li><div class="fn"/><a href="qdecoration.html#region">region</a> ( const QWidget *, const QRect &amp;, int ) : QRegion</li> <li><div class="fn"/><a href="qdecoration.html#region-2">region</a> ( const QWidget *, int ) : QRegion</li> <li><div class="fn"/><a href="qdecoration.html#regionAt">regionAt</a> ( const QWidget *, const QPoint &amp; ) : int</li> <li><div class="fn"/><a href="qdecoration.html#regionClicked">regionClicked</a> ( QWidget *, int )</li> <li><div class="fn"/><a href="qdecoration.html#regionDoubleClicked">regionDoubleClicked</a> ( QWidget *, int )</li> <li><div class="fn"/><a href="qdecoration.html#startMove">startMove</a> ( QWidget * )</li> <li><div class="fn"/><a href="qdecoration.html#startResize">startResize</a> ( QWidget * )</li> </ul> <p /><address><hr /><div align="center"> <table width="100%" cellspacing="0" border="0"><tr class="address"> <td width="30%" align="left">Copyright &copy; 2008 Nokia</td> <td width="40%" align="center"><a href="trademarks.html">Trademarks</a></td> <td width="30%" align="right"><div align="right">Qt 4.4.3</div></td> </tr></table></div></address></body> </html>
FilipBE/qtextended
qtopiacore/qt/doc/html/qdecoration-members.html
HTML
gpl-2.0
3,438
87.153846
655
0.678301
false
/*
 * Copyright (C) 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DatabaseBase.h"

#if ENABLE(SQL_DATABASE)

#include "ScriptExecutionContext.h"
#include <wtf/Assertions.h>

namespace WebCore {

DatabaseBase::DatabaseBase(ScriptExecutionContext* scriptExecutionContext)
    : m_scriptExecutionContext(scriptExecutionContext)
{
    ASSERT(m_scriptExecutionContext->isContextThread());
}

ScriptExecutionContext* DatabaseBase::scriptExecutionContext() const
{
    return m_scriptExecutionContext.get();
}

void DatabaseBase::logErrorMessage(const String& message)
{
    m_scriptExecutionContext->addConsoleMessage(MessageSource::Storage, MessageLevel::Error, message);
}

} // namespace WebCore

#endif // ENABLE(SQL_DATABASE)
lostdj/Jaklin-OpenJFX
modules/web/src/main/native/Source/WebCore/Modules/webdatabase/DatabaseBase.cpp
C++
gpl-2.0
2,022
36.444444
102
0.769535
false
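Illustrative sketch (not part of the record above): DatabaseBase only stores the context and forwards errors, so a derived class routes console output through logErrorMessage. The subclass name and its openFailed() hook below are hypothetical, not WebCore API, and logErrorMessage is assumed accessible to subclasses.

// Hypothetical subclass; only DatabaseBase's constructor and
// logErrorMessage() are taken from the file above.
class SketchDatabase : public WebCore::DatabaseBase {
public:
    explicit SketchDatabase(WebCore::ScriptExecutionContext* context)
        : DatabaseBase(context) { }

    void openFailed()
    {
        // Ends up in the page console via
        // ScriptExecutionContext::addConsoleMessage(Storage, Error, ...).
        logErrorMessage("unable to open database");
    }
};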
/* This file is part of the KDE libraries
    Copyright (c) 2013 David Faure <faure+bluesystem@kde.org>

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2 of the License or ( at your option ) version 3 or,
    at the discretion of KDE e.V. ( which shall act as a proxy as in
    section 14 of the GPLv3 ), any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this library; see the file COPYING.LIB.  If not, write to
    the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
    Boston, MA 02110-1301, USA.
*/

#include "klanguagebutton.h"
#include <QtGui/QLineEdit>
#include <kcmdlineargs.h>
#include <kaboutdata.h>
#include <kapplication.h>
#include <klocale.h>

int main(int argc, char** argv)
{
    KAboutData about("KLanguageButtonTest", "kdelibs4", ki18n("KLanguageButtonTest"), "version");
    KCmdLineArgs::init(argc, argv, &about);
    KApplication app;

    KLanguageButton button;
    button.loadAllLanguages();
    button.show();

    return app.exec();
}
melvyn-sopacua/kdelibs
kdeui/tests/klanguagebuttontest.cpp
C++
gpl-2.0
1,442
35.05
95
0.732316
false
/*
 * m200 clock common interface
 *
 * Copyright (C) 2013 Ingenic Semiconductor Co.,Ltd
 * Author: Zoro <ykli@ingenic.cn>
 * Based on: newxboot/modules/clk/jz4775_clk.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/*#define DEBUG*/
#include <config.h>
#include <common.h>
#include <asm/io.h>
#include <asm/gpio.h>
#include <asm/arch/cpm.h>
#include <asm/arch/clk.h>

DECLARE_GLOBAL_DATA_PTR;

#ifdef DUMP_CGU_SELECT
static char clk_name[][10] = {
	[VPU] = {"vpu"},
	[OTG] = {"otg"},
	[I2S] = {"i2s"},
	[LCD] = {"lcd"},
	[UHC] = {"uhc"},
	[SSI] = {"ssi"},
	[CIM] = {"cim"},
	[PCM] = {"pcm"},
	[GPU] = {"gpu"},
	[ISP] = {"isp"},
	[DDR] = {"ddr"},
	[MSC] = {"msc"},
	[MSC1] = {"msc1"},
	[MSC2] = {"msc2"},
	[BCH] = {"bch"},
};

static char *cgu_name(int clk)
{
	return clk_name[clk];
}
#endif

struct cgu cgu_clk_sel[CGU_CNT] = {
	[DDR] = {1, CPM_DDRCDR, 30, CONFIG_DDR_SEL_PLL, {0, APLL, MPLL, -1}, 29, 28, 27},
	[MSC] = {1, CPM_MSC0CDR, 31, CONFIG_CPU_SEL_PLL, {APLL, MPLL, -1, -1}, 29, 28, 27},
	[BCH] = {1, CPM_BCHCDR, 31, CONFIG_CPU_SEL_PLL, {APLL, MPLL, -1, -1}, 29, 28, 27},
	[VPU] = {1, CPM_VPUCDR, 31, CONFIG_CPU_SEL_PLL, {APLL, MPLL, -1, -1}, 29, 28, 27},
#ifndef CONFIG_BURNER
	[OTG] = {1, CPM_USBCDR, 30, EXCLK, {EXCLK, EXCLK, APLL, MPLL}, 29, 28, 27},
#endif
	[I2S] = {1, CPM_I2SCDR, 30, EXCLK, {APLL, MPLL, EXCLK, -1}, 29, 28, 27},
	[LCD] = {1, CPM_LPCDR, 31, CONFIG_CPU_SEL_PLL, {APLL, MPLL, -1, -1}, 28, 27, 26},
	[MSC1] = {0, CPM_MSC1CDR, 0, 0, {-1, -1, -1, -1}, 29, 28, 27},
	[MSC2] = {0, CPM_MSC2CDR, 0, 0, {-1, -1, -1, -1}, 29, 28, 27},
	[UHC] = {1, CPM_UHCCDR, 30, OTG, {APLL, MPLL, OTG, -1}, 29, 28, 27},
#ifdef CONFIG_BURNER
	[SSI] = {1, CPM_SSICDR, 30, EXCLK, {APLL, MPLL, EXCLK, -1}, 29, 28, 27},
#else
	[SSI] = {1, CPM_SSICDR, 30, CONFIG_CPU_SEL_PLL, {APLL, MPLL, EXCLK, -1}, 29, 28, 27},
#endif
	[CIM] = {1, CPM_CIMCDR, 31, CONFIG_CPU_SEL_PLL, {APLL, MPLL, -1, -1}, 30, 29, 28},
	[PCM] = {1, CPM_PCMCDR, 30, CONFIG_CPU_SEL_PLL, {APLL, MPLL, EXCLK, -1}, 28, 27, 26},
	[GPU] = {1, CPM_GPUCDR, 31, CONFIG_CPU_SEL_PLL, {APLL, MPLL, -1, -1}, 29, 28, 27},
	[ISP] = {1, CPM_ISPCDR, 31, CONFIG_CPU_SEL_PLL, {APLL, MPLL, -1, -1}, 29, 28, 27},
};

void clk_prepare(void)
{
	/* stop clk and set div max */
	int id;
	struct cgu *cgu = NULL;
	unsigned regval = 0, reg = 0;

	for (id = 0; id < CGU_CNT; id++) {
		cgu = &(cgu_clk_sel[id]);
		reg = CPM_BASE + cgu->off;
#ifdef CONFIG_BURNER
		if (id == OTG)
			continue;
#endif
#ifdef CONFIG_BURNER
		if (id == SSI)
			continue;
#endif
		if (id != OTG) {
			regval = readl(reg);
			/* set div max */
			regval |= 0xff | (1 << cgu->ce);
			while (readl(reg) & (1 << cgu->busy));
			writel(regval, reg);
		}
		/* stop clk */
		while (readl(reg) & (1 << cgu->busy));
		regval = readl(reg);
		regval |= ((1 << cgu->stop) | (1 << cgu->ce));
		writel(regval, reg);
		while (readl(reg) & (1 << cgu->busy));
		/* clear ce */
		regval = readl(reg);
		regval &= ~(1 << cgu->ce);
		writel(regval, reg);
#ifdef DUMP_CGU_SELECT
		printf("%s(0x%x) :0x%x\n", clk_name[id], reg, readl(reg));
#endif
	}
}

void cgu_clks_set(struct cgu *cgu_clks, int nr_cgu_clks)
{
	int i, j, id;
	unsigned int xcdr = 0;
	unsigned int reg = 0;
	extern struct cgu_clk_src cgu_clk_src[];

	for (i = 0; cgu_clk_src[i].cgu_clk != SRC_EOF; i++) {
		id = cgu_clk_src[i].cgu_clk;
		cgu_clks[id].sel_src = cgu_clk_src[i].src;
	}

	for (i = 0; i < nr_cgu_clks; i++) {
		for (j = 0; j < 4; j++) {
			if (cgu_clks[i].sel_src == cgu_clks[i].sel[j] && cgu_clks[i].en == 1) {
				reg = CPM_BASE + cgu_clks[i].off;
				xcdr = readl(reg);
				xcdr &= ~(3 << 30);
				xcdr |= j << cgu_clks[i].sel_bit;
				writel(xcdr, reg);
#ifdef DUMP_CGU_SELECT
				printf("%s: 0x%X: value=0x%X\n", cgu_name(i), reg, readl(reg));
#endif
				break;
			}
		}
	}
}

static unsigned int pll_get_rate(int pll)
{
	unsigned int cpxpcr = 0;
	unsigned int m, n, od0, od1;

	switch (pll) {
	case APLL:
		cpxpcr = cpm_inl(CPM_CPAPCR);
		break;
	case MPLL:
		cpxpcr = cpm_inl(CPM_CPMPCR);
		break;
	default:
		return 0;
	}

	m = (cpxpcr >> 20) & 0xfff;
	n = (cpxpcr >> 14) & 0x3f;
	od1 = (cpxpcr >> 11) & 0x7;
	od0 = (cpxpcr >> 8) & 0x7;
#ifdef CONFIG_BURNER
	return (unsigned int)((unsigned long)gd->arch.gi->extal * m / n / od0 / od1);
#else
	return (unsigned int)((unsigned long)CONFIG_SYS_EXTAL * m / n / od0 / od1);
#endif
}

static unsigned int get_ddr_rate(void)
{
	unsigned int ddrcdr = cpm_inl(CPM_DDRCDR);

	switch ((ddrcdr >> 30) & 3) {
	case 1:
		return pll_get_rate(APLL) / ((ddrcdr & 0xf) + 1);
	case 2:
		return pll_get_rate(MPLL) / ((ddrcdr & 0xf) + 1);
	}
	return 0;
}

static unsigned int get_cclk_rate(void)
{
	unsigned int cpccr = cpm_inl(CPM_CPCCR);

	switch ((cpccr >> 28) & 3) {
	case 1:
		return pll_get_rate(APLL) / ((cpccr & 0xf) + 1);
	case 2:
		return pll_get_rate(MPLL) / ((cpccr & 0xf) + 1);
	}
	return 0;
}

static unsigned int get_msc_rate(unsigned int xcdr)
{
	unsigned int msc0cdr = cpm_inl(CPM_MSC0CDR);
	unsigned int mscxcdr = cpm_inl(xcdr);
	unsigned int ret = 1;

	switch (msc0cdr >> 31) {
	case 0:
		ret = pll_get_rate(APLL) / (((mscxcdr & 0xff) + 1) * 2);
		break;
	case 1:
		ret = pll_get_rate(MPLL) / (((mscxcdr & 0xff) + 1) * 2);
		break;
	default:
		break;
	}
	return ret;
}

unsigned int cpm_get_h2clk(void)
{
	int h2clk_div;
	unsigned int cpccr = cpm_inl(CPM_CPCCR);

	h2clk_div = (cpccr >> 12) & 0xf;

	switch ((cpccr >> 24) & 3) {
	case 1:
		return pll_get_rate(APLL) / (h2clk_div + 1);
	case 2:
		return pll_get_rate(MPLL) / (h2clk_div + 1);
	}
	/* no PLL selected */
	return 0;
}

unsigned int clk_get_rate(int clk)
{
	switch (clk) {
	case DDR:
		return get_ddr_rate();
	case CPU:
		return get_cclk_rate();
	case H2CLK:
		return cpm_get_h2clk();
	case MSC0:
		return get_msc_rate(CPM_MSC0CDR);
	case MSC1:
		return get_msc_rate(CPM_MSC1CDR);
	case MSC2:
		return get_msc_rate(CPM_MSC2CDR);
	case APLL:
		return pll_get_rate(APLL);
	case MPLL:
		return pll_get_rate(MPLL);
	}
	return 0;
}

static unsigned int set_bch_rate(int clk, unsigned long rate)
{
	unsigned int pll_rate = 0;
	unsigned int bchcdr = cpm_inl(CPM_BCHCDR);
	unsigned int cdr = 0;

	switch (bchcdr >> 31) {
	case 0:
		pll_rate = pll_get_rate(APLL);
		break;
	case 1:
		pll_rate = pll_get_rate(MPLL);
		break;
	default:
		return 0;
	}

	cdr = ((pll_rate + rate - 1)/rate - 1) & 0xf;
	bchcdr &= ~(0xf | 0x3 << 27);
	bchcdr |= (cdr | (1 << 29));
	cpm_outl(bchcdr, CPM_BCHCDR);
	while (cpm_inl(CPM_BCHCDR) & (1 << 28));
	debug("CPM_BCHCDR(%x) = %x\n", CPM_BCHCDR, cpm_inl(CPM_BCHCDR));
	return 0;
}

static unsigned int set_msc_rate(int clk, unsigned long rate)
{
	unsigned int msccdr = 0;
	unsigned int msc0cdr = cpm_inl(CPM_MSC0CDR);
	unsigned int xcdr_addr = 0;
	unsigned int pll_rate = 0;
	unsigned int cdr = 0;

	switch (clk) {
	case MSC0:
		xcdr_addr = CPM_MSC0CDR;
		break;
	case MSC1:
		xcdr_addr = CPM_MSC1CDR;
		break;
	case MSC2:
		xcdr_addr = CPM_MSC2CDR;
		break;
	default:
		return 0;
	}

	switch (msc0cdr >> 31) {
	case 0:
		pll_rate = pll_get_rate(APLL);
		break;
	case 1:
		pll_rate = pll_get_rate(MPLL);
		break;
	default:
		return 0;
	}

	msccdr = cpm_inl(xcdr_addr);
	msccdr &= ~(0x3 << 27 | 0xff);
	cdr = (((pll_rate + rate - 1)/rate)/2 - 1) & 0xff;
	msccdr |= (cdr | (1 << 29));
	cpm_outl(msccdr, xcdr_addr);
	while (cpm_inl(xcdr_addr) & (1 << 28));
	debug("CPM_MSC%dCDR(%x) = %x\n", (clk - MSC0), xcdr_addr, cpm_inl(xcdr_addr));
	return 0;
}

static unsigned int set_ddr_rate(int clk, unsigned long rate)
{
	unsigned int ddrcdr = cpm_inl(CPM_DDRCDR);
	unsigned int pll_rate;
	unsigned int cdr;

	switch (ddrcdr >> 30) {
	case 1:
		pll_rate = pll_get_rate(APLL);
		break;
	case 2:
		pll_rate = pll_get_rate(MPLL);
		break;
	case 0:
		printf("ddr clk is stop\n");
	default:
		return 0;
	}

	cdr = ((pll_rate + rate - 1)/rate - 1) & 0xf;
	ddrcdr &= ~(0xf | 0x3f << 24);
	ddrcdr |= (cdr | (1 << 29));
	cpm_outl(ddrcdr, CPM_DDRCDR);
	while (cpm_inl(CPM_DDRCDR) & (1 << 28));
	debug("CPM_DDRCDR(%x) = %x\n", CPM_DDRCDR, cpm_inl(CPM_DDRCDR));
	return 0;
}

static unsigned int set_lcd_rate(int clk, unsigned long rate)
{
	unsigned int lcdcdr = cpm_inl(CPM_LPCDR);
	unsigned int pll_rate;
	unsigned int cdr;

	switch (lcdcdr >> 31) {
	case 0:
		pll_rate = pll_get_rate(APLL);
		break;
	case 1:
		pll_rate = pll_get_rate(MPLL);
		break;
	}

	cdr = ((pll_rate + rate - 1)/rate - 1) & 0xff;
	lcdcdr &= ~(0xff | (0x3 << 26));
	lcdcdr |= (cdr | (1 << 28));
	cpm_outl(lcdcdr, CPM_LPCDR);
	while (cpm_inl(CPM_LPCDR) & (1 << 27));
	debug("CPM_LPCDR(%x) = %x\n", CPM_LPCDR, cpm_inl(CPM_LPCDR));
	return 0;
}

static unsigned int set_ssi_rate(int clk, unsigned long rate)
{
	unsigned int cdr;
	unsigned int pll_rate;
	unsigned int ssicdr = cpm_inl(CPM_SSICDR);

	switch (ssicdr >> 30) {
	case 0:
		pll_rate = pll_get_rate(APLL);
		break;
	case 1:
		pll_rate = pll_get_rate(MPLL);
		break;
	case 2:
		pll_rate = CONFIG_SYS_EXTAL;
		break;
	default:
		printf("set_ssi_rate is error !\n");
		/* no valid clock source: avoid using an uninitialized rate */
		return 0;
	}

	cdr = ((pll_rate + rate - 1)/rate - 1) & 0xff;
	ssicdr &= ~(3 << 27 | 0xff);
	ssicdr |= ((1 << 29) | cdr);
	cpm_outl(ssicdr, CPM_SSICDR);
	while (cpm_inl(CPM_SSICDR) & (1 << 28))
		;
	debug("CPM_SSICDR(%x) = %x\n", CPM_SSICDR, cpm_inl(CPM_SSICDR));
	return 0;
}

void clk_set_rate(int clk, unsigned long rate)
{
	switch (clk) {
	case SSI:
		set_ssi_rate(clk, rate);
		return;
	case DDR:
		set_ddr_rate(clk, rate);
		return;
	case LCD:
		set_lcd_rate(clk, rate);
		return;
	case MSC0:
	case MSC1:
	case MSC2:
		set_msc_rate(clk, rate);
		return;
	case BCH:
		set_bch_rate(clk, rate);
		return;
	default:
		break;
	}
	debug("%s: clk%d is not supported\n", __func__, clk);
}

void clk_init(void)
{
	unsigned int reg_clkgr = cpm_inl(CPM_CLKGR);
	unsigned int gate = 0
#ifdef CONFIG_JZ_MMC_MSC0
		| CPM_CLKGR_MSC0
#endif
#ifdef CONFIG_JZ_MMC_MSC1
		| CPM_CLKGR_MSC1
#endif
#ifdef CONFIG_JZ_MMC_MSC2
		| CPM_CLKGR_MSC2
#endif
#if defined(CONFIG_JZ_LCD_V12) || defined(CONFIG_JZ_EPD)
		| CPM_CLKGR_LCD
#endif
#if defined(CONFIG_NAND_NFI)
		| CPM_CLKGR_PDMA
		| CPM_CLKGR_BCH
#endif
#ifdef CONFIG_SPI_FLASH_INGENIC
		| CPM_CLKGR_SSI0
#endif
#ifdef CONFIG_JZ_SSI1_PE
		| CPM_CLKGR_SSI1
#endif
#ifdef CONFIG_MTD_NAND_JZ_NFI
		| CPM_CLKGR_PDMA
		| CPM_CLKGR_NFI
#endif
#ifdef CONFIG_MTD_NAND_JZ_BCH
		| CPM_CLKGR_BCH
#endif
		;

	reg_clkgr &= ~gate;
	cpm_outl(reg_clkgr, CPM_CLKGR);

	reg_clkgr = cpm_inl(CPM_CLKGR1);
	gate = 0
#ifdef CONFIG_JZ_EPD
		| CPM_CLKGR_EPD
#endif
		;
	reg_clkgr &= ~gate;
	cpm_outl(reg_clkgr, CPM_CLKGR1);

	cgu_clks_set(cgu_clk_sel, ARRAY_SIZE(cgu_clk_sel));
}

void enable_uart_clk(void)
{
	unsigned int clkgr = cpm_inl(CPM_CLKGR);

	switch (gd->arch.gi->uart_idx) {
#define _CASE(U, N) case U: clkgr &= ~N; break
		_CASE(0, CPM_CLKGR_UART0);
		_CASE(1, CPM_CLKGR_UART1);
		_CASE(2, CPM_CLKGR_UART2);
		_CASE(3, CPM_CLKGR_UART3);
		_CASE(4, CPM_CLKGR_UART4);
	default:
		break;
	}
	cpm_outl(clkgr, CPM_CLKGR);
}

void otg_phy_init(enum otg_mode_t mode, unsigned extclk)
{
#ifndef CONFIG_SPL_BUILD
	int ext_sel = 0;
	int tmp_reg = 0;
	int timeout = 0x7fffff;

	tmp_reg = cpm_inl(CPM_USBPCR1);
	tmp_reg &= ~(USBPCR1_REFCLKSEL_MSK | USBPCR1_REFCLKDIV_MSK);
	tmp_reg |= USBPCR1_REFCLKSEL_CORE | USBPCR1_WORD_IF0_16_30;

	switch (extclk/1000000) {
	case 12:
		tmp_reg |= USBPCR1_REFCLKDIV_12M;
		break;
	case 19:
		tmp_reg |= USBPCR1_REFCLKDIV_19_2M;
		break;
	case 48:
		tmp_reg |= USBPCR1_REFCLKDIV_48M;
		break;
	default:
		ext_sel = 1;
	case 24:
		tmp_reg |= USBPCR1_REFCLKDIV_24M;
		break;
	}
	cpm_outl(tmp_reg, CPM_USBPCR1);

	/* set usb cdr clk */
	tmp_reg = cpm_inl(CPM_USBCDR);
	tmp_reg &= ~USBCDR_UCS_PLL;
	cpm_outl(tmp_reg, CPM_USBCDR);
	if (ext_sel) {
		unsigned int pll_rate = pll_get_rate(APLL);	/* FIXME: default apll */
		unsigned int cdr = pll_rate/24000000;
		cdr = cdr ? cdr - 1 : cdr;
		tmp_reg |= (cdr & USBCDR_USBCDR_MSK) | USBCDR_CE_USB;
		tmp_reg &= ~USBCDR_USB_STOP;
		cpm_outl(tmp_reg, CPM_USBCDR);
		while ((cpm_inl(CPM_USBCDR) & USBCDR_USB_BUSY) || timeout--);
		tmp_reg = cpm_inl(CPM_USBCDR);
		tmp_reg &= ~USBCDR_UPCS_MPLL;
		tmp_reg |= USBCDR_UCS_PLL;
		cpm_outl(tmp_reg, CPM_USBCDR);
	} else {
		tmp_reg |= USBCDR_USB_STOP;
		cpm_outl(tmp_reg, CPM_USBCDR);
		while ((cpm_inl(CPM_USBCDR) & USBCDR_USB_BUSY) || timeout--);
	}
	tmp_reg = cpm_inl(CPM_USBCDR);
	tmp_reg &= ~USBCDR_USB_DIS;
	cpm_outl(tmp_reg, CPM_USBCDR);

	if (!timeout)
		printf("USBCDR wait busy bit failed\n");

	tmp_reg = cpm_inl(CPM_USBPCR);
	switch (mode) {
	case OTG_MODE:
	case HOST_ONLY_MODE:
		tmp_reg |= USBPCR_USB_MODE_ORG;
		tmp_reg &= ~(USBPCR_VBUSVLDEXTSEL|USBPCR_VBUSVLDEXT|USBPCR_OTG_DISABLE);
		break;
	case DEVICE_ONLY_MODE:
		tmp_reg &= ~USBPCR_USB_MODE_ORG;
		tmp_reg |= USBPCR_VBUSVLDEXTSEL|USBPCR_VBUSVLDEXT|USBPCR_OTG_DISABLE;
	}
	cpm_outl(tmp_reg, CPM_USBPCR);

	tmp_reg = cpm_inl(CPM_OPCR);
	tmp_reg |= OPCR_SPENDN0;
	cpm_outl(tmp_reg, CPM_OPCR);

	tmp_reg = cpm_inl(CPM_USBPCR);
	tmp_reg |= USBPCR_POR;
	cpm_outl(tmp_reg, CPM_USBPCR);
	udelay(30);
	tmp_reg = cpm_inl(CPM_USBPCR);
	tmp_reg &= ~USBPCR_POR;
	cpm_outl(tmp_reg, CPM_USBPCR);
	udelay(300);

	tmp_reg = cpm_inl(CPM_CLKGR);
	tmp_reg &= ~CPM_CLKGR_OTG;
	cpm_outl(tmp_reg, CPM_CLKGR);
#endif
}
lxl1140989/dmsdk
uboot/u-boot-dm6291/arch/mips/cpu/xburst/m200/clk.c
C
gpl-2.0
13,725
21.836938
86
0.615446
false
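Illustrative sketch (not part of the record above): pll_get_rate() in the file decodes the M/N/OD0/OD1 fields from the CPxPCR register and computes extal * m / n / od0 / od1. The standalone program below reproduces that arithmetic with the same bit layout; the 24 MHz crystal matches the code's 24M case, but the packed M/N/OD values are made-up example numbers, not real register contents.

#include <stdio.h>

/* Standalone illustration of the PLL rate decoding in pll_get_rate():
 * rate = extal * M / N / OD0 / OD1, using the same bit fields as above.
 * The values packed into cpxpcr here are examples, not a real register. */
int main(void)
{
	unsigned long extal = 24000000;     /* 24 MHz crystal (example) */
	unsigned int cpxpcr = (100u << 20)  /* M   = 100 */
	                    | (2u << 14)    /* N   = 2   */
	                    | (1u << 11)    /* OD1 = 1   */
	                    | (1u << 8);    /* OD0 = 1   */

	unsigned int m   = (cpxpcr >> 20) & 0xfff;
	unsigned int n   = (cpxpcr >> 14) & 0x3f;
	unsigned int od1 = (cpxpcr >> 11) & 0x7;
	unsigned int od0 = (cpxpcr >> 8) & 0x7;

	/* 24000000 * 100 / 2 / 1 / 1 = 1200000000 Hz */
	printf("pll rate = %lu Hz\n", extal * m / n / od0 / od1);
	return 0;
}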
<?php
/**
 * Internationalisation file for extension Categorize.
 *
 * @file
 * @ingroup Extensions
 * @author Andreas Rindler (mediawiki at jenandi dot com) and Thomas Fauré (faure dot thomas at gmail dot com)
 * @license GNU General Public Licence 3.0
 *
 */

$messages = array();

/** English
 * @author Thomas Fauré
 */
$messages['en'] = array(
	'categorize-desc' => 'Adds an input box to edit and upload pages which allows users to assign categories',
	'categorize-title' => 'Knowledge qualification',
	'categorize-subtitle' => '',
	'categorize-footer' => 'If you want to add a label, please contact an [[Special:ListUsers/sysop|administrator]].',
	'categorize-advice' => 'Click on the labels you want to attribute to this page:'
);

/** Message documentation (Message documentation)
 * @author Yekrats
 */
$messages['qqq'] = array(
	'categorize-desc' => '{{desc}} For more information, see http://www.mediawiki.org/wiki/Extension:Categorize',
);

/** Asturian (Asturianu)
 * @author Xuacu
 */
$messages['ast'] = array(
	'categorize-desc' => "Amiesta un cuadru d'entrada pa editar y xubir páxines que permite a los usuarios conseñar categoríes",
	'categorize-title' => 'Calificación del conocimientu',
	'categorize-footer' => 'Si quies amestar una etiqueta, por favor comunícalo a un [[Special:ListUsers/sysop|alministrador]].',
	'categorize-advice' => 'Calca nes etiquetes que quieras atribuir a esta páxina:',
);

/** Belarusian (Taraškievica orthography) (Беларуская (тарашкевіца))
 * @author EugeneZelenko
 * @author Jim-by
 * @author Wizardist
 */
$messages['be-tarask'] = array(
	'categorize-desc' => 'Дадае поле ўводу для рэдагаваньня і загрузкі старонкі, якое дазваляе ўдзельнікам далучаць старонкі да катэгорыяў',
	'categorize-title' => 'Ацэнка ведаў',
	'categorize-footer' => 'Калі жадаеце дадаць надпіс, скантактуйцеся з [[Special:ListUsers/sysop|адміністратарам]].',
	'categorize-advice' => 'Націсьніце на меткі, якія Вы жадаеце дадаць да гэтай старонкі:',
);

/** Welsh (Cymraeg)
 * @author Pwyll
 */
$messages['cy'] = array(
	'categorize-desc' => "Yn ychwanegu blwch mewnbynnu i olygu ac uwchlwytho tudalennau sy'n galluogi defnyddwyr i aseinio categorïau",
	'categorize-title' => 'Cymhwyster gwybodaeth',
	'categorize-footer' => 'Os hoffech ychwanegu label, cysylltwch â [[Special:ListUsers/sysop|gweinyddwr]] os gwelwch yn dda.',
	'categorize-advice' => "Cliciwch ar y labeli yr hoffech briodoli i'r dudalen hon:",
);

/** German (Deutsch)
 * @author Kghbln
 */
$messages['de'] = array(
	'categorize-desc' => 'Fügt beim Eingabe- und Hochladefenster ein zusätzliches Eingabefeld hinzu, welches das Zuweisen von Kategorien ermöglicht',
	'categorize-title' => 'Kategorisierung',
	'categorize-footer' => 'Sofern du eine Bezeichnung hinzufügen lassen möchtest, kontaktiere hierzu bitte einen [[Special:ListUsers/sysop|Administrator]].',
	'categorize-advice' => 'Die Kategorien anklicken, die hinzugefügt werden sollen:',
);

/** German (formal address) (Deutsch (Sie-Form))
 * @author Kghbln
 */
$messages['de-formal'] = array(
	'categorize-footer' => 'Sofern Sie eine Bezeichnung hinzufügen lassen möchten, kontaktieren Sie hierzu bitte einen [[Special:ListUsers/sysop|Administrator]].',
);

/** Lower Sorbian (Dolnoserbski)
 * @author Michawiki
 */
$messages['dsb'] = array(
	'categorize-desc' => 'Pśidawa zapódawańske pólo za wobźěłowanje a nagrawanje bokow, kótarež dowólujo wužywarjam, kategorije pśipokazaś',
	'categorize-title' => 'Kategorizacija wědy',
	'categorize-footer' => 'Jolic coš pomjenjenje pśidaś, staj se pšosym z [[Special:ListUsers/sysop|administratorom]] do zwiska.',
	'categorize-advice' => 'Klikni na pomjenjenja, kótarež coš toś tomu bokoju pśipisaś:',
);

/** Spanish (Español)
 * @author Fitoschido
 */
$messages['es'] = array(
	'categorize-desc' => 'Añade un cuadro de entrada para editar y cargar páginas que permiten a los usuarios asignar categorías',
	'categorize-footer' => 'Si quieres añadir una etiqueta, contacta a un [[Special:ListUsers/sysop|administrador]].',
	'categorize-advice' => 'Pulse en las etiquetas que quiere atribuir a esta página:',
);

/** French (Français)
 * @author Crochet.david
 * @author Thomas Fauré
 */
$messages['fr'] = array(
	'categorize-desc' => 'Ajoute une zone de saisie à la page d’édition et de téléversement permettant aux utilisateurs d’assigner des catégories à la page',
	'categorize-title' => 'Qualification des savoirs',
	'categorize-footer' => 'Si vous souhaitez ajouter un label, veuillez contacter un [[Special:ListUsers/sysop|administrateur]].',
	'categorize-advice' => 'Cliquez sur les labels que vous souhaitez attribuer à cet article :',
);

/** Franco-Provençal (Arpetan)
 * @author ChrisPtDe
 */
$messages['frp'] = array(
	'categorize-title' => 'Qualificacion des savêrs',
);

/** Galician (Galego)
 * @author Toliño
 */
$messages['gl'] = array(
	'categorize-desc' => 'Engade unha caixa de entrada para editar e cargar páxinas, que permite aos usuarios asignar categorías',
	'categorize-title' => 'Cualificación dos coñecementos',
	'categorize-footer' => 'Se quere engadir unha etiqueta, póñase en contacto cun [[Special:ListUsers/sysop|administrador]].',
	'categorize-advice' => 'Prema nas etiquetas que queira atribuír a esta páxina:',
);

/** Upper Sorbian (Hornjoserbsce)
 * @author Michawiki
 */
$messages['hsb'] = array(
	'categorize-desc' => 'Přidawa zapodawanske polo za wobdźěłowanje a nahrawanje stronow, kotrež wužiwarjam dowola, kategorije připokazać',
	'categorize-title' => 'Kategorizacija wědy',
	'categorize-footer' => 'Jeli chceš pomjenowanje přidać, staj so prošu z [[Special:ListUsers/sysop|administratorom]] do zwiska.',
	'categorize-advice' => 'Klikń na pomjenowanja, kotrež chceš tutej stronje připisać:',
);

/** Interlingua (Interlingua)
 * @author McDutchie
 */
$messages['ia'] = array(
	'categorize-desc' => 'Insere un quadro de entrata in le paginas de modification e de incargamento que permitte al usatores de assignar categorias al pagina',
	'categorize-title' => 'Qualification de cognoscentia',
	'categorize-footer' => 'Si tu vole adder un etiquetta, per favor contacta un [[Special:ListUsers/sysop|administrator]].',
	'categorize-advice' => 'Clicca super le etiquettas que tu vole attribuer a iste pagina:',
);

/** Indonesian (Bahasa Indonesia)
 * @author Kenrick95
 */
$messages['id'] = array(
	'categorize-desc' => 'Tambahkan kotak input untuk menyunting dan mengunggah halaman yang memungkinkan pengguna menetapkan kategori',
	'categorize-title' => 'Kualifikasi pengetahuan',
	'categorize-footer' => 'Jika Anda ingin menambahkan label, silakan hubungi seorang [[Special:ListUsers/sysop|pengurus]].',
	'categorize-advice' => 'Klik pada label yang Anda inginkan untuk menghubungkan ke halaman ini:',
);

/** Colognian (Ripoarisch)
 * @author Purodha
 */
$messages['ksh'] = array(
	'categorize-desc' => 'Deiht beim Sigge Enjävve un Huhlaade en Käßje dobei, woh mer Saachjroppe met zohdeijle kann.',
	'categorize-title' => 'Wesse en Saachjroppe jeeße',
	'categorize-footer' => 'Wann De en Beschreftung dobei donn wells, bes esu jood un sar_et enem [[Special:ListUsers/sysop|Wiki-Köbes]].',
	'categorize-advice' => 'Klegg op de Saachjroppe för heh di Sigg dren zu donn:',
);

/** Luxembourgish (Lëtzebuergesch)
 * @author Robby
 */
$messages['lb'] = array(
	'categorize-desc' => "Setzt eng Këscht derbäi fir eng Säit eropzelueden an z'änneren déi de Benotzer erlaabt fir Kategorien mat Säiten ze verbannen",
	'categorize-title' => 'Qualifikatioun vum Wëssen',
	'categorize-footer' => 'Wann Dir eng Etiquette derbäisetze wëllt da mellt Iech w.e.g. bei engem [[Special:ListUsers/sysop|Administrateur]].',
	'categorize-advice' => 'Klickt op déi Etiquetten déi Dir mat dësem Artikel verbanne wëllt:',
);

/** Limburgish (Limburgs)
 * @author Pahles
 */
$messages['li'] = array(
	'categorize-desc' => "Deit 'n invoorvèld bie aan de bewirkings- en uploadpagina woemit gebroekers categorieë kinne toekènne",
	'categorize-title' => 'Kènniskwalifikatie',
	'categorize-footer' => "Num kontak op mit 'ne [[Special:ListUsers/sysop|systeemwèrker]] es te 'n label wils biedoon.",
	'categorize-advice' => "Klik op de labels es te 'm eigesjap aan dees pagina wils biedoon:",
);

/** Lithuanian (Lietuvių)
 * @author Ignas693
 */
$messages['lt'] = array(
	'categorize-desc' => 'Prideda įvesties langelį redaguoti ir įkelti puslapių, kuris leidžia vartotojams priskirti kategorijoms',
	'categorize-title' => 'Žinių kvalifikacijos',
	'categorize-footer' => 'Jei norite pridėti žymę, prašome susisiekti su [[specialus: ListUsers / sysop|administrator]].',
	'categorize-advice' => 'Spustelėkite ant etikečių, kurį norite priskirti šį puslapį:',
);

/** Macedonian (Македонски)
 * @author Bjankuloski06
 */
$messages['mk'] = array(
	'categorize-desc' => 'Додава поле за внесување на категории во страницата за уредување и страницата за подигање',
	'categorize-title' => 'Определување на знаењата',
	'categorize-footer' => 'Ако сакате да додадете признак, обратете се кај [[Special:ListUsers/sysop|администратор]].',
	'categorize-advice' => 'Стиснете на признаците што сакате да ѝ ги припишете на страницава:',
);

/** Malay (Bahasa Melayu)
 * @author Anakmalaysia
 */
$messages['ms'] = array(
	'categorize-desc' => 'Menambahkan petak input untuk menyunting dan memuat naik laman yang membolehkan pengguna memperuntukkan kategori kepada laman',
	'categorize-title' => 'Kelayakan pengetahuan',
	'categorize-footer' => 'Jika anda ingin menambahkan label, sila hubungi seorang [[Special:ListUsers/sysop|pentadbir]].',
	'categorize-advice' => 'Klik pada label yang ingin anda atributkan kepada laman ini:',
);

/** Norwegian (bokmål) (Norsk (bokmål))
 * @author Event
 */
$messages['nb'] = array(
	'categorize-desc' => 'Legger til inndatafelt for redigerings- og opplastingsside for å tillate brukere å knytte kategorier til siden',
	'categorize-title' => 'Kunnskapskvalifisering',
	'categorize-footer' => 'Hvis du ønsker å legge til en etikett, vennligst kontakt en [[Special:ListUsers/sysop|administrator]].',
	'categorize-advice' => 'Klikk på etikettene som du ønsker å knytte til denne siden:',
);

/** Dutch (Nederlands)
 * @author Siebrand
 */
$messages['nl'] = array(
	'categorize-desc' => 'Voegt een invoerveld toe aan de bewerkings- en uploadpagina waarmee gebruiker categorieën kunnen toevoegen',
	'categorize-title' => 'Kenniskwalificatie',
	'categorize-footer' => 'Neem contact met een [[Special:ListUsers/sysop|beheerder]] als u een label wilt toevoegen.',
	'categorize-advice' => 'Klik op de labels als u een eigenschap aan deze pagina wilt toevoegen:',
);

/** Polish (Polski)
 * @author Sp5uhe
 * @author Woytecr
 */
$messages['pl'] = array(
	'categorize-desc' => 'Dodaje do stron edycji i przesyłania pole pozwalające użytkownikom na przypisanie kategorii',
	'categorize-title' => 'Porządkowanie wiedzy',
	'categorize-footer' => 'Jeśli chcesz dodać etykietę, skontaktuj się z [[Special:ListUsers/sysop|administratorem]].',
	'categorize-advice' => 'Kliknij wybrane etykiety, które pasują do tej strony',
);

/** Piedmontese (Piemontèis)
 * @author Borichèt
 * @author Dragonòt
 */
$messages['pms'] = array(
	'categorize-desc' => "A gionta na fnestra d'imission për modifiché e carié dle pàgine ch'a përmëtto a j'utent d'assigné dle categorìe",
	'categorize-title' => 'Qualificassion dla conossensa',
	'categorize-footer' => "S'a veul gionté na tichëtta, për piasì ch'a contata n'[[Special:ListUsers/sysop|administrator]].",
	'categorize-advice' => "Ch'a sgnaca an sla tichëtta ch'a veul dé a sta pàgina:",
);

/** Portuguese (Português)
 * @author Hamilton Abreu
 * @author SandroHc
 */
$messages['pt'] = array(
	'categorize-desc' => 'Acrescenta uma caixa de entrada para editar e fazer upload da página, o que permite que os utilizadores atribuam categorias à página',
	'categorize-title' => 'Estruturação de conhecimento',
	'categorize-footer' => 'Se quiser adicionar uma categoria, contacte um [[Special:ListUsers/sysop|administrador]].',
	'categorize-advice' => 'Clique as categorias que pretende atribuir a esta página:',
);

/** Brazilian Portuguese (Português do Brasil)
 * @author Pedroca cerebral
 */
$messages['pt-br'] = array(
	'categorize-desc' => 'Adiciona uma caixa de entrada para editar e fazer upload da página, permitindo que usuários atribuam categorias à página',
	'categorize-title' => 'Qualificação de conhecimento',
	'categorize-footer' => 'Se quiser adicionar uma categoria, contate um [[Special:ListUsers/sysop|administrador]].',
	'categorize-advice' => 'Clique sobre as categorias que deseja atribuir a esta página:',
);

/** Russian (Русский)
 * @author Eleferen
 */
$messages['ru'] = array(
	'categorize-desc' => 'Добавляет поле ввода для редактирования и загрузки страниц, которое позволяет пользователям указывать категории',
	'categorize-title' => 'Квалификация знаний',
	'categorize-footer' => 'Если Вы хотите добавить метку, пожалуйста, свяжитесь с [[Special:ListUsers/sysop|администратором]].',
	'categorize-advice' => 'Нажмите на метки, которые Вы хотите присвоить данной странице:',
);

/** Swedish (Svenska)
 * @author Ainali
 * @author WikiPhoenix
 */
$messages['sv'] = array(
	'categorize-desc' => 'Lägger till inmatningsruta för att redigera och ladda upp sida som tillåter användare att tilldela kategorier till sidan',
	'categorize-title' => 'Kunskapskvalificering',
	'categorize-footer' => 'Om du vill lägga till en etikett, kontakta en [[Special:ListUsers/sysop|administratör]].',
);

/** Ukrainian (Українська)
 * @author Microcell
 * @author Sodmy
 */
$messages['uk'] = array(
	'categorize-desc' => 'Додає поле вводу для редагування та завантаження сторінки, яка дозволяє користувачам призначати категорії',
	'categorize-title' => 'Кваліфікація знань',
	'categorize-footer' => "Якщо ви хочете додати мітку, будь ласка, зв'яжіться з [[Special:ListUsers/sysop|адміністратором]].",
	'categorize-advice' => 'Клацніть на ярликах, якщо ви хочете приписати цій сторінці:',
);
felixonmars/app
extensions/Categorize/Categorize.i18n.php
PHP
gpl-2.0
15,085
43.772436
160
0.731047
false
/* $Id: adapter.h,v 1.1.1.1 2006/03/02 14:48:50 ijsung Exp $ */

#ifndef __DIVA_USER_MODE_IDI_ADAPTER_H__
#define __DIVA_USER_MODE_IDI_ADAPTER_H__

#define DIVA_UM_IDI_ADAPTER_REMOVED 0x00000001

typedef struct _diva_um_idi_adapter {
	struct list_head link;
	DESCRIPTOR d;
	int adapter_nr;
	struct list_head entity_q; /* entities linked to this adapter */
	dword status;
} diva_um_idi_adapter_t;

#endif
fedya/aircam-openwrt
build_dir/toolchain-arm_v5te_gcc-linaro_uClibc-0.9.32_eabi/linux-2.6.28.fa2/drivers/isdn/hardware/eicon/adapter.h
C
gpl-2.0
405
22.823529
65
0.706173
false
local function doKeyboard_lang()
	local keyboard = { inline_keyboard = {} }
	for i, lang in pairs(config.available_languages) do
		local line = {{text = lang, callback_data = 'langselected:'..lang}}
		table.insert(keyboard.inline_keyboard, line)
	end
	return keyboard
end

local action = function(msg, blocks, ln)
	if msg.chat.type ~= 'private' and not is_mod(msg) then
		if msg.cb then
			api.answerCallbackQuery(msg.cb_id, lang[ln].not_mod:mEscape_hard())
		end
		return
	end

	local keyboard
	if blocks[1] == 'lang' and not blocks[2] then
		keyboard = doKeyboard_lang()
		api.sendKeyboard(msg.chat.id, lang[ln].setlang.list, keyboard, true)
	end
	if blocks[1] == 'langselected' and msg.cb then
		local selected = blocks[2]
		db:set('lang:'..msg.chat.id, selected)
		api.editMessageText(msg.chat.id, msg.message_id, make_text(lang[selected].setlang.success, selected), false, true)
	end
end

return {
	action = action,
	triggers = {
		'^/(lang)$',
		'^###cb:(langselected):(%a%a)$'
	}
}
C60Project/C60-PRODUCT
plugins/setlang.lua
Lua
gpl-2.0
1,017
24.45
122
0.6706
false
/**
 * CityDrain3 is an open source software for modelling and simulating integrated
 * urban drainage systems.
 *
 * Copyright (C) 2012 Gregor Burger
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
 * PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
 * Street, Fifth Floor, Boston, MA 02110-1301, USA.
 **/

#include "guilogsink.h"
#include <QPlainTextEdit>
#include <sstream>
#include <log.h>

GuiLogSink::GuiLogSink(QPlainTextEdit *widget)
	: logWidget(widget) {
}

GuiLogSink::~GuiLogSink() {
}

LogSink &GuiLogSink::operator<<(LogLevel level) {
	this->level = level;
	return *this;
}

LogSink &GuiLogSink::operator<<(const std::string &string) {
	buf += QString::fromStdString(string);
	return *this;
}

LogSink &GuiLogSink::operator<<(const char *string) {
	buf += QString(string);
	return *this;
}

LogSink &GuiLogSink::operator<<(int i) {
	buf += QString("%0").arg(i);
	return *this;
}

LogSink &GuiLogSink::operator<<(LSEndl i) {
	QTextCharFormat cf = logWidget->currentCharFormat();
	QColor c = Qt::black;
	switch (level) {
	case Debug:
		c = Qt::gray;
		break;
	case Warning:
		c = Qt::magenta;
		break;
	case Error:
		c = Qt::red;
		break;
	case Standard:
		c = Qt::black;
		break;
	default:
		c = Qt::black;
		break;
	}
	cf.setForeground(c);
	logWidget->setCurrentCharFormat(cf);
	logWidget->appendPlainText(buf);
	buf.clear();
	return *this;
}
gregorburger/CityDrain3
src/gui/guilogsink.cpp
C++
gpl-2.0
1,884
23.789474
83
0.705945
false
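Illustrative sketch (not part of the record above): how GuiLogSink is driven. Each chained << buffers text into buf; the trailing LSEndl picks the colour for the current level and appends the buffered line to the widget. LogLevel values (Debug, Error) and LSEndl are taken from log.h as used in the file; LSEndl is assumed to be default-constructible, which the source does not confirm.

#include <QApplication>
#include <QPlainTextEdit>
#include "guilogsink.h"
#include <log.h>

// Minimal usage sketch under the assumptions stated above.
int main(int argc, char **argv)
{
	QApplication app(argc, argv);
	QPlainTextEdit edit;
	GuiLogSink sink(&edit);

	// Flushes into the widget (red text) when LSEndl is streamed.
	sink << Error << "failed to open model file: " << "network.xml" << LSEndl();
	// Gray text for debug output.
	sink << Debug << "simulation step " << 42 << LSEndl();

	edit.show();
	return app.exec();
}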
/* Countdown Timer */
$('#countdown').countdown(clockEndDate, function(event) {
	var $this = $(this).html(event.strftime(''
		+ '<div id="countdown-days" >%D <span>Days</span></div> '
		+ '<div id="countdown-hours" >%H <span>Hrs</span></div> '
		+ '<div id="countdown-minutes" >%M <span>Min</span></div> '
		+ '<div id="countdown-seconds" >%S <span>Sec</span></div> '));
});

/* Slider Backgrounds */
//$.vegas('slideshow', {
//	delay:5000,
//	backgrounds:[
//		{ src:'img/1.jpg', fade:10000 },
//		{ src:'img/2.jpg', fade:10000 },
//		{ src:'img/3.jpg', fade:10000 }
//	]
//})('overlay', {
//	src:'img/overlay.png'
//});
mcribbs/shellmc.net
wp-content/plugins/easy-pie-coming-soon/mini-themes/base-responsive/js/custom.js
JavaScript
gpl-2.0
712
26.48
71
0.5
false
<?php
/**
 * Copyright © 2015 Magento. All rights reserved.
 * See COPYING.txt for license details.
 */
namespace Magento\Framework\Model\Test\Unit\ResourceModel\Type\Db;

use Magento\Framework\App\ResourceConnection\ConnectionAdapterInterface;
use Magento\Framework\DB\LoggerInterface;
use Magento\Framework\Model\ResourceModel\Type\Db\ConnectionFactory;
use Magento\Framework\ObjectManagerInterface;

class ConnectionFactoryTest extends \PHPUnit_Framework_TestCase
{
    /**
     * @var ConnectionFactory
     */
    private $model;

    /**
     * @var \PHPUnit_Framework_MockObject_MockObject|\Magento\Framework\ObjectManagerInterface
     */
    private $objectManagerMock;

    /**
     * {@inheritDoc}
     */
    protected function setUp()
    {
        $this->objectManagerMock = $this->getMockBuilder(ObjectManagerInterface::class)
            ->getMockForAbstractClass();
        $this->model = new ConnectionFactory($this->objectManagerMock);
    }

    /**
     * @return void
     */
    public function testCreateNoActiveConfig()
    {
        $config = ['foo' => 'bar'];
        $loggerMock = $this->getMockBuilder(LoggerInterface::class)
            ->getMockForAbstractClass();
        $connectionAdapterMock = $this->getMockBuilder(ConnectionAdapterInterface::class)
            ->getMockForAbstractClass();
        $this->objectManagerMock
            ->expects($this->once())
            ->method('create')
            ->with(ConnectionAdapterInterface::class, ['config' => $config])
            ->willReturn($connectionAdapterMock);
        $this->objectManagerMock
            ->expects($this->once())
            ->method('get')
            ->with(LoggerInterface::class)
            ->willReturn($loggerMock);
        $connectionAdapterMock
            ->expects($this->once())
            ->method('getConnection')
            ->with($loggerMock)
            ->willReturn('Expected result');
        $this->assertEquals('Expected result', $this->model->create($config));
    }
}
FPLD/project0
vendor/magento/framework/Model/Test/Unit/ResourceModel/Type/Db/ConnectionFactoryTest.php
PHP
gpl-2.0
2,011
29
94
0.633831
false
<?php
/**
 * Copyright © 2015 Magento. All rights reserved.
 * See COPYING.txt for license details.
 */
namespace Magento\CatalogInventory\Observer;

use Magento\Framework\Event\ObserverInterface;
use Magento\CatalogInventory\Api\StockManagementInterface;
use Magento\Framework\Event\Observer as EventObserver;

/**
 * Catalog inventory module observer
 */
class RevertQuoteInventoryObserver implements ObserverInterface
{
    /**
     * @var ProductQty
     */
    protected $productQty;

    /**
     * @var StockManagementInterface
     */
    protected $stockManagement;

    /**
     * @var \Magento\CatalogInventory\Model\Indexer\Stock\Processor
     */
    protected $stockIndexerProcessor;

    /**
     * @var \Magento\Catalog\Model\Indexer\Product\Price\Processor
     */
    protected $priceIndexer;

    /**
     * RevertQuoteInventory constructor.
     * @param ProductQty $productQty
     * @param StockManagementInterface $stockManagement
     * @param \Magento\CatalogInventory\Model\Indexer\Stock\Processor $stockIndexerProcessor
     * @param \Magento\Catalog\Model\Indexer\Product\Price\Processor $priceIndexer
     */
    public function __construct(
        ProductQty $productQty,
        StockManagementInterface $stockManagement,
        \Magento\CatalogInventory\Model\Indexer\Stock\Processor $stockIndexerProcessor,
        \Magento\Catalog\Model\Indexer\Product\Price\Processor $priceIndexer
    ) {
        $this->productQty = $productQty;
        $this->stockManagement = $stockManagement;
        $this->stockIndexerProcessor = $stockIndexerProcessor;
        $this->priceIndexer = $priceIndexer;
    }

    /**
     * Revert quote items inventory data (cover not success order place case)
     *
     * @param EventObserver $observer
     * @return void
     */
    public function execute(EventObserver $observer)
    {
        $quote = $observer->getEvent()->getQuote();
        $items = $this->productQty->getProductQty($quote->getAllItems());
        $this->stockManagement->revertProductsSale($items, $quote->getStore()->getWebsiteId());
        $productIds = array_keys($items);
        if (!empty($productIds)) {
            $this->stockIndexerProcessor->reindexList($productIds);
            $this->priceIndexer->reindexList($productIds);
        }
        // Clear flag, so if order placement retried again with success - it will be processed
        $quote->setInventoryProcessed(false);
    }
}
FPLD/project0
vendor/magento/module-catalog-inventory/Observer/RevertQuoteInventoryObserver.php
PHP
gpl-2.0
2,439
31.078947
95
0.681296
false
<?php
/**
 * @package    EasyBlog
 * @copyright  Copyright (C) 2010 Stack Ideas Private Limited. All rights reserved.
 * @license    GNU/GPL, see LICENSE.php
 *
 * EasyBlog is free software. This version may have been modified pursuant
 * to the GNU General Public License, and as distributed it includes or
 * is derivative of works licensed under the GNU General Public License or
 * other free or open source software licenses.
 * See COPYRIGHT.php for copyright notices and details.
 */
defined('_JEXEC') or die('Restricted access');
?>
<?php if( $this->getParam( 'show_fontchanger' , true ) ){ ?>
<script type="text/javascript">
EasyBlog.ready(function($){
	// Bind event's on the font size changer.
	$( '#ezblog-body .font-switcher a' ).click( function(){
		var blogText = $( '#ezblog-body .blog-text' );
		var current = $( blogText ).css( 'font-size' );
		var num = parseFloat(current, 10);
		var unit = current.slice(-2);

		if( this.id == 'fontLarge' )
		{
			num = num * 1.4;
		}
		else if (this.id == 'fontSmall')
		{
			num = num / 1.4;
		}
		$( blogText ).css( 'font-size' , num + unit );

		return false;
	});
});
</script>
<li class="font-switcher">
	<span><?php echo JText::_( 'COM_EASYBLOG_FONT_SIZE' ); ?>:</span>
	<a id="fontLarge" class="fontChanger" href="javascript:void(0);"><?php echo JText::_( 'COM_EASYBLOG_FONT_LARGER' ); ?></a>
	<a id="fontSmall" class="fontChanger" href="javascript:void(0);"><?php echo JText::_( 'COM_EASYBLOG_FONT_SMALLER' );?></a>
</li>
<?php } ?>
alexinteam/joomla3
components/com_easyblog/themes/default/blog.read.fontsize.php
PHP
gpl-2.0
1,498
31.565217
123
0.648198
false
/*
Pencil2D - Traditional Animation Software
Copyright (C) 2005-2007 Patrick Corrieri & Pascal Naidon
Copyright (C) 2012-2020 Matthew Chiawen Chang

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/

#ifndef TIMELINE2_H
#define TIMELINE2_H

#include "basedockwidget.h"

namespace Ui {
class Timeline2;
}

class Timeline2 : public BaseDockWidget
{
    Q_OBJECT

public:
    explicit Timeline2(QWidget* parent = nullptr);
    ~Timeline2() override;

    void initUI() override;
    void updateUI() override;

private:
    Ui::Timeline2* ui;
};

#endif // TIMELINE2_H
chchwy/pencil2d
app/src/timeline2.h
C
gpl-2.0
950
21.093023
71
0.756842
false
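Illustrative sketch (not part of the record above): the header declares a Ui::Timeline2 pointer, which is the usual Qt Designer pattern. A minimal matching timeline2.cpp could look like the following, assuming Ui::Timeline2 is a standard uic-generated form class with setupUi(); the header name ui_timeline2.h is the usual uic convention, not confirmed by the source.

#include "timeline2.h"
#include "ui_timeline2.h" // assumed uic-generated header for Ui::Timeline2

Timeline2::Timeline2(QWidget* parent)
    : BaseDockWidget(parent)
    , ui(new Ui::Timeline2)
{
    ui->setupUi(this);
}

Timeline2::~Timeline2()
{
    delete ui;
}

void Timeline2::initUI()
{
    // Deferred UI setup hook inherited from BaseDockWidget; empty in this sketch.
}

void Timeline2::updateUI()
{
    // Refresh hook inherited from BaseDockWidget; empty in this sketch.
}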
/*
 * Output various information about GMP and MPFR.
 */

/*
Copyright 2010, 2011 Free Software Foundation, Inc.
Contributed by the Arenaire and Caramel projects, INRIA.

This file is part of the GNU MPFR Library.

The GNU MPFR Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MPFR Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MPFR Library; see the file COPYING.LESSER.  If not, see
http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */

#include <stdio.h>
#include <limits.h>
#include <gmp.h>
#include <mpfr.h>

/* The following failure can occur if GMP has been rebuilt with
 * a different ABI, e.g.
 *   1. GMP built with ABI=mode32.
 *   2. MPFR built against this GMP version.
 *   3. GMP rebuilt with ABI=32.
 */
static void failure_test (void)
{
  mpfr_t x;

  mpfr_init2 (x, 128);
  mpfr_set_str (x, "17", 0, GMP_RNDN);
  if (mpfr_cmp_ui (x, 17) != 0)
    printf ("\nFailure in mpfr_set_str! Probably an unmatched ABI!\n");
  mpfr_clear (x);
}

int main (void)
{
  unsigned long c;
  mp_limb_t t[4] = { -1, -1, -1, -1 };

#if defined(__cplusplus)
  printf ("A C++ compiler is used.\n");
#endif

  printf ("GMP ..... Library: %-12s Header: %d.%d.%d\n",
          gmp_version, __GNU_MP_VERSION, __GNU_MP_VERSION_MINOR,
          __GNU_MP_VERSION_PATCHLEVEL);
  printf ("MPFR .... Library: %-12s Header: %s (based on %d.%d.%d)\n",
          mpfr_get_version (), MPFR_VERSION_STRING, MPFR_VERSION_MAJOR,
          MPFR_VERSION_MINOR, MPFR_VERSION_PATCHLEVEL);
#if MPFR_VERSION_MAJOR >= 3
  printf ("MPFR features: TLS = %s, decimal = %s",
          mpfr_buildopt_tls_p () ? "yes" : "no",
          mpfr_buildopt_decimal_p () ? "yes" : "no");
# if MPFR_VERSION_MAJOR > 3 || MPFR_VERSION_MINOR >= 1
  printf (", GMP internals = %s\nMPFR tuning: %s",
          mpfr_buildopt_gmpinternals_p () ? "yes" : "no",
          mpfr_buildopt_tune_case ());
# endif
  printf ("\n");
#endif
  printf ("MPFR patches: %s\n\n", mpfr_get_patches ());

#ifdef __GMP_CC
  printf ("__GMP_CC = \"%s\"\n", __GMP_CC);
#endif
#ifdef __GMP_CFLAGS
  printf ("__GMP_CFLAGS = \"%s\"\n", __GMP_CFLAGS);
#endif

  printf ("GMP_LIMB_BITS = %d\n", (int) GMP_LIMB_BITS);
  printf ("GMP_NAIL_BITS = %d\n", (int) GMP_NAIL_BITS);
  printf ("GMP_NUMB_BITS = %d\n", (int) GMP_NUMB_BITS);
  printf ("mp_bits_per_limb = %d\n", (int) mp_bits_per_limb);
  printf ("sizeof(mp_limb_t) = %d\n", (int) sizeof(mp_limb_t));
  if (mp_bits_per_limb != GMP_LIMB_BITS)
    printf ("Warning! mp_bits_per_limb != GMP_LIMB_BITS\n");
  if (GMP_LIMB_BITS != sizeof(mp_limb_t) * CHAR_BIT)
    printf ("Warning! GMP_LIMB_BITS != sizeof(mp_limb_t) * CHAR_BIT\n");

  c = mpn_popcount (t, 1);
  printf ("The GMP library expects %lu bits in a mp_limb_t.\n", c);
  if (c != GMP_LIMB_BITS)
    printf ("Warning! This is different from GMP_LIMB_BITS!\n"
            "Different ABI caused by a GMP library upgrade?\n");

#if MPFR_VERSION_MAJOR >= 3
  printf ("\n");
  printf ("sizeof(mpfr_prec_t) = %d\n", (int) sizeof(mpfr_prec_t));
  printf ("sizeof(mpfr_exp_t) = %d\n", (int) sizeof(mpfr_exp_t));
#endif

  failure_test ();

  return 0;
}
PinguinoIDE/pinguino-compilers
macosx/p32/share/doc/mpfr/examples/version.c
C
gpl-2.0
3,668
32.045045
76
0.639858
false
#!/bin/bash

# This script expects $1 to be passed and for $1 to be the filesystem location
# to a json file for which it will run syntax checks against.

syntax_errors=0
error_msg=$(mktemp /tmp/error_msg_json-syntax.XXXXX)

if [ $2 ]; then
    module_path=$(echo $1 | sed -e 's|'$2'||')
else
    module_path=$1
fi

# Check json file syntax
echo -e "$(tput setaf 6)Checking json syntax for $module_path...$(tput sgr0)"
ruby -e "require 'json'; JSON.parse(File.read('$1'))" 2> $error_msg > /dev/null
if [ $? -ne 0 ]; then
    cat $error_msg | sed -e "s/^/$(tput setaf 1)/" -e "s/$/$(tput sgr0)/"
    syntax_errors=`expr $syntax_errors + 1`
    echo -e "$(tput setaf 1)Error: json syntax error in $module_path (see above)$(tput sgr0)"
fi
rm -f $error_msg

if [ "$syntax_errors" -ne 0 ]; then
    echo -e "$(tput setaf 1)Error: $syntax_errors syntax error(s) found in json file. Commit will be aborted.$(tput sgr0)"
    exit 1
fi

exit 0
sanjeevtripurari/puppet-git-hooks
commit_hooks/json_syntax_check.sh
Shell
gpl-2.0
936
30.2
123
0.645299
false
/****************************************************************************** * Wormux is a convivial mass murder game. * Copyright (C) 2001-2010 Wormux Team. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA ****************************************************************************** * Camera : follow an object, center on it or follow mouse interaction. *****************************************************************************/ #include <WORMUX_debug.h> #include <WORMUX_random.h> #include "character/character.h" #include "game/config.h" #include "game/game.h" #include "game/time.h" #include "graphic/video.h" #include "include/app.h" #include "interface/cursor.h" #include "interface/interface.h" #include "interface/mouse.h" #include "map/camera.h" #include "map/map.h" #include "object/physical_obj.h" #include "team/teams_list.h" #include "tool/math_tools.h" #include "tool/string_tools.h" const Point2d MAX_CAMERA_SPEED(5000, 5000); const Point2d MAX_CAMERA_ACCELERATION(1.5,1.5); const Double ANTICIPATION = 18; const Double REACTIVITY = 0.6; const Double SPEED_REACTIVITY = 0.05; const int SPEED_REACTIVITY_CEIL = 4; const uint SCROLL_KEYBOARD = 20; // pixel const Double ADVANCE_ANTICIPATION = 10; const int REALTIME_FOLLOW_LIMIT = 25; const Double REALTIME_FOLLOW_FACTOR = 0.15; uint MAX_REFRESHES_PER_SECOND = 100; Camera::Camera(): m_started_shaking( 0 ), m_shake_duration( 0 ), m_shake_amplitude( 0, 0 ), m_shake_centerpoint( 0, 0 ), m_shake( 0, 0 ), m_last_time_shake_calculated( 0 ), m_speed( 0, 0 ), m_stop(false), m_control_mode(NO_CAMERA_CONTROL), m_begin_controlled_move_time(0), auto_crop(true), followed_object(NULL) { pointer_used_before_scroll = Mouse::POINTER_SELECT; } void Camera::Reset() { m_stop = false; auto_crop = true; followed_object = NULL; m_begin_controlled_move_time = 0; m_control_mode = NO_CAMERA_CONTROL; SetXYabs(GetWorld().GetSize() / 2); } bool Camera::HasFixedX() const { return (int)GetWorld().GetWidth() <= GetSizeX(); } bool Camera::HasFixedY() const { return (int)GetWorld().GetHeight() <= GetSizeY(); } void Camera::SetXYabs(int x, int y) { AppWormux * app = AppWormux::GetInstance(); if (!HasFixedX()) position.x = InRange_Long(x, 0, GetWorld().GetWidth() - GetSizeX()); else position.x = - (app->video->window.GetWidth() - GetWorld().GetWidth())/2; if (!HasFixedY()) position.y = InRange_Long(y, 0, GetWorld().GetHeight() - GetSizeY()); else position.y = - (app->video->window.GetHeight() - GetWorld().GetHeight())/2; } void Camera::SetXY(Point2i pos) { pos = pos * FreeDegrees(); if (pos.IsNull()) return; SetXYabs(position + pos); } void Camera::AutoCrop() { /* Stuff is put static in order to be able to reach the last position * of the object the camera was following, in case it desapears. This * typically happen when something explodes or a character dies. 
*/ static Point2i obj_pos(0, 0); Point2i target(0,0); bool stop = false; if (followed_object && !(followed_object->IsGhost())) { /* compute the ideal position! * it takes the physical object direction into account */ obj_pos = followed_object->GetCenter(); if (obj_pos > GetPosition() + GetSize() / 7 && obj_pos < GetPosition() + 6 * GetSize() / 7) { if (m_stop) stop = true; } else { m_stop = false; } target = obj_pos; if (followed_object->IsMoving()) { Point2d anticipation = ADVANCE_ANTICIPATION * followed_object->GetSpeed(); Point2d anticipation_limit = GetSize()/3; //limit anticipation to screen size/3 if (anticipation.x > anticipation_limit.x) anticipation.x = anticipation_limit.x; if (anticipation.y > anticipation_limit.y) anticipation.y = anticipation_limit.y; if (anticipation.x < -anticipation_limit.x) anticipation.x = -anticipation_limit.x; if (anticipation.y < -anticipation_limit.y) anticipation.y = -anticipation_limit.y; target += anticipation; } target -= GetSize()/2; } else { target = GetPosition(); m_stop = true; } //Compute new speed to reach target Point2d acceleration(0,0); acceleration.x = REACTIVITY * (target.x - ANTICIPATION * m_speed.x - position.x) ; acceleration.y = REACTIVITY * (target.y - ANTICIPATION * m_speed.y - position.y) ; // Limit acceleration if (acceleration.x > MAX_CAMERA_ACCELERATION.x) acceleration.x = MAX_CAMERA_ACCELERATION.x; if (acceleration.y > MAX_CAMERA_ACCELERATION.y) acceleration.y = MAX_CAMERA_ACCELERATION.y; if (acceleration.x < -MAX_CAMERA_ACCELERATION.x) acceleration.x = -MAX_CAMERA_ACCELERATION.x; if (acceleration.y < -MAX_CAMERA_ACCELERATION.y) acceleration.y = -MAX_CAMERA_ACCELERATION.y; // std::cout<<"acceleration before : "<<acceleration.x<<" "<<acceleration.y<<std::endl; if (abs((int)m_speed.x) > SPEED_REACTIVITY_CEIL) { acceleration.x *= (1 + SPEED_REACTIVITY * (abs((int)m_speed.x) - SPEED_REACTIVITY_CEIL)); } if (abs((int)m_speed.y) > SPEED_REACTIVITY_CEIL) { acceleration.y *= (1 + SPEED_REACTIVITY * (abs((int)m_speed.y) - SPEED_REACTIVITY_CEIL)); } if (stop) { m_speed = m_speed/2; } else { //Apply acceleration m_speed = m_speed + acceleration; //Realtime follow is enable if object is too fast to be correctly followed if (abs((int)followed_object->GetSpeed().x) > REALTIME_FOLLOW_LIMIT) { m_speed.x = (target.x - position.x) * REALTIME_FOLLOW_FACTOR; } if (abs((int)followed_object->GetSpeed().y) > REALTIME_FOLLOW_LIMIT) { m_speed.y = (target.y - position.y) * REALTIME_FOLLOW_FACTOR; } //Limit if (m_speed.x > MAX_CAMERA_SPEED.x) m_speed.x = MAX_CAMERA_SPEED.x; if (m_speed.y > MAX_CAMERA_SPEED.y) m_speed.y = MAX_CAMERA_SPEED.y; if (m_speed.x < -MAX_CAMERA_SPEED.x) m_speed.x = -MAX_CAMERA_SPEED.x; if (m_speed.y < -MAX_CAMERA_SPEED.y) m_speed.y = -MAX_CAMERA_SPEED.y; } //Update position Point2i next_position(0,0); next_position.x = (int)m_speed.x; next_position.y = (int)m_speed.y; SetXY(next_position); if (!m_stop && next_position.x == 0 && next_position.y == 0 && followed_object->GetSpeed().x == 0 && followed_object->GetSpeed().y == 0) { m_stop = true; } } void Camera::SaveMouseCursor() { Mouse::pointer_t current_pointer = Mouse::GetInstance()->GetPointer(); if (current_pointer != Mouse::POINTER_MOVE && current_pointer != Mouse::POINTER_ARROW_UP && current_pointer != Mouse::POINTER_ARROW_DOWN && current_pointer != Mouse::POINTER_ARROW_LEFT && current_pointer != Mouse::POINTER_ARROW_RIGHT && current_pointer != Mouse::POINTER_ARROW_DOWN_RIGHT && current_pointer != Mouse::POINTER_ARROW_UP_RIGHT && current_pointer != 
Mouse::POINTER_ARROW_UP_LEFT && current_pointer != Mouse::POINTER_ARROW_DOWN_LEFT) { pointer_used_before_scroll = current_pointer; } } void Camera::RestoreMouseCursor() { Mouse::GetInstance()->SetPointer(pointer_used_before_scroll); } void Camera::ScrollCamera() { if (!Mouse::GetInstance()->HasFocus()) // The application has not the focus, don't move the camera!! return; Point2i mousePos = Mouse::GetInstance()->GetPosition(); uint zone_size = Config::GetInstance()->GetScrollBorderSize(); Point2i sensitZone(zone_size, zone_size); /* tstVector represents the vector of how deep the cursor is in a sensit * zone; negative value means that the camera has to reduce its coordinates, * a positive value means that it should increase. Actually reduce means * LEFT/UP (for x/y) and increase RIGHT/DOWN directions. * The bigger tstVector is, the faster the camera will scroll. */ Point2i tstVector; tstVector = GetSize().inf(mousePos + sensitZone) * (mousePos + sensitZone - GetSize()) ; tstVector -= mousePos.inf(sensitZone) * (sensitZone - mousePos); if (!tstVector.IsNull()) { SetXY(tstVector); SetAutoCrop(false); } /* mouse pointer ***********************************************************/ SaveMouseCursor(); if (tstVector.IsNull()) RestoreMouseCursor(); else if (tstVector.IsXNull() && tstVector.y < 0) Mouse::GetInstance()->SetPointer(Mouse::POINTER_ARROW_UP); else if (tstVector.IsXNull() && tstVector.y > 0) Mouse::GetInstance()->SetPointer(Mouse::POINTER_ARROW_DOWN); else if (tstVector.IsYNull() && tstVector.x < 0) Mouse::GetInstance()->SetPointer(Mouse::POINTER_ARROW_LEFT); else if (tstVector.IsYNull() && tstVector.x > 0) Mouse::GetInstance()->SetPointer(Mouse::POINTER_ARROW_RIGHT); else if (tstVector.y > 0 && tstVector.x > 0) Mouse::GetInstance()->SetPointer(Mouse::POINTER_ARROW_DOWN_RIGHT); else if (tstVector.y < 0 && tstVector.x > 0) Mouse::GetInstance()->SetPointer(Mouse::POINTER_ARROW_UP_RIGHT); else if (tstVector.y < 0 && tstVector.x < 0) Mouse::GetInstance()->SetPointer(Mouse::POINTER_ARROW_UP_LEFT); else if (tstVector.y > 0 && tstVector.x < 0) Mouse::GetInstance()->SetPointer(Mouse::POINTER_ARROW_DOWN_LEFT); /***************************************************************************/ } void Camera::HandleMouseMovement() { static Point2i first_mouse_pos(-1, -1); static Point2i last_mouse_pos(0, 0); Point2i curr_pos = Mouse::GetInstance()->GetPosition(); int x,y; //Move camera with mouse holding Ctrl key down or with middle button of mouse if (SDL_GetMouseState(&x, &y) & SDL_BUTTON(SDL_BUTTON_MIDDLE) || SDL_GetModState() & KMOD_CTRL) { // Begin to move the camera... if (Mouse::GetInstance()->GetPointer() != Mouse::POINTER_MOVE) { first_mouse_pos = Point2i(x, y); SaveMouseCursor(); Mouse::GetInstance()->SetPointer(Mouse::POINTER_MOVE); } SetAutoCrop(false); SetXY(last_mouse_pos - curr_pos); last_mouse_pos = curr_pos; if (m_begin_controlled_move_time == 0) { m_begin_controlled_move_time = Time::GetInstance()->Read(); } if (SDL_GetModState() & KMOD_CTRL) { m_control_mode = KEYBOARD_CAMERA_CONTROL; }else{ m_control_mode = MOUSE_CAMERA_CONTROL; } return; } else if (m_control_mode == MOUSE_CAMERA_CONTROL) { // if the mouse has not moved at all since the user pressed the middle button, we center the camera! 
if (abs((int)first_mouse_pos.x - curr_pos.x) < 5 && abs((int)first_mouse_pos.y - curr_pos.y) < 5 && Time::GetInstance()->Read() - m_begin_controlled_move_time < 500) { CenterOnActiveCharacter(); } first_mouse_pos = Point2i(-1, -1); RestoreMouseCursor(); m_control_mode = NO_CAMERA_CONTROL; m_begin_controlled_move_time = 0; } else if (m_control_mode == KEYBOARD_CAMERA_CONTROL) { first_mouse_pos = Point2i(-1, -1); RestoreMouseCursor(); m_control_mode = NO_CAMERA_CONTROL; m_begin_controlled_move_time = 0; } last_mouse_pos = curr_pos; if (Config::GetInstance()->GetScrollOnBorder()) ScrollCamera(); } void Camera::HandleMoveIntentions() { const UDMoveIntention * ud_move_intention = GetLastUDMoveIntention(); if (ud_move_intention) { if (ud_move_intention->GetDirection() == DIRECTION_UP) SetXY(Point2i(0, -SCROLL_KEYBOARD)); else SetXY(Point2i(0, SCROLL_KEYBOARD)); } const LRMoveIntention * lr_move_intention = GetLastLRMoveIntention(); if (lr_move_intention) { if (lr_move_intention->GetDirection() == DIRECTION_RIGHT) SetXY(Point2i(SCROLL_KEYBOARD, 0)); else SetXY(Point2i(-SCROLL_KEYBOARD, 0)); } if (lr_move_intention || ud_move_intention) SetAutoCrop(false); } void Camera::Refresh(){ // Refresh gets called very often when the game is paused. // This "if" ensures that the camera doesn't move to fast. if (refresh_stopwatch.GetValue() >= 1000 / MAX_REFRESHES_PER_SECOND) { // Check if player wants the camera to move HandleMouseMovement(); HandleMoveIntentions(); if (auto_crop && followed_object != NULL) AutoCrop(); refresh_stopwatch.Reset(1.0); } } void Camera::FollowObject(const PhysicalObj *obj, bool follow_closely) { MSG_DEBUG( "camera.tracking", "Following object %s (%d)", obj->GetName().c_str(), follow_closely); Mouse::GetInstance()->Hide(); auto_crop = true; m_stop = !follow_closely; followed_object = obj; } void Camera::StopFollowingObj(const PhysicalObj* obj) { if (followed_object == obj) { followed_object = NULL; m_stop = true; m_speed = Point2d(0,0); } } bool Camera::IsVisible(const PhysicalObj &obj) const { return Intersect( obj.GetRect() ); } void Camera::CenterOnActiveCharacter() { CharacterCursor::GetInstance()->FollowActiveCharacter(); FollowObject(&ActiveCharacter(),true); } Point2i Camera::ComputeShake() const { uint time = Time::GetInstance()->Read(); ASSERT(time >= m_started_shaking); if (time > m_started_shaking + m_shake_duration || m_shake_duration == 0) { return Point2i(0, 0); // not shaking now } if (time == m_last_time_shake_calculated) return m_shake; // FIXME: we can underflow to 0 if time and m_started_shaking are large enough Double t = (Double)(time - m_started_shaking) / (Double)m_shake_duration; Double func_val = 1.0f; if (t >= EPSILON) { const Double k_scale_angle = 10 * PI; Double arg = k_scale_angle * t; // denormalized sinc func_val = (1 - t) * sin(arg) / arg; } Double x_ampl = (Double)RandomLocal().GetDouble( -m_shake_amplitude.x, m_shake_amplitude.x ); Double y_ampl = (Double)RandomLocal().GetDouble( -m_shake_amplitude.y, m_shake_amplitude.y ); m_shake.x = (int)(x_ampl * func_val//( Double )m_shake_amplitude.x * func_val + (Double)m_shake_centerpoint.x); m_shake.y = (int)(y_ampl * func_val//( Double )m_shake_amplitude.y * func_val + (Double)m_shake_centerpoint.y); static uint t_last_time_logged = 0; if (time - t_last_time_logged > 10) { MSG_DEBUG("camera.shake", "Shaking: time = %d, t = %s, func_val = %s, shake: %d, %d", time, Double2str(t).c_str(), Double2str(func_val).c_str(), m_shake.x, m_shake.y); t_last_time_logged = time; } m_last_time_shake_calculated = time; 
  return m_shake;
}

void Camera::Shake(uint how_long_msec, const Point2i & amplitude, const Point2i & centerpoint)
{
  MSG_DEBUG("camera.shake", "Shake added!");

  uint time = Time::GetInstance()->Read();

  ASSERT(time >= m_started_shaking);

  if (m_started_shaking + m_shake_duration > time) {
    // still shaking, so add amplitude/centerpoint to allow shakes to combine
    m_shake_amplitude = max(m_shake_amplitude, amplitude);
    m_shake_centerpoint = centerpoint;

    // increase shake duration so it lasts how_long_msec from this time
    m_shake_duration = how_long_msec + (time - m_started_shaking);
  } else {
    // reinit the shake
    m_started_shaking = time;
    m_shake_duration = how_long_msec;
    m_shake_amplitude = amplitude;
    m_shake_centerpoint = centerpoint;
  }
}

void Camera::ResetShake()
{
  m_started_shaking = 0;
  m_shake_duration = 0;
  m_last_time_shake_calculated = 0;
  m_shake = Point2i(0, 0);
}
yeKcim/warmux
old/wormux-0.9.2/src/map/camera.cpp
C++
gpl-2.0
15,798
31.175153
104
0.649576
false
/* font-face */
@font-face {
  font-family: "GeneralEnclosedFoundicons";
  src: url("../fonts/general_enclosed_foundicons.eot");
  src: url("../fonts/general_enclosed_foundicons.eot?#iefix") format("embedded-opentype"),
       url("../fonts/general_enclosed_foundicons.woff") format("woff"),
       url("../fonts/general_enclosed_foundicons.ttf") format("truetype"),
       url("../fonts/general_enclosed_foundicons.svg#GeneralEnclosedFoundicons") format("svg");
  font-weight: normal;
  font-style: normal;
}

/* global foundicon styles */
[class*="general foundicon-"] {
  display: inline;
  width: auto;
  height: auto;
  line-height: inherit;
  vertical-align: baseline;
  background-image: none;
  background-position: 0 0;
  background-repeat: repeat;
}
[class*="general foundicon-"]:before {
  font-family: "GeneralEnclosedFoundicons";
  font-weight: normal;
  font-style: normal;
  text-decoration: inherit;
}

/* icons */
.foundicon-settings:before { content: "\f000"; }
.foundicon-heart:before { content: "\f001"; }
.foundicon-star:before { content: "\f002"; }
.foundicon-plus:before { content: "\f003"; }
.foundicon-minus:before { content: "\f004"; }
.foundicon-checkmark:before { content: "\f005"; }
.foundicon-remove:before { content: "\f006"; }
.foundicon-mail:before { content: "\f007"; }
.foundicon-calendar:before { content: "\f008"; }
.foundicon-page:before { content: "\f009"; }
.foundicon-tools:before { content: "\f00a"; }
.foundicon-globe:before { content: "\f00b"; }
.foundicon-home:before { content: "\f00c"; }
.foundicon-quote:before { content: "\f00d"; }
.foundicon-people:before { content: "\f00e"; }
.foundicon-monitor:before { content: "\f00f"; }
.foundicon-laptop:before { content: "\f010"; }
.foundicon-phone:before { content: "\f011"; }
.foundicon-cloud:before { content: "\f012"; }
.foundicon-error:before { content: "\f013"; }
.foundicon-right-arrow:before { content: "\f014"; }
.foundicon-left-arrow:before { content: "\f015"; }
.foundicon-up-arrow:before { content: "\f016"; }
.foundicon-down-arrow:before { content: "\f017"; }
.foundicon-trash:before { content: "\f018"; }
.foundicon-add-doc:before { content: "\f019"; }
.foundicon-edit:before { content: "\f01a"; }
.foundicon-lock:before { content: "\f01b"; }
.foundicon-unlock:before { content: "\f01c"; }
.foundicon-refresh:before { content: "\f01d"; }
.foundicon-paper-clip:before { content: "\f01e"; }
.foundicon-video:before { content: "\f01f"; }
.foundicon-photo:before { content: "\f020"; }
.foundicon-graph:before { content: "\f021"; }
.foundicon-idea:before { content: "\f022"; }
.foundicon-mic:before { content: "\f023"; }
.foundicon-cart:before { content: "\f024"; }
.foundicon-address-book:before { content: "\f025"; }
.foundicon-compass:before { content: "\f026"; }
.foundicon-flag:before { content: "\f027"; }
.foundicon-location:before { content: "\f028"; }
.foundicon-clock:before { content: "\f029"; }
.foundicon-folder:before { content: "\f02a"; }
.foundicon-inbox:before { content: "\f02b"; }
.foundicon-website:before { content: "\f02c"; }
.foundicon-smiley:before { content: "\f02d"; }
.foundicon-search:before { content: "\f02e"; }
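/* Editor's note (added, illustrative only): these rules target any element
   whose class attribute contains "general foundicon-", so typical markup --
   hypothetical, not shipped with this stylesheet -- would look like:

     <i class="general foundicon-settings"></i>
     <a href="#" class="general foundicon-mail"> Contact</a>

   The :before pseudo-element then injects the glyph (e.g. "\f007" for mail)
   from the GeneralEnclosedFoundicons font declared above. */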
geopcgeo/1wp
wp-content/plugins/buddypress-social/css/general_enclosed_foundicons.css
CSS
gpl-2.0
3,262
14.101852
312
0.657572
false
/* *****************************************************************
 * ILP9 - Implantation d'un langage de programmation.
 * by Christian.Queinnec@paracamplus.com
 * See http://mooc.paracamplus.com/ilp9
 * GPL version 3
 ***************************************************************** */
package com.paracamplus.ilp9.interpreter.interfaces;

import com.paracamplus.ilp9.interpreter.Interpreter;

public interface IPrimitive extends Invocable {

    String getName();

    default Object erroneousApply() throws EvaluationException {
        String msg = "Incorrect arity " + this.getName();
        throw new EvaluationException(msg);
    }

    default Object apply() throws EvaluationException {
        return erroneousApply();
    }

    default Object apply(Object arg1) throws EvaluationException {
        return erroneousApply();
    }

    default Object apply(Object arg1, Object arg2) throws EvaluationException {
        return erroneousApply();
    }

    default Object apply(Object arg1, Object arg2, Object arg3)
            throws EvaluationException {
        return erroneousApply();
    }

    default Object apply(Interpreter interpreter, Object[] argument)
            throws EvaluationException {
        if ( argument.length == getArity() ) {
            switch (getArity()) {
            case 0: {
                return apply();
            }
            case 1: {
                return apply(argument[0]);
            }
            case 2: {
                return apply(argument[0], argument[1]);
            }
            case 3: {
                return apply(argument[0], argument[1], argument[2]);
            }
            default: {
                String msg = "Unhandled primitive arity " + this.getName();
                throw new EvaluationException(msg);
            }
            }
        } else {
            String msg = "Wrong arity for operator " + this.getName();
            throw new EvaluationException(msg);
        }
    }
}
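// Editor's note (added, hedged sketch): a concrete primitive only needs a
// name, an arity, and the matching apply overload; everything else falls
// through to the defaults above. The class name below is hypothetical, and
// this assumes Invocable declares the getArity() used by the dispatching
// default method:
//
//   public class Negation implements IPrimitive {
//       public String getName() { return "not"; }
//       public int getArity() { return 1; }
//       @Override
//       public Object apply(Object arg1) throws EvaluationException {
//           return Boolean.FALSE.equals(arg1);
//       }
//   }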
moocnobody/ilp9moocnobody
Java/src/com/paracamplus/ilp9/interpreter/interfaces/IPrimitive.java
Java
gpl-2.0
1,986
33.241379
79
0.5428
false
// { dg-do compile { target c++11 } }
// { dg-timeout-factor 2 }

// 2007-03-12  Stephen M. Webb  <stephen.webb@bregmasoft.com>
//
// Copyright (C) 2010-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3.  If not see
// <http://www.gnu.org/licenses/>.

// [28.8.2] class template basic_regex constructor

#include <regex>

// Tests range constructor of the basic_regex class.
void test01()
{
  typedef std::basic_regex<wchar_t> test_type;

  wchar_t s[] = L"a+b|c";
  test_type re(s, s + 5);
}

int main()
{
  test01();
  return 0;
}
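// Editor's note (added, hedged): basic_regex(first, last) compiles the pattern
// from the iterator range [first, last), so re(s, s + 5) is equivalent to
// constructing from std::wstring(L"a+b|c"); the explicit length of 5 keeps the
// array's terminating L'\0' out of the pattern.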
Gurgel100/gcc
libstdc++-v3/testsuite/28_regex/basic_regex/ctors/wchar_t/range.cc
C++
gpl-2.0
1,179
27.756098
74
0.701442
false
// { dg-do compile }

// 2005-02-17  Matt Austern  <austern@apple.com>
//
// Copyright (C) 2004-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3.  If not see
// <http://www.gnu.org/licenses/>.

// 6.3.4.4 unordered_map

#include <string>
#include <tr1/unordered_map>

using namespace std::tr1;
using std::string;
using std::allocator;
using std::pair;
using std::equal_to;

template class std::tr1::unordered_map<string, float>;
template class std::tr1::unordered_map<string, int,
                                       hash<string>, equal_to<string>,
                                       allocator<pair<const string, int> > >;
template class std::tr1::unordered_map<string, float,
                                       hash<string>, equal_to<string>,
                                       allocator<char> >;
template class std::tr1::__unordered_map<string, int,
                                         hash<string>, equal_to<string>,
                                         allocator<pair<const string, int> >,
                                         true>;
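// Editor's note (added, hedged): each "template class" declaration above is an
// explicit instantiation definition. It forces *every* member of that
// specialization to be compiled -- unlike ordinary on-demand implicit
// instantiation -- which is what lets this test catch ill-formed members
// without ever running any code.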
Gurgel100/gcc
libstdc++-v3/testsuite/tr1/6_containers/unordered_map/requirements/explicit_instantiation.cc
C++
gpl-2.0
1,487
34.404762
74
0.70343
false
/***************************************************************************
                          url.h  -  description
                             -------------------
    begin                : Wed Aug 7 2002
    copyright            : (C) 2002 by chris
    email                : chris@tux.teyssier.org
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/

#ifndef _URL_H_
#define _URL_H_

#include <string>
#include <map>
#include "celestiacore.h"
#include "celengine/astro.h"

#include <Eigen/Core>
#include <Eigen/Geometry>

class CelestiaCore;
class CelestiaState;

class Url
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    enum UrlType
    {
        Absolute = 0,
        Relative = 1,
        Settings = 2,
    };

    /*! The TimeSource specifies what the time will be set to when the user
     *  activates the URL.
     *  - UseUrlTime indicates that the simulation time should be set to whatever
     *    value was stored in the URL.
     *  - UseSimulationTime means that the simulation time at activation is not
     *    changed.
     *  - UseSystemTime means that the simulation time will be set to whatever the
     *    current system time is when the URL is activated.
     */
    enum TimeSource
    {
        UseUrlTime        = 0,
        UseSimulationTime = 1,
        UseSystemTime     = 2,
        TimeSourceCount   = 3,
    };

    Url();

    // parses str
    Url(const std::string& str, CelestiaCore *core);

    // current url of appCore
    Url(CelestiaCore* appCore, UrlType type = Absolute);

    Url(const CelestiaState& appState,
        unsigned int version = CurrentVersion,
        TimeSource _timeSource = UseUrlTime);

    ~Url();

    std::string getAsString() const;
    std::string getName() const;
    void goTo();

    static const unsigned int CurrentVersion;

    static std::string decodeString(const std::string& str);
    static std::string encodeString(const std::string& str);

private:
    void initVersion2(std::map<std::string, std::string>& params, const std::string& timeString);
    void initVersion3(std::map<std::string, std::string>& params, const std::string& timeString);

private:
    std::string urlStr;
    std::string name;
    std::string modeStr;
    std::string body1;
    std::string body2;
    std::string selectedStr;
    std::string trackedStr;

    CelestiaCore *appCore;

    ObserverFrame ref;
    Selection selected;
    Selection tracked;

    ObserverFrame::CoordinateSystem mode;
    int nbBodies;
    float fieldOfView;
    float timeScale;
    int renderFlags;
    int labelMode;
    bool lightTimeDelay;
    bool pauseState;

    std::map<std::string, std::string> parseUrlParams(const std::string& url) const;
    std::string getCoordSysName(ObserverFrame::CoordinateSystem mode) const;
    std::string getBodyShortName(const std::string& body) const;
    std::string getEncodedObjectName(const Selection& selection);

    bool fromString;
    UrlType type;
    TimeSource timeSource;
    unsigned int version;

    void evalName();

    // Variables specific to Global Urls
    UniversalCoord coord;
    astro::Date date;
    Eigen::Quaternionf orientation;

    // Variables specific to Relative Urls
    double distance, longitude, latitude;
};

/*! The CelestiaState class holds the current observer position, orientation,
 *  frame, time, and render settings. It is designed to be serialized as a cel
 *  URL, thus strings are stored for bodies instead of Selections.
 *
 *  Some information is *not* stored in cel URLs, including the current
 *  lists of reference marks and markers. Such lists can be arbitrarily long,
 *  and thus not practical to store in a URL.
 */
class CelestiaState
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    CelestiaState();

    bool loadState(std::map<std::string, std::string>& params);
    void saveState(std::map<std::string, std::string>& params);
    void captureState(CelestiaCore* appCore);

    // Observer frame, position, and orientation. For multiview, there needs
    // be one instance of these parameters per view saved.
    ObserverFrame::CoordinateSystem coordSys;
    string refBodyName;
    string targetBodyName;
    string trackedBodyName;
    UniversalCoord observerPosition;
    Eigen::Quaternionf observerOrientation;
    float fieldOfView;

    // Time parameters
    double tdb;
    float timeScale;
    bool pauseState;
    bool lightTimeDelay;

    string selectedBodyName;

    int labelMode;
    int renderFlags;
};

#endif
jherico/celestia
src/celestia/url.h
C
gpl-2.0
5,176
29.269006
97
0.596793
false
/****************************************************************************
**
** Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
** Contact: Qt Software Information (qt-info@nokia.com)
**
** This file is part of the Qt3Support module of the Qt Toolkit.
**
** Commercial Usage
** Licensees holding valid Qt Commercial licenses may use this file in
** accordance with the Qt Commercial License Agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Nokia.
**
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License versions 2.0 or 3.0 as published by the Free
** Software Foundation and appearing in the file LICENSE.GPL included in
** the packaging of this file.  Please review the following information
** to ensure GNU General Public Licensing requirements will be met:
** http://www.fsf.org/licensing/licenses/info/GPLv2.html and
** http://www.gnu.org/copyleft/gpl.html.  In addition, as a special
** exception, Nokia gives you certain additional rights. These rights
** are described in the Nokia Qt GPL Exception version 1.3, included in
** the file GPL_EXCEPTION.txt in this package.
**
** Qt for Windows(R) Licensees
** As a special exception, Nokia, as the sole copyright holder for Qt
** Designer, grants users of the Qt/Eclipse Integration plug-in the
** right for the Qt/Eclipse Integration to link to functionality
** provided by Qt Designer and its related libraries.
**
** If you are unsure which license is appropriate for your use, please
** contact the sales department at qt-sales@nokia.com.
**
****************************************************************************/

#ifndef Q3PTRSTACK_H
#define Q3PTRSTACK_H

#include <Qt3Support/q3glist.h>

QT_BEGIN_HEADER

QT_BEGIN_NAMESPACE

QT_MODULE(Qt3SupportLight)

template<class type>
class Q3PtrStack : protected Q3GList
{
public:
    Q3PtrStack() { }
    Q3PtrStack( const Q3PtrStack<type> &s ) : Q3GList( s ) { }
    ~Q3PtrStack() { clear(); }
    Q3PtrStack<type> &operator=(const Q3PtrStack<type> &s)
        { return (Q3PtrStack<type>&)Q3GList::operator=(s); }
    bool  autoDelete() const { return Q3PtrCollection::autoDelete(); }
    void  setAutoDelete( bool del ) { Q3PtrCollection::setAutoDelete(del); }
    uint  count()   const { return Q3GList::count(); }
    bool  isEmpty() const { return Q3GList::count() == 0; }
    void  push( const type *d ) { Q3GList::insertAt(0,Item(d)); }
    type *pop() { return (type *)Q3GList::takeFirst(); }
    bool  remove() { return Q3GList::removeFirst(); }
    void  clear() { Q3GList::clear(); }
    type *top() const { return (type *)Q3GList::cfirst(); }
          operator type *() const { return (type *)Q3GList::cfirst(); }
    type *current() const { return (type *)Q3GList::cfirst(); }

#ifdef qdoc
protected:
    virtual QDataStream& read( QDataStream&, Q3PtrCollection::Item& );
    virtual QDataStream& write( QDataStream&, Q3PtrCollection::Item ) const;
#endif

private:
    void deleteItem( Item d );
};

#if !defined(Q_BROKEN_TEMPLATE_SPECIALIZATION)
template<> inline void Q3PtrStack<void>::deleteItem( Q3PtrCollection::Item )
{
}
#endif

template<class type> inline void Q3PtrStack<type>::deleteItem( Q3PtrCollection::Item d )
{
    if ( del_item ) delete (type *)d;
}

QT_END_NAMESPACE

QT_END_HEADER

#endif // Q3PTRSTACK_H
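// Editor's note (added, hedged usage sketch, based only on the API above):
//
//   Q3PtrStack<QString> stack;
//   stack.setAutoDelete(true);          // the stack owns the pointers it holds
//   stack.push(new QString("bottom"));
//   stack.push(new QString("top"));
//   QString *s = stack.pop();           // pop() hands ownership back; no auto-delete
//   delete s;
//   stack.clear();                      // deletes "bottom" via deleteItem()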
rk34cj/qt-extend-4.4.3
qtopiacore/qt/src/qt3support/tools/q3ptrstack.h
C
gpl-2.0
3,452
35.336842
88
0.678737
false
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	unsigned int id_sequence;		/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&instances_lock);
	if (instance_lookup(queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);
	INIT_RCU_HEAD(&inst->rcu);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

	spin_unlock(&instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&instances_lock);
	return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
	spin_lock(&instances_lock);
	__instance_destroy(inst);
	spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue->queue_total--;
	}

	spin_unlock_bh(&queue->lock);

	return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;

	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	outdev = entry->outdev;

	spin_lock_bh(&queue->lock);

	switch ((enum nfqnl_config_mode)queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		data_len = 0;
		break;

	case NFQNL_COPY_PACKET:
		if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
		     entskb->ip_summed == CHECKSUM_COMPLETE) &&
		    skb_checksum_help(entskb)) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0 || queue->copy_range > entskb->len)
			data_len = entskb->len;
		else
			data_len = queue->copy_range;

		size += nla_total_size(data_len);
		break;
	}

	entry->id = queue->id_sequence++;

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= entskb->protocol;
	pmsg.hook		= entry->hook;

	NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	indev = entry->indev;
	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
				     htonl(indev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (entskb->nf_bridge && entskb->nf_bridge->physindev)
				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
					     htonl(entskb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
				     htonl(outdev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					     htonl(entskb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (entskb->mark)
		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

	if (indev && entskb->dev) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);
		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nlattr *nla;
		int sz = nla_attr_size(data_len);

		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = sz;

		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nla_put_failure:
	if (skb)
		kfree_skb(skb);
	if (net_ratelimit())
		printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;
	int err;

	/* rcu_read_lock()ed by nf_hook_slow() */
	queue = instance_lookup(queuenum);
	if (!queue)
		goto err_out;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		goto err_out;

	nskb = nfqnl_build_packet_message(queue, entry);
	if (nskb == NULL)
		goto err_out;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (err < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
err_out:
	return -1;
}

static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
	struct sk_buff *nskb;
	int diff;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nlattr which has 16bit nla_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry_rcu(inst, tmp, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev_net(dev) != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		spin_lock(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		spin_unlock(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
};

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	int err;

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (!queue) {
		err = -ENODEV;
		goto err_out_unlock;
	}

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_unlock;
	}

	if (!nfqa[NFQA_VERDICT_HDR]) {
		err = -EINVAL;
		goto err_out_unlock;
	}

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_unlock;
	}

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_unlock;
	}
	rcu_read_unlock();

	if (nfqa[NFQA_PAYLOAD]) {
		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	return 0;

err_out_unlock:
	rcu_read_unlock();
	return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Commands without queue context - might sleep */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND:
			return nf_register_queue_handler(ntohs(cmd->pf),
							 &nfqh);
		case NFQNL_CFG_CMD_PF_UNBIND:
			return nf_unregister_queue_handler(ntohs(cmd->pf),
							   &nfqh);
		}
	}

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(instances_lock)
{
	spin_lock(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(instances_lock)
{
	spin_unlock(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence, 1);
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
				sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_queue", 0440,
			 proc_net_netfilter, &nfqnl_file_ops))
		goto cleanup_subsys;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);
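/*
 * Editor's note (added, hedged): the userspace side of this protocol is
 * normally driven through libnetfilter_queue rather than raw nfnetlink
 * messages. A minimal sketch of the counterpart flow (names from
 * libnetfilter_queue; error handling omitted) would be roughly:
 *
 *	struct nfq_handle *h = nfq_open();
 *	struct nfq_q_handle *qh = nfq_create_queue(h, 0, &cb, NULL);
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *	// read from nfq_fd(h), dispatch with nfq_handle_packet(), and answer
 *	// each packet in cb via nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 *
 * which exercises the NFQNL_MSG_CONFIG and NFQNL_MSG_VERDICT paths handled
 * above by nfqnl_recv_config() and nfqnl_recv_verdict().
 */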
janrinze/loox7xxport
net/netfilter/nfnetlink_queue.c
C
gpl-2.0
22,299
22.621822
80
0.66411
false
/*
 * Copyright Red Hat Inc. and/or its affiliates and other contributors
 * as indicated by the authors tag. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 *
 * This particular file is subject to the "Classpath" exception as provided in the
 * LICENSE file that accompanied this code.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT A
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
 * PARTICULAR PURPOSE. See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License,
 * along with this distribution; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */
package com.redhat.ceylon.compiler.java.test.interop;

public interface JavaOptionalInterface {

    JavaOptionalInterface method(JavaOptionalInterface x);

    JavaOptionalInterface method2(JavaOptionalInterface x);

    JavaOptionalInterface getProp1();
    void setProp1(JavaOptionalInterface x);

    JavaOptionalInterface getProp2();
    void setProp2(JavaOptionalInterface x);

    JavaOptionalInterface getProp3();

    JavaOptionalInterface getProp4();

    ceylon.language.Correspondence<? super Object, ? extends Object> getCorrespondence();
}
jvasileff/ceylon-compiler
test/src/com/redhat/ceylon/compiler/java/test/interop/JavaOptionalInterface.java
Java
gpl-2.0
1,491
40.416667
89
0.761234
false
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
    <head>
        <title>Asterisk Project : Application_IAX2Provision</title>
        <link rel="stylesheet" href="styles/site.css" type="text/css" />
        <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
    </head>

    <body>
        <table class="pagecontent" border="0" cellpadding="0" cellspacing="0" width="100%" bgcolor="#ffffff">
            <tr>
                <td valign="top" class="pagebody">
                    <div class="pageheader">
                        <span class="pagetitle">
                            Asterisk Project : Application_IAX2Provision
                        </span>
                    </div>
                    <div class="pagesubheading">
                        This page last changed on Mar 30, 2011 by <font color="#0050B2">wikibot</font>.
                    </div>
                    <h1><a name="Application_IAX2Provision-IAX2Provision%28%29"></a>IAX2Provision()</h1>

                    <h3><a name="Application_IAX2Provision-Synopsis"></a>Synopsis</h3>
                    <p>Provision a calling IAXy with a given template.</p>

                    <h3><a name="Application_IAX2Provision-Description"></a>Description</h3>
                    <p>Provisions the calling IAXy (assuming the calling entity is in fact an IAXy) with the given <em>template</em>. Returns <tt>-1</tt> on error or <tt>0</tt> on success.</p>

                    <h3><a name="Application_IAX2Provision-Syntax"></a>Syntax</h3>
                    <div class="preformatted panel" style="border-width: 1px;"><div class="preformattedContent panelContent">
                        <pre>IAX2Provision([template])</pre>
                    </div></div>

                    <h5><a name="Application_IAX2Provision-Arguments"></a>Arguments</h5>
                    <ul>
                        <li><tt>template</tt> - If not specified, defaults to <tt>default</tt>.</li>
                    </ul>

                    <h3><a name="Application_IAX2Provision-SeeAlso"></a>See Also</h3>

                    <h3><a name="Application_IAX2Provision-ImportVersion"></a>Import Version</h3>
                    <p>This documentation was imported from Asterisk version SVN-branch-1.8-r311874.</p>
                </td>
            </tr>
        </table>
        <table border="0" cellpadding="0" cellspacing="0" width="100%">
            <tr>
                <td height="12" background="https://wiki.asterisk.org/wiki/images/border/border_bottom.gif"><img src="images/border/spacer.gif" width="1" height="1" border="0"/></td>
            </tr>
            <tr>
                <td align="center"><font color="grey">Document generated by Confluence on Oct 04, 2011 12:42</font></td>
            </tr>
        </table>
    </body>
</html>
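<!-- Editor's note (added, illustrative only; not part of the imported Asterisk
     documentation): a minimal dialplan sketch for a context that IAXy devices
     call into might look like

         exten => s,1,IAX2Provision(default)

     which provisions the calling IAXy with the "default" template from the
     provisioning configuration (iaxprov.conf, if memory serves). -->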
yuezhou/asterisk1.8.21
doc/Asterisk-Admin-Guide/Application_IAX2Provision.html
HTML
gpl-2.0
2,454
37.952381
172
0.628769
false
jQuery.noConflict();

if(typeof(BTLJ)=='undefined') var BTLJ = jQuery;
if(typeof(btTimeOut)=='undefined') var btTimeOut;
if(typeof(requireRemove)=='undefined') var requireRemove = true;

/*
var mobile = function(){
	return {
		detect:function(){
			var uagent = navigator.userAgent.toLowerCase();
			var list = this.mobiles;
			var ismobile = false;
			for(var d=0;d<list.length;d+=1){
				if(uagent.indexOf(list[d])!=-1){
					ismobile = true;
				}
			}
			return ismobile;
		},
		mobiles:[
			"midp","240x320","blackberry","netfront","nokia","panasonic",
			"portalmmm","sharp","sie-","sonyericsson","symbian",
			"windows ce","benq","mda","mot-","opera mini",
			"philips","pocket pc","sagem","samsung","sda",
			"sgh-","vodafone","xda","palm","iphone",
			"ipod","android"
		]
	};
}();
*/

//var autoPos = mobile.detect() != false;
var autoPos = true;
//var mobilePopupPos = {top:0,right:0}; // Position of popup

BTLJ(document).ready(function() {

	BTLJ('#btl-content').appendTo('body');
	BTLJ(".btl-input #jform_profile_aboutme").attr("cols",21);

	BTLJ('.bt-scroll .btl-buttonsubmit').click(function(){
		setTimeout(function(){
			if(BTLJ("#btl-registration-error").is(':visible')){
				BTLJ('.bt-scroll').data('jsp').scrollToY(0,true);
			}else{
				var position = BTLJ('.bt-scroll').find('.invalid:first').position();
				if(position) BTLJ('.bt-scroll').data('jsp').scrollToY(position.top-15,true);
			}
		},20);
	})

	//SET POSITION
	if(BTLJ('.btl-dropdown').length){
		setFPosition();
		BTLJ(window).resize(function(){
			setFPosition();
		})
	}

	BTLJ(btlOpt.LOGIN_TAGS).addClass("btl-modal");
	if(btlOpt.REGISTER_TAGS != ''){
		BTLJ(btlOpt.REGISTER_TAGS).addClass("btl-modal");
	}

	// Login event
	var elements = '#btl-panel-login';
	if (btlOpt.LOGIN_TAGS) elements += ', ' + btlOpt.LOGIN_TAGS;
	if (btlOpt.MOUSE_EVENT =='click'){
		BTLJ(elements).click(function (event) {
			showLoginForm();
			event.preventDefault();
		});
	}else{
		BTLJ(elements).hover(function () {
			showLoginForm();
		},function(){});
	}

	// Registration/Profile event
	elements = '#btl-panel-registration';
	if (btlOpt.REGISTER_TAGS) elements += ', ' + btlOpt.REGISTER_TAGS;
	if (btlOpt.MOUSE_EVENT =='click'){
		BTLJ(elements).click(function (event) {
			showRegistrationForm();
			event.preventDefault();
		});
		BTLJ("#btl-panel-profile").click(function(event){
			showProfile();
			event.preventDefault();
		});
	}else{
		BTLJ(elements).hover(function () {
			if(!BTLJ("#btl-integrated").length){
				showRegistrationForm();
			}
		},function(){});
		BTLJ("#btl-panel-profile").hover(function () {
			showProfile();
		},function(){});
	}

	BTLJ('#register-link a').click(function (event) {
		if(BTLJ('.btl-modal').length){
			BTLJ.modal.close();setTimeout("showRegistrationForm();",1000);
		}
		else{
			showRegistrationForm();
		}
		event.preventDefault();
	});

	// Close form
	BTLJ(document).click(function(event){
		if(requireRemove && event.which == 1)
			btTimeOut = setTimeout('BTLJ("#btl-content > div").slideUp();BTLJ(".btl-panel span").removeClass("active");',10);
		requireRemove =true;
	})
	BTLJ(".btl-content-block").click(function(){requireRemove =false;});
	BTLJ(".btl-panel span").click(function(){requireRemove =false;});

	// Modify iframe
	BTLJ('#btl-iframe').load(function (){
		//edit action form
		oldAction=BTLJ('#btl-iframe').contents().find('form').attr("action");
		if(oldAction!=null){
			if(oldAction.search("tmpl=component")==-1){
				if(BTLJ('#btl-iframe').contents().find('form').attr("action").indexOf('?')!=-1){
					BTLJ('#btl-iframe').contents().find('form').attr("action",oldAction+"&tmpl=component");
				}
				else{
					BTLJ('#btl-iframe').contents().find('form').attr("action",oldAction+"?tmpl=component");
				}
			}
		}
	});
});

function setFPosition(){
	if(btlOpt.ALIGN == "center"){
		BTLJ("#btl-content > div").each(function(){
			var panelid = "#"+this.id.replace("content","panel");
			var left = BTLJ(panelid).offset().left + BTLJ(panelid).width()/2 - BTLJ(this).width()/2;
			if(left < 0) left = 0;
			BTLJ(this).css('left',left);
		});
	}else{
		if(btlOpt.ALIGN == "right"){
			BTLJ("#btl-content > div").css('right',BTLJ(document).width()-BTLJ('.btl-panel').offset().left-BTLJ('.btl-panel').width());
		}else{
			BTLJ("#btl-content > div").css('left',BTLJ('.btl-panel').offset().left);
		}
	}
	BTLJ("#btl-content > div").css('top',BTLJ(".btl-panel").offset().top+BTLJ(".btl-panel").height()+2);
}

// SHOW LOGIN FORM
function showLoginForm(){
	BTLJ('.btl-panel span').removeClass("active");
	var el = '#btl-panel-login';
	BTLJ.modal.close();

	var containerWidth = 0;
	var containerHeight = 0;
	containerHeight = 371;
	containerWidth = 357;

	if(BTLJ(el).hasClass("btl-modal")){
		BTLJ(el).addClass("active");
		BTLJ("#btl-content > div").slideUp();
		BTLJ("#btl-content-login").modal({
			overlayClose:true,
			persist :true,
			fixed: BTLJ(window).width()>500,
			autoPosition:autoPos,
			onOpen: function (dialog) {
				//if(!autoPos){
				//	dialog.container.css(mobilePopupPos);
				//}
				dialog.overlay.fadeIn();
				dialog.container.show();
				dialog.data.show();
			},
			onClose: function (dialog) {
				dialog.overlay.fadeOut(function () {
					dialog.container.hide();
					dialog.data.hide();
					BTLJ.modal.close();
					BTLJ('.btl-panel span').removeClass("active");
				});
			},
			containerCss:{
				height:containerHeight,
				width:containerWidth
			}
		})
	} else {
		BTLJ("#btl-content > div").each(function(){
			if(this.id=="btl-content-login") {
				if(BTLJ(this).is(":hidden")){
					BTLJ(el).addClass("active");
					BTLJ(this).slideDown();
				}
				else{
					BTLJ(this).slideUp();
					BTLJ(el).removeClass("active");
				}
			}
			else{
				if(BTLJ(this).is(":visible")){
					BTLJ(this).slideUp();
					BTLJ('#btl-panel-registration').removeClass("active");
				}
			}
		})
	}
}

// SHOW REGISTRATION FORM
function showRegistrationForm(){
	if(BTLJ("#btl-integrated").length){
		window.location.href=BTLJ("#btl-integrated").val();
		return;
	}
	BTLJ('.btl-panel span').removeClass("active");
	BTLJ.modal.close();
	var el = '#btl-panel-registration';

	var containerWidth = 0;
	var containerHeight = 0;
	containerHeight = "auto";
	containerWidth = "auto";

	if(BTLJ(el).hasClass("btl-modal")){
		BTLJ(el).addClass("active");
		BTLJ("#btl-content > div").slideUp();
		BTLJ("#btl-content-registration").modal({
			overlayClose:true,
			persist :true,
			autoPosition:autoPos,
			fixed: BTLJ(window).width()>500,
			onOpen: function (dialog) {
				//if(!autoPos){
				//	dialog.container.css(mobilePopupPos);
				//}
				dialog.overlay.fadeIn();
				dialog.container.show();
				dialog.data.show();
			},
			onClose: function (dialog) {
				dialog.overlay.fadeOut(function () {
					dialog.container.hide();
					dialog.data.hide();
					BTLJ.modal.close();
					BTLJ('.btl-panel span').removeClass("active");
				});
			},
			containerCss:{
				height:containerHeight,
				width:containerWidth
			}
		})
	} else {
		BTLJ("#btl-content > div").each(function(){
			if(this.id=="btl-content-registration") {
				if(BTLJ(this).is(":hidden")){
					BTLJ(el).addClass("active");
					BTLJ(this).slideDown();
				}
				else{
					BTLJ(this).slideUp();
					BTLJ(el).removeClass("active");
				}
			}
			else{
				if(BTLJ(this).is(":visible")){
					BTLJ(this).slideUp();
					BTLJ('#btl-panel-login').removeClass("active");
				}
			}
		})
	}
}

// SHOW PROFILE (LOGGED MODULES)
function showProfile(){
	var el = '#btl-panel-profile';
	BTLJ("#btl-content > div").each(function(){
		if(this.id=="btl-content-profile") {
			if(BTLJ(this).is(":hidden")){
				BTLJ(el).addClass("active");
				BTLJ(this).slideDown();
			}
			else{
				BTLJ(this).slideUp();
				BTLJ('.btl-panel span').removeClass("active");
			}
		}
		else{
			if(BTLJ(this).is(":visible")){
				BTLJ(this).slideUp();
				BTLJ('.btl-panel span').removeClass("active");
			}
		}
	})
}

// AJAX REGISTRATION
function registerAjax(){
	BTLJ("#btl-registration-error").hide();
	BTLJ(".btl-error-detail").hide();

	if(BTLJ("#btl-input-name").val()==""){
		BTLJ("#btl-registration-error").html(Joomla.JText._('REQUIRED_NAME')).show();
		BTLJ("#btl-input-name").focus();
		return false;
	}
	if(BTLJ("#btl-input-username1").val()==""){
		BTLJ("#btl-registration-error").html(Joomla.JText._('REQUIRED_USERNAME')).show();
		BTLJ("#btl-input-username1").focus();
		return false;
	}
	if(BTLJ("#btl-input-password1").val()==""){
		BTLJ("#btl-registration-error").html(Joomla.JText._('REQUIRED_PASSWORD')).show();
		BTLJ("#btl-input-password1").focus();
		return false;
	}
	if(BTLJ("#btl-input-password2").val()==""){
		BTLJ("#btl-registration-error").html(Joomla.JText._('REQUIRED_VERIFY_PASSWORD')).show();
		BTLJ("#btl-input-password2").focus();
		return false;
	}
	if(BTLJ("#btl-input-password2").val()!=BTLJ("#btl-input-password1").val()){
		BTLJ("#btl-registration-error").html(Joomla.JText._('PASSWORD_NOT_MATCH')).show();
		BTLJ("#btl-input-password2").focus().select();
		BTLJ("#btl-registration-error").show();
		return false;
	}
	if(BTLJ("#btl-input-email1").val()==""){
		BTLJ("#btl-registration-error").html(Joomla.JText._('REQUIRED_EMAIL')).show();
		BTLJ("#btl-input-email1").focus();
		return false;
	}
	var emailRegExp = /^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*(\.([a-zA-Z]){2,4})$/;
	if(!emailRegExp.test(BTLJ("#btl-input-email1").val())){
		BTLJ("#btl-registration-error").html(Joomla.JText._('EMAIL_INVALID')).show();
		BTLJ("#btl-input-email1").focus().select();
		return false;
	}
	if(BTLJ("#btl-input-email2").val()==""){
		BTLJ("#btl-registration-error").html(Joomla.JText._('REQUIRED_VERIFY_EMAIL')).show();
		BTLJ("#btl-input-email2").focus().select();
		return false;
	}
	if(BTLJ("#btl-input-email1").val()!=BTLJ("#btl-input-email2").val()){
		BTLJ("#btl-registration-error").html(Joomla.JText._('EMAIL_NOT_MATCH')).show();
		BTLJ("#btl-input-email2").focus().select();
		return false;
	}
	if(BTLJ('#recaptcha_response_field').length && BTLJ('#recaptcha_response_field').val()==''){
		BTLJ('#recaptcha_response_field').focus();
		return false;
	}

	var token = BTLJ('.btl-buttonsubmit input:last').attr("name");
	var value_token = encodeURIComponent(BTLJ('.btl-buttonsubmit input:last').val());
	var datasubmit= "bttask=register&name="+encodeURIComponent(BTLJ("#btl-input-name").val())
		+"&username="+encodeURIComponent(BTLJ("#btl-input-username1").val())
		+"&passwd1=" + encodeURIComponent(BTLJ("#btl-input-password1").val())
		+"&passwd2=" + encodeURIComponent(BTLJ("#btl-input-password2").val())
		+"&email1=" + encodeURIComponent(BTLJ("#btl-input-email1").val())
		+"&email2=" + encodeURIComponent(BTLJ("#btl-input-email2").val())
		+ "&"+token+"="+value_token;

	if(btlOpt.RECAPTCHA =="recaptcha"){
		datasubmit += "&recaptcha=yes&recaptcha_response_field="+ encodeURIComponent(BTLJ("#recaptcha_response_field").val())
			+"&recaptcha_challenge_field="+encodeURIComponent(BTLJ("#recaptcha_challenge_field").val());
	}

	BTLJ.ajax({
		type: "POST",
		beforeSend:function(){
			BTLJ("#btl-register-in-process").show();
		},
		url: btlOpt.BT_AJAX,
		data: datasubmit,
		success: function(html){
			//if html contains "$error$" the registration failed
			BTLJ("#btl-register-in-process").hide();
			if(html.indexOf('$error$')!= -1){
				BTLJ("#btl-registration-error").html(html.replace('$error$',''));
				BTLJ("#btl-registration-error").show();
				if(btlOpt.RECAPTCHA =="recaptcha"){
					Recaptcha.reload();
				}
			}else{
				BTLJ(".btl-formregistration").children("div").hide();
				BTLJ("#btl-success").html(html);
				BTLJ("#btl-success").show();
				setTimeout(function() {window.location.reload();},7000);
			}
		},
		error: function (XMLHttpRequest, textStatus, errorThrown) {
			alert(textStatus + ': Ajax request failed');
		}
	});
	return false;
}

// AJAX LOGIN
function loginAjax(){
	if(BTLJ("#btl-input-username").val()=="") {
		showLoginError(Joomla.JText._('REQUIRED_USERNAME'));
		return false;
	}
	if(BTLJ("#btl-input-password").val()==""){
		showLoginError(Joomla.JText._('REQUIRED_PASSWORD'));
		return false;
	}

	var token = BTLJ('.btl-buttonsubmit input:last').attr("name");
	var value_token = encodeURIComponent(BTLJ('.btl-buttonsubmit input:last').val());
	var datasubmit= "bttask=login&username="+encodeURIComponent(BTLJ("#btl-input-username").val())
		+"&passwd=" + encodeURIComponent(BTLJ("#btl-input-password").val())
		+ "&"+token+"="+value_token
		+"&return="+ encodeURIComponent(BTLJ("#btl-return").val());

	if(BTLJ("#btl-checkbox-remember").is(":checked")){
		datasubmit += '&remember=yes';
	}

	BTLJ.ajax({
		type: "POST",
		beforeSend:function(){
			BTLJ("#btl-login-in-process").show();
			BTLJ("#btl-login-in-process").css('height',BTLJ('#btl-content-login').outerHeight()+'px');
		},
		url: btlOpt.BT_AJAX,
		data: datasubmit,
		success: function (html, textstatus, xhrReq){
			if(html == "1" || html == 1){
				window.location.href=btlOpt.BT_RETURN;
			}else{
				if(html.indexOf('</head>')==-1){
					showLoginError(Joomla.JText._('E_LOGIN_AUTHENTICATE'));
				} else {
					if(html.indexOf('btl-panel-profile')==-1){
						showLoginError('Another plugin has redirected the page on login. Please check your system plugins');
					} else {
						window.location.href=btlOpt.BT_RETURN;
					}
				}
			}
		},
		error: function (XMLHttpRequest, textStatus, errorThrown) {
			alert(textStatus + ': Ajax request failed!');
		}
	});
	return false;
}

function showLoginError(notice,reload){
	BTLJ("#btl-login-in-process").hide();
	BTLJ("#btl-login-error").html(notice);
	BTLJ("#btl-login-error").show();
	if(reload){
		setTimeout(function() {window.location.reload();},5000);
	}
}
effortlesssites/template
modules/mod_bt_login/tmpl/js/default.js
JavaScript
gpl-2.0
13,991
28.456842
153
0.620327
false
/*
 *  ATI Frame Buffer Device Driver Core
 *
 *	Copyright (C) 2004  Alex Kern <alex.kern@gmx.de>
 *	Copyright (C) 1997-2001  Geert Uytterhoeven
 *	Copyright (C) 1998  Bernd Harries
 *	Copyright (C) 1998  Eddie C. Dost  (ecd@skynet.be)
 *
 *  This driver supports the following ATI graphics chips:
 *    - ATI Mach64
 *
 *  To do: add support for
 *    - ATI Rage128 (from aty128fb.c)
 *    - ATI Radeon (from radeonfb.c)
 *
 *  This driver is partly based on the PowerMac console driver:
 *
 *	Copyright (C) 1996 Paul Mackerras
 *
 *  and on the PowerMac ATI/mach64 display driver:
 *
 *	Copyright (C) 1997 Michael AK Tesch
 *
 *	      with work by Jon Howell
 *			   Harry AC Eaton
 *			   Anthony Tong <atong@uiuc.edu>
 *
 *  Generic LCD support written by Daniel Mantione, ported from 2.4.20 by Alex Kern
 *  Many Thanks to Ville Syrjälä for patches and fixing the nasty 16 bit color bug.
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License. See the file COPYING in the main directory of this archive for
 *  more details.
 *
 *  Many thanks to Nitya from ATI devrel for support and patience !
 */

/******************************************************************************

  TODO:

    - cursor support on all cards and all ramdacs.
    - cursor parameters controllable via ioctl()s.
    - guess PLL and MCLK based on the original PLL register values initialized
      by Open Firmware (if they are initialized). BIOS is done
      (Anyone with Mac to help with this?)

******************************************************************************/


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/backlight.h>

#include <asm/io.h>
#include <linux/uaccess.h>

#include <video/mach64.h>
#include "atyfb.h"
#include "ati_ids.h"

#ifdef __powerpc__
#include <asm/machdep.h>
#include <asm/prom.h>
#include "../macmodes.h"
#endif
#ifdef __sparc__
#include <asm/fbio.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_ADB_PMU
#include <linux/adb.h>
#include <linux/pmu.h>
#endif
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif

/*
 * Debug flags.
 */
#undef DEBUG
/*#define DEBUG*/

/* Make sure n * PAGE_SIZE is protected at end of Aperture for GUI-regs */
/*  - must be large enough to catch all GUI-Regs   */
/*  - must be aligned to a PAGE boundary           */
#define GUI_RESERVE	(1 * PAGE_SIZE)

/* FIXME: remove the FAIL definition */
#define FAIL(msg) do { \
	if (!(var->activate & FB_ACTIVATE_TEST)) \
		printk(KERN_CRIT "atyfb: " msg "\n"); \
	return -EINVAL; \
} while (0)
#define FAIL_MAX(msg, x, _max_) do { \
	if (x > _max_) { \
		if (!(var->activate & FB_ACTIVATE_TEST)) \
			printk(KERN_CRIT "atyfb: " msg " %x(%x)\n", x, _max_); \
		return -EINVAL; \
	} \
} while (0)
#ifdef DEBUG
#define DPRINTK(fmt, args...)	printk(KERN_DEBUG "atyfb: " fmt, ## args)
#else
#define DPRINTK(fmt, args...)
#endif

#define PRINTKI(fmt, args...)	printk(KERN_INFO "atyfb: " fmt, ## args)
#define PRINTKE(fmt, args...)	printk(KERN_ERR "atyfb: " fmt, ## args)

#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
static const u32 lt_lcd_regs[] = {
	CONFIG_PANEL_LG,
	LCD_GEN_CNTL_LG,
	DSTN_CONTROL_LG,
	HFB_PITCH_ADDR_LG,
	HORZ_STRETCHING_LG,
	VERT_STRETCHING_LG,
	0, /* EXT_VERT_STRETCH */
	LT_GIO_LG,
	POWER_MANAGEMENT_LG
};

void aty_st_lcd(int index, u32 val, const struct atyfb_par *par)
{
	if (M64_HAS(LT_LCD_REGS)) {
		aty_st_le32(lt_lcd_regs[index], val, par);
	} else {
		unsigned long temp;

		/* write addr byte */
		temp = aty_ld_le32(LCD_INDEX, par);
		aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par);
		/* write the register value */
		aty_st_le32(LCD_DATA, val, par);
	}
}

u32 aty_ld_lcd(int index, const struct atyfb_par *par)
{
	if (M64_HAS(LT_LCD_REGS)) {
		return aty_ld_le32(lt_lcd_regs[index], par);
	} else {
		unsigned long temp;

		/* write addr byte */
		temp = aty_ld_le32(LCD_INDEX, par);
		aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par);
		/* read the register value */
		return aty_ld_le32(LCD_DATA, par);
	}
}
#endif /* defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */

#ifdef CONFIG_FB_ATY_GENERIC_LCD
/*
 * ATIReduceRatio --
 *
 * Reduce a fraction by factoring out the largest common divider of the
 * fraction's numerator and denominator.
 */
static void ATIReduceRatio(int *Numerator, int *Denominator)
{
	int Multiplier, Divider, Remainder;

	Multiplier = *Numerator;
	Divider = *Denominator;

	while ((Remainder = Multiplier % Divider)) {
		Multiplier = Divider;
		Divider = Remainder;
	}

	*Numerator /= Divider;
	*Denominator /= Divider;
}
#endif

/*
 * The Hardware parameters for each card
 */

struct pci_mmap_map {
	unsigned long voff;
	unsigned long poff;
	unsigned long size;
	unsigned long prot_flag;
	unsigned long prot_mask;
};

static struct fb_fix_screeninfo atyfb_fix __devinitdata = {
	.id		= "ATY Mach64",
	.type		= FB_TYPE_PACKED_PIXELS,
	.visual		= FB_VISUAL_PSEUDOCOLOR,
	.xpanstep	= 8,
	.ypanstep	= 1,
};

/*
 * Frame buffer device API
 */

static int atyfb_open(struct fb_info *info, int user);
static int atyfb_release(struct fb_info *info, int user);
static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
static int atyfb_set_par(struct fb_info *info);
static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
	u_int transp, struct fb_info *info);
static int atyfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
static int atyfb_blank(int blank, struct fb_info *info);
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg);
#ifdef __sparc__
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
#endif
static int atyfb_sync(struct fb_info *info);

/*
 * Internal routines
 */

static int aty_init(struct fb_info *info);
static void aty_resume_chip(struct fb_info *info);
#ifdef CONFIG_ATARI
static int store_video_par(char *videopar, unsigned char m64_num);
#endif

static struct crtc saved_crtc;
static union aty_pll saved_pll;
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
static int aty_var_to_crtc(const struct fb_info *info, const struct fb_var_screeninfo *var, struct crtc *crtc);
static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *var);
static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
#ifdef CONFIG_PPC
static int read_aty_sense(const struct atyfb_par *par);
#endif

/*
 * Interface used by the world
 */

static struct fb_var_screeninfo default_var = {
	/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
	640, 480, 640, 480, 0, 0, 8, 0,
	{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
	0, 0, -1, -1, 0, 39722, 48, 16, 33, 10, 96, 2,
	0, FB_VMODE_NONINTERLACED
};

static struct fb_videomode defmode = {
	/* 640x480 @ 60 Hz, 31.5 kHz hsync */
	NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
	0, FB_VMODE_NONINTERLACED
};

static struct fb_ops atyfb_ops = {
	.owner		= THIS_MODULE,
	.fb_open	= atyfb_open,
	.fb_release	= atyfb_release,
	.fb_check_var	= atyfb_check_var,
	.fb_set_par	= atyfb_set_par,
	.fb_setcolreg	= atyfb_setcolreg,
	.fb_pan_display	= atyfb_pan_display,
	.fb_blank	= atyfb_blank,
	.fb_ioctl	= atyfb_ioctl,
	.fb_fillrect	= atyfb_fillrect,
	.fb_copyarea	= atyfb_copyarea,
	.fb_imageblit	= atyfb_imageblit,
#ifdef __sparc__
	.fb_mmap	= atyfb_mmap,
#endif
	.fb_sync	= atyfb_sync,
};

static int noaccel;
#ifdef CONFIG_MTRR
static int nomtrr;
#endif
static int vram;
static int pll;
static int mclk;
static int xclk;
static int comp_sync __devinitdata = -1;
static char *mode;

#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
#else
static int backlight __devinitdata = 0;
#endif

#ifdef CONFIG_PPC
static int default_vmode __devinitdata = VMODE_CHOOSE;
static int default_cmode __devinitdata = CMODE_CHOOSE;

module_param_named(vmode, default_vmode, int, 0);
MODULE_PARM_DESC(vmode, "int: video mode for mac");
module_param_named(cmode, default_cmode, int, 0);
MODULE_PARM_DESC(cmode, "int: color mode for mac");
#endif

#ifdef CONFIG_ATARI
static unsigned int mach64_count __devinitdata = 0;
static unsigned long phys_vmembase[FB_MAX] __devinitdata = { 0, };
static unsigned long phys_size[FB_MAX] __devinitdata = { 0, };
static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#endif

/* top -> down is an evolution of mach64 chipset, any corrections? */
#define ATI_CHIP_88800GX   (M64F_GX)
#define ATI_CHIP_88800CX   (M64F_GX)

#define ATI_CHIP_264CT     (M64F_CT | M64F_INTEGRATED | M64F_CT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264ET     (M64F_CT | M64F_INTEGRATED | M64F_CT_BUS | M64F_MAGIC_FIFO)

#define ATI_CHIP_264VT     (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264GT     (M64F_GT | M64F_INTEGRATED | M64F_MAGIC_FIFO | M64F_EXTRA_BRIGHT)

#define ATI_CHIP_264VTB    (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP)
#define ATI_CHIP_264VT3    (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP | M64F_SDRAM_MAGIC_PLL)
#define ATI_CHIP_264VT4    (M64F_VT | M64F_INTEGRATED | M64F_GTB_DSP)

/* FIXME what is this chip?
*/ #define ATI_CHIP_264LT (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP) /* make sets shorter */ #define ATI_MODERN_SET (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP | M64F_EXTRA_BRIGHT) #define ATI_CHIP_264GTB (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL) /*#define ATI_CHIP_264GTDVD ?*/ #define ATI_CHIP_264LTG (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL) #define ATI_CHIP_264GT2C (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE) #define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) #define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D) #define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4) #define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS) static struct { u16 pci_id; const char *name; int pll, mclk, xclk, ecp_max; u32 features; } aty_chips[] __devinitdata = { #ifdef CONFIG_FB_ATY_GX /* Mach64 GX */ { PCI_CHIP_MACH64GX, "ATI888GX00 (Mach64 GX)", 135, 50, 50, 0, ATI_CHIP_88800GX }, { PCI_CHIP_MACH64CX, "ATI888CX00 (Mach64 CX)", 135, 50, 50, 0, ATI_CHIP_88800CX }, #endif /* CONFIG_FB_ATY_GX */ #ifdef CONFIG_FB_ATY_CT { PCI_CHIP_MACH64CT, "ATI264CT (Mach64 CT)", 135, 60, 60, 0, ATI_CHIP_264CT }, { PCI_CHIP_MACH64ET, "ATI264ET (Mach64 ET)", 135, 60, 60, 0, ATI_CHIP_264ET }, /* FIXME what is this chip? */ { PCI_CHIP_MACH64LT, "ATI264LT (Mach64 LT)", 135, 63, 63, 0, ATI_CHIP_264LT }, { PCI_CHIP_MACH64VT, "ATI264VT (Mach64 VT)", 170, 67, 67, 80, ATI_CHIP_264VT }, { PCI_CHIP_MACH64GT, "3D RAGE (Mach64 GT)", 135, 63, 63, 80, ATI_CHIP_264GT }, { PCI_CHIP_MACH64VU, "ATI264VT3 (Mach64 VU)", 200, 67, 67, 80, ATI_CHIP_264VT3 }, { PCI_CHIP_MACH64GU, "3D RAGE II+ (Mach64 GU)", 200, 67, 67, 100, ATI_CHIP_264GTB }, { PCI_CHIP_MACH64LG, "3D RAGE LT (Mach64 LG)", 230, 63, 63, 100, ATI_CHIP_264LTG | M64F_LT_LCD_REGS | M64F_G3_PB_1024x768 }, { PCI_CHIP_MACH64VV, "ATI264VT4 (Mach64 VV)", 230, 83, 83, 100, ATI_CHIP_264VT4 }, { PCI_CHIP_MACH64GV, "3D RAGE IIC (Mach64 GV, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C }, { PCI_CHIP_MACH64GW, "3D RAGE IIC (Mach64 GW, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C }, { PCI_CHIP_MACH64GY, "3D RAGE IIC (Mach64 GY, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C }, { PCI_CHIP_MACH64GZ, "3D RAGE IIC (Mach64 GZ, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C }, { PCI_CHIP_MACH64GB, "3D RAGE PRO (Mach64 GB, BGA, AGP)", 230, 100, 100, 125, ATI_CHIP_264GTPRO }, { PCI_CHIP_MACH64GD, "3D RAGE PRO (Mach64 GD, BGA, AGP 1x)", 230, 100, 100, 125, ATI_CHIP_264GTPRO }, { PCI_CHIP_MACH64GI, "3D RAGE PRO (Mach64 GI, BGA, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO | M64F_MAGIC_VRAM_SIZE }, { PCI_CHIP_MACH64GP, "3D RAGE PRO (Mach64 GP, PQFP, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO }, { PCI_CHIP_MACH64GQ, "3D RAGE PRO (Mach64 GQ, PQFP, PCI, limited 3D)", 230, 100, 100, 125, ATI_CHIP_264GTPRO }, { PCI_CHIP_MACH64LB, "3D RAGE LT PRO (Mach64 LB, AGP)", 236, 75, 100, 135, ATI_CHIP_264LTPRO }, { PCI_CHIP_MACH64LD, "3D RAGE LT PRO (Mach64 LD, AGP)", 230, 100, 100, 135, ATI_CHIP_264LTPRO }, { PCI_CHIP_MACH64LI, "3D RAGE LT PRO (Mach64 LI, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO | M64F_G3_PB_1_1 | M64F_G3_PB_1024x768 }, { PCI_CHIP_MACH64LP, "3D RAGE LT PRO (Mach64 LP, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO | M64F_G3_PB_1024x768 }, { PCI_CHIP_MACH64LQ, "3D RAGE LT PRO (Mach64 LQ, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO }, { PCI_CHIP_MACH64GM, "3D RAGE XL 
(Mach64 GM, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL }, { PCI_CHIP_MACH64GN, "3D RAGE XC (Mach64 GN, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL }, { PCI_CHIP_MACH64GO, "3D RAGE XL (Mach64 GO, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL }, { PCI_CHIP_MACH64GL, "3D RAGE XC (Mach64 GL, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL }, { PCI_CHIP_MACH64GR, "3D RAGE XL (Mach64 GR, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL | M64F_SDRAM_MAGIC_PLL }, { PCI_CHIP_MACH64GS, "3D RAGE XC (Mach64 GS, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL }, { PCI_CHIP_MACH64LM, "3D RAGE Mobility P/M (Mach64 LM, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY }, { PCI_CHIP_MACH64LN, "3D RAGE Mobility L (Mach64 LN, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY }, { PCI_CHIP_MACH64LR, "3D RAGE Mobility P/M (Mach64 LR, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY }, { PCI_CHIP_MACH64LS, "3D RAGE Mobility L (Mach64 LS, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY }, #endif /* CONFIG_FB_ATY_CT */ }; /* can not fail */ static int __devinit correct_chipset(struct atyfb_par *par) { u8 rev; u16 type; u32 chip_id; const char *name; int i; for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--) if (par->pci_id == aty_chips[i].pci_id) break; name = aty_chips[i].name; par->pll_limits.pll_max = aty_chips[i].pll; par->pll_limits.mclk = aty_chips[i].mclk; par->pll_limits.xclk = aty_chips[i].xclk; par->pll_limits.ecp_max = aty_chips[i].ecp_max; par->features = aty_chips[i].features; chip_id = aty_ld_le32(CONFIG_CHIP_ID, par); type = chip_id & CFG_CHIP_TYPE; rev = (chip_id & CFG_CHIP_REV) >> 24; switch(par->pci_id) { #ifdef CONFIG_FB_ATY_GX case PCI_CHIP_MACH64GX: if(type != 0x00d7) return -ENODEV; break; case PCI_CHIP_MACH64CX: if(type != 0x0057) return -ENODEV; break; #endif #ifdef CONFIG_FB_ATY_CT case PCI_CHIP_MACH64VT: switch (rev & 0x07) { case 0x00: switch (rev & 0xc0) { case 0x00: name = "ATI264VT (A3) (Mach64 VT)"; par->pll_limits.pll_max = 170; par->pll_limits.mclk = 67; par->pll_limits.xclk = 67; par->pll_limits.ecp_max = 80; par->features = ATI_CHIP_264VT; break; case 0x40: name = "ATI264VT2 (A4) (Mach64 VT)"; par->pll_limits.pll_max = 200; par->pll_limits.mclk = 67; par->pll_limits.xclk = 67; par->pll_limits.ecp_max = 80; par->features = ATI_CHIP_264VT | M64F_MAGIC_POSTDIV; break; } break; case 0x01: name = "ATI264VT3 (B1) (Mach64 VT)"; par->pll_limits.pll_max = 200; par->pll_limits.mclk = 67; par->pll_limits.xclk = 67; par->pll_limits.ecp_max = 80; par->features = ATI_CHIP_264VTB; break; case 0x02: name = "ATI264VT3 (B2) (Mach64 VT)"; par->pll_limits.pll_max = 200; par->pll_limits.mclk = 67; par->pll_limits.xclk = 67; par->pll_limits.ecp_max = 80; par->features = ATI_CHIP_264VT3; break; } break; case PCI_CHIP_MACH64GT: switch (rev & 0x07) { case 0x01: name = "3D RAGE II (Mach64 GT)"; par->pll_limits.pll_max = 170; par->pll_limits.mclk = 67; par->pll_limits.xclk = 67; par->pll_limits.ecp_max = 80; par->features = ATI_CHIP_264GTB; break; case 0x02: name = "3D RAGE II+ (Mach64 GT)"; par->pll_limits.pll_max = 200; par->pll_limits.mclk = 67; par->pll_limits.xclk = 67; par->pll_limits.ecp_max = 100; par->features = ATI_CHIP_264GTB; break; } break; #endif } PRINTKI("%s [0x%04x rev 0x%02x]\n", name, type, rev); return 0; } static char ram_dram[] __devinitdata = "DRAM"; static char ram_resv[] __devinitdata = "RESV"; #ifdef CONFIG_FB_ATY_GX static char ram_vram[] __devinitdata = "VRAM"; #endif /* CONFIG_FB_ATY_GX */ #ifdef CONFIG_FB_ATY_CT static char ram_edo[] __devinitdata = "EDO"; static char ram_sdram[] __devinitdata = "SDRAM (1:1)"; 
static char ram_sgram[] __devinitdata = "SGRAM (1:1)"; static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)"; static char ram_off[] __devinitdata = "OFF"; #endif /* CONFIG_FB_ATY_CT */ #ifdef CONFIG_FB_ATY_GX static char *aty_gx_ram[8] __devinitdata = { ram_dram, ram_vram, ram_vram, ram_dram, ram_dram, ram_vram, ram_vram, ram_resv }; #endif /* CONFIG_FB_ATY_GX */ #ifdef CONFIG_FB_ATY_CT static char *aty_ct_ram[8] __devinitdata = { ram_off, ram_dram, ram_edo, ram_edo, ram_sdram, ram_sgram, ram_sdram32, ram_resv }; #endif /* CONFIG_FB_ATY_CT */ static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var, struct atyfb_par *par) { u32 pixclock = var->pixclock; #ifdef CONFIG_FB_ATY_GENERIC_LCD u32 lcd_on_off; par->pll.ct.xres = 0; if (par->lcd_table != 0) { lcd_on_off = aty_ld_lcd(LCD_GEN_CNTL, par); if(lcd_on_off & LCD_ON) { par->pll.ct.xres = var->xres; pixclock = par->lcd_pixclock; } } #endif return pixclock; } #if defined(CONFIG_PPC) /* * Apple monitor sense */ static int __devinit read_aty_sense(const struct atyfb_par *par) { int sense, i; aty_st_le32(GP_IO, 0x31003100, par); /* drive outputs high */ __delay(200); aty_st_le32(GP_IO, 0, par); /* turn off outputs */ __delay(2000); i = aty_ld_le32(GP_IO, par); /* get primary sense value */ sense = ((i & 0x3000) >> 3) | (i & 0x100); /* drive each sense line low in turn and collect the other 2 */ aty_st_le32(GP_IO, 0x20000000, par); /* drive A low */ __delay(2000); i = aty_ld_le32(GP_IO, par); sense |= ((i & 0x1000) >> 7) | ((i & 0x100) >> 4); aty_st_le32(GP_IO, 0x20002000, par); /* drive A high again */ __delay(200); aty_st_le32(GP_IO, 0x10000000, par); /* drive B low */ __delay(2000); i = aty_ld_le32(GP_IO, par); sense |= ((i & 0x2000) >> 10) | ((i & 0x100) >> 6); aty_st_le32(GP_IO, 0x10001000, par); /* drive B high again */ __delay(200); aty_st_le32(GP_IO, 0x01000000, par); /* drive C low */ __delay(2000); sense |= (aty_ld_le32(GP_IO, par) & 0x3000) >> 12; aty_st_le32(GP_IO, 0, par); /* turn off outputs */ return sense; } #endif /* defined(CONFIG_PPC) */ /* ------------------------------------------------------------------------- */ /* * CRTC programming */ static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc) { #ifdef CONFIG_FB_ATY_GENERIC_LCD if (par->lcd_table != 0) { if(!M64_HAS(LT_LCD_REGS)) { crtc->lcd_index = aty_ld_le32(LCD_INDEX, par); aty_st_le32(LCD_INDEX, crtc->lcd_index, par); } crtc->lcd_config_panel = aty_ld_lcd(CONFIG_PANEL, par); crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par); /* switch to non shadow registers */ aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl & ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par); /* save stretching */ crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par); crtc->vert_stretching = aty_ld_lcd(VERT_STRETCHING, par); if (!M64_HAS(LT_LCD_REGS)) crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par); } #endif crtc->h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par); crtc->h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par); crtc->v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par); crtc->v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par); crtc->vline_crnt_vline = aty_ld_le32(CRTC_VLINE_CRNT_VLINE, par); crtc->off_pitch = aty_ld_le32(CRTC_OFF_PITCH, par); crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par); #ifdef CONFIG_FB_ATY_GENERIC_LCD if (par->lcd_table != 0) { /* switch to shadow registers */ aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) | SHADOW_EN | SHADOW_RW_EN, par); crtc->shadow_h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par); 
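		/*
		 * With SHADOW_EN | SHADOW_RW_EN selected above, reads of the
		 * CRTC timing registers return their shadowed (panel) copies,
		 * so these shadow_* fields capture the panel timings separately
		 * from the CRT set saved earlier in this function.
		 */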
crtc->shadow_h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par); crtc->shadow_v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par); crtc->shadow_v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par); aty_st_le32(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par); } #endif /* CONFIG_FB_ATY_GENERIC_LCD */ } static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc) { #ifdef CONFIG_FB_ATY_GENERIC_LCD if (par->lcd_table != 0) { /* stop CRTC */ aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~(CRTC_EXT_DISP_EN | CRTC_EN), par); /* update non-shadow registers first */ aty_st_lcd(CONFIG_PANEL, crtc->lcd_config_panel, par); aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl & ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par); /* temporarily disable stretching */ aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching & ~(HORZ_STRETCH_MODE | HORZ_STRETCH_EN), par); aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching & ~(VERT_STRETCH_RATIO1 | VERT_STRETCH_RATIO2 | VERT_STRETCH_USE0 | VERT_STRETCH_EN), par); } #endif /* turn off CRT */ aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~CRTC_EN, par); DPRINTK("setting up CRTC\n"); DPRINTK("set primary CRT to %ix%i %c%c composite %c\n", ((((crtc->h_tot_disp>>16) & 0xff) + 1)<<3), (((crtc->v_tot_disp>>16) & 0x7ff) + 1), (crtc->h_sync_strt_wid & 0x200000)?'N':'P', (crtc->v_sync_strt_wid & 0x200000)?'N':'P', (crtc->gen_cntl & CRTC_CSYNC_EN)?'P':'N'); DPRINTK("CRTC_H_TOTAL_DISP: %x\n",crtc->h_tot_disp); DPRINTK("CRTC_H_SYNC_STRT_WID: %x\n",crtc->h_sync_strt_wid); DPRINTK("CRTC_V_TOTAL_DISP: %x\n",crtc->v_tot_disp); DPRINTK("CRTC_V_SYNC_STRT_WID: %x\n",crtc->v_sync_strt_wid); DPRINTK("CRTC_OFF_PITCH: %x\n", crtc->off_pitch); DPRINTK("CRTC_VLINE_CRNT_VLINE: %x\n", crtc->vline_crnt_vline); DPRINTK("CRTC_GEN_CNTL: %x\n",crtc->gen_cntl); aty_st_le32(CRTC_H_TOTAL_DISP, crtc->h_tot_disp, par); aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->h_sync_strt_wid, par); aty_st_le32(CRTC_V_TOTAL_DISP, crtc->v_tot_disp, par); aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->v_sync_strt_wid, par); aty_st_le32(CRTC_OFF_PITCH, crtc->off_pitch, par); aty_st_le32(CRTC_VLINE_CRNT_VLINE, crtc->vline_crnt_vline, par); aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl, par); #if 0 FIXME if (par->accel_flags & FB_ACCELF_TEXT) aty_init_engine(par, info); #endif #ifdef CONFIG_FB_ATY_GENERIC_LCD /* after setting the CRTC registers we should set the LCD registers. 
*/ if (par->lcd_table != 0) { /* switch to shadow registers */ aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) | (SHADOW_EN | SHADOW_RW_EN), par); DPRINTK("set shadow CRT to %ix%i %c%c\n", ((((crtc->shadow_h_tot_disp>>16) & 0xff) + 1)<<3), (((crtc->shadow_v_tot_disp>>16) & 0x7ff) + 1), (crtc->shadow_h_sync_strt_wid & 0x200000)?'N':'P', (crtc->shadow_v_sync_strt_wid & 0x200000)?'N':'P'); DPRINTK("SHADOW CRTC_H_TOTAL_DISP: %x\n", crtc->shadow_h_tot_disp); DPRINTK("SHADOW CRTC_H_SYNC_STRT_WID: %x\n", crtc->shadow_h_sync_strt_wid); DPRINTK("SHADOW CRTC_V_TOTAL_DISP: %x\n", crtc->shadow_v_tot_disp); DPRINTK("SHADOW CRTC_V_SYNC_STRT_WID: %x\n", crtc->shadow_v_sync_strt_wid); aty_st_le32(CRTC_H_TOTAL_DISP, crtc->shadow_h_tot_disp, par); aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->shadow_h_sync_strt_wid, par); aty_st_le32(CRTC_V_TOTAL_DISP, crtc->shadow_v_tot_disp, par); aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->shadow_v_sync_strt_wid, par); /* restore CRTC selection & shadow state and enable stretching */ DPRINTK("LCD_GEN_CNTL: %x\n", crtc->lcd_gen_cntl); DPRINTK("HORZ_STRETCHING: %x\n", crtc->horz_stretching); DPRINTK("VERT_STRETCHING: %x\n", crtc->vert_stretching); if(!M64_HAS(LT_LCD_REGS)) DPRINTK("EXT_VERT_STRETCH: %x\n", crtc->ext_vert_stretch); aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par); aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching, par); aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching, par); if(!M64_HAS(LT_LCD_REGS)) { aty_st_lcd(EXT_VERT_STRETCH, crtc->ext_vert_stretch, par); aty_ld_le32(LCD_INDEX, par); aty_st_le32(LCD_INDEX, crtc->lcd_index, par); } } #endif /* CONFIG_FB_ATY_GENERIC_LCD */ } static int aty_var_to_crtc(const struct fb_info *info, const struct fb_var_screeninfo *var, struct crtc *crtc) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp; u32 sync, vmode, vdisplay; u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol; u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync; u32 pix_width, dp_pix_width, dp_chain_mask; /* input */ xres = var->xres; yres = var->yres; vxres = var->xres_virtual; vyres = var->yres_virtual; xoffset = var->xoffset; yoffset = var->yoffset; bpp = var->bits_per_pixel; if (bpp == 16) bpp = (var->green.length == 5) ? 
		15 : 16;
	sync = var->sync;
	vmode = var->vmode;

	/* convert (and round up) and validate */
	if (vxres < xres + xoffset)
		vxres = xres + xoffset;
	h_disp = xres;

	if (vyres < yres + yoffset)
		vyres = yres + yoffset;
	v_disp = yres;

	if (bpp <= 8) {
		bpp = 8;
		pix_width = CRTC_PIX_WIDTH_8BPP;
		dp_pix_width = HOST_8BPP | SRC_8BPP | DST_8BPP | BYTE_ORDER_LSB_TO_MSB;
		dp_chain_mask = DP_CHAIN_8BPP;
	} else if (bpp <= 15) {
		bpp = 16;
		pix_width = CRTC_PIX_WIDTH_15BPP;
		dp_pix_width = HOST_15BPP | SRC_15BPP | DST_15BPP | BYTE_ORDER_LSB_TO_MSB;
		dp_chain_mask = DP_CHAIN_15BPP;
	} else if (bpp <= 16) {
		bpp = 16;
		pix_width = CRTC_PIX_WIDTH_16BPP;
		dp_pix_width = HOST_16BPP | SRC_16BPP | DST_16BPP | BYTE_ORDER_LSB_TO_MSB;
		dp_chain_mask = DP_CHAIN_16BPP;
	} else if (bpp <= 24 && M64_HAS(INTEGRATED)) {
		bpp = 24;
		pix_width = CRTC_PIX_WIDTH_24BPP;
		dp_pix_width = HOST_8BPP | SRC_8BPP | DST_8BPP | BYTE_ORDER_LSB_TO_MSB;
		dp_chain_mask = DP_CHAIN_24BPP;
	} else if (bpp <= 32) {
		bpp = 32;
		pix_width = CRTC_PIX_WIDTH_32BPP;
		dp_pix_width = HOST_32BPP | SRC_32BPP | DST_32BPP | BYTE_ORDER_LSB_TO_MSB;
		dp_chain_mask = DP_CHAIN_32BPP;
	} else
		FAIL("invalid bpp");

	if (vxres * vyres * bpp / 8 > info->fix.smem_len)
		FAIL("not enough video RAM");

	h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
	v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;

	if ((xres > 1600) || (yres > 1200)) {
		FAIL("MACH64 chips are designed for max 1600x1200\n"
		     "select another resolution.");
	}
	h_sync_strt = h_disp + var->right_margin;
	h_sync_end = h_sync_strt + var->hsync_len;
	h_sync_dly = var->right_margin & 7;
	h_total = h_sync_end + h_sync_dly + var->left_margin;

	v_sync_strt = v_disp + var->lower_margin;
	v_sync_end = v_sync_strt + var->vsync_len;
	v_total = v_sync_end + var->upper_margin;

#ifdef CONFIG_FB_ATY_GENERIC_LCD
	if (par->lcd_table != 0) {
		if (!M64_HAS(LT_LCD_REGS)) {
			u32 lcd_index = aty_ld_le32(LCD_INDEX, par);
			crtc->lcd_index = lcd_index &
				~(LCD_INDEX_MASK | LCD_DISPLAY_DIS | LCD_SRC_SEL | CRTC2_DISPLAY_DIS);
			aty_st_le32(LCD_INDEX, lcd_index, par);
		}

		if (!M64_HAS(MOBIL_BUS))
			crtc->lcd_index |= CRTC2_DISPLAY_DIS;

		crtc->lcd_config_panel = aty_ld_lcd(CONFIG_PANEL, par) | 0x4000;
		crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par) & ~CRTC_RW_SELECT;

		crtc->lcd_gen_cntl &=
			~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 | TVCLK_PM_EN |
			/*VCLK_DAC_PM_EN | USE_SHADOWED_VEND |*/
			USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN);
		crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR | LOCK_8DOT;

		if ((crtc->lcd_gen_cntl & LCD_ON) &&
		    ((xres > par->lcd_width) || (yres > par->lcd_height))) {
			/*
			 * We cannot display the mode on the LCD. If the CRT is
			 * enabled we can turn off the LCD.
			 * If the CRT is off, it isn't a good idea to switch it
			 * on; we don't know if one is connected. So it's better
			 * to fail in that case.
			 */
			if (crtc->lcd_gen_cntl & CRT_ON) {
				if (!(var->activate & FB_ACTIVATE_TEST))
					PRINTKI("Disabling LCD panel because the video mode does not fit.\n");
				crtc->lcd_gen_cntl &= ~LCD_ON;
				/*aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);*/
			} else {
				if (!(var->activate & FB_ACTIVATE_TEST))
					PRINTKE("Video mode exceeds size of LCD panel.\nConnect this computer to a conventional monitor if you really need this mode.\n");
				return -EINVAL;
			}
		}
	}

	if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON)) {
		int VScan = 1;
		/* bpp -> bytespp, 1,4 -> 0; 8 -> 2; 15,16 -> 1; 24 -> 6; 32 -> 5
		const u8 DFP_h_sync_dly_LT[] = { 0, 2, 1, 6, 5 };
		const u8 ADD_to_strt_wid_and_dly_LT_DAC[] = { 0, 5, 6, 9, 9, 12, 12 };
		*/
		vmode &= ~(FB_VMODE_DOUBLE | FB_VMODE_INTERLACED);

		/*
		 * This is horror!
		 * When we simulate, say, 640x480 on an 800x600 LCD monitor,
		 * the CRTC should be programmed with the 800x600 values for
		 * the non-visible part, but 640x480 for the visible part.
		 * This code has been tested on a laptop with its 1400x1050 LCD
		 * monitor and a conventional monitor both switched on.
		 * Tested modes: 1280x1024, 1152x864, 1024x768, 800x600;
		 * works, with minor glitches, also with DOUBLESCAN modes.
		 */
		if (yres < par->lcd_height) {
			VScan = par->lcd_height / yres;
			if (VScan > 1) {
				VScan = 2;
				vmode |= FB_VMODE_DOUBLE;
			}
		}

		h_sync_strt = h_disp + par->lcd_right_margin;
		h_sync_end = h_sync_strt + par->lcd_hsync_len;
		h_sync_dly = /*DFP_h_sync_dly[ ( bpp + 1 ) / 3 ]; */par->lcd_hsync_dly;
		h_total = h_disp + par->lcd_hblank_len;

		v_sync_strt = v_disp + par->lcd_lower_margin / VScan;
		v_sync_end = v_sync_strt + par->lcd_vsync_len / VScan;
		v_total = v_disp + par->lcd_vblank_len / VScan;
	}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */

	h_disp = (h_disp >> 3) - 1;
	h_sync_strt = (h_sync_strt >> 3) - 1;
	h_sync_end = (h_sync_end >> 3) - 1;
	h_total = (h_total >> 3) - 1;
	h_sync_wid = h_sync_end - h_sync_strt;

	FAIL_MAX("h_disp too large", h_disp, 0xff);
	FAIL_MAX("h_sync_strt too large", h_sync_strt, 0x1ff);
	/*FAIL_MAX("h_sync_wid too large", h_sync_wid, 0x1f);*/
	if (h_sync_wid > 0x1f)
		h_sync_wid = 0x1f;
	FAIL_MAX("h_total too large", h_total, 0x1ff);

	if (vmode & FB_VMODE_DOUBLE) {
		v_disp <<= 1;
		v_sync_strt <<= 1;
		v_sync_end <<= 1;
		v_total <<= 1;
	}

	vdisplay = yres;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
	if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON))
		vdisplay = par->lcd_height;
#endif

	v_disp--;
	v_sync_strt--;
	v_sync_end--;
	v_total--;
	v_sync_wid = v_sync_end - v_sync_strt;

	FAIL_MAX("v_disp too large", v_disp, 0x7ff);
	FAIL_MAX("v_sync_strt too large", v_sync_strt, 0x7ff);
	/*FAIL_MAX("v_sync_wid too large", v_sync_wid, 0x1f);*/
	if (v_sync_wid > 0x1f)
		v_sync_wid = 0x1f;
	FAIL_MAX("v_total too large", v_total, 0x7ff);

	c_sync = sync & FB_SYNC_COMP_HIGH_ACT ?
		CRTC_CSYNC_EN : 0;

	/* output */
	crtc->vxres = vxres;
	crtc->vyres = vyres;
	crtc->xoffset = xoffset;
	crtc->yoffset = yoffset;
	crtc->bpp = bpp;
	crtc->off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19);
	crtc->vline_crnt_vline = 0;

	crtc->h_tot_disp = h_total | (h_disp << 16);
	crtc->h_sync_strt_wid = (h_sync_strt & 0xff) | (h_sync_dly << 8) |
		((h_sync_strt & 0x100) << 4) | (h_sync_wid << 16) | (h_sync_pol << 21);
	crtc->v_tot_disp = v_total | (v_disp << 16);
	crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid << 16) | (v_sync_pol << 21);

	/* crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_PRESERVED_MASK; */
	crtc->gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN | pix_width | c_sync;
	crtc->gen_cntl |= CRTC_VGA_LINEAR;

	/* Enable doublescan mode if requested */
	if (vmode & FB_VMODE_DOUBLE)
		crtc->gen_cntl |= CRTC_DBL_SCAN_EN;
	/* Enable interlaced mode if requested */
	if (vmode & FB_VMODE_INTERLACED)
		crtc->gen_cntl |= CRTC_INTERLACE_EN;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
	if (par->lcd_table != 0) {
		vdisplay = yres;
		if (vmode & FB_VMODE_DOUBLE)
			vdisplay <<= 1;
		crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH);
		crtc->lcd_gen_cntl &= ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 |
			/*TVCLK_PM_EN | VCLK_DAC_PM_EN |*/
			USE_SHADOWED_VEND | USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN);
		crtc->lcd_gen_cntl |= (DONT_SHADOW_VPAR/* | LOCK_8DOT*/);

		/* MOBILITY M1 tested, FIXME: LT */
		crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
		if (!M64_HAS(LT_LCD_REGS))
			crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par) &
				~(AUTO_VERT_RATIO | VERT_STRETCH_MODE | VERT_STRETCH_RATIO3);

		crtc->horz_stretching &= ~(HORZ_STRETCH_RATIO | HORZ_STRETCH_LOOP | AUTO_HORZ_RATIO |
			HORZ_STRETCH_MODE | HORZ_STRETCH_EN);
		if (xres < par->lcd_width && crtc->lcd_gen_cntl & LCD_ON) {
			do {
				/*
				 * The horizontal blender misbehaves when HDisplay is less than
				 * a certain threshold (440 for a 1024-wide panel).  It doesn't
				 * stretch such modes enough.  Use pixel replication instead of
				 * blending to stretch modes that can be made to exactly fit the
				 * panel width.  The undocumented "NoLCDBlend" option allows the
				 * pixel-replicated mode to be slightly wider or narrower than the
				 * panel width.  It also causes a mode that is exactly half as wide
				 * as the panel to be pixel-replicated, rather than blended.
				 */
				int HDisplay  = xres & ~7;
				int nStretch  = par->lcd_width / HDisplay;
				int Remainder = par->lcd_width % HDisplay;

				if ((!Remainder && ((nStretch > 2))) ||
				    (((HDisplay * 16) / par->lcd_width) < 7)) {
					static const char StretchLoops[] = {10, 12, 13, 15, 16};
					int horz_stretch_loop = -1, BestRemainder;
					int Numerator = HDisplay, Denominator = par->lcd_width;
					int Index = 5;
					ATIReduceRatio(&Numerator, &Denominator);

					BestRemainder = (Numerator * 16) / Denominator;
					while (--Index >= 0) {
						Remainder = ((Denominator - Numerator) * StretchLoops[Index]) %
							Denominator;
						if (Remainder < BestRemainder) {
							horz_stretch_loop = Index;
							if (!(BestRemainder = Remainder))
								break;
						}
					}

					if ((horz_stretch_loop >= 0) && !BestRemainder) {
						int horz_stretch_ratio = 0, Accumulator = 0;
						int reuse_previous = 1;

						Index = StretchLoops[horz_stretch_loop];

						while (--Index >= 0) {
							if (Accumulator > 0)
								horz_stretch_ratio |= reuse_previous;
							else
								Accumulator += Denominator;
							Accumulator -= Numerator;
							reuse_previous <<= 1;
						}

						crtc->horz_stretching |= (HORZ_STRETCH_EN |
							((horz_stretch_loop & HORZ_STRETCH_LOOP) << 16) |
							(horz_stretch_ratio & HORZ_STRETCH_RATIO));
						break;	/* Out of the do { ...
} while (0) */ } } crtc->horz_stretching |= (HORZ_STRETCH_MODE | HORZ_STRETCH_EN | (((HDisplay * (HORZ_STRETCH_BLEND + 1)) / par->lcd_width) & HORZ_STRETCH_BLEND)); } while (0); } if (vdisplay < par->lcd_height && crtc->lcd_gen_cntl & LCD_ON) { crtc->vert_stretching = (VERT_STRETCH_USE0 | VERT_STRETCH_EN | (((vdisplay * (VERT_STRETCH_RATIO0 + 1)) / par->lcd_height) & VERT_STRETCH_RATIO0)); if (!M64_HAS(LT_LCD_REGS) && xres <= (M64_HAS(MOBIL_BUS)?1024:800)) crtc->ext_vert_stretch |= VERT_STRETCH_MODE; } else { /* * Don't use vertical blending if the mode is too wide or not * vertically stretched. */ crtc->vert_stretching = 0; } /* copy to shadow crtc */ crtc->shadow_h_tot_disp = crtc->h_tot_disp; crtc->shadow_h_sync_strt_wid = crtc->h_sync_strt_wid; crtc->shadow_v_tot_disp = crtc->v_tot_disp; crtc->shadow_v_sync_strt_wid = crtc->v_sync_strt_wid; } #endif /* CONFIG_FB_ATY_GENERIC_LCD */ if (M64_HAS(MAGIC_FIFO)) { /* FIXME: display FIFO low watermark values */ crtc->gen_cntl |= (aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_FIFO_LWM); } crtc->dp_pix_width = dp_pix_width; crtc->dp_chain_mask = dp_chain_mask; return 0; } static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *var) { u32 xres, yres, bpp, left, right, upper, lower, hslen, vslen, sync; u32 h_total, h_disp, h_sync_strt, h_sync_dly, h_sync_wid, h_sync_pol; u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync; u32 pix_width; u32 double_scan, interlace; /* input */ h_total = crtc->h_tot_disp & 0x1ff; h_disp = (crtc->h_tot_disp >> 16) & 0xff; h_sync_strt = (crtc->h_sync_strt_wid & 0xff) | ((crtc->h_sync_strt_wid >> 4) & 0x100); h_sync_dly = (crtc->h_sync_strt_wid >> 8) & 0x7; h_sync_wid = (crtc->h_sync_strt_wid >> 16) & 0x1f; h_sync_pol = (crtc->h_sync_strt_wid >> 21) & 0x1; v_total = crtc->v_tot_disp & 0x7ff; v_disp = (crtc->v_tot_disp >> 16) & 0x7ff; v_sync_strt = crtc->v_sync_strt_wid & 0x7ff; v_sync_wid = (crtc->v_sync_strt_wid >> 16) & 0x1f; v_sync_pol = (crtc->v_sync_strt_wid >> 21) & 0x1; c_sync = crtc->gen_cntl & CRTC_CSYNC_EN ? 1 : 0; pix_width = crtc->gen_cntl & CRTC_PIX_WIDTH_MASK; double_scan = crtc->gen_cntl & CRTC_DBL_SCAN_EN; interlace = crtc->gen_cntl & CRTC_INTERLACE_EN; /* convert */ xres = (h_disp + 1) * 8; yres = v_disp + 1; left = (h_total - h_sync_strt - h_sync_wid) * 8 - h_sync_dly; right = (h_sync_strt - h_disp) * 8 + h_sync_dly; hslen = h_sync_wid * 8; upper = v_total - v_sync_strt - v_sync_wid; lower = v_sync_strt - v_disp; vslen = v_sync_wid; sync = (h_sync_pol ? 0 : FB_SYNC_HOR_HIGH_ACT) | (v_sync_pol ? 0 : FB_SYNC_VERT_HIGH_ACT) | (c_sync ? 
		FB_SYNC_COMP_HIGH_ACT : 0);

	switch (pix_width) {
#if 0
	case CRTC_PIX_WIDTH_4BPP:
		bpp = 4;
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
#endif
	case CRTC_PIX_WIDTH_8BPP:
		bpp = 8;
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case CRTC_PIX_WIDTH_15BPP:	/* RGB 555 */
		bpp = 16;
		var->red.offset = 10;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 5;
		var->blue.offset = 0;
		var->blue.length = 5;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case CRTC_PIX_WIDTH_16BPP:	/* RGB 565 */
		bpp = 16;
		var->red.offset = 11;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 6;
		var->blue.offset = 0;
		var->blue.length = 5;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case CRTC_PIX_WIDTH_24BPP:	/* RGB 888 */
		bpp = 24;
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case CRTC_PIX_WIDTH_32BPP:	/* ARGB 8888 */
		bpp = 32;
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
		break;
	default:
		PRINTKE("Invalid pixel width\n");
		return -EINVAL;
	}

	/* output */
	var->xres = xres;
	var->yres = yres;
	var->xres_virtual = crtc->vxres;
	var->yres_virtual = crtc->vyres;
	var->bits_per_pixel = bpp;
	var->left_margin = left;
	var->right_margin = right;
	var->upper_margin = upper;
	var->lower_margin = lower;
	var->hsync_len = hslen;
	var->vsync_len = vslen;
	var->sync = sync;
	var->vmode = FB_VMODE_NONINTERLACED;
	/*
	 * In double scan mode, the vertical parameters are doubled, so we need
	 * to halve them to get the right values.
	 * In interlaced mode the values are already correct, so no correction
	 * is necessary.
*/ if (interlace) var->vmode = FB_VMODE_INTERLACED; if (double_scan) { var->vmode = FB_VMODE_DOUBLE; var->yres>>=1; var->upper_margin>>=1; var->lower_margin>>=1; var->vsync_len>>=1; } return 0; } /* ------------------------------------------------------------------------- */ static int atyfb_set_par(struct fb_info *info) { struct atyfb_par *par = (struct atyfb_par *) info->par; struct fb_var_screeninfo *var = &info->var; u32 tmp, pixclock; int err; #ifdef DEBUG struct fb_var_screeninfo debug; u32 pixclock_in_ps; #endif if (par->asleep) return 0; if ((err = aty_var_to_crtc(info, var, &par->crtc))) return err; pixclock = atyfb_get_pixclock(var, par); if (pixclock == 0) { PRINTKE("Invalid pixclock\n"); return -EINVAL; } else { if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &par->pll))) return err; } par->accel_flags = var->accel_flags; /* hack */ if (var->accel_flags) { info->fbops->fb_sync = atyfb_sync; info->flags &= ~FBINFO_HWACCEL_DISABLED; } else { info->fbops->fb_sync = NULL; info->flags |= FBINFO_HWACCEL_DISABLED; } if (par->blitter_may_be_busy) wait_for_idle(par); aty_set_crtc(par, &par->crtc); par->dac_ops->set_dac(info, &par->pll, var->bits_per_pixel, par->accel_flags); par->pll_ops->set_pll(info, &par->pll); #ifdef DEBUG if(par->pll_ops && par->pll_ops->pll_to_var) pixclock_in_ps = par->pll_ops->pll_to_var(info, &(par->pll)); else pixclock_in_ps = 0; if(0 == pixclock_in_ps) { PRINTKE("ALERT ops->pll_to_var get 0\n"); pixclock_in_ps = pixclock; } memset(&debug, 0, sizeof(debug)); if(!aty_crtc_to_var(&(par->crtc), &debug)) { u32 hSync, vRefresh; u32 h_disp, h_sync_strt, h_sync_end, h_total; u32 v_disp, v_sync_strt, v_sync_end, v_total; h_disp = debug.xres; h_sync_strt = h_disp + debug.right_margin; h_sync_end = h_sync_strt + debug.hsync_len; h_total = h_sync_end + debug.left_margin; v_disp = debug.yres; v_sync_strt = v_disp + debug.lower_margin; v_sync_end = v_sync_strt + debug.vsync_len; v_total = v_sync_end + debug.upper_margin; hSync = 1000000000 / (pixclock_in_ps * h_total); vRefresh = (hSync * 1000) / v_total; if (par->crtc.gen_cntl & CRTC_INTERLACE_EN) vRefresh *= 2; if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN) vRefresh /= 2; DPRINTK("atyfb_set_par\n"); DPRINTK(" Set Visible Mode to %ix%i-%i\n", var->xres, var->yres, var->bits_per_pixel); DPRINTK(" Virtual resolution %ix%i, pixclock_in_ps %i (calculated %i)\n", var->xres_virtual, var->yres_virtual, pixclock, pixclock_in_ps); DPRINTK(" Dot clock: %i MHz\n", 1000000 / pixclock_in_ps); DPRINTK(" Horizontal sync: %i kHz\n", hSync); DPRINTK(" Vertical refresh: %i Hz\n", vRefresh); DPRINTK(" x style: %i.%03i %i %i %i %i %i %i %i %i\n", 1000000 / pixclock_in_ps, 1000000 % pixclock_in_ps, h_disp, h_sync_strt, h_sync_end, h_total, v_disp, v_sync_strt, v_sync_end, v_total); DPRINTK(" fb style: %i %i %i %i %i %i %i %i %i\n", pixclock_in_ps, debug.left_margin, h_disp, debug.right_margin, debug.hsync_len, debug.upper_margin, v_disp, debug.lower_margin, debug.vsync_len); } #endif /* DEBUG */ if (!M64_HAS(INTEGRATED)) { /* Don't forget MEM_CNTL */ tmp = aty_ld_le32(MEM_CNTL, par) & 0xf0ffffff; switch (var->bits_per_pixel) { case 8: tmp |= 0x02000000; break; case 16: tmp |= 0x03000000; break; case 32: tmp |= 0x06000000; break; } aty_st_le32(MEM_CNTL, tmp, par); } else { tmp = aty_ld_le32(MEM_CNTL, par) & 0xf00fffff; if (!M64_HAS(MAGIC_POSTDIV)) tmp |= par->mem_refresh_rate << 20; switch (var->bits_per_pixel) { case 8: case 24: tmp |= 0x00000000; break; case 16: tmp |= 0x04000000; break; case 32: tmp |= 0x08000000; break; 
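		/*
		 * The magic constants OR'd in above appear to select the memory
		 * controller's pixel-depth field; 24 bpp deliberately shares the
		 * 8 bpp setting, since the rest of this driver handles packed
		 * 24 bpp as three 8 bpp bytes per pixel.
		 */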
} if (M64_HAS(CT_BUS)) { aty_st_le32(DAC_CNTL, 0x87010184, par); aty_st_le32(BUS_CNTL, 0x680000f9, par); } else if (M64_HAS(VT_BUS)) { aty_st_le32(DAC_CNTL, 0x87010184, par); aty_st_le32(BUS_CNTL, 0x680000f9, par); } else if (M64_HAS(MOBIL_BUS)) { aty_st_le32(DAC_CNTL, 0x80010102, par); aty_st_le32(BUS_CNTL, 0x7b33a040 | (par->aux_start ? BUS_APER_REG_DIS : 0), par); } else { /* GT */ aty_st_le32(DAC_CNTL, 0x86010102, par); aty_st_le32(BUS_CNTL, 0x7b23a040 | (par->aux_start ? BUS_APER_REG_DIS : 0), par); aty_st_le32(EXT_MEM_CNTL, aty_ld_le32(EXT_MEM_CNTL, par) | 0x5000001, par); } aty_st_le32(MEM_CNTL, tmp, par); } aty_st_8(DAC_MASK, 0xff, par); info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8; info->fix.visual = var->bits_per_pixel <= 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; /* Initialize the graphics engine */ if (par->accel_flags & FB_ACCELF_TEXT) aty_init_engine(par, info); #ifdef CONFIG_BOOTX_TEXT btext_update_display(info->fix.smem_start, (((par->crtc.h_tot_disp >> 16) & 0xff) + 1) * 8, ((par->crtc.v_tot_disp >> 16) & 0x7ff) + 1, var->bits_per_pixel, par->crtc.vxres * var->bits_per_pixel / 8); #endif /* CONFIG_BOOTX_TEXT */ #if 0 /* switch to accelerator mode */ if (!(par->crtc.gen_cntl & CRTC_EXT_DISP_EN)) aty_st_le32(CRTC_GEN_CNTL, par->crtc.gen_cntl | CRTC_EXT_DISP_EN, par); #endif #ifdef DEBUG { /* dump non shadow CRTC, pll, LCD registers */ int i; u32 base; /* CRTC registers */ base = 0x2000; printk("debug atyfb: Mach64 non-shadow register values:"); for (i = 0; i < 256; i = i+4) { if(i%16 == 0) printk("\ndebug atyfb: 0x%04X: ", base + i); printk(" %08X", aty_ld_le32(i, par)); } printk("\n\n"); #ifdef CONFIG_FB_ATY_CT /* PLL registers */ base = 0x00; printk("debug atyfb: Mach64 PLL register values:"); for (i = 0; i < 64; i++) { if(i%16 == 0) printk("\ndebug atyfb: 0x%02X: ", base + i); if(i%4 == 0) printk(" "); printk("%02X", aty_ld_pll_ct(i, par)); } printk("\n\n"); #endif /* CONFIG_FB_ATY_CT */ #ifdef CONFIG_FB_ATY_GENERIC_LCD if (par->lcd_table != 0) { /* LCD registers */ base = 0x00; printk("debug atyfb: LCD register values:"); if(M64_HAS(LT_LCD_REGS)) { for(i = 0; i <= POWER_MANAGEMENT; i++) { if(i == EXT_VERT_STRETCH) continue; printk("\ndebug atyfb: 0x%04X: ", lt_lcd_regs[i]); printk(" %08X", aty_ld_lcd(i, par)); } } else { for (i = 0; i < 64; i++) { if(i%4 == 0) printk("\ndebug atyfb: 0x%02X: ", base + i); printk(" %08X", aty_ld_lcd(i, par)); } } printk("\n\n"); } #endif /* CONFIG_FB_ATY_GENERIC_LCD */ } #endif /* DEBUG */ return 0; } static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct atyfb_par *par = (struct atyfb_par *) info->par; int err; struct crtc crtc; union aty_pll pll; u32 pixclock; memcpy(&pll, &(par->pll), sizeof(pll)); if((err = aty_var_to_crtc(info, var, &crtc))) return err; pixclock = atyfb_get_pixclock(var, par); if (pixclock == 0) { if (!(var->activate & FB_ACTIVATE_TEST)) PRINTKE("Invalid pixclock\n"); return -EINVAL; } else { if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &pll))) return err; } if (var->accel_flags & FB_ACCELF_TEXT) info->var.accel_flags = FB_ACCELF_TEXT; else info->var.accel_flags = 0; aty_crtc_to_var(&crtc, var); var->pixclock = par->pll_ops->pll_to_var(info, &pll); return 0; } static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info) { u32 xoffset = info->var.xoffset; u32 yoffset = info->var.yoffset; u32 vxres = par->crtc.vxres; u32 bpp = info->var.bits_per_pixel; par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | 
(vxres << 19); } /* * Open/Release the frame buffer device */ static int atyfb_open(struct fb_info *info, int user) { struct atyfb_par *par = (struct atyfb_par *) info->par; if (user) { par->open++; #ifdef __sparc__ par->mmaped = 0; #endif } return (0); } static irqreturn_t aty_irq(int irq, void *dev_id) { struct atyfb_par *par = dev_id; int handled = 0; u32 int_cntl; spin_lock(&par->int_lock); int_cntl = aty_ld_le32(CRTC_INT_CNTL, par); if (int_cntl & CRTC_VBLANK_INT) { /* clear interrupt */ aty_st_le32(CRTC_INT_CNTL, (int_cntl & CRTC_INT_EN_MASK) | CRTC_VBLANK_INT_AK, par); par->vblank.count++; if (par->vblank.pan_display) { par->vblank.pan_display = 0; aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par); } wake_up_interruptible(&par->vblank.wait); handled = 1; } spin_unlock(&par->int_lock); return IRQ_RETVAL(handled); } static int aty_enable_irq(struct atyfb_par *par, int reenable) { u32 int_cntl; if (!test_and_set_bit(0, &par->irq_flags)) { if (request_irq(par->irq, aty_irq, IRQF_SHARED, "atyfb", par)) { clear_bit(0, &par->irq_flags); return -EINVAL; } spin_lock_irq(&par->int_lock); int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK; /* clear interrupt */ aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_AK, par); /* enable interrupt */ aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_EN, par); spin_unlock_irq(&par->int_lock); } else if (reenable) { spin_lock_irq(&par->int_lock); int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK; if (!(int_cntl & CRTC_VBLANK_INT_EN)) { printk("atyfb: someone disabled IRQ [%08x]\n", int_cntl); /* re-enable interrupt */ aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_EN, par ); } spin_unlock_irq(&par->int_lock); } return 0; } static int aty_disable_irq(struct atyfb_par *par) { u32 int_cntl; if (test_and_clear_bit(0, &par->irq_flags)) { if (par->vblank.pan_display) { par->vblank.pan_display = 0; aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par); } spin_lock_irq(&par->int_lock); int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK; /* disable interrupt */ aty_st_le32(CRTC_INT_CNTL, int_cntl & ~CRTC_VBLANK_INT_EN, par ); spin_unlock_irq(&par->int_lock); free_irq(par->irq, par); } return 0; } static int atyfb_release(struct fb_info *info, int user) { struct atyfb_par *par = (struct atyfb_par *) info->par; if (user) { par->open--; mdelay(1); wait_for_idle(par); if (!par->open) { #ifdef __sparc__ int was_mmaped = par->mmaped; par->mmaped = 0; if (was_mmaped) { struct fb_var_screeninfo var; /* Now reset the default display config, we have no * idea what the program(s) which mmap'd the chip did * to the configuration, nor whether it restored it * correctly. 
*/ var = default_var; if (noaccel) var.accel_flags &= ~FB_ACCELF_TEXT; else var.accel_flags |= FB_ACCELF_TEXT; if (var.yres == var.yres_virtual) { u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2)); var.yres_virtual = ((videoram * 8) / var.bits_per_pixel) / var.xres_virtual; if (var.yres_virtual < var.yres) var.yres_virtual = var.yres; } } #endif aty_disable_irq(par); } } return (0); } /* * Pan or Wrap the Display * * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag */ static int atyfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 xres, yres, xoffset, yoffset; xres = (((par->crtc.h_tot_disp >> 16) & 0xff) + 1) * 8; yres = ((par->crtc.v_tot_disp >> 16) & 0x7ff) + 1; if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN) yres >>= 1; xoffset = (var->xoffset + 7) & ~7; yoffset = var->yoffset; if (xoffset + xres > par->crtc.vxres || yoffset + yres > par->crtc.vyres) return -EINVAL; info->var.xoffset = xoffset; info->var.yoffset = yoffset; if (par->asleep) return 0; set_off_pitch(par, info); if ((var->activate & FB_ACTIVATE_VBL) && !aty_enable_irq(par, 0)) { par->vblank.pan_display = 1; } else { par->vblank.pan_display = 0; aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par); } return 0; } static int aty_waitforvblank(struct atyfb_par *par, u32 crtc) { struct aty_interrupt *vbl; unsigned int count; int ret; switch (crtc) { case 0: vbl = &par->vblank; break; default: return -ENODEV; } ret = aty_enable_irq(par, 0); if (ret) return ret; count = vbl->count; ret = wait_event_interruptible_timeout(vbl->wait, count != vbl->count, HZ/10); if (ret < 0) { return ret; } if (ret == 0) { aty_enable_irq(par, 1); return -ETIMEDOUT; } return 0; } #ifdef DEBUG #define ATYIO_CLKR 0x41545900 /* ATY\00 */ #define ATYIO_CLKW 0x41545901 /* ATY\01 */ struct atyclk { u32 ref_clk_per; u8 pll_ref_div; u8 mclk_fb_div; u8 mclk_post_div; /* 1,2,3,4,8 */ u8 mclk_fb_mult; /* 2 or 4 */ u8 xclk_post_div; /* 1,2,3,4,8 */ u8 vclk_fb_div; u8 vclk_post_div; /* 1,2,3,4,6,8,12 */ u32 dsp_xclks_per_row; /* 0-16383 */ u32 dsp_loop_latency; /* 0-15 */ u32 dsp_precision; /* 0-7 */ u32 dsp_on; /* 0-2047 */ u32 dsp_off; /* 0-2047 */ }; #define ATYIO_FEATR 0x41545902 /* ATY\02 */ #define ATYIO_FEATW 0x41545903 /* ATY\03 */ #endif #ifndef FBIO_WAITFORVSYNC #define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) #endif static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) { struct atyfb_par *par = (struct atyfb_par *) info->par; #ifdef __sparc__ struct fbtype fbtyp; #endif switch (cmd) { #ifdef __sparc__ case FBIOGTYPE: fbtyp.fb_type = FBTYPE_PCI_GENERIC; fbtyp.fb_width = par->crtc.vxres; fbtyp.fb_height = par->crtc.vyres; fbtyp.fb_depth = info->var.bits_per_pixel; fbtyp.fb_cmsize = info->cmap.len; fbtyp.fb_size = info->fix.smem_len; if (copy_to_user((struct fbtype __user *) arg, &fbtyp, sizeof(fbtyp))) return -EFAULT; break; #endif /* __sparc__ */ case FBIO_WAITFORVSYNC: { u32 crtc; if (get_user(crtc, (__u32 __user *) arg)) return -EFAULT; return aty_waitforvblank(par, crtc); } break; #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT) case ATYIO_CLKR: if (M64_HAS(INTEGRATED)) { struct atyclk clk; union aty_pll *pll = &(par->pll); u32 dsp_config = pll->ct.dsp_config; u32 dsp_on_off = pll->ct.dsp_on_off; clk.ref_clk_per = par->ref_clk_per; clk.pll_ref_div = pll->ct.pll_ref_div; clk.mclk_fb_div = pll->ct.mclk_fb_div; clk.mclk_post_div = pll->ct.mclk_post_div_real; clk.mclk_fb_mult = pll->ct.mclk_fb_mult; clk.xclk_post_div = 
pll->ct.xclk_post_div_real; clk.vclk_fb_div = pll->ct.vclk_fb_div; clk.vclk_post_div = pll->ct.vclk_post_div_real; clk.dsp_xclks_per_row = dsp_config & 0x3fff; clk.dsp_loop_latency = (dsp_config >> 16) & 0xf; clk.dsp_precision = (dsp_config >> 20) & 7; clk.dsp_off = dsp_on_off & 0x7ff; clk.dsp_on = (dsp_on_off >> 16) & 0x7ff; if (copy_to_user((struct atyclk __user *) arg, &clk, sizeof(clk))) return -EFAULT; } else return -EINVAL; break; case ATYIO_CLKW: if (M64_HAS(INTEGRATED)) { struct atyclk clk; union aty_pll *pll = &(par->pll); if (copy_from_user(&clk, (struct atyclk __user *) arg, sizeof(clk))) return -EFAULT; par->ref_clk_per = clk.ref_clk_per; pll->ct.pll_ref_div = clk.pll_ref_div; pll->ct.mclk_fb_div = clk.mclk_fb_div; pll->ct.mclk_post_div_real = clk.mclk_post_div; pll->ct.mclk_fb_mult = clk.mclk_fb_mult; pll->ct.xclk_post_div_real = clk.xclk_post_div; pll->ct.vclk_fb_div = clk.vclk_fb_div; pll->ct.vclk_post_div_real = clk.vclk_post_div; pll->ct.dsp_config = (clk.dsp_xclks_per_row & 0x3fff) | ((clk.dsp_loop_latency & 0xf)<<16)| ((clk.dsp_precision & 7)<<20); pll->ct.dsp_on_off = (clk.dsp_off & 0x7ff) | ((clk.dsp_on & 0x7ff)<<16); /*aty_calc_pll_ct(info, &pll->ct);*/ aty_set_pll_ct(info, pll); } else return -EINVAL; break; case ATYIO_FEATR: if (get_user(par->features, (u32 __user *) arg)) return -EFAULT; break; case ATYIO_FEATW: if (put_user(par->features, (u32 __user *) arg)) return -EFAULT; break; #endif /* DEBUG && CONFIG_FB_ATY_CT */ default: return -EINVAL; } return 0; } static int atyfb_sync(struct fb_info *info) { struct atyfb_par *par = (struct atyfb_par *) info->par; if (par->blitter_may_be_busy) wait_for_idle(par); return 0; } #ifdef __sparc__ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct atyfb_par *par = (struct atyfb_par *) info->par; unsigned int size, page, map_size = 0; unsigned long map_offset = 0; unsigned long off; int i; if (!par->mmap_map) return -ENXIO; if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; off = vma->vm_pgoff << PAGE_SHIFT; size = vma->vm_end - vma->vm_start; /* To stop the swapper from even considering these pages. */ vma->vm_flags |= (VM_IO | VM_RESERVED); if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) || ((off == info->fix.smem_len) && (size == PAGE_SIZE))) off += 0x8000000000000000UL; vma->vm_pgoff = off >> PAGE_SHIFT; /* propagate off changes */ /* Each page, see which map applies */ for (page = 0; page < size;) { map_size = 0; for (i = 0; par->mmap_map[i].size; i++) { unsigned long start = par->mmap_map[i].voff; unsigned long end = start + par->mmap_map[i].size; unsigned long offset = off + page; if (start > offset) continue; if (offset >= end) continue; map_size = par->mmap_map[i].size - (offset - start); map_offset = par->mmap_map[i].poff + (offset - start); break; } if (!map_size) { page += PAGE_SIZE; continue; } if (page + map_size > size) map_size = size - page; pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask); pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag; if (remap_pfn_range(vma, vma->vm_start + page, map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot)) return -EAGAIN; page += map_size; } if (!map_size) return -EINVAL; if (!par->mmaped) par->mmaped = 1; return 0; } #endif /* __sparc__ */ #if defined(CONFIG_PM) && defined(CONFIG_PCI) #ifdef CONFIG_PPC_PMAC /* Power management routines. Those are used for PowerBook sleep. 
*/ static int aty_power_mgmt(int sleep, struct atyfb_par *par) { u32 pm; int timeout; pm = aty_ld_lcd(POWER_MANAGEMENT, par); pm = (pm & ~PWR_MGT_MODE_MASK) | PWR_MGT_MODE_REG; aty_st_lcd(POWER_MANAGEMENT, pm, par); pm = aty_ld_lcd(POWER_MANAGEMENT, par); timeout = 2000; if (sleep) { /* Sleep */ pm &= ~PWR_MGT_ON; aty_st_lcd(POWER_MANAGEMENT, pm, par); pm = aty_ld_lcd(POWER_MANAGEMENT, par); udelay(10); pm &= ~(PWR_BLON | AUTO_PWR_UP); pm |= SUSPEND_NOW; aty_st_lcd(POWER_MANAGEMENT, pm, par); pm = aty_ld_lcd(POWER_MANAGEMENT, par); udelay(10); pm |= PWR_MGT_ON; aty_st_lcd(POWER_MANAGEMENT, pm, par); do { pm = aty_ld_lcd(POWER_MANAGEMENT, par); mdelay(1); if ((--timeout) == 0) break; } while ((pm & PWR_MGT_STATUS_MASK) != PWR_MGT_STATUS_SUSPEND); } else { /* Wakeup */ pm &= ~PWR_MGT_ON; aty_st_lcd(POWER_MANAGEMENT, pm, par); pm = aty_ld_lcd(POWER_MANAGEMENT, par); udelay(10); pm &= ~SUSPEND_NOW; pm |= (PWR_BLON | AUTO_PWR_UP); aty_st_lcd(POWER_MANAGEMENT, pm, par); pm = aty_ld_lcd(POWER_MANAGEMENT, par); udelay(10); pm |= PWR_MGT_ON; aty_st_lcd(POWER_MANAGEMENT, pm, par); do { pm = aty_ld_lcd(POWER_MANAGEMENT, par); mdelay(1); if ((--timeout) == 0) break; } while ((pm & PWR_MGT_STATUS_MASK) != 0); } mdelay(500); return timeout ? 0 : -EIO; } #endif static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct fb_info *info = pci_get_drvdata(pdev); struct atyfb_par *par = (struct atyfb_par *) info->par; if (state.event == pdev->dev.power.power_state.event) return 0; acquire_console_sem(); fb_set_suspend(info, 1); /* Idle & reset engine */ wait_for_idle(par); aty_reset_engine(par); /* Blank display and LCD */ atyfb_blank(FB_BLANK_POWERDOWN, info); par->asleep = 1; par->lock_blank = 1; #ifdef CONFIG_PPC_PMAC /* Set chip to "suspend" mode */ if (aty_power_mgmt(1, par)) { par->asleep = 0; par->lock_blank = 0; atyfb_blank(FB_BLANK_UNBLANK, info); fb_set_suspend(info, 0); release_console_sem(); return -EIO; } #else pci_set_power_state(pdev, pci_choose_state(pdev, state)); #endif release_console_sem(); pdev->dev.power.power_state = state; return 0; } static int atyfb_pci_resume(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct atyfb_par *par = (struct atyfb_par *) info->par; if (pdev->dev.power.power_state.event == PM_EVENT_ON) return 0; acquire_console_sem(); #ifdef CONFIG_PPC_PMAC if (pdev->dev.power.power_state.event == 2) aty_power_mgmt(0, par); #else pci_set_power_state(pdev, PCI_D0); #endif aty_resume_chip(info); par->asleep = 0; /* Restore display */ atyfb_set_par(info); /* Refresh */ fb_set_suspend(info, 0); /* Unblank */ par->lock_blank = 0; atyfb_blank(FB_BLANK_UNBLANK, info); release_console_sem(); pdev->dev.power.power_state = PMSG_ON; return 0; } #endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */ /* Backlight */ #ifdef CONFIG_FB_ATY_BACKLIGHT #define MAX_LEVEL 0xFF static int aty_bl_get_level_brightness(struct atyfb_par *par, int level) { struct fb_info *info = pci_get_drvdata(par->pdev); int atylevel; /* Get and convert the value */ /* No locking of bl_curve since we read a single value */ atylevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL; if (atylevel < 0) atylevel = 0; else if (atylevel > MAX_LEVEL) atylevel = MAX_LEVEL; return atylevel; } static int aty_bl_update_status(struct backlight_device *bd) { struct atyfb_par *par = bl_get_data(bd); unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par); int level; if (bd->props.power != FB_BLANK_UNBLANK || bd->props.fb_blank != FB_BLANK_UNBLANK) level = 0; else level = 
bd->props.brightness; reg |= (BLMOD_EN | BIASMOD_EN); if (level > 0) { reg &= ~BIAS_MOD_LEVEL_MASK; reg |= (aty_bl_get_level_brightness(par, level) << BIAS_MOD_LEVEL_SHIFT); } else { reg &= ~BIAS_MOD_LEVEL_MASK; reg |= (aty_bl_get_level_brightness(par, 0) << BIAS_MOD_LEVEL_SHIFT); } aty_st_lcd(LCD_MISC_CNTL, reg, par); return 0; } static int aty_bl_get_brightness(struct backlight_device *bd) { return bd->props.brightness; } static struct backlight_ops aty_bl_data = { .get_brightness = aty_bl_get_brightness, .update_status = aty_bl_update_status, }; static void aty_bl_init(struct atyfb_par *par) { struct fb_info *info = pci_get_drvdata(par->pdev); struct backlight_device *bd; char name[12]; #ifdef CONFIG_PMAC_BACKLIGHT if (!pmac_has_backlight_type("ati")) return; #endif snprintf(name, sizeof(name), "atybl%d", info->node); bd = backlight_device_register(name, info->dev, par, &aty_bl_data); if (IS_ERR(bd)) { info->bl_dev = NULL; printk(KERN_WARNING "aty: Backlight registration failed\n"); goto error; } info->bl_dev = bd; fb_bl_default_curve(info, 0, 0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL, 0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL); bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd->props.brightness = bd->props.max_brightness; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); printk("aty: Backlight initialized (%s)\n", name); return; error: return; } static void aty_bl_exit(struct backlight_device *bd) { backlight_device_unregister(bd); printk("aty: Backlight unloaded\n"); } #endif /* CONFIG_FB_ATY_BACKLIGHT */ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk) { const int ragepro_tbl[] = { 44, 50, 55, 66, 75, 80, 100 }; const int ragexl_tbl[] = { 50, 66, 75, 83, 90, 95, 100, 105, 110, 115, 120, 125, 133, 143, 166 }; const int *refresh_tbl; int i, size; if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) { refresh_tbl = ragexl_tbl; size = ARRAY_SIZE(ragexl_tbl); } else { refresh_tbl = ragepro_tbl; size = ARRAY_SIZE(ragepro_tbl); } for (i=0; i < size; i++) { if (xclk < refresh_tbl[i]) break; } par->mem_refresh_rate = i; } /* * Initialisation */ static struct fb_info *fb_list = NULL; #if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par, struct fb_var_screeninfo *var) { int ret = -EINVAL; if (par->lcd_table != 0 && (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { *var = default_var; var->xres = var->xres_virtual = par->lcd_hdisp; var->right_margin = par->lcd_right_margin; var->left_margin = par->lcd_hblank_len - (par->lcd_right_margin + par->lcd_hsync_dly + par->lcd_hsync_len); var->hsync_len = par->lcd_hsync_len + par->lcd_hsync_dly; var->yres = var->yres_virtual = par->lcd_vdisp; var->lower_margin = par->lcd_lower_margin; var->upper_margin = par->lcd_vblank_len - (par->lcd_lower_margin + par->lcd_vsync_len); var->vsync_len = par->lcd_vsync_len; var->pixclock = par->lcd_pixclock; ret = 0; } return ret; } #endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */ static int __devinit aty_init(struct fb_info *info) { struct atyfb_par *par = (struct atyfb_par *) info->par; const char *ramname = NULL, *xtal; int gtb_memsize, has_var = 0; struct fb_var_screeninfo var; init_waitqueue_head(&par->vblank.wait); spin_lock_init(&par->int_lock); #ifdef CONFIG_FB_ATY_GX if (!M64_HAS(INTEGRATED)) { u32 stat0; u8 dac_type, dac_subtype, clk_type; stat0 = aty_ld_le32(CONFIG_STAT0, par); par->bus_type = (stat0 >> 0) & 0x07; par->ram_type = (stat0 >> 3) & 0x07; ramname = aty_gx_ram[par->ram_type]; 
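		/*
		 * CONFIG_STAT0 packs the probed bus type into bits 0-2 and the
		 * RAM type into bits 3-5; the RAM type indexes the aty_gx_ram
		 * name table above.
		 */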
/* FIXME: clockchip/RAMDAC probing? */ dac_type = (aty_ld_le32(DAC_CNTL, par) >> 16) & 0x07; #ifdef CONFIG_ATARI clk_type = CLK_ATI18818_1; dac_type = (stat0 >> 9) & 0x07; if (dac_type == 0x07) dac_subtype = DAC_ATT20C408; else dac_subtype = (aty_ld_8(SCRATCH_REG1 + 1, par) & 0xF0) | dac_type; #else dac_type = DAC_IBMRGB514; dac_subtype = DAC_IBMRGB514; clk_type = CLK_IBMRGB514; #endif switch (dac_subtype) { case DAC_IBMRGB514: par->dac_ops = &aty_dac_ibm514; break; #ifdef CONFIG_ATARI case DAC_ATI68860_B: case DAC_ATI68860_C: par->dac_ops = &aty_dac_ati68860b; break; case DAC_ATT20C408: case DAC_ATT21C498: par->dac_ops = &aty_dac_att21c498; break; #endif default: PRINTKI("aty_init: DAC type not implemented yet!\n"); par->dac_ops = &aty_dac_unsupported; break; } switch (clk_type) { #ifdef CONFIG_ATARI case CLK_ATI18818_1: par->pll_ops = &aty_pll_ati18818_1; break; #else case CLK_IBMRGB514: par->pll_ops = &aty_pll_ibm514; break; #endif #if 0 /* dead code */ case CLK_STG1703: par->pll_ops = &aty_pll_stg1703; break; case CLK_CH8398: par->pll_ops = &aty_pll_ch8398; break; case CLK_ATT20C408: par->pll_ops = &aty_pll_att20c408; break; #endif default: PRINTKI("aty_init: CLK type not implemented yet!"); par->pll_ops = &aty_pll_unsupported; break; } } #endif /* CONFIG_FB_ATY_GX */ #ifdef CONFIG_FB_ATY_CT if (M64_HAS(INTEGRATED)) { par->dac_ops = &aty_dac_ct; par->pll_ops = &aty_pll_ct; par->bus_type = PCI; par->ram_type = (aty_ld_le32(CONFIG_STAT0, par) & 0x07); ramname = aty_ct_ram[par->ram_type]; /* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */ if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM) par->pll_limits.mclk = 63; /* Mobility + 32bit memory interface need halved XCLK. */ if (M64_HAS(MOBIL_BUS) && par->ram_type == SDRAM32) par->pll_limits.xclk = (par->pll_limits.xclk + 1) >> 1; } #endif #ifdef CONFIG_PPC_PMAC /* The Apple iBook1 uses non-standard memory frequencies. We detect it * and set the frequency manually. */ if (machine_is_compatible("PowerBook2,1")) { par->pll_limits.mclk = 70; par->pll_limits.xclk = 53; } #endif /* Allow command line to override clocks. 
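	 * e.g. booting with "video=atyfb:mclk:63" (or the equivalent module
	 * parameters) caps the memory clock at 63 MHz; pll, mclk and xclk
	 * are all given in MHz.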
*/ if (pll) par->pll_limits.pll_max = pll; if (mclk) par->pll_limits.mclk = mclk; if (xclk) par->pll_limits.xclk = xclk; aty_calc_mem_refresh(par, par->pll_limits.xclk); par->pll_per = 1000000/par->pll_limits.pll_max; par->mclk_per = 1000000/par->pll_limits.mclk; par->xclk_per = 1000000/par->pll_limits.xclk; par->ref_clk_per = 1000000000000ULL / 14318180; xtal = "14.31818"; #ifdef CONFIG_FB_ATY_CT if (M64_HAS(GTB_DSP)) { u8 pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par); if (pll_ref_div) { int diff1, diff2; diff1 = 510 * 14 / pll_ref_div - par->pll_limits.pll_max; diff2 = 510 * 29 / pll_ref_div - par->pll_limits.pll_max; if (diff1 < 0) diff1 = -diff1; if (diff2 < 0) diff2 = -diff2; if (diff2 < diff1) { par->ref_clk_per = 1000000000000ULL / 29498928; xtal = "29.498928"; } } } #endif /* CONFIG_FB_ATY_CT */ /* save previous video mode */ aty_get_crtc(par, &saved_crtc); if(par->pll_ops->get_pll) par->pll_ops->get_pll(info, &saved_pll); par->mem_cntl = aty_ld_le32(MEM_CNTL, par); gtb_memsize = M64_HAS(GTB_DSP); if (gtb_memsize) switch (par->mem_cntl & 0xF) { /* 0xF used instead of MEM_SIZE_ALIAS */ case MEM_SIZE_512K: info->fix.smem_len = 0x80000; break; case MEM_SIZE_1M: info->fix.smem_len = 0x100000; break; case MEM_SIZE_2M_GTB: info->fix.smem_len = 0x200000; break; case MEM_SIZE_4M_GTB: info->fix.smem_len = 0x400000; break; case MEM_SIZE_6M_GTB: info->fix.smem_len = 0x600000; break; case MEM_SIZE_8M_GTB: info->fix.smem_len = 0x800000; break; default: info->fix.smem_len = 0x80000; } else switch (par->mem_cntl & MEM_SIZE_ALIAS) { case MEM_SIZE_512K: info->fix.smem_len = 0x80000; break; case MEM_SIZE_1M: info->fix.smem_len = 0x100000; break; case MEM_SIZE_2M: info->fix.smem_len = 0x200000; break; case MEM_SIZE_4M: info->fix.smem_len = 0x400000; break; case MEM_SIZE_6M: info->fix.smem_len = 0x600000; break; case MEM_SIZE_8M: info->fix.smem_len = 0x800000; break; default: info->fix.smem_len = 0x80000; } if (M64_HAS(MAGIC_VRAM_SIZE)) { if (aty_ld_le32(CONFIG_STAT1, par) & 0x40000000) info->fix.smem_len += 0x400000; } if (vram) { info->fix.smem_len = vram * 1024; par->mem_cntl &= ~(gtb_memsize ? 0xF : MEM_SIZE_ALIAS); if (info->fix.smem_len <= 0x80000) par->mem_cntl |= MEM_SIZE_512K; else if (info->fix.smem_len <= 0x100000) par->mem_cntl |= MEM_SIZE_1M; else if (info->fix.smem_len <= 0x200000) par->mem_cntl |= gtb_memsize ? MEM_SIZE_2M_GTB : MEM_SIZE_2M; else if (info->fix.smem_len <= 0x400000) par->mem_cntl |= gtb_memsize ? MEM_SIZE_4M_GTB : MEM_SIZE_4M; else if (info->fix.smem_len <= 0x600000) par->mem_cntl |= gtb_memsize ? MEM_SIZE_6M_GTB : MEM_SIZE_6M; else par->mem_cntl |= gtb_memsize ? MEM_SIZE_8M_GTB : MEM_SIZE_8M; aty_st_le32(MEM_CNTL, par->mem_cntl, par); } /* * Reg Block 0 (CT-compatible block) is at mmio_start * Reg Block 1 (multimedia extensions) is at mmio_start - 0x400 */ if (M64_HAS(GX)) { info->fix.mmio_len = 0x400; info->fix.accel = FB_ACCEL_ATI_MACH64GX; } else if (M64_HAS(CT)) { info->fix.mmio_len = 0x400; info->fix.accel = FB_ACCEL_ATI_MACH64CT; } else if (M64_HAS(VT)) { info->fix.mmio_start -= 0x400; info->fix.mmio_len = 0x800; info->fix.accel = FB_ACCEL_ATI_MACH64VT; } else {/* GT */ info->fix.mmio_start -= 0x400; info->fix.mmio_len = 0x800; info->fix.accel = FB_ACCEL_ATI_MACH64GT; } PRINTKI("%d%c %s, %s MHz XTAL, %d MHz PLL, %d Mhz MCLK, %d MHz XCLK\n", info->fix.smem_len == 0x80000 ? 512 : (info->fix.smem_len >> 20), info->fix.smem_len == 0x80000 ? 
'K' : 'M', ramname, xtal, par->pll_limits.pll_max, par->pll_limits.mclk, par->pll_limits.xclk); #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT) if (M64_HAS(INTEGRATED)) { int i; printk("debug atyfb: BUS_CNTL DAC_CNTL MEM_CNTL EXT_MEM_CNTL CRTC_GEN_CNTL " "DSP_CONFIG DSP_ON_OFF CLOCK_CNTL\n" "debug atyfb: %08x %08x %08x %08x %08x %08x %08x %08x\n" "debug atyfb: PLL", aty_ld_le32(BUS_CNTL, par), aty_ld_le32(DAC_CNTL, par), aty_ld_le32(MEM_CNTL, par), aty_ld_le32(EXT_MEM_CNTL, par), aty_ld_le32(CRTC_GEN_CNTL, par), aty_ld_le32(DSP_CONFIG, par), aty_ld_le32(DSP_ON_OFF, par), aty_ld_le32(CLOCK_CNTL, par)); for (i = 0; i < 40; i++) printk(" %02x", aty_ld_pll_ct(i, par)); printk("\n"); } #endif if(par->pll_ops->init_pll) par->pll_ops->init_pll(info, &par->pll); if (par->pll_ops->resume_pll) par->pll_ops->resume_pll(info, &par->pll); /* * Last page of 8 MB (4 MB on ISA) aperture is MMIO, * unless the auxiliary register aperture is used. */ if (!par->aux_start && (info->fix.smem_len == 0x800000 || (par->bus_type == ISA && info->fix.smem_len == 0x400000))) info->fix.smem_len -= GUI_RESERVE; /* * Disable register access through the linear aperture * if the auxiliary aperture is used so we can access * the full 8 MB of video RAM on 8 MB boards. */ if (par->aux_start) aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par); #ifdef CONFIG_MTRR par->mtrr_aper = -1; par->mtrr_reg = -1; if (!nomtrr) { /* Cover the whole resource. */ par->mtrr_aper = mtrr_add(par->res_start, par->res_size, MTRR_TYPE_WRCOMB, 1); if (par->mtrr_aper >= 0 && !par->aux_start) { /* Make a hole for mmio. */ par->mtrr_reg = mtrr_add(par->res_start + 0x800000 - GUI_RESERVE, GUI_RESERVE, MTRR_TYPE_UNCACHABLE, 1); if (par->mtrr_reg < 0) { mtrr_del(par->mtrr_aper, 0, 0); par->mtrr_aper = -1; } } } #endif info->fbops = &atyfb_ops; info->pseudo_palette = par->pseudo_palette; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_YPAN; #ifdef CONFIG_PMAC_BACKLIGHT if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) { /* these bits let the 101 powerbook wake up from sleep -- paulus */ aty_st_lcd(POWER_MANAGEMENT, aty_ld_lcd(POWER_MANAGEMENT, par) | (USE_F32KHZ | TRISTATE_MEM_EN), par); } else #endif if (M64_HAS(MOBIL_BUS) && backlight) { #ifdef CONFIG_FB_ATY_BACKLIGHT aty_bl_init (par); #endif } memset(&var, 0, sizeof(var)); #ifdef CONFIG_PPC if (machine_is(powermac)) { /* * FIXME: The NVRAM stuff should be put in a Mac-specific file, as it * applies to all Mac video cards */ if (mode) { if (mac_find_mode(&var, info, mode, 8)) has_var = 1; } else { if (default_vmode == VMODE_CHOOSE) { int sense; if (M64_HAS(G3_PB_1024x768)) /* G3 PowerBook with 1024x768 LCD */ default_vmode = VMODE_1024_768_60; else if (machine_is_compatible("iMac")) default_vmode = VMODE_1024_768_75; else if (machine_is_compatible ("PowerBook2,1")) /* iBook with 800x600 LCD */ default_vmode = VMODE_800_600_60; else default_vmode = VMODE_640_480_67; sense = read_aty_sense(par); PRINTKI("monitor sense=%x, mode %d\n", sense, mac_map_monitor_sense(sense)); } if (default_vmode <= 0 || default_vmode > VMODE_MAX) default_vmode = VMODE_640_480_60; if (default_cmode < CMODE_8 || default_cmode > CMODE_32) default_cmode = CMODE_8; if (!mac_vmode_to_var(default_vmode, default_cmode, &var)) has_var = 1; } } #endif /* !CONFIG_PPC */ #if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) if (!atyfb_get_timings_from_lcd(par, &var)) has_var = 1; #endif if (mode && fb_find_mode(&var, 
info, mode, NULL, 0, &defmode, 8)) has_var = 1; if (!has_var) var = default_var; if (noaccel) var.accel_flags &= ~FB_ACCELF_TEXT; else var.accel_flags |= FB_ACCELF_TEXT; if (comp_sync != -1) { if (!comp_sync) var.sync &= ~FB_SYNC_COMP_HIGH_ACT; else var.sync |= FB_SYNC_COMP_HIGH_ACT; } if (var.yres == var.yres_virtual) { u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2)); var.yres_virtual = ((videoram * 8) / var.bits_per_pixel) / var.xres_virtual; if (var.yres_virtual < var.yres) var.yres_virtual = var.yres; } if (atyfb_check_var(&var, info)) { PRINTKE("can't set default video mode\n"); goto aty_init_exit; } #ifdef CONFIG_FB_ATY_CT if (!noaccel && M64_HAS(INTEGRATED)) aty_init_cursor(info); #endif /* CONFIG_FB_ATY_CT */ info->var = var; if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) goto aty_init_exit; if (register_framebuffer(info) < 0) { fb_dealloc_cmap(&info->cmap); goto aty_init_exit; } fb_list = info; PRINTKI("fb%d: %s frame buffer device on %s\n", info->node, info->fix.id, par->bus_type == ISA ? "ISA" : "PCI"); return 0; aty_init_exit: /* restore video mode */ aty_set_crtc(par, &saved_crtc); par->pll_ops->set_pll(info, &saved_pll); #ifdef CONFIG_MTRR if (par->mtrr_reg >= 0) { mtrr_del(par->mtrr_reg, 0, 0); par->mtrr_reg = -1; } if (par->mtrr_aper >= 0) { mtrr_del(par->mtrr_aper, 0, 0); par->mtrr_aper = -1; } #endif return -1; } static void aty_resume_chip(struct fb_info *info) { struct atyfb_par *par = info->par; aty_st_le32(MEM_CNTL, par->mem_cntl, par); if (par->pll_ops->resume_pll) par->pll_ops->resume_pll(info, &par->pll); if (par->aux_start) aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par); } #ifdef CONFIG_ATARI static int __devinit store_video_par(char *video_str, unsigned char m64_num) { char *p; unsigned long vmembase, size, guiregbase; PRINTKI("store_video_par() '%s' \n", video_str); if (!(p = strsep(&video_str, ";")) || !*p) goto mach64_invalid; vmembase = simple_strtoul(p, NULL, 0); if (!(p = strsep(&video_str, ";")) || !*p) goto mach64_invalid; size = simple_strtoul(p, NULL, 0); if (!(p = strsep(&video_str, ";")) || !*p) goto mach64_invalid; guiregbase = simple_strtoul(p, NULL, 0); phys_vmembase[m64_num] = vmembase; phys_size[m64_num] = size; phys_guiregbase[m64_num] = guiregbase; PRINTKI("stored them all: $%08lX $%08lX $%08lX \n", vmembase, size, guiregbase); return 0; mach64_invalid: phys_vmembase[m64_num] = 0; return -1; } #endif /* CONFIG_ATARI */ /* * Blank the display. 
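 * Each level ORs extra CRTC_GEN_CNTL bits on top of the base mask:
 * 0x40 blanks the screen, and judging by the FB_BLANK cases below,
 * 0x8 additionally drops VSYNC and 0x4 drops HSYNC.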
*/ static int atyfb_blank(int blank, struct fb_info *info) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 gen_cntl; if (par->lock_blank || par->asleep) return 0; #ifdef CONFIG_FB_ATY_BACKLIGHT #elif defined(CONFIG_FB_ATY_GENERIC_LCD) if (par->lcd_table && blank > FB_BLANK_NORMAL && (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par); pm &= ~PWR_BLON; aty_st_lcd(POWER_MANAGEMENT, pm, par); } #endif gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par); gen_cntl &= ~0x400004c; switch (blank) { case FB_BLANK_UNBLANK: break; case FB_BLANK_NORMAL: gen_cntl |= 0x4000040; break; case FB_BLANK_VSYNC_SUSPEND: gen_cntl |= 0x4000048; break; case FB_BLANK_HSYNC_SUSPEND: gen_cntl |= 0x4000044; break; case FB_BLANK_POWERDOWN: gen_cntl |= 0x400004c; break; } aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par); #ifdef CONFIG_FB_ATY_BACKLIGHT #elif defined(CONFIG_FB_ATY_GENERIC_LCD) if (par->lcd_table && blank <= FB_BLANK_NORMAL && (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par); pm |= PWR_BLON; aty_st_lcd(POWER_MANAGEMENT, pm, par); } #endif return 0; } static void aty_st_pal(u_int regno, u_int red, u_int green, u_int blue, const struct atyfb_par *par) { aty_st_8(DAC_W_INDEX, regno, par); aty_st_8(DAC_DATA, red, par); aty_st_8(DAC_DATA, green, par); aty_st_8(DAC_DATA, blue, par); } /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the * entries in the var structure). Return != 0 for invalid regno. * !! 4 & 8 = PSEUDO, > 8 = DIRECTCOLOR */ static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct atyfb_par *par = (struct atyfb_par *) info->par; int i, depth; u32 *pal = info->pseudo_palette; depth = info->var.bits_per_pixel; if (depth == 16) depth = (info->var.green.length == 5) ? 15 : 16; if (par->asleep) return 0; if (regno > 255 || (depth == 16 && regno > 63) || (depth == 15 && regno > 31)) return 1; red >>= 8; green >>= 8; blue >>= 8; par->palette[regno].red = red; par->palette[regno].green = green; par->palette[regno].blue = blue; if (regno < 16) { switch (depth) { case 15: pal[regno] = (regno << 10) | (regno << 5) | regno; break; case 16: pal[regno] = (regno << 11) | (regno << 5) | regno; break; case 24: pal[regno] = (regno << 16) | (regno << 8) | regno; break; case 32: i = (regno << 8) | regno; pal[regno] = (i << 16) | i; break; } } i = aty_ld_8(DAC_CNTL, par) & 0xfc; if (M64_HAS(EXTRA_BRIGHT)) i |= 0x2; /* DAC_CNTL | 0x2 turns off the extra brightness for gt */ aty_st_8(DAC_CNTL, i, par); aty_st_8(DAC_MASK, 0xff, par); if (M64_HAS(INTEGRATED)) { if (depth == 16) { if (regno < 32) aty_st_pal(regno << 3, red, par->palette[regno<<1].green, blue, par); red = par->palette[regno>>1].red; blue = par->palette[regno>>1].blue; regno <<= 2; } else if (depth == 15) { regno <<= 3; for(i = 0; i < 8; i++) { aty_st_pal(regno + i, red, green, blue, par); } } } aty_st_pal(regno, red, green, blue, par); return 0; } #ifdef CONFIG_PCI #ifdef __sparc__ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info, unsigned long addr) { struct atyfb_par *par = info->par; struct device_node *dp; char prop[128]; int node, len, i, j, ret; u32 mem, chip_id; /* * Map memory-mapped registers. */ par->ati_regbase = (void *)addr + 0x7ffc00UL; info->fix.mmio_start = addr + 0x7ffc00UL; /* * Map in big-endian aperture. 
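	 * (The aperture at +0x800000 presents the same VRAM with byte-swapped
	 * accesses, which is what big-endian SPARC clients expect.)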
*/ info->screen_base = (char *) (addr + 0x800000UL); info->fix.smem_start = addr + 0x800000UL; /* * Figure mmap addresses from PCI config space. * Split Framebuffer in big- and little-endian halfs. */ for (i = 0; i < 6 && pdev->resource[i].start; i++) /* nothing */ ; j = i + 4; par->mmap_map = kcalloc(j, sizeof(*par->mmap_map), GFP_ATOMIC); if (!par->mmap_map) { PRINTKE("atyfb_setup_sparc() can't alloc mmap_map\n"); return -ENOMEM; } for (i = 0, j = 2; i < 6 && pdev->resource[i].start; i++) { struct resource *rp = &pdev->resource[i]; int io, breg = PCI_BASE_ADDRESS_0 + (i << 2); unsigned long base; u32 size, pbase; base = rp->start; io = (rp->flags & IORESOURCE_IO); size = rp->end - base + 1; pci_read_config_dword(pdev, breg, &pbase); if (io) size &= ~1; /* * Map the framebuffer a second time, this time without * the braindead _PAGE_IE setting. This is used by the * fixed Xserver, but we need to maintain the old mapping * to stay compatible with older ones... */ if (base == addr) { par->mmap_map[j].voff = (pbase + 0x10000000) & PAGE_MASK; par->mmap_map[j].poff = base & PAGE_MASK; par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK; par->mmap_map[j].prot_mask = _PAGE_CACHE; par->mmap_map[j].prot_flag = _PAGE_E; j++; } /* * Here comes the old framebuffer mapping with _PAGE_IE * set for the big endian half of the framebuffer... */ if (base == addr) { par->mmap_map[j].voff = (pbase + 0x800000) & PAGE_MASK; par->mmap_map[j].poff = (base + 0x800000) & PAGE_MASK; par->mmap_map[j].size = 0x800000; par->mmap_map[j].prot_mask = _PAGE_CACHE; par->mmap_map[j].prot_flag = _PAGE_E | _PAGE_IE; size -= 0x800000; j++; } par->mmap_map[j].voff = pbase & PAGE_MASK; par->mmap_map[j].poff = base & PAGE_MASK; par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK; par->mmap_map[j].prot_mask = _PAGE_CACHE; par->mmap_map[j].prot_flag = _PAGE_E; j++; } if((ret = correct_chipset(par))) return ret; if (IS_XL(pdev->device)) { /* * Fix PROMs idea of MEM_CNTL settings... */ mem = aty_ld_le32(MEM_CNTL, par); chip_id = aty_ld_le32(CONFIG_CHIP_ID, par); if (((chip_id & CFG_CHIP_TYPE) == VT_CHIP_ID) && !((chip_id >> 24) & 1)) { switch (mem & 0x0f) { case 3: mem = (mem & ~(0x0f)) | 2; break; case 7: mem = (mem & ~(0x0f)) | 3; break; case 9: mem = (mem & ~(0x0f)) | 4; break; case 11: mem = (mem & ~(0x0f)) | 5; break; default: break; } if ((aty_ld_le32(CONFIG_STAT0, par) & 7) >= SDRAM) mem &= ~(0x00700000); } mem &= ~(0xcf80e000); /* Turn off all undocumented bits. */ aty_st_le32(MEM_CNTL, mem, par); } /* * If this is the console device, we will set default video * settings to what the PROM left us with. 
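	 * The console device is identified below by resolving the OpenPROM
	 * "screen" alias and comparing the result with our own PCI node.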
	node = prom_getchild(prom_root_node);
	node = prom_searchsiblings(node, "aliases");
	if (node) {
		len = prom_getproperty(node, "screen", prop, sizeof(prop));
		if (len > 0) {
			prop[len] = '\0';
			node = prom_finddevice(prop);
		} else
			node = 0;
	}

	dp = pci_device_to_OF_node(pdev);
	if (node == dp->node) {
		struct fb_var_screeninfo *var = &default_var;
		unsigned int N, P, Q, M, T, R;
		u32 v_total, h_total;
		struct crtc crtc;
		u8 pll_regs[16];
		u8 clock_cntl;

		crtc.vxres = prom_getintdefault(node, "width", 1024);
		crtc.vyres = prom_getintdefault(node, "height", 768);
		var->bits_per_pixel = prom_getintdefault(node, "depth", 8);
		var->xoffset = var->yoffset = 0;
		crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
		crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
		crtc.v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
		crtc.v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
		crtc.gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
		aty_crtc_to_var(&crtc, var);

		h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
		v_total = var->yres + var->lower_margin + var->vsync_len + var->upper_margin;

		/*
		 * Read the PLL to figure actual Refresh Rate.
		 */
		clock_cntl = aty_ld_8(CLOCK_CNTL, par);
		/* DPRINTK("CLOCK_CNTL %02x\n", clock_cntl); */
		for (i = 0; i < 16; i++)
			pll_regs[i] = aty_ld_pll_ct(i, par);

		/*
		 * PLL Reference Divider M:
		 */
		M = pll_regs[2];

		/*
		 * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
		 */
		N = pll_regs[7 + (clock_cntl & 3)];

		/*
		 * PLL Post Divider P (Dependent on CLOCK_CNTL):
		 */
		P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));

		/*
		 * PLL Divider Q:
		 */
		Q = N / P;

		/*
		 * Target Frequency:
		 *
		 *      T * M
		 * Q = -------
		 *      2 * R
		 *
		 * where R is XTALIN (= 14318 or 29498 kHz).
		 */
		if (IS_XL(pdev->device))
			R = 29498;
		else
			R = 14318;

		T = 2 * Q * R / M;

		default_var.pixclock = 1000000000 / T;
	}

	return 0;
}

#else /* __sparc__ */

#ifdef __i386__
#ifdef CONFIG_FB_ATY_GENERIC_LCD
static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
{
	u32 driv_inf_tab, sig;
	u16 lcd_ofs;

	/* To support an LCD panel, we should know its dimensions and
	 * its desired pixel clock.
	 * There are two ways to do it:
	 * - Check the startup video mode and calculate the panel
	 *   size from it. This is unreliable.
	 * - Read it from the driver information table in the video BIOS.
	 */
	/* Address of driver information table is at offset 0x78. */
	driv_inf_tab = bios_base + *((u16 *)(bios_base+0x78));

	/* Check for the driver information table signature. */
	sig = (*(u32 *)driv_inf_tab);
	if ((sig == 0x54504c24) || /* Rage LT pro */
		(sig == 0x544d5224) || /* Rage mobility */
		(sig == 0x54435824) || /* Rage XC */
		(sig == 0x544c5824)) { /* Rage XL */
		PRINTKI("BIOS contains driver information table.\n");
		lcd_ofs = (*(u16 *)(driv_inf_tab + 10));
		par->lcd_table = 0;
		if (lcd_ofs != 0) {
			par->lcd_table = bios_base + lcd_ofs;
		}
	}

	if (par->lcd_table != 0) {
		char model[24];
		char strbuf[16];
		char refresh_rates_buf[100];
		int id, tech, f, i, m, default_refresh_rate;
		char *txtcolour;
		char *txtmonitor;
		char *txtdual;
		char *txtformat;
		u16 width, height, panel_type, refresh_rates;
		u16 *lcdmodeptr;
		u32 format;
		u8 lcd_refresh_rates[16] = {50,56,60,67,70,72,75,76,85,90,100,120,140,150,160,200};

		/* The most important information is the panel size at
		 * offset 25 and 27, but there's some other nice information
		 * which we print to the screen.
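		 * (Offsets used below, relative to lcd_table: model string at +1,
		 * width at +25, height at +27, panel type at +29, format at +57.)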
		 */
		id = *(u8 *)par->lcd_table;
		strncpy(model,(char *)par->lcd_table+1,24);
		model[23]=0;

		width = par->lcd_width = *(u16 *)(par->lcd_table+25);
		height = par->lcd_height = *(u16 *)(par->lcd_table+27);
		panel_type = *(u16 *)(par->lcd_table+29);
		if (panel_type & 1)
			txtcolour = "colour";
		else
			txtcolour = "monochrome";
		if (panel_type & 2)
			txtdual = "dual (split) ";
		else
			txtdual = "";
		tech = (panel_type >> 2) & 63;
		switch (tech) {
		case 0:
			txtmonitor = "passive matrix";
			break;
		case 1:
			txtmonitor = "active matrix";
			break;
		case 2:
			txtmonitor = "active addressed STN";
			break;
		case 3:
			txtmonitor = "EL";
			break;
		case 4:
			txtmonitor = "plasma";
			break;
		default:
			txtmonitor = "unknown";
		}
		format = *(u32 *)(par->lcd_table+57);
		if (tech == 0 || tech == 2) {
			switch (format & 7) {
			case 0:
				txtformat = "12 bit interface";
				break;
			case 1:
				txtformat = "16 bit interface";
				break;
			case 2:
				txtformat = "24 bit interface";
				break;
			default:
				txtformat = "unknown format";
			}
		} else {
			switch (format & 7) {
			case 0:
				txtformat = "8 colours";
				break;
			case 1:
				txtformat = "512 colours";
				break;
			case 2:
				txtformat = "4096 colours";
				break;
			case 4:
				txtformat = "262144 colours (LT mode)";
				break;
			case 5:
				txtformat = "16777216 colours";
				break;
			case 6:
				txtformat = "262144 colours (FDPI-2 mode)";
				break;
			default:
				txtformat = "unknown format";
			}
		}
		PRINTKI("%s%s %s monitor detected: %s\n",
			txtdual, txtcolour, txtmonitor, model);
		PRINTKI("          id=%d, %dx%d pixels, %s\n",
			id, width, height, txtformat);
		refresh_rates_buf[0] = 0;
		refresh_rates = *(u16 *)(par->lcd_table+62);
		m = 1;
		f = 0;
		for (i = 0; i < 16; i++) {
			if (refresh_rates & m) {
				if (f == 0) {
					sprintf(strbuf, "%d", lcd_refresh_rates[i]);
					f++;
				} else {
					sprintf(strbuf, ",%d", lcd_refresh_rates[i]);
				}
				strcat(refresh_rates_buf, strbuf);
			}
			m = m << 1;
		}
		default_refresh_rate = (*(u8 *)(par->lcd_table+61) & 0xf0) >> 4;
		PRINTKI("          supports refresh rates [%s], default %d Hz\n",
			refresh_rates_buf, lcd_refresh_rates[default_refresh_rate]);
		par->lcd_refreshrate = lcd_refresh_rates[default_refresh_rate];
		/* We now need to determine the crtc parameters for the
		 * LCD monitor. This is tricky, because they are not stored
		 * individually in the BIOS. Instead, the BIOS contains a
		 * table of display modes that work for this monitor.
		 *
		 * The idea is that we search for a mode of the same dimensions
		 * as the dimensions of the LCD monitor. Say our LCD monitor
		 * is 800x600 pixels, we search for an 800x600 mode.
		 * The CRTC parameters we find here are the ones that we need
		 * to use to simulate other resolutions on the LCD screen.
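		 * Note the unit conversions in the loop: most horizontal timings
		 * are stored in units of 8 pixels minus one, and the vertical
		 * timings minus one, hence the "(x + 1) * 8" and "++" fix-ups.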
*/ lcdmodeptr = (u16 *)(par->lcd_table + 64); while (*lcdmodeptr != 0) { u32 modeptr; u16 mwidth, mheight, lcd_hsync_start, lcd_vsync_start; modeptr = bios_base + *lcdmodeptr; mwidth = *((u16 *)(modeptr+0)); mheight = *((u16 *)(modeptr+2)); if (mwidth == width && mheight == height) { par->lcd_pixclock = 100000000 / *((u16 *)(modeptr+9)); par->lcd_htotal = *((u16 *)(modeptr+17)) & 511; par->lcd_hdisp = *((u16 *)(modeptr+19)) & 511; lcd_hsync_start = *((u16 *)(modeptr+21)) & 511; par->lcd_hsync_dly = (*((u16 *)(modeptr+21)) >> 9) & 7; par->lcd_hsync_len = *((u8 *)(modeptr+23)) & 63; par->lcd_vtotal = *((u16 *)(modeptr+24)) & 2047; par->lcd_vdisp = *((u16 *)(modeptr+26)) & 2047; lcd_vsync_start = *((u16 *)(modeptr+28)) & 2047; par->lcd_vsync_len = (*((u16 *)(modeptr+28)) >> 11) & 31; par->lcd_htotal = (par->lcd_htotal + 1) * 8; par->lcd_hdisp = (par->lcd_hdisp + 1) * 8; lcd_hsync_start = (lcd_hsync_start + 1) * 8; par->lcd_hsync_len = par->lcd_hsync_len * 8; par->lcd_vtotal++; par->lcd_vdisp++; lcd_vsync_start++; par->lcd_right_margin = lcd_hsync_start - par->lcd_hdisp; par->lcd_lower_margin = lcd_vsync_start - par->lcd_vdisp; par->lcd_hblank_len = par->lcd_htotal - par->lcd_hdisp; par->lcd_vblank_len = par->lcd_vtotal - par->lcd_vdisp; break; } lcdmodeptr++; } if (*lcdmodeptr == 0) { PRINTKE("LCD monitor CRTC parameters not found!!!\n"); /* To do: Switch to CRT if possible. */ } else { PRINTKI(" LCD CRTC parameters: %d.%d %d %d %d %d %d %d %d %d\n", 1000000 / par->lcd_pixclock, 1000000 % par->lcd_pixclock, par->lcd_hdisp, par->lcd_hdisp + par->lcd_right_margin, par->lcd_hdisp + par->lcd_right_margin + par->lcd_hsync_dly + par->lcd_hsync_len, par->lcd_htotal, par->lcd_vdisp, par->lcd_vdisp + par->lcd_lower_margin, par->lcd_vdisp + par->lcd_lower_margin + par->lcd_vsync_len, par->lcd_vtotal); PRINTKI(" : %d %d %d %d %d %d %d %d %d\n", par->lcd_pixclock, par->lcd_hblank_len - (par->lcd_right_margin + par->lcd_hsync_dly + par->lcd_hsync_len), par->lcd_hdisp, par->lcd_right_margin, par->lcd_hsync_len, par->lcd_vblank_len - (par->lcd_lower_margin + par->lcd_vsync_len), par->lcd_vdisp, par->lcd_lower_margin, par->lcd_vsync_len); } } } #endif /* CONFIG_FB_ATY_GENERIC_LCD */ static int __devinit init_from_bios(struct atyfb_par *par) { u32 bios_base, rom_addr; int ret; rom_addr = 0xc0000 + ((aty_ld_le32(SCRATCH_REG1, par) & 0x7f) << 11); bios_base = (unsigned long)ioremap(rom_addr, 0x10000); /* The BIOS starts with 0xaa55. 
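	 * That is the standard PC expansion-ROM signature (bytes 0x55, 0xaa
	 * read as a little-endian 16-bit word). The ROM segment comes from
	 * SCRATCH_REG1 above: e.g. a value of 0x10 in the low 7 bits maps
	 * the ROM at 0xc0000 + (0x10 << 11) = 0xc8000.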
*/ if (*((u16 *)bios_base) == 0xaa55) { u8 *bios_ptr; u16 rom_table_offset, freq_table_offset; PLL_BLOCK_MACH64 pll_block; PRINTKI("Mach64 BIOS is located at %x, mapped at %x.\n", rom_addr, bios_base); /* check for frequncy table */ bios_ptr = (u8*)bios_base; rom_table_offset = (u16)(bios_ptr[0x48] | (bios_ptr[0x49] << 8)); freq_table_offset = bios_ptr[rom_table_offset + 16] | (bios_ptr[rom_table_offset + 17] << 8); memcpy(&pll_block, bios_ptr + freq_table_offset, sizeof(PLL_BLOCK_MACH64)); PRINTKI("BIOS frequency table:\n"); PRINTKI("PCLK_min_freq %d, PCLK_max_freq %d, ref_freq %d, ref_divider %d\n", pll_block.PCLK_min_freq, pll_block.PCLK_max_freq, pll_block.ref_freq, pll_block.ref_divider); PRINTKI("MCLK_pwd %d, MCLK_max_freq %d, XCLK_max_freq %d, SCLK_freq %d\n", pll_block.MCLK_pwd, pll_block.MCLK_max_freq, pll_block.XCLK_max_freq, pll_block.SCLK_freq); par->pll_limits.pll_min = pll_block.PCLK_min_freq/100; par->pll_limits.pll_max = pll_block.PCLK_max_freq/100; par->pll_limits.ref_clk = pll_block.ref_freq/100; par->pll_limits.ref_div = pll_block.ref_divider; par->pll_limits.sclk = pll_block.SCLK_freq/100; par->pll_limits.mclk = pll_block.MCLK_max_freq/100; par->pll_limits.mclk_pm = pll_block.MCLK_pwd/100; par->pll_limits.xclk = pll_block.XCLK_max_freq/100; #ifdef CONFIG_FB_ATY_GENERIC_LCD aty_init_lcd(par, bios_base); #endif ret = 0; } else { PRINTKE("no BIOS frequency table found, use parameters\n"); ret = -ENXIO; } iounmap((void* __iomem )bios_base); return ret; } #endif /* __i386__ */ static int __devinit atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info, unsigned long addr) { struct atyfb_par *par = info->par; u16 tmp; unsigned long raddr; struct resource *rrp; int ret = 0; raddr = addr + 0x7ff000UL; rrp = &pdev->resource[2]; if ((rrp->flags & IORESOURCE_MEM) && request_mem_region(rrp->start, rrp->end - rrp->start + 1, "atyfb")) { par->aux_start = rrp->start; par->aux_size = rrp->end - rrp->start + 1; raddr = rrp->start; PRINTKI("using auxiliary register aperture\n"); } info->fix.mmio_start = raddr; par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000); if (par->ati_regbase == NULL) return -ENOMEM; info->fix.mmio_start += par->aux_start ? 0x400 : 0xc00; par->ati_regbase += par->aux_start ? 0x400 : 0xc00; /* * Enable memory-space accesses using config-space * command register. 
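	 * Without PCI_COMMAND_MEMORY set, none of the MMIO or framebuffer
	 * apertures mapped in this function would decode.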
*/ pci_read_config_word(pdev, PCI_COMMAND, &tmp); if (!(tmp & PCI_COMMAND_MEMORY)) { tmp |= PCI_COMMAND_MEMORY; pci_write_config_word(pdev, PCI_COMMAND, tmp); } #ifdef __BIG_ENDIAN /* Use the big-endian aperture */ addr += 0x800000; #endif /* Map in frame buffer */ info->fix.smem_start = addr; info->screen_base = ioremap(addr, 0x800000); if (info->screen_base == NULL) { ret = -ENOMEM; goto atyfb_setup_generic_fail; } if((ret = correct_chipset(par))) goto atyfb_setup_generic_fail; #ifdef __i386__ if((ret = init_from_bios(par))) goto atyfb_setup_generic_fail; #endif if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN)) par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2; else par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U; /* according to ATI, we should use clock 3 for acelerated mode */ par->clk_wr_offset = 3; return 0; atyfb_setup_generic_fail: iounmap(par->ati_regbase); par->ati_regbase = NULL; if (info->screen_base) { iounmap(info->screen_base); info->screen_base = NULL; } return ret; } #endif /* !__sparc__ */ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long addr, res_start, res_size; struct fb_info *info; struct resource *rp; struct atyfb_par *par; int i, rc = -ENOMEM; for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--) if (pdev->device == aty_chips[i].pci_id) break; if (i < 0) return -ENODEV; /* Enable device in PCI config */ if (pci_enable_device(pdev)) { PRINTKE("Cannot enable PCI device\n"); return -ENXIO; } /* Find which resource to use */ rp = &pdev->resource[0]; if (rp->flags & IORESOURCE_IO) rp = &pdev->resource[1]; addr = rp->start; if (!addr) return -ENXIO; /* Reserve space */ res_start = rp->start; res_size = rp->end - rp->start + 1; if (!request_mem_region (res_start, res_size, "atyfb")) return -EBUSY; /* Allocate framebuffer */ info = framebuffer_alloc(sizeof(struct atyfb_par), &pdev->dev); if (!info) { PRINTKE("atyfb_pci_probe() can't alloc fb_info\n"); return -ENOMEM; } par = info->par; info->fix = atyfb_fix; info->device = &pdev->dev; par->pci_id = aty_chips[i].pci_id; par->res_start = res_start; par->res_size = res_size; par->irq = pdev->irq; par->pdev = pdev; /* Setup "info" structure */ #ifdef __sparc__ rc = atyfb_setup_sparc(pdev, info, addr); #else rc = atyfb_setup_generic(pdev, info, addr); #endif if (rc) goto err_release_mem; pci_set_drvdata(pdev, info); /* Init chip & register framebuffer */ if (aty_init(info)) goto err_release_io; #ifdef __sparc__ /* * Add /dev/fb mmap values. 
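	 * Entry 0 exposes the framebuffer and entry 1 the register page;
	 * the table is walked by the driver's sparc-specific mmap handler.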
*/ par->mmap_map[0].voff = 0x8000000000000000UL; par->mmap_map[0].poff = (unsigned long) info->screen_base & PAGE_MASK; par->mmap_map[0].size = info->fix.smem_len; par->mmap_map[0].prot_mask = _PAGE_CACHE; par->mmap_map[0].prot_flag = _PAGE_E; par->mmap_map[1].voff = par->mmap_map[0].voff + info->fix.smem_len; par->mmap_map[1].poff = (long)par->ati_regbase & PAGE_MASK; par->mmap_map[1].size = PAGE_SIZE; par->mmap_map[1].prot_mask = _PAGE_CACHE; par->mmap_map[1].prot_flag = _PAGE_E; #endif /* __sparc__ */ return 0; err_release_io: #ifdef __sparc__ kfree(par->mmap_map); #else if (par->ati_regbase) iounmap(par->ati_regbase); if (info->screen_base) iounmap(info->screen_base); #endif err_release_mem: if (par->aux_start) release_mem_region(par->aux_start, par->aux_size); release_mem_region(par->res_start, par->res_size); framebuffer_release(info); return rc; } #endif /* CONFIG_PCI */ #ifdef CONFIG_ATARI static int __init atyfb_atari_probe(void) { struct atyfb_par *par; struct fb_info *info; int m64_num; u32 clock_r; int num_found = 0; for (m64_num = 0; m64_num < mach64_count; m64_num++) { if (!phys_vmembase[m64_num] || !phys_size[m64_num] || !phys_guiregbase[m64_num]) { PRINTKI("phys_*[%d] parameters not set => returning early. \n", m64_num); continue; } info = framebuffer_alloc(sizeof(struct atyfb_par), NULL); if (!info) { PRINTKE("atyfb_atari_probe() can't alloc fb_info\n"); return -ENOMEM; } par = info->par; info->fix = atyfb_fix; par->irq = (unsigned int) -1; /* something invalid */ /* * Map the video memory (physical address given) to somewhere in the * kernel address space. */ info->screen_base = ioremap(phys_vmembase[m64_num], phys_size[m64_num]); info->fix.smem_start = (unsigned long)info->screen_base; /* Fake! */ par->ati_regbase = ioremap(phys_guiregbase[m64_num], 0x10000) + 0xFC00ul; info->fix.mmio_start = (unsigned long)par->ati_regbase; /* Fake! */ aty_st_le32(CLOCK_CNTL, 0x12345678, par); clock_r = aty_ld_le32(CLOCK_CNTL, par); switch (clock_r & 0x003F) { case 0x12: par->clk_wr_offset = 3; /* */ break; case 0x34: par->clk_wr_offset = 2; /* Medusa ST-IO ISA Adapter etc. */ break; case 0x16: par->clk_wr_offset = 1; /* */ break; case 0x38: par->clk_wr_offset = 0; /* Panther 1 ISA Adapter (Gerald) */ break; } /* Fake pci_id for correct_chipset() */ switch (aty_ld_le32(CONFIG_CHIP_ID, par) & CFG_CHIP_TYPE) { case 0x00d7: par->pci_id = PCI_CHIP_MACH64GX; break; case 0x0057: par->pci_id = PCI_CHIP_MACH64CX; break; default: break; } if (correct_chipset(par) || aty_init(info)) { iounmap(info->screen_base); iounmap(par->ati_regbase); framebuffer_release(info); } else { num_found++; } } return num_found ? 
0 : -ENXIO; } #endif /* CONFIG_ATARI */ #ifdef CONFIG_PCI static void __devexit atyfb_remove(struct fb_info *info) { struct atyfb_par *par = (struct atyfb_par *) info->par; /* restore video mode */ aty_set_crtc(par, &saved_crtc); par->pll_ops->set_pll(info, &saved_pll); unregister_framebuffer(info); #ifdef CONFIG_FB_ATY_BACKLIGHT if (M64_HAS(MOBIL_BUS)) aty_bl_exit(info->bl_dev); #endif #ifdef CONFIG_MTRR if (par->mtrr_reg >= 0) { mtrr_del(par->mtrr_reg, 0, 0); par->mtrr_reg = -1; } if (par->mtrr_aper >= 0) { mtrr_del(par->mtrr_aper, 0, 0); par->mtrr_aper = -1; } #endif #ifndef __sparc__ if (par->ati_regbase) iounmap(par->ati_regbase); if (info->screen_base) iounmap(info->screen_base); #ifdef __BIG_ENDIAN if (info->sprite.addr) iounmap(info->sprite.addr); #endif #endif #ifdef __sparc__ kfree(par->mmap_map); #endif if (par->aux_start) release_mem_region(par->aux_start, par->aux_size); if (par->res_start) release_mem_region(par->res_start, par->res_size); framebuffer_release(info); } static void __devexit atyfb_pci_remove(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); atyfb_remove(info); } /* * This driver uses its own matching table. That will be more difficult * to fix, so for now, we just match against any ATI ID and let the * probe() function find out what's up. That also mean we don't have * a module ID table though. */ static struct pci_device_id atyfb_pci_tbl[] = { { PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0 }, { 0, } }; static struct pci_driver atyfb_driver = { .name = "atyfb", .id_table = atyfb_pci_tbl, .probe = atyfb_pci_probe, .remove = __devexit_p(atyfb_pci_remove), #ifdef CONFIG_PM .suspend = atyfb_pci_suspend, .resume = atyfb_pci_resume, #endif /* CONFIG_PM */ }; #endif /* CONFIG_PCI */ #ifndef MODULE static int __init atyfb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!strncmp(this_opt, "noaccel", 7)) { noaccel = 1; #ifdef CONFIG_MTRR } else if (!strncmp(this_opt, "nomtrr", 6)) { nomtrr = 1; #endif } else if (!strncmp(this_opt, "vram:", 5)) vram = simple_strtoul(this_opt + 5, NULL, 0); else if (!strncmp(this_opt, "pll:", 4)) pll = simple_strtoul(this_opt + 4, NULL, 0); else if (!strncmp(this_opt, "mclk:", 5)) mclk = simple_strtoul(this_opt + 5, NULL, 0); else if (!strncmp(this_opt, "xclk:", 5)) xclk = simple_strtoul(this_opt+5, NULL, 0); else if (!strncmp(this_opt, "comp_sync:", 10)) comp_sync = simple_strtoul(this_opt+10, NULL, 0); else if (!strncmp(this_opt, "backlight:", 10)) backlight = simple_strtoul(this_opt+10, NULL, 0); #ifdef CONFIG_PPC else if (!strncmp(this_opt, "vmode:", 6)) { unsigned int vmode = simple_strtoul(this_opt + 6, NULL, 0); if (vmode > 0 && vmode <= VMODE_MAX) default_vmode = vmode; } else if (!strncmp(this_opt, "cmode:", 6)) { unsigned int cmode = simple_strtoul(this_opt + 6, NULL, 0); switch (cmode) { case 0: case 8: default_cmode = CMODE_8; break; case 15: case 16: default_cmode = CMODE_16; break; case 24: case 32: default_cmode = CMODE_32; break; } } #endif #ifdef CONFIG_ATARI /* * Why do we need this silly Mach64 argument? * We are already here because of mach64= so its redundant. 
*/ else if (MACH_IS_ATARI && (!strncmp(this_opt, "Mach64:", 7))) { static unsigned char m64_num; static char mach64_str[80]; strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str)); if (!store_video_par(mach64_str, m64_num)) { m64_num++; mach64_count = m64_num; } } #endif else mode = this_opt; } return 0; } #endif /* MODULE */ static int __init atyfb_init(void) { int err1 = 1, err2 = 1; #ifndef MODULE char *option = NULL; if (fb_get_options("atyfb", &option)) return -ENODEV; atyfb_setup(option); #endif #ifdef CONFIG_PCI err1 = pci_register_driver(&atyfb_driver); #endif #ifdef CONFIG_ATARI err2 = atyfb_atari_probe(); #endif return (err1 && err2) ? -ENODEV : 0; } static void __exit atyfb_exit(void) { #ifdef CONFIG_PCI pci_unregister_driver(&atyfb_driver); #endif } module_init(atyfb_init); module_exit(atyfb_exit); MODULE_DESCRIPTION("FBDev driver for ATI Mach64 cards"); MODULE_LICENSE("GPL"); module_param(noaccel, bool, 0); MODULE_PARM_DESC(noaccel, "bool: disable acceleration"); module_param(vram, int, 0); MODULE_PARM_DESC(vram, "int: override size of video ram"); module_param(pll, int, 0); MODULE_PARM_DESC(pll, "int: override video clock"); module_param(mclk, int, 0); MODULE_PARM_DESC(mclk, "int: override memory clock"); module_param(xclk, int, 0); MODULE_PARM_DESC(xclk, "int: override accelerated engine clock"); module_param(comp_sync, int, 0); MODULE_PARM_DESC(comp_sync, "Set composite sync signal to low (0) or high (1)"); module_param(mode, charp, 0); MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" "); #ifdef CONFIG_MTRR module_param(nomtrr, bool, 0); MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers"); #endif
janrinze/loox7xxport
drivers/video/aty/atyfb_base.c
C
gpl-2.0
107,519
27.227094
141
0.625817
false
/* ==== compat.h ============================================================
 * Copyright (c) 1994 by Chris Provenzano, proven@mit.edu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Chris Provenzano.
 * 4. The name of Chris Provenzano may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CHRIS PROVENZANO ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL CHRIS PROVENZANO BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id$
 *
 * Description : Compat header to make socket code compile.
 *
 * 1.00 94/08/01 proven
 *      -Started coding this file.
 */

#ifndef _SYS_COMPAT_H_
#define _SYS_COMPAT_H_

#endif
NickeyWoo/mysql-3.23.49
mit-pthreads/machdep/openbsd-2.0/compat.h
C
gpl-2.0
1,982
45.093023
77
0.726034
false
#include "Astronomy.h" #include "Body.h" #include "Moon.h" #include "Planet.h" #include "World.h"
worldforge/cyphesis
src/rules/astronomy/Test.cpp
C++
gpl-2.0
98
18.6
22
0.693878
false
/**************************************************************************** ** ** Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). ** Contact: Qt Software Information (qt-info@nokia.com) ** ** This file is part of the Qt Designer of the Qt Toolkit. ** ** Commercial Usage ** Licensees holding valid Qt Commercial licenses may use this file in ** accordance with the Qt Commercial License Agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and Nokia. ** ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License versions 2.0 or 3.0 as published by the Free ** Software Foundation and appearing in the file LICENSE.GPL included in ** the packaging of this file. Please review the following information ** to ensure GNU General Public Licensing requirements will be met: ** http://www.fsf.org/licensing/licenses/info/GPLv2.html and ** http://www.gnu.org/copyleft/gpl.html. In addition, as a special ** exception, Nokia gives you certain additional rights. These rights ** are described in the Nokia Qt GPL Exception version 1.3, included in ** the file GPL_EXCEPTION.txt in this package. ** ** Qt for Windows(R) Licensees ** As a special exception, Nokia, as the sole copyright holder for Qt ** Designer, grants users of the Qt/Eclipse Integration plug-in the ** right for the Qt/Eclipse Integration to link to functionality ** provided by Qt Designer and its related libraries. ** ** If you are unsure which license is appropriate for your use, please ** contact the sales department at qt-sales@nokia.com. ** ****************************************************************************/ #include "qtbrushmanager.h" #include <QtGui/QPixmap> #include <QtGui/QPainter> QT_BEGIN_NAMESPACE namespace qdesigner_internal { class QtBrushManagerPrivate { QtBrushManager *q_ptr; Q_DECLARE_PUBLIC(QtBrushManager) public: QMap<QString, QBrush> theBrushMap; QString theCurrentBrush; }; QtBrushManager::QtBrushManager(QObject *parent) : QDesignerBrushManagerInterface(parent) { d_ptr = new QtBrushManagerPrivate; d_ptr->q_ptr = this; } QtBrushManager::~QtBrushManager() { delete d_ptr; } QBrush QtBrushManager::brush(const QString &name) const { if (d_ptr->theBrushMap.contains(name)) return d_ptr->theBrushMap[name]; return QBrush(); } QMap<QString, QBrush> QtBrushManager::brushes() const { return d_ptr->theBrushMap; } QString QtBrushManager::currentBrush() const { return d_ptr->theCurrentBrush; } QString QtBrushManager::addBrush(const QString &name, const QBrush &brush) { if (name.isNull()) return QString(); QString newName = name; QString nameBase = newName; int i = 0; while (d_ptr->theBrushMap.contains(newName)) { newName = nameBase + QString::number(++i); } d_ptr->theBrushMap[newName] = brush; emit brushAdded(newName, brush); return newName; } void QtBrushManager::removeBrush(const QString &name) { if (!d_ptr->theBrushMap.contains(name)) return; if (currentBrush() == name) setCurrentBrush(QString()); emit brushRemoved(name); d_ptr->theBrushMap.remove(name); } void QtBrushManager::setCurrentBrush(const QString &name) { QBrush newBrush; if (!name.isNull()) { if (d_ptr->theBrushMap.contains(name)) newBrush = d_ptr->theBrushMap[name]; else return; } d_ptr->theCurrentBrush = name; emit currentBrushChanged(name, newBrush); } QPixmap QtBrushManager::brushPixmap(const QBrush &brush) const { int w = 64; int h = 64; QImage img(w, h, QImage::Format_ARGB32_Premultiplied); 
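    // Premultiplied ARGB lets the Source composition mode below simply
    // overwrite the uninitialized image, alpha included.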
QPainter p(&img); p.setCompositionMode(QPainter::CompositionMode_Source); p.fillRect(QRect(0, 0, w, h), brush); return QPixmap::fromImage(img); } } // namespace qdesigner_internal QT_END_NAMESPACE
liuyanghejerry/qtextended
qtopiacore/qt/tools/designer/src/components/formeditor/qtbrushmanager.cpp
C++
gpl-2.0
3,998
27.76259
77
0.682341
false
#!/bin/sh om wifi maxperf eth0 1
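# As the script name suggests, this switches the WLAN interface (eth0)
# into maximum-performance (power saving off) mode via the "om" helper.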
Trim/qtmoko
devices/neo/src/devtools/scripts/wifi-maxperf-on.sh
Shell
gpl-2.0
33
15.5
22
0.69697
false
<?php /********************************************************************* login.php User access link recovery TODO: This is a temp. fix to allow for collaboration in lieu of real username and password coming in 1.8.2 Peter Rotich <peter@osticket.com> Copyright (c) 2006-2013 osTicket http://www.osticket.com Released under the GNU General Public License WITHOUT ANY WARRANTY. See LICENSE.TXT for details. vim: expandtab sw=4 ts=4 sts=4: **********************************************************************/ require_once('client.inc.php'); if(!defined('INCLUDE_DIR')) die('Fatal Error'); define('CLIENTINC_DIR',INCLUDE_DIR.'client/'); define('OSTCLIENTINC',TRUE); //make includes happy require_once(INCLUDE_DIR.'class.client.php'); require_once(INCLUDE_DIR.'class.ticket.php'); if ($cfg->getClientRegistrationMode() == 'disabled' || isset($_POST['lticket'])) $inc = 'accesslink.inc.php'; else $inc = 'login.inc.php'; $suggest_pwreset = false; // Check the CSRF token, and ensure that future requests will have to use a // different CSRF token. This will help ward off both parallel and serial // brute force attacks, because new tokens will have to be requested for // each attempt. if ($_POST) { // Check CSRF token if (!$ost->checkCSRFToken()) Http::response(400, __('Valid CSRF Token Required')); // Rotate the CSRF token (original cannot be reused) $ost->getCSRF()->rotate(); } if ($_POST && isset($_POST['luser'])) { if (!$_POST['luser']) $errors['err'] = __('Valid username or email address is required'); elseif (($user = UserAuthenticationBackend::process(trim($_POST['luser']), $_POST['lpasswd'], $errors))) { if ($user instanceof ClientCreateRequest) { if ($cfg && $cfg->isClientRegistrationEnabled()) { // Attempt to automatically register if ($user->attemptAutoRegister()) Http::redirect('tickets.php'); // Auto-registration failed. Show the user the info we have $inc = 'register.inc.php'; $user_form = UserForm::getUserForm()->getForm($user->getInfo()); } else { $errors['err'] = __('Access Denied. Contact your help desk administrator to have an account registered for you'); // fall through to show login page again } } else { Http::redirect($_SESSION['_client']['auth']['dest'] ?: 'tickets.php'); } } elseif(!$errors['err']) { $errors['err'] = sprintf('%s - %s', __('Invalid username or password'), __('Please try again!')); } $suggest_pwreset = true; } elseif ($_POST && isset($_POST['lticket'])) { if (!Validator::is_email($_POST['lemail'])) $errors['err'] = __('Valid email address and ticket number required'); elseif (($user = UserAuthenticationBackend::process($_POST['lemail'], $_POST['lticket'], $errors))) { // If email address verification is not required, then provide // immediate access to the ticket! 
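        // Otherwise we fall through and email a one-time access link
        // below instead of starting a session directly.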
        if (!$cfg->isClientEmailVerificationRequired())
            Http::redirect('tickets.php');

        // This will succeed as it is checked in the authentication backend
        $ticket = Ticket::lookupByNumber($_POST['lticket'], $_POST['lemail']);

        // We're using the authentication backend so we can guard against brute
        // force attempts (which doesn't buy much since the link is emailed)
        if ($ticket) {
            $ticket->sendAccessLink($user);
            $msg = sprintf(__("%s - access link sent to your email!"),
                Format::htmlchars($user->getName()->getFirst()));
            $_POST = null;
        } else {
            $errors['err'] = sprintf('%s - %s',
                __('Invalid email or ticket number'),
                __('Please try again!'));
        }
    } elseif(!$errors['err']) {
        $errors['err'] = sprintf('%s - %s',
            __('Invalid email or ticket number'),
            __('Please try again!'));
    }
} elseif (isset($_GET['do'])) {
    switch($_GET['do']) {
    case 'ext':
        // Lookup external backend
        if ($bk = UserAuthenticationBackend::getBackend($_GET['bk'])) {
            $result = $bk->triggerAuth();
            if ($result instanceof AccessDenied) {
                $errors['err'] = $result->getMessage();
            }
        }
    }
} elseif ($user = UserAuthenticationBackend::processSignOn($errors, false)) {
    // Users from the ticket access link
    if ($user && $user instanceof TicketUser && $user->getTicketId())
        Http::redirect('tickets.php?id='.$user->getTicketId());
    // Users imported from an external auth backend
    elseif ($user instanceof ClientCreateRequest) {
        if ($cfg && $cfg->isClientRegistrationEnabled()) {
            // Attempt to automatically register
            if ($user->attemptAutoRegister())
                Http::redirect('tickets.php');

            // Unable to auto-register. Fill in what we have and let the
            // user complete the info
            $inc = 'register.inc.php';
        } else {
            $errors['err'] = __('Access Denied. Contact your help desk administrator to have an account registered for you');
            // fall through to show login page again
        }
    } elseif ($user instanceof AuthenticatedUser) {
        Http::redirect($_SESSION['_client']['auth']['dest'] ?: 'tickets.php');
    }
}

if (!$nav) {
    $nav = new UserNav();
    $nav->setActiveNav('status');
}

// Browsers shouldn't suggest saving that username/password
Http::response(422);

require CLIENTINC_DIR.'header.inc.php';
require CLIENTINC_DIR.$inc;
require CLIENTINC_DIR.'footer.inc.php';
?>
protich/osTicket
login.php
PHP
gpl-2.0
5,831
36.378205
129
0.5716
false
/*
Copyright (c) 2003-2009, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/

CKEDITOR.plugins.add( 'styles',
{
	requires : [ 'selection' ]
});

/**
 * Registers a function to be called whenever a style changes its state in the
 * editing area. The current state is passed to the function. The possible
 * states are {@link CKEDITOR.TRISTATE_ON} and {@link CKEDITOR.TRISTATE_OFF}.
 * @param {CKEDITOR.style} The style to be watched.
 * @param {Function} The function to be called when the style state changes.
 * @example
 * // Create a style object for the &lt;b&gt; element.
 * var style = new CKEDITOR.style( { element : 'b' } );
 * var editor = CKEDITOR.instances.editor1;
 * editor.attachStyleStateChange( style, function( state )
 *     {
 *         if ( state == CKEDITOR.TRISTATE_ON )
 *             alert( 'The current state for the B element is ON' );
 *         else
 *             alert( 'The current state for the B element is OFF' );
 *     });
 */
CKEDITOR.editor.prototype.attachStyleStateChange = function( style, callback )
{
	// Try to get the list of attached callbacks.
	var styleStateChangeCallbacks = this._.styleStateChangeCallbacks;

	// If it doesn't exist, it means this is the first call. So, let's create
	// all the structure to manage the style checks and the callback calls.
	if ( !styleStateChangeCallbacks )
	{
		// Create the callbacks array.
		styleStateChangeCallbacks = this._.styleStateChangeCallbacks = [];

		// Attach to the selectionChange event, so we can check the styles at
		// that point.
		this.on( 'selectionChange', function( ev )
			{
				// Loop through all registered callbacks.
				for ( var i = 0 ; i < styleStateChangeCallbacks.length ; i++ )
				{
					var callback = styleStateChangeCallbacks[ i ];

					// Check the current state for the style defined for that
					// callback.
					var currentState = callback.style.checkActive( ev.data.path ) ? CKEDITOR.TRISTATE_ON : CKEDITOR.TRISTATE_OFF;

					// If the state changed since the last check.
					if ( callback.state !== currentState )
					{
						// Call the callback function, passing the current
						// state to it.
						callback.fn.call( this, currentState );

						// Save the current state, so it can be compared next
						// time.
						callback.state = currentState;
					}
				}
			});
	}

	// Save the callback info, so it can be checked on the next occurrence of
	// selectionChange.
	styleStateChangeCallbacks.push( { style : style, fn : callback } );
};

CKEDITOR.STYLE_BLOCK = 1;
CKEDITOR.STYLE_INLINE = 2;
CKEDITOR.STYLE_OBJECT = 3;

(function()
{
	var blockElements = { address:1,div:1,h1:1,h2:1,h3:1,h4:1,h5:1,h6:1,p:1,pre:1 };
	var objectElements = { a:1,embed:1,hr:1,img:1,li:1,object:1,ol:1,table:1,td:1,tr:1,ul:1 };

	var semicolonFixRegex = /\s*(?:;\s*|$)/;

	CKEDITOR.style = function( styleDefinition, variablesValues )
	{
		if ( variablesValues )
		{
			styleDefinition = CKEDITOR.tools.clone( styleDefinition );

			replaceVariables( styleDefinition.attributes, variablesValues );
			replaceVariables( styleDefinition.styles, variablesValues );
		}

		var element = this.element = ( styleDefinition.element || '*' ).toLowerCase();

		this.type = ( element == '#' || blockElements[ element ] ) ?
			CKEDITOR.STYLE_BLOCK :
			objectElements[ element ] ?
				CKEDITOR.STYLE_OBJECT :
				CKEDITOR.STYLE_INLINE;

		this._ =
		{
			definition : styleDefinition
		};
	};

	CKEDITOR.style.prototype =
	{
		apply : function( document )
		{
			applyStyle.call( this, document, false );
		},

		remove : function( document )
		{
			applyStyle.call( this, document, true );
		},

		applyToRange : function( range )
		{
			return ( this.applyToRange = this.type == CKEDITOR.STYLE_INLINE ?
applyInlineStyle : this.type == CKEDITOR.STYLE_BLOCK ? applyBlockStyle : null ).call( this, range ); }, removeFromRange : function( range ) { return ( this.removeFromRange = this.type == CKEDITOR.STYLE_INLINE ? removeInlineStyle : null ).call( this, range ); }, applyToObject : function( element ) { setupElement( element, this ); }, /** * Get the style state inside an element path. Returns "true" if the * element is active in the path. */ checkActive : function( elementPath ) { switch ( this.type ) { case CKEDITOR.STYLE_BLOCK : return this.checkElementRemovable( elementPath.block || elementPath.blockLimit, true ); case CKEDITOR.STYLE_INLINE : var elements = elementPath.elements; for ( var i = 0, element ; i < elements.length ; i++ ) { element = elements[i]; if ( element == elementPath.block || element == elementPath.blockLimit ) continue; if ( this.checkElementRemovable( element, true ) ) return true; } } return false; }, // Checks if an element, or any of its attributes, is removable by the // current style definition. checkElementRemovable : function( element, fullMatch ) { if ( !element ) return false; var def = this._.definition, attribs; // If the element name is the same as the style name. if ( element.getName() == this.element ) { // If no attributes are defined in the element. if ( !fullMatch && !element.hasAttributes() ) return true; attribs = getAttributesForComparison( def ); if ( attribs._length ) { for ( var attName in attribs ) { if ( attName == '_length' ) continue; var elementAttr = element.getAttribute( attName ); if ( attribs[attName] == ( attName == 'style' ? normalizeCssText( elementAttr, false ) : elementAttr ) ) { if ( !fullMatch ) return true; } else if ( fullMatch ) return false; } if( fullMatch ) return true; } else return true; } // Check if the element can be somehow overriden. var override = getOverrides( this )[ element.getName() ] ; if ( override ) { // If no attributes have been defined, remove the element. if ( !( attribs = override.attributes ) ) return true; for ( var i = 0 ; i < attribs.length ; i++ ) { attName = attribs[i][0]; var actualAttrValue = element.getAttribute( attName ); if ( actualAttrValue ) { var attValue = attribs[i][1]; // Remove the attribute if: // - The override definition value is null; // - The override definition value is a string that // matches the attribute value exactly. // - The override definition value is a regex that // has matches in the attribute value. if ( attValue === null || ( typeof attValue == 'string' && actualAttrValue == attValue ) || attValue.test( actualAttrValue ) ) return true; } } } return false; } }; // Build the cssText based on the styles definition. CKEDITOR.style.getStyleText = function( styleDefinition ) { // If we have already computed it, just return it. var stylesDef = styleDefinition._ST; if ( stylesDef ) return stylesDef; stylesDef = styleDefinition.styles; // Builds the StyleText. var stylesText = ( styleDefinition.attributes && styleDefinition.attributes[ 'style' ] ) || ''; if ( stylesText.length ) stylesText = stylesText.replace( semicolonFixRegex, ';' ); for ( var style in stylesDef ) stylesText += style + ':' + stylesDef[ style ] + ';'; // Browsers make some changes to the style when applying them. So, here // we normalize it to the browser format. if ( stylesText.length ) stylesText = normalizeCssText( stylesText ); // Return it, saving it to the next request. 
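		// (Memoized on the definition object itself, under the private
		// "_ST" key checked at the top of this function.)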
	return ( styleDefinition._ST = stylesText );
};

function applyInlineStyle( range )
{
	var document = range.document;

	if ( range.collapsed )
	{
		// Create the element to be inserted in the DOM.
		var collapsedElement = getElement( this, document );

		// Insert the empty element into the DOM at the range position.
		range.insertNode( collapsedElement );

		// Place the selection right inside the empty element.
		range.moveToPosition( collapsedElement, CKEDITOR.POSITION_BEFORE_END );

		return;
	}

	var elementName = this.element;
	var def = this._.definition;
	var isUnknownElement;

	// Get the DTD definition for the element. Defaults to "span".
	var dtd = CKEDITOR.dtd[ elementName ] || ( isUnknownElement = true, CKEDITOR.dtd.span );

	// Bookmark the range so we can re-select it after processing.
	var bookmark = range.createBookmark();

	// Expand the range.
	range.enlarge( CKEDITOR.ENLARGE_ELEMENT );
	range.trim();

	// Get the first node to be processed and the last, which concludes the
	// processing.
	var boundaryNodes = range.getBoundaryNodes();
	var firstNode = boundaryNodes.startNode;
	var lastNode = boundaryNodes.endNode.getNextSourceNode( true );

	// Probably the document end is reached, we need a marker node.
	if ( !lastNode )
	{
		var marker;
		lastNode = marker = document.createText( '' );
		lastNode.insertAfter( range.endContainer );
	}

	// The detection algorithm below skips the contents inside bookmark nodes, so
	// we'll need to make sure lastNode isn't the &nbsp; inside a bookmark node.
	var lastParent = lastNode.getParent();
	if ( lastParent && lastParent.getAttribute( '_fck_bookmark' ) )
		lastNode = lastParent;

	if ( lastNode.equals( firstNode ) )
	{
		// If the last node is the same as the first one, we must move
		// it to the next one, otherwise the first one will not be
		// processed.
		lastNode = lastNode.getNextSourceNode( true );

		// It may happen that there are no more nodes after it (the end of
		// the document), so we must add something there to make our code
		// simpler.
		if ( !lastNode )
		{
			lastNode = marker = document.createText( '' );
			lastNode.insertAfter( firstNode );
		}
	}

	var currentNode = firstNode;

	var styleRange;

	// Indicates that some useful inline content has been found, so
	// the style should be applied.
	var hasContents;

	while ( currentNode )
	{
		var applyStyle = false;

		if ( currentNode.equals( lastNode ) )
		{
			currentNode = null;
			applyStyle = true;
		}
		else
		{
			var nodeType = currentNode.type;
			var nodeName = nodeType == CKEDITOR.NODE_ELEMENT ? currentNode.getName() : null;

			if ( nodeName && currentNode.getAttribute( '_fck_bookmark' ) )
			{
				currentNode = currentNode.getNextSourceNode( true );
				continue;
			}

			// Check if the current node can be a child of the style element.
			if ( !nodeName || ( dtd[ nodeName ]
				&& ( currentNode.getPosition( lastNode ) | CKEDITOR.POSITION_PRECEDING | CKEDITOR.POSITION_IDENTICAL | CKEDITOR.POSITION_IS_CONTAINED ) == ( CKEDITOR.POSITION_PRECEDING + CKEDITOR.POSITION_IDENTICAL + CKEDITOR.POSITION_IS_CONTAINED ) ) )
			{
				var currentParent = currentNode.getParent();

				// Check if the style element can be a child of the current
				// node parent or if the element is not defined in the DTD.
				if ( currentParent
					&& ( ( currentParent.getDtd() || CKEDITOR.dtd.span )[ elementName ] || isUnknownElement ) )
				{
					// This node will be part of our range, so if it has not
					// been started, place its start right before the node.
					// In the case of an element node, it will be included
					// only if it is entirely inside the range.
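					// (Elements listed in CKEDITOR.dtd.$removeEmpty only start
					// the range when fully contained, which is what the
					// position-flag comparison below enforces.)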
					if ( !styleRange && ( !nodeName || !CKEDITOR.dtd.$removeEmpty[ nodeName ]
						|| ( currentNode.getPosition( lastNode ) | CKEDITOR.POSITION_PRECEDING | CKEDITOR.POSITION_IDENTICAL | CKEDITOR.POSITION_IS_CONTAINED ) == ( CKEDITOR.POSITION_PRECEDING + CKEDITOR.POSITION_IDENTICAL + CKEDITOR.POSITION_IS_CONTAINED ) ) )
					{
						styleRange = new CKEDITOR.dom.range( document );
						styleRange.setStartBefore( currentNode );
					}

					// Non element nodes, or empty elements can be added
					// completely to the range.
					if ( nodeType == CKEDITOR.NODE_TEXT || ( nodeType == CKEDITOR.NODE_ELEMENT && !currentNode.getChildCount() ) )
					{
						var includedNode = currentNode;
						var parentNode;

						// This node is about to be included completely, but,
						// if this is the last node in its parent, we must also
						// check if the parent itself can be added completely
						// to the range.
						while ( !includedNode.$.nextSibling
							&& ( parentNode = includedNode.getParent(), dtd[ parentNode.getName() ] )
							&& ( parentNode.getPosition( firstNode ) | CKEDITOR.POSITION_FOLLOWING | CKEDITOR.POSITION_IDENTICAL | CKEDITOR.POSITION_IS_CONTAINED ) == ( CKEDITOR.POSITION_FOLLOWING + CKEDITOR.POSITION_IDENTICAL + CKEDITOR.POSITION_IS_CONTAINED ) )
						{
							includedNode = parentNode;
						}

						styleRange.setEndAfter( includedNode );

						// If the included node still is the last node in its
						// parent, it means that the parent can't be included
						// in this style DTD, so apply the style immediately.
						if ( !includedNode.$.nextSibling )
							applyStyle = true;

						if ( !hasContents )
							hasContents = ( nodeType != CKEDITOR.NODE_TEXT || (/[^\s\ufeff]/).test( currentNode.getText() ) );
					}
				}
				else
					applyStyle = true;
			}
			else
				applyStyle = true;

			// Get the next node to be processed.
			currentNode = currentNode.getNextSourceNode();
		}

		// Apply the style if we have something to which apply it.
		if ( applyStyle && hasContents && styleRange && !styleRange.collapsed )
		{
			// Build the style element, based on the style object definition.
			var styleNode = getElement( this, document );

			// Get the element that holds the entire range.
			var parent = styleRange.getCommonAncestor();

			// Loop through the parents, removing the redundant attributes
			// from the element to be applied.
			while ( styleNode && parent )
			{
				if ( parent.getName() == elementName )
				{
					for ( var attName in def.attributes )
					{
						if ( styleNode.getAttribute( attName ) == parent.getAttribute( attName ) )
							styleNode.removeAttribute( attName );
					}

					for ( var styleName in def.styles )
					{
						if ( styleNode.getStyle( styleName ) == parent.getStyle( styleName ) )
							styleNode.removeStyle( styleName );
					}

					if ( !styleNode.hasAttributes() )
					{
						styleNode = null;
						break;
					}
				}

				parent = parent.getParent();
			}

			if ( styleNode )
			{
				// Move the contents of the range to the style element.
				styleRange.extractContents().appendTo( styleNode );

				// Here we do some cleanup, removing all duplicated
				// elements from the style element.
				removeFromInsideElement( this, styleNode );

				// Insert it into the range position (it is collapsed after
				// extractContents).
				styleRange.insertNode( styleNode );

				// Let's merge our new style with its neighbors, if possible.
				mergeSiblings( styleNode );

				// As the style system breaks text nodes constantly, let's normalize
				// things for performance.
				// With IE, some paragraphs get broken when calling normalize()
				// repeatedly. Also, for IE, we must normalize body, not documentElement.
				// IE is also known for having a "crash effect" with normalize().
				// We should try to normalize with IE too in some way, somewhere.
				if ( !CKEDITOR.env.ie )
					styleNode.$.normalize();
			}

			// Style applied, let's release the range, so it gets
			// re-initialized in the next loop.
			styleRange = null;
		}
	}

	// Remove the temporary marking node. (#4111)
	marker && marker.remove();

	range.moveToBookmark( bookmark );
}

function removeInlineStyle( range )
{
	/*
	 * Make sure our range has included all "collapsed" parent inline nodes so
	 * that our operation logic can be simpler.
	 */
	range.enlarge( CKEDITOR.ENLARGE_ELEMENT );

	var bookmark = range.createBookmark(),
		startNode = bookmark.startNode;

	if ( range.collapsed )
	{
		var startPath = new CKEDITOR.dom.elementPath( startNode.getParent() ),
			// The topmost element in the element path which we should jump out of.
			boundaryElement;

		for ( var i = 0, element ; i < startPath.elements.length
				&& ( element = startPath.elements[i] ) ; i++ )
		{
			/*
			 * 1. If it's collapsed inside text nodes, try to remove the style from the whole element.
			 *
			 * 2. Otherwise if it's collapsed on element boundaries, moving the selection
			 *    outside the styles instead of removing the whole tag,
			 *    also make sure other inner styles were well preserved. (#3309)
			 */
			if ( element == startPath.block || element == startPath.blockLimit )
				break;

			if ( this.checkElementRemovable( element ) )
			{
				var endOfElement = range.checkBoundaryOfElement( element, CKEDITOR.END ),
					startOfElement = !endOfElement && range.checkBoundaryOfElement( element, CKEDITOR.START );
				if ( startOfElement || endOfElement )
				{
					boundaryElement = element;
					boundaryElement.match = startOfElement ? 'start' : 'end';
				}
				else
				{
					/*
					 * Before removing the style node, there may be a sibling to the style node
					 * that's exactly the same to the one to be removed. To the user, it makes
					 * no difference that they're separate entities in the DOM tree. So, merge
					 * them before removal.
					 */
					mergeSiblings( element );
					removeFromElement( this, element );
				}
			}
		}

		// Re-create the style tree after/before the boundary element,
		// the replication start from bookmark start node to define the
		// new range.
		if ( boundaryElement )
		{
			var clonedElement = startNode;
			for ( i = 0 ;; i++ )
			{
				var newElement = startPath.elements[ i ];
				if ( newElement.equals( boundaryElement ) )
					break;
				// Avoid copying any matched element.
				else if ( newElement.match )
					continue;
				else
					newElement = newElement.clone();
				newElement.append( clonedElement );
				clonedElement = newElement;
			}
			clonedElement[ boundaryElement.match == 'start' ? 'insertBefore' : 'insertAfter' ]( boundaryElement );
		}
	}
	else
	{
		/*
		 * Now our range isn't collapsed. Let's walk from the start node to the end
		 * node via DFS and remove the styles one-by-one.
		 */
		var endNode = bookmark.endNode,
			me = this;

		/*
		 * Find out the style ancestor that needs to be broken down at startNode
		 * and endNode.
		 */
		function breakNodes()
		{
			var startPath = new CKEDITOR.dom.elementPath( startNode.getParent() ),
				endPath = new CKEDITOR.dom.elementPath( endNode.getParent() ),
				breakStart = null,
				breakEnd = null;
			for ( var i = 0 ; i < startPath.elements.length ; i++ )
			{
				var element = startPath.elements[ i ];

				if ( element == startPath.block || element == startPath.blockLimit )
					break;

				if ( me.checkElementRemovable( element ) )
					breakStart = element;
			}
			for ( i = 0 ; i < endPath.elements.length ; i++ )
			{
				element = endPath.elements[ i ];

				if ( element == endPath.block || element == endPath.blockLimit )
					break;

				if ( me.checkElementRemovable( element ) )
					breakEnd = element;
			}

			if ( breakEnd )
				endNode.breakParent( breakEnd );
			if ( breakStart )
				startNode.breakParent( breakStart );
		}
		breakNodes();

		// Now, do the DFS walk.
		var currentNode = startNode.getNext();
		while ( !currentNode.equals( endNode ) )
		{
			/*
			 * Need to get the next node first because removeFromElement() can remove
			 * the current node from DOM tree.
			 */
			var nextNode = currentNode.getNextSourceNode();
			if ( currentNode.type == CKEDITOR.NODE_ELEMENT && this.checkElementRemovable( currentNode ) )
			{
				// Remove style from element or overriding element.
				if ( currentNode.getName() == this.element )
					removeFromElement( this, currentNode );
				else
					removeOverrides( currentNode, getOverrides( this )[ currentNode.getName() ] );

				/*
				 * removeFromElement() may have merged the next node with something before
				 * the startNode via mergeSiblings(). In that case, the nextNode would
				 * contain startNode and we'll have to call breakNodes() again and also
				 * reassign the nextNode to something after startNode.
				 */
				if ( nextNode.type == CKEDITOR.NODE_ELEMENT && nextNode.contains( startNode ) )
				{
					breakNodes();
					nextNode = startNode.getNext();
				}
			}
			currentNode = nextNode;
		}
	}

	range.moveToBookmark( bookmark );
}

function applyBlockStyle( range )
{
	// Serializable bookmarks are needed here since
	// elements may be merged.
	var bookmark = range.createBookmark( true );

	var iterator = range.createIterator();
	iterator.enforceRealBlocks = true;

	var block;
	var doc = range.document;
	var previousPreBlock;

	while ( ( block = iterator.getNextParagraph() ) )		// Only one =
	{
		var newBlock = getElement( this, doc );
		replaceBlock( block, newBlock );
	}

	range.moveToBookmark( bookmark );
}

// Replace the original block with new one, with special treatment
// for <pre> blocks to make sure content format is well preserved, and
// merging/splitting adjacent <pre> blocks when necessary. (#3188)
function replaceBlock( block, newBlock )
{
	var newBlockIsPre = newBlock.is( 'pre' );
	var blockIsPre = block.is( 'pre' );

	var isToPre = newBlockIsPre && !blockIsPre;
	var isFromPre = !newBlockIsPre && blockIsPre;

	if ( isToPre )
		newBlock = toPre( block, newBlock );
	else if ( isFromPre )
		// Split big <pre> into pieces before start to convert.
		newBlock = fromPres( splitIntoPres( block ), newBlock );
	else
		block.moveChildren( newBlock );

	newBlock.replace( block );

	if ( newBlockIsPre )
	{
		// Merge previous <pre> blocks.
		mergePre( newBlock );
	}
}

/**
 * Merge a <pre> block with a previous sibling if available.
 */
function mergePre( preBlock )
{
	var previousBlock;
	if ( !( ( previousBlock = preBlock.getPreviousSourceNode( true, CKEDITOR.NODE_ELEMENT ) )
			&& previousBlock.is
			&& previousBlock.is( 'pre' ) ) )
		return;

	// Merge the previous <pre> block contents into the current <pre>
	// block.
	//
	// Another thing to be careful here is that currentBlock might contain
	// a '\n' at the beginning, and previousBlock might contain a '\n'
	// towards the end. These new lines are not normally displayed but they
	// become visible after merging.
	var mergedHtml = replace( previousBlock.getHtml(), /\n$/, '' ) + '\n\n' +
			replace( preBlock.getHtml(), /^\n/, '' ) ;

	// Krugle: IE normalizes innerHTML from <pre>, breaking whitespaces.
	if ( CKEDITOR.env.ie )
		preBlock.$.outerHTML = '<pre>' + mergedHtml + '</pre>';
	else
		preBlock.setHtml( mergedHtml );

	previousBlock.remove();
}

/**
 * Split into multiple <pre> blocks separated by double line-break.
 * @param preBlock
 */
function splitIntoPres( preBlock )
{
	// Exclude the ones at header OR at tail,
	// and ignore bookmark content between them.
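	// For illustration (hypothetical input, not from the original source):
	// the replace below turns <pre>a\n\nb</pre> into <pre>a</pre><pre>b</pre>,
	// so splitIntoPres() returns [ 'a', 'b' ].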
	var duoBrRegex = /(\S\s*)\n(?:\s|(<span[^>]+_fck_bookmark.*?\/span>))*\n(?!$)/gi,
		blockName = preBlock.getName(),
		splitedHtml = replace( preBlock.getOuterHtml(),
			duoBrRegex,
			function( match, charBefore, bookmark )
			{
				return charBefore + '</pre>' + bookmark + '<pre>';
			} );

	var pres = [];
	splitedHtml.replace( /<pre>([\s\S]*?)<\/pre>/gi, function( match, preContent ){
		pres.push( preContent );
	} );
	return pres;
}

// Wrapper function of String::replace without considering head/tail bookmark nodes.
function replace( str, regexp, replacement )
{
	var headBookmark = '',
		tailBookmark = '';

	str = str.replace( /(^<span[^>]+_fck_bookmark.*?\/span>)|(<span[^>]+_fck_bookmark.*?\/span>$)/gi,
		function( str, m1, m2 ){
			m1 && ( headBookmark = m1 );
			m2 && ( tailBookmark = m2 );
			return '';
		} );
	return headBookmark + str.replace( regexp, replacement ) + tailBookmark;
}

/**
 * Converting a list of <pre> into blocks with format well preserved.
 */
function fromPres( preHtmls, newBlock )
{
	var docFrag = new CKEDITOR.dom.documentFragment( newBlock.getDocument() );
	for ( var i = 0 ; i < preHtmls.length ; i++ )
	{
		var blockHtml = preHtmls[ i ];

		// 1. Trim the first and last line-breaks immediately after and before <pre>,
		//    they're not visible.
		blockHtml = blockHtml.replace( /(\r\n|\r)/g, '\n' ) ;
		blockHtml = replace( blockHtml, /^[ \t]*\n/, '' ) ;
		blockHtml = replace( blockHtml, /\n$/, '' ) ;
		// 2. Convert spaces or tabs at the beginning or at the end to &nbsp;
		blockHtml = replace( blockHtml, /^[ \t]+|[ \t]+$/g, function( match, offset, s )
			{
				if ( match.length == 1 )	// one space, preserve it
					return '&nbsp;' ;
				else if ( !offset )		// beginning of block
					return CKEDITOR.tools.repeat( '&nbsp;', match.length - 1 ) + ' ';
				else				// end of block
					return ' ' + CKEDITOR.tools.repeat( '&nbsp;', match.length - 1 );
			} ) ;

		// 3. Convert \n to <BR>.
		// 4. Convert contiguous (i.e. non-singular) spaces or tabs to &nbsp;
		blockHtml = blockHtml.replace( /\n/g, '<br>' ) ;
		blockHtml = blockHtml.replace( /[ \t]{2,}/g,
			function ( match )
			{
				return CKEDITOR.tools.repeat( '&nbsp;', match.length - 1 ) + ' ' ;
			} ) ;

		var newBlockClone = newBlock.clone();
		newBlockClone.setHtml( blockHtml );
		docFrag.append( newBlockClone );
	}
	return docFrag;
}

/**
 * Converting from a non-PRE block to a PRE block in formatting operations.
 */
function toPre( block, newBlock )
{
	// First trim the block content.
	var preHtml = block.getHtml();

	// 1. Trim head/tail spaces, they're not visible.
	preHtml = replace( preHtml, /(?:^[ \t\n\r]+)|(?:[ \t\n\r]+$)/g, '' );
	// 2. Delete ANSI whitespaces immediately before and after <BR> because
	//    they are not visible.
	preHtml = preHtml.replace( /[ \t\r\n]*(<br[^>]*>)[ \t\r\n]*/gi, '$1' );
	// 3. Compress other ANSI whitespaces since they're only visible as one
	//    single space previously.
	// 4. Convert &nbsp; to spaces since &nbsp; is no longer needed in <PRE>.
	preHtml = preHtml.replace( /([ \t\n\r]+|&nbsp;)/g, ' ' );
	// 5. Convert any <BR /> to \n. This must not be done earlier because
	//    the \n would then get compressed.
	preHtml = preHtml.replace( /<br\b[^>]*>/gi, '\n' );

	// Krugle: IE normalizes innerHTML to <pre>, breaking whitespaces.
	if ( CKEDITOR.env.ie )
	{
		var temp = block.getDocument().createElement( 'div' );
		temp.append( newBlock );
		newBlock.$.outerHTML = '<pre>' + preHtml + '</pre>';
		newBlock = temp.getFirst().remove();
	}
	else
		newBlock.setHtml( preHtml );

	return newBlock;
}

// Removes a style from an element itself, without caring about its subtree.
function removeFromElement( style, element )
{
	var def = style._.definition,
		attributes = def.attributes,
		styles = def.styles,
		overrides = getOverrides( style );

	function removeAttrs()
	{
		for ( var attName in attributes )
		{
			// The 'class' element value must match (#1318).
			if ( attName == 'class' && element.getAttribute( attName ) != attributes[ attName ] )
				continue;
			element.removeAttribute( attName );
		}
	}

	// Remove definition attributes/style from the element.
	removeAttrs();
	for ( var styleName in styles )
		element.removeStyle( styleName );

	// Now remove override styles on the element.
	attributes = overrides[ element.getName() ];
	if ( attributes )
		removeAttrs();
	removeNoAttribsElement( element );
}

// Removes a style from inside an element.
function removeFromInsideElement( style, element )
{
	var def = style._.definition,
		attribs = def.attributes,
		styles = def.styles,
		overrides = getOverrides( style );

	var innerElements = element.getElementsByTag( style.element );

	for ( var i = innerElements.count() ; --i >= 0 ; )
		removeFromElement( style, innerElements.getItem( i ) );

	// Now remove any other element with different name that is
	// defined to be overridden.
	for ( var overrideElement in overrides )
	{
		if ( overrideElement != style.element )
		{
			innerElements = element.getElementsByTag( overrideElement ) ;

			for ( i = innerElements.count() - 1 ; i >= 0 ; i-- )
			{
				var innerElement = innerElements.getItem( i );
				removeOverrides( innerElement, overrides[ overrideElement ] ) ;
			}
		}
	}
}

/**
 * Remove overriding styles/attributes from the specific element.
 * Note: Remove the element if no attributes remain.
 * @param {Object} element
 * @param {Object} overrides
 */
function removeOverrides( element, overrides )
{
	var attributes = overrides && overrides.attributes ;

	if ( attributes )
	{
		for ( var i = 0 ; i < attributes.length ; i++ )
		{
			var attName = attributes[i][0],
				actualAttrValue ;

			if ( ( actualAttrValue = element.getAttribute( attName ) ) )
			{
				var attValue = attributes[i][1] ;

				// Remove the attribute if:
				//    - The override definition value is null ;
				//    - The override definition value is a string that
				//      matches the attribute value exactly.
				//    - The override definition value is a regex that
				//      has matches in the attribute value.
				if ( attValue === null ||
						( attValue.test && attValue.test( actualAttrValue ) ) ||
						( typeof attValue == 'string' && actualAttrValue == attValue ) )
					element.removeAttribute( attName ) ;
			}
		}
	}

	removeNoAttribsElement( element );
}

// If the element has no more attributes, remove it.
function removeNoAttribsElement( element )
{
	// If no more attributes remained in the element, remove it,
	// leaving its children.
	if ( !element.hasAttributes() )
	{
		// Removing elements may open points where merging is possible,
		// so let's cache the first and last nodes for later checking.
		var firstChild = element.getFirst();
		var lastChild = element.getLast();

		element.remove( true );

		if ( firstChild )
		{
			// Check the cached nodes for merging.
			mergeSiblings( firstChild );

			if ( lastChild && !firstChild.equals( lastChild ) )
				mergeSiblings( lastChild );
		}
	}
}

function mergeSiblings( element )
{
	if ( !element || element.type != CKEDITOR.NODE_ELEMENT || !CKEDITOR.dtd.$removeEmpty[ element.getName() ] )
		return;

	mergeElements( element, element.getNext(), true );
	mergeElements( element, element.getPrevious() );
}

function mergeElements( element, sibling, isNext )
{
	if ( sibling && sibling.type == CKEDITOR.NODE_ELEMENT )
	{
		var hasBookmark = sibling.getAttribute( '_fck_bookmark' );

		if ( hasBookmark )
			sibling = isNext ?
				sibling.getNext() : sibling.getPrevious();

		if ( sibling && sibling.type == CKEDITOR.NODE_ELEMENT && element.isIdentical( sibling ) )
		{
			// Save the last child to be checked too, to merge things like
			// <b><i></i></b><b><i></i></b> => <b><i></i></b>
			var innerSibling = isNext ? element.getLast() : element.getFirst();

			if ( hasBookmark )
				( isNext ? sibling.getPrevious() : sibling.getNext() ).move( element, !isNext );

			sibling.moveChildren( element, !isNext );
			sibling.remove();

			// Now check the last inner child (see two comments above).
			if ( innerSibling )
				mergeSiblings( innerSibling );
		}
	}
}

function getElement( style, targetDocument )
{
	var el;

	var def = style._.definition;

	var elementName = style.element;

	// The "*" element name will always be a span for this function.
	if ( elementName == '*' )
		elementName = 'span';

	// Create the element.
	el = new CKEDITOR.dom.element( elementName, targetDocument );

	return setupElement( el, style );
}

function setupElement( el, style )
{
	var def = style._.definition;
	var attributes = def.attributes;
	var styles = CKEDITOR.style.getStyleText( def );

	// Assign all defined attributes.
	if ( attributes )
	{
		for ( var att in attributes )
		{
			el.setAttribute( att, attributes[ att ] );
		}
	}

	// Assign all defined styles.
	if ( styles )
		el.setAttribute( 'style', styles );

	return el;
}

var varRegex = /#\((.+?)\)/g;
function replaceVariables( list, variablesValues )
{
	for ( var item in list )
	{
		list[ item ] = list[ item ].replace( varRegex, function( match, varName )
			{
				return variablesValues[ varName ];
			});
	}
}

// Returns an object that can be used for style matching comparison.
// Attributes names and values are all lowercased, and the styles get
// merged with the style attribute.
function getAttributesForComparison( styleDefinition )
{
	// If we have already computed it, just return it.
	var attribs = styleDefinition._AC;
	if ( attribs )
		return attribs;

	attribs = {};

	var length = 0;

	// Loop through all defined attributes.
	var styleAttribs = styleDefinition.attributes;
	if ( styleAttribs )
	{
		for ( var styleAtt in styleAttribs )
		{
			length++;
			attribs[ styleAtt ] = styleAttribs[ styleAtt ];
		}
	}

	// Includes the style definitions.
	var styleText = CKEDITOR.style.getStyleText( styleDefinition );
	if ( styleText )
	{
		if ( !attribs[ 'style' ] )
			length++;
		attribs[ 'style' ] = styleText;
	}

	// Appends the "length" information to the object.
	attribs._length = length;

	// Return it, saving it to the next request.
	return ( styleDefinition._AC = attribs );
}

/**
 * Get the collection used to compare the elements and attributes,
 * defined in this style overrides, with other element. All information in
 * it is lowercased.
 * @param {CKEDITOR.style} style
 */
function getOverrides( style )
{
	if ( style._.overrides )
		return style._.overrides;

	var overrides = ( style._.overrides = {} ),
		definition = style._.definition.overrides;

	if ( definition )
	{
		// The override description can be a string, object or array.
		// Internally, we'll handle arrays only, so transform it if needed.
		if ( !CKEDITOR.tools.isArray( definition ) )
			definition = [ definition ];

		// Loop through all override definitions.
		for ( var i = 0 ; i < definition.length ; i++ )
		{
			var override = definition[i];
			var elementName;
			var overrideEl;
			var attrs;

			// It can be a string with the element name.
			if ( typeof override == 'string' )
				elementName = override.toLowerCase();
			// Or an object.
			else
			{
				elementName = override.element ?
					override.element.toLowerCase() : style.element;
				attrs = override.attributes;
			}

			// We can have more than one override definition for the same
			// element name, so we attempt to simply append information to
			// it if it already exists.
			overrideEl = overrides[ elementName ] || ( overrides[ elementName ] = {} );

			if ( attrs )
			{
				// The returning attributes list is an array, because we
				// could have different override definitions for the same
				// attribute name.
				var overrideAttrs = ( overrideEl.attributes = overrideEl.attributes || new Array() );

				for ( var attName in attrs )
				{
					// Each item in the attributes array is also an array,
					// where [0] is the attribute name and [1] is the
					// override value.
					overrideAttrs.push( [ attName.toLowerCase(), attrs[ attName ] ] );
				}
			}
		}
	}

	return overrides;
}

function normalizeCssText( unparsedCssText, nativeNormalize )
{
	var styleText;
	if ( nativeNormalize !== false )
	{
		// Injects the style in a temporary span object, so the browser parses it,
		// retrieving its final format.
		var temp = new CKEDITOR.dom.element( 'span' );
		temp.setAttribute( 'style', unparsedCssText );
		styleText = temp.getAttribute( 'style' );
	}
	else
		styleText = unparsedCssText;

	// Shrinking white-spaces around colon and semi-colon (#4147).
	// Compensate tail semi-colon.
	return styleText.replace( /\s*([;:])\s*/g, '$1' )
					.replace( /([^\s;])$/, '$1;')
					.toLowerCase();
}

function applyStyle( document, remove )
{
	// Get all ranges from the selection.
	var selection = document.getSelection();
	var ranges = selection.getRanges();
	var func = remove ? this.removeFromRange : this.applyToRange;

	// Apply the style to the ranges.
	for ( var i = 0 ; i < ranges.length ; i++ )
		func.call( this, ranges[ i ] );

	// Select the ranges again.
	selection.selectRanges( ranges );
}
})();

CKEDITOR.styleCommand = function( style )
{
	this.style = style;
};

CKEDITOR.styleCommand.prototype.exec = function( editor )
{
	editor.focus();

	var doc = editor.document;

	if ( doc )
	{
		if ( this.state == CKEDITOR.TRISTATE_OFF )
			this.style.apply( doc );
		else if ( this.state == CKEDITOR.TRISTATE_ON )
			this.style.remove( doc );
	}

	return !!doc;
};
equalitie/rightscase
sites/all/libraries/ckeditor/_source/plugins/styles/plugin.js
JavaScript
gpl-2.0
37,074
28.848631
317
0.64461
false
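A minimal usage sketch for the styles API above. The "editor" variable and the command name are hypothetical; style.apply/style.remove are the same entry points CKEDITOR.styleCommand.prototype.exec calls at the end of the file.

// Usage sketch (illustrative; "editor" is assumed to be an existing
// CKEDITOR editor instance with a selection).
var boldStyle = new CKEDITOR.style( { element : 'b' } );
boldStyle.apply( editor.document );     // routed through applyToRange -> applyInlineStyle
boldStyle.remove( editor.document );    // routed through removeFromRange -> removeInlineStyle
// The same style object can back a toolbar command:
editor.addCommand( 'demoBold', new CKEDITOR.styleCommand( boldStyle ) );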
#define _GNU_SOURCE
#include <sys/stat.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define BUF_SIZE 1024

int main(int argc, char *argv[])
{
    int tot, j;
    ssize_t nread;
    char buf[BUF_SIZE];
    FILE *fp;
    char cwd[PATH_MAX];

    /* Change our current working directory to that of the
       crashing process */

    snprintf(cwd, PATH_MAX, "/proc/%s/cwd", argv[1]);
    chdir(cwd);

    /* Write output to file "core.info" in that directory */

    fp = fopen("core.info", "w+");
    if (fp == NULL)
        exit(EXIT_FAILURE);

    /* Display command-line arguments given to core_pattern
       pipe program */

    fprintf(fp, "argc=%d\n", argc);
    for (j = 0; j < argc; j++)
        fprintf(fp, "argv[%d]=<%s>\n", j, argv[j]);

    /* Count bytes in standard input (the core dump) */

    tot = 0;
    while ((nread = read(STDIN_FILENO, buf, BUF_SIZE)) > 0)
        tot += nread;
    fprintf(fp, "Total bytes in core dump: %d\n", tot);

    exit(EXIT_SUCCESS);
}
mhabrnal/abrt
tests/runtests/bz618602-core_pattern-handler-truncates-parameters/core_pattern_pipe_test.c
C
gpl-2.0
1,040
21.12766
60
0.582692
false
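For context, a sketch of how a handler like the one above gets registered. The binary path is an assumption; the leading '|' tells the kernel to pipe core dumps to the program, and %p/%u/%s are the kernel's documented core_pattern specifiers (PID, UID, signal number).

/* Hypothetical registration sketch (must run as root); only the
   binary path is made up, the specifiers are the documented ones. */
#include <stdio.h>

int main(void)
{
    FILE *cp = fopen("/proc/sys/kernel/core_pattern", "w");
    if (cp == NULL)
        return 1;
    /* %% escapes the specifiers so the kernel, not fprintf, expands them. */
    fprintf(cp, "|/usr/local/bin/core_pattern_pipe_test %%p UID=%%u sig=%%s");
    fclose(cp);
    return 0;
}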
/*
    Copyright (C) 2014 Paul Davis
    Author: David Robillard

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef __ardour_parameter_descriptor_h__
#define __ardour_parameter_descriptor_h__

#include "ardour/variant.h"

#include "evoral/Parameter.hpp"
#include "evoral/ParameterDescriptor.hpp"

namespace ARDOUR {

typedef std::map<const std::string, const float> ScalePoints;

/** Descriptor of a parameter or control.
 *
 * Essentially a union of LADSPA, VST and LV2 info.
 */
struct LIBARDOUR_API ParameterDescriptor : public Evoral::ParameterDescriptor
{
	enum Unit {
		NONE,       ///< No unit
		DB,         ///< Decibels
		MIDI_NOTE,  ///< MIDI note number
		HZ,         ///< Frequency in Hertz
	};

	ParameterDescriptor(const Evoral::Parameter& parameter);

	ParameterDescriptor();

	/** Set step, smallstep, and largestep, based on current description. */
	void update_steps();

	std::string                    label;
	std::string                    print_fmt;    ///< format string for pretty printing
	boost::shared_ptr<ScalePoints> scale_points;
	uint32_t                       key;          ///< for properties
	Variant::Type                  datatype;     ///< for properties
	AutomationType                 type;
	Unit                           unit;
	float                          step;
	float                          smallstep;
	float                          largestep;
	bool                           integer_step;
	bool                           logarithmic;
	bool                           sr_dependent;
	bool                           min_unbound;
	bool                           max_unbound;
	bool                           enumeration;
};

} // namespace ARDOUR

#endif // __ardour_parameter_descriptor_h__
satriani-vai/ardour
libs/ardour/ardour/parameter_descriptor.h
C
gpl-2.0
2,387
32.152778
82
0.613741
false
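A minimal sketch of filling in the descriptor above for a decibel-valued control. The field values are illustrative; only members and methods declared in this header are touched.

// Illustrative only; assumes the header above is on the include path.
#include "ardour/parameter_descriptor.h"

ARDOUR::ParameterDescriptor make_gain_descriptor ()
{
	ARDOUR::ParameterDescriptor desc;   // default-constructed
	desc.label       = "Gain";
	desc.unit        = ARDOUR::ParameterDescriptor::DB;
	desc.logarithmic = false;
	desc.update_steps ();               // derive step/smallstep/largestep
	return desc;
}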
/* Relative (relocatable) prefix support.
   Copyright (C) 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of libiberty.

GCC is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
USA.  */

/*

@deftypefn Extension {const char*} make_relative_prefix (const char *@var{progname}, const char *@var{bin_prefix}, const char *@var{prefix})

Given three strings @var{progname}, @var{bin_prefix}, @var{prefix},
return a string that gets to @var{prefix} starting with the directory
portion of @var{progname} and a relative pathname of the difference
between @var{bin_prefix} and @var{prefix}.

For example, if @var{bin_prefix} is @code{/alpha/beta/gamma/gcc/delta},
@var{prefix} is @code{/alpha/beta/gamma/omega/}, and @var{progname} is
@code{/red/green/blue/gcc}, then this function will return
@code{/red/green/blue/../../omega/}.

The return value is normally allocated via @code{malloc}.  If no
relative prefix can be found, return @code{NULL}.

@end deftypefn

*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <string.h>

#include "ansidecl.h"
#include "libiberty.h"

#ifndef R_OK
#define R_OK 4
#define W_OK 2
#define X_OK 1
#endif

#ifndef DIR_SEPARATOR
# define DIR_SEPARATOR '/'
#endif

#if defined (_WIN32) || defined (__MSDOS__) \
    || defined (__DJGPP__) || defined (__OS2__)
# define HAVE_DOS_BASED_FILE_SYSTEM
# define HAVE_HOST_EXECUTABLE_SUFFIX
# define HOST_EXECUTABLE_SUFFIX ".exe"
# ifndef DIR_SEPARATOR_2
#  define DIR_SEPARATOR_2 '\\'
# endif
# define PATH_SEPARATOR ';'
#else
# define PATH_SEPARATOR ':'
#endif

#ifndef DIR_SEPARATOR_2
# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
#else
# define IS_DIR_SEPARATOR(ch) \
	(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
#endif

#define DIR_UP ".."

static char *save_string PARAMS ((const char *, int));
static char **split_directories PARAMS ((const char *, int *));
static void free_split_directories PARAMS ((char **));

static char *
save_string (s, len)
     const char *s;
     int len;
{
  char *result = malloc (len + 1);

  memcpy (result, s, len);
  result[len] = 0;
  return result;
}

/* Split a filename into component directories.  */

static char **
split_directories (name, ptr_num_dirs)
     const char *name;
     int *ptr_num_dirs;
{
  int num_dirs = 0;
  char **dirs;
  const char *p, *q;
  int ch;

  /* Count the number of directories.  Special case MSDOS disk names as part
     of the initial directory.  */
  p = name;
#ifdef HAVE_DOS_BASED_FILE_SYSTEM
  if (name[1] == ':' && IS_DIR_SEPARATOR (name[2]))
    {
      p += 3;
      num_dirs++;
    }
#endif /* HAVE_DOS_BASED_FILE_SYSTEM */

  while ((ch = *p++) != '\0')
    {
      if (IS_DIR_SEPARATOR (ch))
	{
	  num_dirs++;
	  while (IS_DIR_SEPARATOR (*p))
	    p++;
	}
    }

  dirs = (char **) malloc (sizeof (char *) * (num_dirs + 2));
  if (dirs == NULL)
    return NULL;

  /* Now copy the directory parts.  */
  num_dirs = 0;
  p = name;
#ifdef HAVE_DOS_BASED_FILE_SYSTEM
  if (name[1] == ':' && IS_DIR_SEPARATOR (name[2]))
    {
      dirs[num_dirs++] = save_string (p, 3);
      if (dirs[num_dirs - 1] == NULL)
	{
	  free (dirs);
	  return NULL;
	}
      p += 3;
    }
#endif /* HAVE_DOS_BASED_FILE_SYSTEM */

  q = p;
  while ((ch = *p++) != '\0')
    {
      if (IS_DIR_SEPARATOR (ch))
	{
	  while (IS_DIR_SEPARATOR (*p))
	    p++;

	  dirs[num_dirs++] = save_string (q, p - q);
	  if (dirs[num_dirs - 1] == NULL)
	    {
	      dirs[num_dirs] = NULL;
	      free_split_directories (dirs);
	      return NULL;
	    }
	  q = p;
	}
    }

  if (p - 1 - q > 0)
    dirs[num_dirs++] = save_string (q, p - 1 - q);
  dirs[num_dirs] = NULL;

  if (dirs[num_dirs - 1] == NULL)
    {
      free_split_directories (dirs);
      return NULL;
    }

  if (ptr_num_dirs)
    *ptr_num_dirs = num_dirs;
  return dirs;
}

/* Release storage held by split directories.  */

static void
free_split_directories (dirs)
     char **dirs;
{
  int i = 0;

  while (dirs[i] != NULL)
    free (dirs[i++]);

  free ((char *) dirs);
}

/* Given three strings PROGNAME, BIN_PREFIX, PREFIX, return a string that gets
   to PREFIX starting with the directory portion of PROGNAME and a relative
   pathname of the difference between BIN_PREFIX and PREFIX.

   For example, if BIN_PREFIX is /alpha/beta/gamma/gcc/delta, PREFIX is
   /alpha/beta/gamma/omega/, and PROGNAME is /red/green/blue/gcc, then this
   function will return /red/green/blue/../../omega/.

   If no relative prefix can be found, return NULL.  */

char *
make_relative_prefix (progname, bin_prefix, prefix)
     const char *progname;
     const char *bin_prefix;
     const char *prefix;
{
  char **prog_dirs, **bin_dirs, **prefix_dirs;
  int prog_num, bin_num, prefix_num;
  int i, n, common;
  int needed_len;
  char *ret, *ptr;

  if (progname == NULL || bin_prefix == NULL || prefix == NULL)
    return NULL;

  prog_dirs = split_directories (progname, &prog_num);
  bin_dirs = split_directories (bin_prefix, &bin_num);
  if (bin_dirs == NULL || prog_dirs == NULL)
    return NULL;

  /* If there is no full pathname, try to find the program by checking in each
     of the directories specified in the PATH environment variable.  */
  if (prog_num == 1)
    {
      char *temp;

      temp = getenv ("PATH");
      if (temp)
	{
	  char *startp, *endp, *nstore;
	  size_t prefixlen = strlen (temp) + 1;
	  if (prefixlen < 2)
	    prefixlen = 2;

	  nstore = (char *) alloca (prefixlen + strlen (progname) + 1);

	  startp = endp = temp;
	  while (1)
	    {
	      if (*endp == PATH_SEPARATOR || *endp == 0)
		{
		  if (endp == startp)
		    {
		      nstore[0] = '.';
		      nstore[1] = DIR_SEPARATOR;
		      nstore[2] = '\0';
		    }
		  else
		    {
		      strncpy (nstore, startp, endp - startp);
		      if (! IS_DIR_SEPARATOR (endp[-1]))
			{
			  nstore[endp - startp] = DIR_SEPARATOR;
			  nstore[endp - startp + 1] = 0;
			}
		      else
			nstore[endp - startp] = 0;
		    }
		  strcat (nstore, progname);
		  if (! access (nstore, X_OK)
#ifdef HAVE_HOST_EXECUTABLE_SUFFIX
		      || ! access (strcat (nstore, HOST_EXECUTABLE_SUFFIX), X_OK)
#endif
		      )
		    {
		      free_split_directories (prog_dirs);
		      progname = nstore;
		      prog_dirs = split_directories (progname, &prog_num);
		      if (prog_dirs == NULL)
			{
			  free_split_directories (bin_dirs);
			  return NULL;
			}
		      break;
		    }

		  if (*endp == 0)
		    break;
		  endp = startp = endp + 1;
		}
	      else
		endp++;
	    }
	}
    }

  /* Remove the program name from comparison of directory names.  */
  prog_num--;

  /* If we are still installed in the standard location, we don't need to
     specify relative directories.  Also, if argv[0] still doesn't contain
     any directory specifiers after the search above, then there is not much
     we can do.  */
  if (prog_num == bin_num)
    {
      for (i = 0; i < bin_num; i++)
	{
	  if (strcmp (prog_dirs[i], bin_dirs[i]) != 0)
	    break;
	}

      if (prog_num <= 0 || i == bin_num)
	{
	  free_split_directories (prog_dirs);
	  free_split_directories (bin_dirs);
	  prog_dirs = bin_dirs = (char **) 0;
	  return NULL;
	}
    }

  prefix_dirs = split_directories (prefix, &prefix_num);
  if (prefix_dirs == NULL)
    {
      free_split_directories (prog_dirs);
      free_split_directories (bin_dirs);
      return NULL;
    }

  /* Find how many directories are in common between bin_prefix & prefix.  */
  n = (prefix_num < bin_num) ? prefix_num : bin_num;
  for (common = 0; common < n; common++)
    {
      if (strcmp (bin_dirs[common], prefix_dirs[common]) != 0)
	break;
    }

  /* If there are no common directories, there can be no relative prefix.  */
  if (common == 0)
    {
      free_split_directories (prog_dirs);
      free_split_directories (bin_dirs);
      free_split_directories (prefix_dirs);
      return NULL;
    }

  /* Two passes: first figure out the size of the result string, and
     then construct it.  */
  needed_len = 0;
  for (i = 0; i < prog_num; i++)
    needed_len += strlen (prog_dirs[i]);
  needed_len += sizeof (DIR_UP) * (bin_num - common);
  for (i = common; i < prefix_num; i++)
    needed_len += strlen (prefix_dirs[i]);
  needed_len += 1; /* Trailing NUL.  */
  ret = (char *) malloc (needed_len);
  if (ret == NULL)
    return NULL;

  /* Build up the pathnames in argv[0].  */
  *ret = '\0';
  for (i = 0; i < prog_num; i++)
    strcat (ret, prog_dirs[i]);

  /* Now build up the ..'s.  */
  ptr = ret + strlen(ret);
  for (i = common; i < bin_num; i++)
    {
      strcpy (ptr, DIR_UP);
      ptr += sizeof (DIR_UP) - 1;
      *(ptr++) = DIR_SEPARATOR;
    }
  *ptr = '\0';

  /* Put in directories to move over to prefix.  */
  for (i = common; i < prefix_num; i++)
    strcat (ret, prefix_dirs[i]);

  free_split_directories (prog_dirs);
  free_split_directories (bin_dirs);
  free_split_directories (prefix_dirs);

  return ret;
}
unofficial-opensource-apple/gccfast
libiberty/make-relative-prefix.c
C
gpl-2.0
9,663
23.840617
140
0.607989
false
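A usage sketch mirroring the example in the function's own documentation block above; the inputs and the expected result string are taken verbatim from that comment.

#include <stdlib.h>
#include "libiberty.h"

static void
demo (void)
{
  /* The result is malloc'd, so the caller owns (and frees) it.  */
  char *rel = make_relative_prefix ("/red/green/blue/gcc",
                                    "/alpha/beta/gamma/gcc/delta",
                                    "/alpha/beta/gamma/omega/");
  if (rel != NULL)
    {
      /* rel == "/red/green/blue/../../omega/" here.  */
      free (rel);
    }
}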
/*******************************************************************************
 * file name: predict_the_winner.cpp
 * author: Hui Chen. (c) 2021
 * mail: alex.chenhui@gmail.com
 * created time: 2021/09/30-11:06:52
 * modified time: 2021/09/30-11:06:52
 *******************************************************************************/
#include <iostream>
#include <vector>
#include <map>
using namespace std;

class Solution {
public:
    bool PredictTheWinner(vector<int>& nums) {
        bool ret = false;
        int sum = 0;
        for (auto e : nums)
            sum += e;
        int v1 = f(nums, 0, nums.size()-1);
        if (v1 >= sum - v1)
            ret = true;
        return ret;
    }
    int f(vector<int>& vec, int start, int end) {
        //cout<<"start:"<<start<<",end:"<<end<<endl;
        if (start > end)
            return 0;
        if (start == end)
            return vec[start];
        if (end - start == 1)
            return max(vec[start], vec[end]);
        vector<int> v;
        v.push_back(start);
        v.push_back(end);
        if (mp.count(v))
            return mp[v];
        int v1 = min(vec[start] + f(vec, start+1, end-1), vec[start] + f(vec, start+2, end));
        int v2 = min(vec[end] + f(vec, start, end-2), vec[end] + f(vec, start+1, end-1));
        //cout<<"v1:"<<v1<<",v2:"<<v2<<endl;
        int ret = max(v1, v2);
        mp[v] = ret;
        return ret;
    }
private:
    map<vector<int>, int> mp;
};

int main() {}
wisehead/Leetcode
73.GameTheory/0486.Predict_the_Winner.Array_Math_DynamicProgramming_Recursion_GameTheory.Medium/predict_the_winner.memoization.cpp
C++
gpl-3.0
1,607
34.711111
93
0.41257
false
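Since the file above ships an empty main(), here is a hypothetical driver exercising the solver on the two published LeetCode samples; the expected outputs are the problem's known answers.

// Hypothetical driver (would replace the empty main() above).
#include <iostream>
#include <vector>

int main() {
    Solution s;
    std::vector<int> a{1, 5, 2};        // sample 1: player 1 loses
    std::vector<int> b{1, 5, 233, 7};   // sample 2: player 1 wins
    std::cout << std::boolalpha
              << s.PredictTheWinner(a) << "\n"    // false
              << s.PredictTheWinner(b) << "\n";   // true
    return 0;
}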
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | foam-extend: Open Source CFD
   \\    /   O peration     |
    \\  /    A nd           | For copyright notice see file Copyright
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of foam-extend.

    foam-extend is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by the
    Free Software Foundation, either version 3 of the License, or (at your
    option) any later version.

    foam-extend is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with foam-extend.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "SRFTotalTemperatureFvPatchScalarField.H"
#include "addToRunTimeSelectionTable.H"
#include "fvPatchFieldMapper.H"
#include "volFields.H"
#include "surfaceFields.H"
#include "SRFModel.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

namespace Foam
{

// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

SRFTotalTemperatureFvPatchScalarField::SRFTotalTemperatureFvPatchScalarField
(
    const fvPatch& p,
    const DimensionedField<scalar, volMesh>& iF
)
:
    fixedValueFvPatchScalarField(p, iF),
    UName_("Undefined"),
    phiName_("Undefined"),
    psiName_("Undefined"),
    gamma_(0.0),
    relative_(0),
    T0_(p.size(), 0.0)
{}


SRFTotalTemperatureFvPatchScalarField::SRFTotalTemperatureFvPatchScalarField
(
    const fvPatch& p,
    const DimensionedField<scalar, volMesh>& iF,
    const dictionary& dict
)
:
    fixedValueFvPatchScalarField(p, iF),
    UName_(dict.lookup("U")),
    phiName_(dict.lookup("phi")),
    psiName_(dict.lookup("psi")),
    gamma_(readScalar(dict.lookup("gamma"))),
    relative_(dict.lookup("relative")),
    T0_("T0", dict, p.size())
{
    if (dict.found("value"))
    {
        fvPatchField<scalar>::operator=
        (
            scalarField("value", dict, p.size())
        );
    }
    else
    {
        fvPatchField<scalar>::operator=(T0_);
    }
}


SRFTotalTemperatureFvPatchScalarField::SRFTotalTemperatureFvPatchScalarField
(
    const SRFTotalTemperatureFvPatchScalarField& ptf,
    const fvPatch& p,
    const DimensionedField<scalar, volMesh>& iF,
    const fvPatchFieldMapper& mapper
)
:
    fixedValueFvPatchScalarField(ptf, p, iF, mapper),
    UName_(ptf.UName_),
    phiName_(ptf.phiName_),
    psiName_(ptf.psiName_),
    gamma_(ptf.gamma_),
    relative_(ptf.relative_),
    T0_(ptf.T0_, mapper)
{}


SRFTotalTemperatureFvPatchScalarField::SRFTotalTemperatureFvPatchScalarField
(
    const SRFTotalTemperatureFvPatchScalarField& tppsf
)
:
    fixedValueFvPatchScalarField(tppsf),
    UName_(tppsf.UName_),
    phiName_(tppsf.phiName_),
    psiName_(tppsf.psiName_),
    gamma_(tppsf.gamma_),
    relative_(tppsf.relative_),
    T0_(tppsf.T0_)
{}


SRFTotalTemperatureFvPatchScalarField::SRFTotalTemperatureFvPatchScalarField
(
    const SRFTotalTemperatureFvPatchScalarField& tppsf,
    const DimensionedField<scalar, volMesh>& iF
)
:
    fixedValueFvPatchScalarField(tppsf, iF),
    UName_(tppsf.UName_),
    phiName_(tppsf.phiName_),
    psiName_(tppsf.psiName_),
    gamma_(tppsf.gamma_),
    relative_(tppsf.relative_),
    T0_(tppsf.T0_)
{}


// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

void SRFTotalTemperatureFvPatchScalarField::autoMap
(
    const fvPatchFieldMapper& m
)
{
    fixedValueFvPatchScalarField::autoMap(m);
    T0_.autoMap(m);
}


void SRFTotalTemperatureFvPatchScalarField::rmap
(
    const fvPatchScalarField& ptf,
    const labelList& addr
)
{
    fixedValueFvPatchScalarField::rmap(ptf, addr);

    const SRFTotalTemperatureFvPatchScalarField& tiptf =
        refCast<const SRFTotalTemperatureFvPatchScalarField>(ptf);

    T0_.rmap(tiptf.T0_, addr);
}


void SRFTotalTemperatureFvPatchScalarField::updateCoeffs(const vectorField& Up)
{
    if (updated())
    {
        return;
    }

    const fvsPatchScalarField& phip =
        patch().lookupPatchField<surfaceScalarField, scalar>(phiName_);

    const fvPatchScalarField& psip =
        patch().lookupPatchField<volScalarField, scalar>(psiName_);

    scalar gM1ByG = (gamma_ - 1.0)/gamma_;

    operator==
    (
        T0_/(1.0 + 0.5*psip*gM1ByG*(1.0 - pos(phip))*magSqr(Up))
    );

    fixedValueFvPatchScalarField::updateCoeffs();
}


void SRFTotalTemperatureFvPatchScalarField::updateCoeffs()
{
    const fvPatchVectorField& U =
        patch().lookupPatchField<volVectorField, vector>(UName_);

    // If relative, include the effect of the SRF
    if (relative_)
    {
        // Get reference to the SRF model
        const SRF::SRFModel& srf =
            db().lookupObject<SRF::SRFModel>("SRFProperties");

        // Determine patch velocity due to SRF
        const vectorField SRFSurfaceNormalVelocity =
            srf.velocity(patch().Cf());

        updateCoeffs(U - SRFSurfaceNormalVelocity);
    }
    else
    {
        updateCoeffs(U);
    }
}


void SRFTotalTemperatureFvPatchScalarField::write(Ostream& os) const
{
    fvPatchScalarField::write(os);
    os.writeKeyword("U") << UName_ << token::END_STATEMENT << nl;
    os.writeKeyword("phi") << phiName_ << token::END_STATEMENT << nl;
    os.writeKeyword("psi") << psiName_ << token::END_STATEMENT << nl;
    os.writeKeyword("gamma") << gamma_ << token::END_STATEMENT << endl;
    os.writeKeyword("relative") << relative_ << token::END_STATEMENT << nl;
    T0_.writeEntry("T0", os);
    writeEntry("value", os);
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

makePatchTypeField(fvPatchScalarField, SRFTotalTemperatureFvPatchScalarField);

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

} // End namespace Foam

// ************************************************************************* //
WensiWu/openfoam-extend-foam-extend-3.1
src/finiteVolume/cfdTools/general/SRF/derivedFvPatchFields/SRFTotalTemperature/SRFTotalTemperatureFvPatchScalarField.C
C++
gpl-3.0
6,342
26.694323
79
0.602176
false
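For reference, a hypothetical boundaryField entry for a 0/T file exercising the keys read by the dictionary constructor above. The patch name and values are assumptions, and the type keyword is inferred from the class name by OpenFOAM's usual naming convention (class name minus the FvPatchScalarField suffix); the registered name is not shown in this source file.

    // Hypothetical case-dictionary sketch (not from the original sources).
    inlet
    {
        type        SRFTotalTemperature;
        U           U;
        phi         phi;
        psi         psi;
        gamma       1.4;
        relative    yes;
        T0          uniform 300;
        value       uniform 300;
    }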
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
  <head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
    <title>Compatibility with historic UNIX interfaces</title>
    <link rel="stylesheet" href="gettingStarted.css" type="text/css" />
    <meta name="generator" content="DocBook XSL Stylesheets V1.73.2" />
    <link rel="start" href="index.html" title="Berkeley DB Programmer's Reference Guide" />
    <link rel="up" href="program.html" title="Chapter 15.  Programmer Notes" />
    <link rel="prev" href="program_copy.html" title="Copying or moving databases" />
    <link rel="next" href="program_runtime.html" title="Run-time configuration" />
  </head>
  <body>
    <div xmlns="" class="navheader">
      <div class="libver">
        <p>Library Version 12.1.6.1</p>
      </div>
      <table width="100%" summary="Navigation header">
        <tr>
          <th colspan="3" align="center">Compatibility with historic UNIX interfaces</th>
        </tr>
        <tr>
          <td width="20%" align="left"><a accesskey="p" href="program_copy.html">Prev</a> </td>
          <th width="60%" align="center">Chapter 15.  Programmer Notes</th>
          <td width="20%" align="right"> <a accesskey="n" href="program_runtime.html">Next</a></td>
        </tr>
      </table>
      <hr />
    </div>
    <div class="sect1" lang="en" xml:lang="en">
      <div class="titlepage">
        <div>
          <div>
            <h2 class="title" style="clear: both"><a id="program_compatible"></a>Compatibility with historic UNIX interfaces</h2>
          </div>
        </div>
      </div>
      <p>
        The Berkeley DB version 2 library provides backward-compatible interfaces for the
        historic UNIX <a href="../api_reference/C/dbm.html" class="olink">dbm</a>,
        <a href="../api_reference/C/dbm.html" class="olink">ndbm</a> and
        <a href="../api_reference/C/hsearch.html" class="olink">hsearch</a> interfaces.
        It also provides a backward-compatible interface for the historic Berkeley DB 1.85
        release.
      </p>
      <p>
        Berkeley DB version 2 does not provide database compatibility for any of the previous
        interfaces, and existing databases must be converted manually. To convert existing
        databases from the Berkeley DB 1.85 format to the Berkeley DB version 2 format, review
        the <a href="../api_reference/C/db_dump.html" class="olink">db_dump185</a> utility and
        the <a href="../api_reference/C/db_load.html" class="olink">db_load</a> utility
        information. No utilities are provided to convert UNIX
        <a href="../api_reference/C/dbm.html" class="olink">dbm</a>,
        <a href="../api_reference/C/dbm.html" class="olink">ndbm</a> or
        <a href="../api_reference/C/hsearch.html" class="olink">hsearch</a> databases.
      </p>
    </div>
    <div class="navfooter">
      <hr />
      <table width="100%" summary="Navigation footer">
        <tr>
          <td width="40%" align="left"><a accesskey="p" href="program_copy.html">Prev</a> </td>
          <td width="20%" align="center">
            <a accesskey="u" href="program.html">Up</a>
          </td>
          <td width="40%" align="right"> <a accesskey="n" href="program_runtime.html">Next</a></td>
        </tr>
        <tr>
          <td width="40%" align="left" valign="top">Copying or moving databases </td>
          <td width="20%" align="center">
            <a accesskey="h" href="index.html">Home</a>
          </td>
          <td width="40%" align="right" valign="top"> Run-time configuration</td>
        </tr>
      </table>
    </div>
  </body>
</html>
malin1993ml/h-store
third_party/cpp/berkeleydb/docs/programmer_reference/program_compatible.html
HTML
gpl-3.0
3,798
48.842105
199
0.606125
false
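The page above defers the 1.85-to-2.x conversion to db_dump185 and db_load; the usual pipeline, sketched here with placeholder file names, dumps the old database as text and reloads it in the new format:

    db_dump185 old-1.85.db | db_load new-2.x.db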
// Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
// License: GNU General Public License v3. See license.txt

frappe.ui.form.on("Production Order", {
	setup: function(frm) {
		frm.custom_make_buttons = {
			'Timesheet': 'Make Timesheet',
			'Stock Entry': 'Make Stock Entry',
		}

		// Set query for warehouses
		frm.set_query("wip_warehouse", function(doc) {
			return {
				filters: {
					'company': frm.doc.company,
				}
			}
		});

		frm.set_query("source_warehouse", "required_items", function() {
			return {
				filters: {
					'company': frm.doc.company,
				}
			}
		});

		frm.set_query("fg_warehouse", function() {
			return {
				filters: {
					'company': frm.doc.company,
					'is_group': 0
				}
			}
		});

		frm.set_query("scrap_warehouse", function() {
			return {
				filters: {
					'company': frm.doc.company,
					'is_group': 0
				}
			}
		});

		// Set query for BOM
		frm.set_query("bom_no", function() {
			if (frm.doc.production_item) {
				return {
					query: "erpnext.controllers.queries.bom",
					filters: {item: cstr(frm.doc.production_item)}
				}
			} else msgprint(__("Please enter Production Item first"));
		});

		// Set query for FG Item
		frm.set_query("production_item", function() {
			return {
				query: "erpnext.controllers.queries.item_query",
				filters: {
					'is_stock_item': 1,
				}
			}
		});

		// Set query for FG Item
		frm.set_query("project", function() {
			return {
				filters: [
					['Project', 'status', 'not in', 'Completed, Cancelled']
				]
			}
		});
	},

	onload: function(frm) {
		if (!frm.doc.status)
			frm.doc.status = 'Draft';

		frm.add_fetch("sales_order", "project", "project");

		if(frm.doc.__islocal) {
			frm.set_value({
				"actual_start_date": "",
				"actual_end_date": ""
			});
			erpnext.production_order.set_default_warehouse(frm);
		}

		// formatter for production order operation
		frm.set_indicator_formatter('operation',
			function(doc) { return (frm.doc.qty==doc.completed_qty) ? "green" : "orange" });
	},

	refresh: function(frm) {
		erpnext.toggle_naming_series();
		erpnext.production_order.set_custom_buttons(frm);
		frm.set_intro("");

		if (frm.doc.docstatus === 0 && !frm.doc.__islocal) {
			frm.set_intro(__("Submit this Production Order for further processing."));
		}

		if (frm.doc.docstatus===1) {
			frm.trigger('show_progress');
		}

		if(frm.doc.docstatus == 1 && frm.doc.status != 'Stopped'){
			frm.add_custom_button(__('Make Timesheet'), function(){
				frappe.model.open_mapped_doc({
					method: "erpnext.manufacturing.doctype.production_order.production_order.make_new_timesheet",
					frm: cur_frm
				})
			})
		}
	},

	show_progress: function(frm) {
		var bars = [];
		var message = '';
		var added_min = false;

		// produced qty
		var title = __('{0} items produced', [frm.doc.produced_qty]);
		bars.push({
			'title': title,
			'width': (frm.doc.produced_qty / frm.doc.qty * 100) + '%',
			'progress_class': 'progress-bar-success'
		});
		if (bars[0].width == '0%') {
			bars[0].width = '0.5%';
			added_min = 0.5;
		}
		message = title;

		// pending qty
		if(!frm.doc.skip_transfer){
			var pending_complete = frm.doc.material_transferred_for_manufacturing - frm.doc.produced_qty;
			if(pending_complete) {
				var title = __('{0} items in progress', [pending_complete]);
				bars.push({
					'title': title,
					'width': ((pending_complete / frm.doc.qty * 100) - added_min) + '%',
					'progress_class': 'progress-bar-warning'
				})
				message = message + '. ' + title;
			}
		}
		frm.dashboard.add_progress(__('Status'), bars, message);
	},

	production_item: function(frm) {
		if (frm.doc.production_item) {
			frappe.call({
				method: "erpnext.manufacturing.doctype.production_order.production_order.get_item_details",
				args: {
					item: frm.doc.production_item,
					project: frm.doc.project
				},
				callback: function(r) {
					if(r.message) {
						erpnext.in_production_item_onchange = true;
						$.each(["description", "stock_uom", "project", "bom_no"], function(i, field) {
							frm.set_value(field, r.message[field]);
						});

						if(r.message["set_scrap_wh_mandatory"]){
							frm.toggle_reqd("scrap_warehouse", true);
						}
						erpnext.in_production_item_onchange = false;
					}
				}
			});
		}
	},

	project: function(frm) {
		if(!erpnext.in_production_item_onchange) {
			frm.trigger("production_item");
		}
	},

	bom_no: function(frm) {
		return frm.call({
			doc: frm.doc,
			method: "get_items_and_operations_from_bom",
			callback: function(r) {
				if(r.message["set_scrap_wh_mandatory"]){
					frm.toggle_reqd("scrap_warehouse", true);
				}
			}
		});
	},

	use_multi_level_bom: function(frm) {
		if(frm.doc.bom_no) {
			frm.trigger("bom_no");
		}
	},

	qty: function(frm) {
		frm.trigger('bom_no');
	},

	before_submit: function(frm) {
		frm.toggle_reqd(["fg_warehouse", "wip_warehouse"], true);
		frm.fields_dict.required_items.grid.toggle_reqd("source_warehouse", true);
	}
});

frappe.ui.form.on("Production Order Item", {
	source_warehouse: function(frm, cdt, cdn) {
		var row = locals[cdt][cdn];
		if(!row.item_code) {
			frappe.throw(__("Please set the Item Code first"));
		} else if(row.source_warehouse) {
			frappe.call({
				"method": "erpnext.stock.utils.get_latest_stock_qty",
				args: {
					item_code: row.item_code,
					warehouse: row.source_warehouse
				},
				callback: function (r) {
					frappe.model.set_value(row.doctype, row.name,
						"available_qty_at_source_warehouse", r.message);
				}
			})
		}
	}
})

frappe.ui.form.on("Production Order Operation", {
	workstation: function(frm, cdt, cdn) {
		var d = locals[cdt][cdn];
		if (d.workstation) {
			frappe.call({
				"method": "frappe.client.get",
				args: {
					doctype: "Workstation",
					name: d.workstation
				},
				callback: function (data) {
					frappe.model.set_value(d.doctype, d.name, "hour_rate", data.message.hour_rate);
					erpnext.production_order.calculate_cost(frm.doc);
					erpnext.production_order.calculate_total_cost(frm);
				}
			})
		}
	},
	time_in_mins: function(frm, cdt, cdn) {
		erpnext.production_order.calculate_cost(frm.doc);
		erpnext.production_order.calculate_total_cost(frm);
	},
});

erpnext.production_order = {
	set_custom_buttons: function(frm) {
		var doc = frm.doc;
		if (doc.docstatus === 1) {
			if (doc.status != 'Stopped' && doc.status != 'Completed') {
				frm.add_custom_button(__('Stop'), function() {
					erpnext.production_order.stop_production_order(frm, "Stopped");
				}, __("Status"));
			} else if (doc.status == 'Stopped') {
				frm.add_custom_button(__('Re-open'), function() {
					erpnext.production_order.stop_production_order(frm, "Resumed");
				}, __("Status"));
			}

			if(!frm.doc.skip_transfer){
				if ((flt(doc.material_transferred_for_manufacturing) < flt(doc.qty))
						&& frm.doc.status != 'Stopped') {
					frm.has_start_btn = true;
					var start_btn = frm.add_custom_button(__('Start'), function() {
						erpnext.production_order.make_se(frm, 'Material Transfer for Manufacture');
					});
					start_btn.addClass('btn-primary');
				}
			}

			if(!frm.doc.skip_transfer){
				if ((flt(doc.produced_qty) < flt(doc.material_transferred_for_manufacturing))
						&& frm.doc.status != 'Stopped') {
					frm.has_finish_btn = true;
					var finish_btn = frm.add_custom_button(__('Finish'), function() {
						erpnext.production_order.make_se(frm, 'Manufacture');
					});

					if(doc.material_transferred_for_manufacturing==doc.qty) {
						// all materials transferred for manufacturing, make this primary
						finish_btn.addClass('btn-primary');
					}
				}
			} else {
				if ((flt(doc.produced_qty) < flt(doc.qty)) && frm.doc.status != 'Stopped') {
					frm.has_finish_btn = true;
					var finish_btn = frm.add_custom_button(__('Finish'), function() {
						erpnext.production_order.make_se(frm, 'Manufacture');
					});
					finish_btn.addClass('btn-primary');
				}
			}
		}
	},

	calculate_cost: function(doc) {
		if (doc.operations){
			var op = doc.operations;
			doc.planned_operating_cost = 0.0;
			for(var i=0;i<op.length;i++) {
				var planned_operating_cost = flt(flt(op[i].hour_rate) * flt(op[i].time_in_mins) / 60, 2);
				frappe.model.set_value('Production Order Operation', op[i].name,
					"planned_operating_cost", planned_operating_cost);
				doc.planned_operating_cost += planned_operating_cost;
			}
			refresh_field('planned_operating_cost');
		}
	},

	calculate_total_cost: function(frm) {
		var variable_cost = frm.doc.actual_operating_cost ?
			flt(frm.doc.actual_operating_cost) : flt(frm.doc.planned_operating_cost)
		frm.set_value("total_operating_cost", (flt(frm.doc.additional_operating_cost) + variable_cost))
	},

	set_default_warehouse: function(frm) {
		if (!(frm.doc.wip_warehouse || frm.doc.fg_warehouse)) {
			frappe.call({
				method: "erpnext.manufacturing.doctype.production_order.production_order.get_default_warehouse",
				callback: function(r) {
					if(!r.exe) {
						frm.set_value("wip_warehouse", r.message.wip_warehouse);
						frm.set_value("fg_warehouse", r.message.fg_warehouse)
					}
				}
			});
		}
	},

	make_se: function(frm, purpose) {
		if(!frm.doc.skip_transfer){
			var max = (purpose === "Manufacture") ?
				flt(frm.doc.material_transferred_for_manufacturing) - flt(frm.doc.produced_qty) :
				flt(frm.doc.qty) - flt(frm.doc.material_transferred_for_manufacturing);
		} else {
			var max = flt(frm.doc.qty) - flt(frm.doc.produced_qty);
		}
		max = flt(max, precision("qty"));

		frappe.prompt({fieldtype:"Float", label: __("Qty for {0}", [purpose]), fieldname:"qty",
			description: __("Max: {0}", [max]), 'default': max },
			function(data) {
				if(data.qty > max) {
					frappe.msgprint(__("Quantity must not be more than {0}", [max]));
					return;
				}
				frappe.call({
					method:"erpnext.manufacturing.doctype.production_order.production_order.make_stock_entry",
					args: {
						"production_order_id": frm.doc.name,
						"purpose": purpose,
						"qty": data.qty
					},
					callback: function(r) {
						var doclist = frappe.model.sync(r.message);
						frappe.set_route("Form", doclist[0].doctype, doclist[0].name);
					}
				});
			}, __("Select Quantity"), __("Make"));
	},

	stop_production_order: function(frm, status) {
		frappe.call({
			method: "erpnext.manufacturing.doctype.production_order.production_order.stop_unstop",
			args: {
				production_order: frm.doc.name,
				status: status
			},
			callback: function(r) {
				if(r.message) {
					frm.set_value("status", r.message);
					frm.reload_doc();
				}
			}
		})
	}
}
bohlian/erpnext
erpnext/manufacturing/doctype/production_order/production_order.js
JavaScript
gpl-3.0
10,598
26.385013
100
0.616531
false
# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make <target>' where <target> is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." 
.PHONY: qthelp
qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ACALIB.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ACALIB.qhc"

.PHONY: applehelp
applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	      "~/Library/Documentation/Help or install it in your application" \
	      "bundle."

.PHONY: devhelp
devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/ACALIB"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ACALIB"
	@echo "# devhelp"

.PHONY: epub
epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

.PHONY: latex
latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

.PHONY: latexpdf
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: latexpdfja
latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: text
text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

.PHONY: man
man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

.PHONY: texinfo
texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

.PHONY: info
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

.PHONY: gettext
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

.PHONY: changes
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

.PHONY: linkcheck
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

.PHONY: doctest
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
.PHONY: coverage
coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	      "results in $(BUILDDIR)/coverage/python.txt."

.PHONY: xml
xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

.PHONY: pseudoxml
pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
cvalenzu/acalib
doc/Makefile
Makefile
gpl-3.0
7,656
34.444444
343
0.705982
false