code_text (stringlengths 604-999k) | repo_name (stringlengths 4-100) | file_path (stringlengths 4-873) | language (stringclasses, 23 values) | license (stringclasses, 15 values) | size (int32 1.02k-999k)
---|---|---|---|---|---|
/**
* Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
**/
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/compat.h>
#define STACK_SP(STACK) *(STACK)
#define STACK_LR64(STACK) *((unsigned long *)(STACK) + 2)
#define STACK_LR32(STACK) *((unsigned int *)(STACK) + 1)
#ifdef CONFIG_PPC64
#define STACK_LR(STACK) STACK_LR64(STACK)
#else
#define STACK_LR(STACK) STACK_LR32(STACK)
#endif
static unsigned int user_getsp32(unsigned int sp, int is_first)
{
unsigned int stack_frame[2];
void __user *p = compat_ptr(sp);
if (!access_ok(VERIFY_READ, p, sizeof(stack_frame)))
return 0;
/*
* The most likely reason for this is that we returned -EFAULT,
* which means that we've done all that we can do from
* interrupt context.
*/
if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame)))
return 0;
if (!is_first)
oprofile_add_trace(STACK_LR32(stack_frame));
/*
* We do not enforce increasing stack addresses here because
* we may transition to a different stack, eg a signal handler.
*/
return STACK_SP(stack_frame);
}
#ifdef CONFIG_PPC64
static unsigned long user_getsp64(unsigned long sp, int is_first)
{
unsigned long stack_frame[3];
if (!access_ok(VERIFY_READ, (void __user *)sp, sizeof(stack_frame)))
return 0;
if (__copy_from_user_inatomic(stack_frame, (void __user *)sp,
sizeof(stack_frame)))
return 0;
if (!is_first)
oprofile_add_trace(STACK_LR64(stack_frame));
return STACK_SP(stack_frame);
}
#endif
static unsigned long kernel_getsp(unsigned long sp, int is_first)
{
unsigned long *stack_frame = (unsigned long *)sp;
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return 0;
if (!is_first)
oprofile_add_trace(STACK_LR(stack_frame));
/*
* We do not enforce increasing stack addresses here because
* we might be transitioning from an interrupt stack to a kernel
* stack. validate_sp() is designed to understand this, so just
* use it.
*/
return STACK_SP(stack_frame);
}
void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
{
unsigned long sp = regs->gpr[1];
int first_frame = 1;
/* We ditch the top stack frame, so we need to loop through an extra time */
depth += 1;
if (!user_mode(regs)) {
while (depth--) {
sp = kernel_getsp(sp, first_frame);
if (!sp)
break;
first_frame = 0;
}
} else {
#ifdef CONFIG_PPC64
if (!is_32bit_task()) {
while (depth--) {
sp = user_getsp64(sp, first_frame);
if (!sp)
break;
first_frame = 0;
}
return;
}
#endif
while (depth--) {
sp = user_getsp32(sp, first_frame);
if (!sp)
break;
first_frame = 0;
}
}
}
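/*
 * Illustrative only (not used by this file): a minimal sketch of the frame
 * layout the STACK_* macros above assume -- the back chain (the previous
 * stack pointer) lives at offset 0 of each frame, and the saved LR at word 2
 * on 64-bit, word 1 on 32-bit.  Guarded out so it cannot affect the build.
 */
#if 0
static unsigned long demo_next_frame64(unsigned long sp, unsigned long *ret_addr)
{
	unsigned long *frame = (unsigned long *)sp;

	*ret_addr = frame[2];	/* caller's saved link register */
	return frame[0];	/* back chain: previous frame's stack pointer */
}
#endif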
| oxp-edward/linux-3.5 | arch/powerpc/oprofile/backtrace.c | C | gpl-2.0 | 2,953 |
#!/bin/bash
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# Author/Copyright(c): 2009, Thomas Renninger <trenn@suse.de>, Novell Inc.
# Ondemand up_threshold and sampling rate test script for cpufreq-bench
# microbenchmark.
# Modify the general variables at the top or extend or copy out parts
# if you want to test other things
#
# The default in recent kernels is 95; before the micro-accounting patches
# it was 80, compare with git commit 808009131046b62ac434dbc796
UP_THRESHOLD="60 80 95"
# Depending on the kernel and the hardware, the sampling rate may be
# restricted and cannot be set that low...
# E.g. before git commit cef9615a853ebc4972084f7 the minimum sampling rate
# that could be set was 80000 with CONFIG_HZ=250
SAMPLING_RATE="20000 80000"
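# Note (hedged): depending on the kernel, ondemand also exposes the lowest
# sampling rate it will accept; checking it before a run shows whether the
# values above will be silently clamped (assumes the same sysfs layout as
# used below):
#   cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate_min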
function measure()
{
local -i up_threshold_set
local -i sampling_rate_set
for up_threshold in $UP_THRESHOLD;do
for sampling_rate in $SAMPLING_RATE;do
# Set values in sysfs
echo $up_threshold >/sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
echo $sampling_rate >/sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate
up_threshold_set=$(cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold)
sampling_rate_set=$(cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate)
# Verify set values in sysfs
if [ ${up_threshold_set} -eq ${up_threshold} ];then
echo "up_threshold: $up_threshold, set in sysfs: ${up_threshold_set}"
else
echo "WARNING: Tried to set up_threshold: $up_threshold, set in sysfs: ${up_threshold_set}"
fi
if [ ${sampling_rate_set} -eq ${sampling_rate} ];then
echo "sampling_rate: $sampling_rate, set in sysfs: ${sampling_rate_set}"
else
echo "WARNING: Tried to set sampling_rate: $sampling_rate, set in sysfs: ${sampling_rate_set}"
fi
# Benchmark
cpufreq-bench -o /var/log/cpufreq-bench/up_threshold_${up_threshold}_sampling_rate_${sampling_rate}
done
done
}
function create_plots()
{
local command
for up_threshold in $UP_THRESHOLD;do
command="cpufreq-bench_plot.sh -o \"sampling_rate_${SAMPLING_RATE}_up_threshold_${up_threshold}\" -t \"Ondemand sampling_rate: ${SAMPLING_RATE} comparison - Up_threshold: $up_threshold %\""
for sampling_rate in $SAMPLING_RATE;do
command="${command} /var/log/cpufreq-bench/up_threshold_${up_threshold}_sampling_rate_${sampling_rate}/* \"sampling_rate = $sampling_rate\""
done
echo $command
eval "$command"
echo
done
for sampling_rate in $SAMPLING_RATE;do
command="cpufreq-bench_plot.sh -o \"up_threshold_${UP_THRESHOLD}_sampling_rate_${sampling_rate}\" -t \"Ondemand up_threshold: ${UP_THRESHOLD} % comparison - sampling_rate: $sampling_rate\""
for up_threshold in $UP_THRESHOLD;do
command="${command} /var/log/cpufreq-bench/up_threshold_${up_threshold}_sampling_rate_${sampling_rate}/* \"up_threshold = $up_threshold\""
done
echo $command
eval "$command"
echo
done
command="cpufreq-bench_plot.sh -o \"up_threshold_${UP_THRESHOLD}_sampling_rate_${SAMPLING_RATE}\" -t \"Ondemand up_threshold: ${UP_THRESHOLD} and sampling_rate ${SAMPLING_RATE} comparison\""
for sampling_rate in $SAMPLING_RATE;do
for up_threshold in $UP_THRESHOLD;do
command="${command} /var/log/cpufreq-bench/up_threshold_${up_threshold}_sampling_rate_${sampling_rate}/* \"up_threshold = $up_threshold - sampling_rate = $sampling_rate\""
done
done
echo "$command"
eval "$command"
}
measure
create_plots | zarboz/android_kernel_flounder | tools/power/cpupower/bench/cpufreq-bench_script.sh | Shell | gpl-2.0 | 4,098 |
/*
Hopper VP-3028 driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "zl10353.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "hopper_vp3028.h"
struct zl10353_config hopper_vp3028_config = {
.demod_address = 0x0f,
};
#define MANTIS_MODEL_NAME "VP-3028"
#define MANTIS_DEV_TYPE "DVB-T"
static int vp3028_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
struct i2c_adapter *adapter = &mantis->adapter;
struct mantis_hwconfig *config = mantis->hwconfig;
int err = 0;
mantis_gpio_set_bits(mantis, config->reset, 0);
msleep(100);
err = mantis_frontend_power(mantis, POWER_ON);
msleep(100);
mantis_gpio_set_bits(mantis, config->reset, 1);
err = mantis_frontend_power(mantis, POWER_ON);
if (err == 0) {
msleep(250);
dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
fe = dvb_attach(zl10353_attach, &hopper_vp3028_config, adapter);
if (!fe)
return -1;
} else {
dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
adapter->name,
err);
return -EIO;
}
dprintk(MANTIS_ERROR, 1, "Done!");
return 0;
}
struct mantis_hwconfig vp3028_config = {
.model_name = MANTIS_MODEL_NAME,
.dev_type = MANTIS_DEV_TYPE,
.ts_size = MANTIS_TS_188,
.baud_rate = MANTIS_BAUD_9600,
.parity = MANTIS_PARITY_NONE,
.bytes = 0,
.frontend_init = vp3028_frontend_init,
.power = GPIF_A00,
.reset = GPIF_A03,
};
| Psycho666/Simplicity_trlte_kernel | drivers/media/pci/mantis/hopper_vp3028.c | C | gpl-2.0 | 2,297 |
/*
* arch/m32r/boot/compressed/m32r_sio.c
*
* 2003-02-12: Takeo Takahashi
* 2006-11-30: OPSPUT support by Kazuhiro Inaoka
*
*/
#include <asm/processor.h>
static void putc(char c);
static int puts(const char *s)
{
char c;
while ((c = *s++)) putc(c);
return 0;
}
#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_OPSPUT)
#include <asm/m32r.h>
#include <asm/io.h>
#define USE_FPGA_MAP 0
#if USE_FPGA_MAP
/*
* fpga configuration program uses MMU, and define map as same as
* M32104 uT-Engine board.
*/
#define BOOT_SIO0STS (volatile unsigned short *)(0x02c00000 + 0x20006)
#define BOOT_SIO0TXB (volatile unsigned short *)(0x02c00000 + 0x2000c)
#else
#undef PLD_BASE
#if defined(CONFIG_PLAT_OPSPUT)
#define PLD_BASE 0x1cc00000
#else
#define PLD_BASE 0xa4c00000
#endif
#define BOOT_SIO0STS PLD_ESIO0STS
#define BOOT_SIO0TXB PLD_ESIO0TXB
#endif
static void putc(char c)
{
while ((*BOOT_SIO0STS & 0x3) != 0x3)
cpu_relax();
if (c == '\n') {
*BOOT_SIO0TXB = '\r';
while ((*BOOT_SIO0STS & 0x3) != 0x3)
cpu_relax();
}
*BOOT_SIO0TXB = c;
}
#else /* !(CONFIG_PLAT_M32700UT) */
#if defined(CONFIG_PLAT_MAPPI2)
#define SIO0STS (volatile unsigned short *)(0xa0efd000 + 14)
#define SIO0TXB (volatile unsigned short *)(0xa0efd000 + 30)
#else
#define SIO0STS (volatile unsigned short *)(0x00efd000 + 14)
#define SIO0TXB (volatile unsigned short *)(0x00efd000 + 30)
#endif
static void putc(char c)
{
while ((*SIO0STS & 0x1) == 0)
cpu_relax();
if (c == '\n') {
*SIO0TXB = '\r';
while ((*SIO0STS & 0x1) == 0)
cpu_relax();
}
*SIO0TXB = c;
}
#endif
| thomhastings/linux-3.14 | arch/m32r/boot/compressed/m32r_sio.c | C | gpl-2.0 | 1,577 |
/*! jQuery v1.7.1 jquery.com | jquery.org/license */
(function(a,b){function cy(a){return f.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:!1}function cv(a){if(!ck[a]){var b=c.body,d=f("<"+a+">").appendTo(b),e=d.css("display");d.remove();if(e==="none"||e===""){cl||(cl=c.createElement("iframe"),cl.frameBorder=cl.width=cl.height=0),b.appendChild(cl);if(!cm||!cl.createElement)cm=(cl.contentWindow||cl.contentDocument).document,cm.write((c.compatMode==="CSS1Compat"?"<!doctype html>":"")+"<html><body>"),cm.close();d=cm.createElement(a),cm.body.appendChild(d),e=f.css(d,"display"),b.removeChild(cl)}ck[a]=e}return ck[a]}function cu(a,b){var c={};f.each(cq.concat.apply([],cq.slice(0,b)),function(){c[this]=a});return c}function ct(){cr=b}function cs(){setTimeout(ct,0);return cr=f.now()}function cj(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}function ci(){try{return new a.XMLHttpRequest}catch(b){}}function cc(a,c){a.dataFilter&&(c=a.dataFilter(c,a.dataType));var d=a.dataTypes,e={},g,h,i=d.length,j,k=d[0],l,m,n,o,p;for(g=1;g<i;g++){if(g===1)for(h in a.converters)typeof h=="string"&&(e[h.toLowerCase()]=a.converters[h]);l=k,k=d[g];if(k==="*")k=l;else if(l!=="*"&&l!==k){m=l+" "+k,n=e[m]||e["* "+k];if(!n){p=b;for(o in e){j=o.split(" ");if(j[0]===l||j[0]==="*"){p=e[j[1]+" "+k];if(p){o=e[o],o===!0?n=p:p===!0&&(n=o);break}}}}!n&&!p&&f.error("No conversion from "+m.replace(" "," to ")),n!==!0&&(c=n?n(c):p(o(c)))}}return c}function cb(a,c,d){var e=a.contents,f=a.dataTypes,g=a.responseFields,h,i,j,k;for(i in g)i in d&&(c[g[i]]=d[i]);while(f[0]==="*")f.shift(),h===b&&(h=a.mimeType||c.getResponseHeader("content-type"));if(h)for(i in e)if(e[i]&&e[i].test(h)){f.unshift(i);break}if(f[0]in d)j=f[0];else{for(i in d){if(!f[0]||a.converters[i+" "+f[0]]){j=i;break}k||(k=i)}j=j||k}if(j){j!==f[0]&&f.unshift(j);return d[j]}}function ca(a,b,c,d){if(f.isArray(b))f.each(b,function(b,e){c||bE.test(a)?d(a,e):ca(a+"["+(typeof e=="object"||f.isArray(e)?b:"")+"]",e,c,d)});else if(!c&&b!=null&&typeof b=="object")for(var e in b)ca(a+"["+e+"]",b[e],c,d);else d(a,b)}function b_(a,c){var d,e,g=f.ajaxSettings.flatOptions||{};for(d in c)c[d]!==b&&((g[d]?a:e||(e={}))[d]=c[d]);e&&f.extend(!0,a,e)}function b$(a,c,d,e,f,g){f=f||c.dataTypes[0],g=g||{},g[f]=!0;var h=a[f],i=0,j=h?h.length:0,k=a===bT,l;for(;i<j&&(k||!l);i++)l=h[i](c,d,e),typeof l=="string"&&(!k||g[l]?l=b:(c.dataTypes.unshift(l),l=b$(a,c,d,e,l,g)));(k||!l)&&!g["*"]&&(l=b$(a,c,d,e,"*",g));return l}function bZ(a){return function(b,c){typeof b!="string"&&(c=b,b="*");if(f.isFunction(c)){var d=b.toLowerCase().split(bP),e=0,g=d.length,h,i,j;for(;e<g;e++)h=d[e],j=/^\+/.test(h),j&&(h=h.substr(1)||"*"),i=a[h]=a[h]||[],i[j?"unshift":"push"](c)}}}function bC(a,b,c){var d=b==="width"?a.offsetWidth:a.offsetHeight,e=b==="width"?bx:by,g=0,h=e.length;if(d>0){if(c!=="border")for(;g<h;g++)c||(d-=parseFloat(f.css(a,"padding"+e[g]))||0),c==="margin"?d+=parseFloat(f.css(a,c+e[g]))||0:d-=parseFloat(f.css(a,"border"+e[g]+"Width"))||0;return d+"px"}d=bz(a,b,b);if(d<0||d==null)d=a.style[b]||0;d=parseFloat(d)||0;if(c)for(;g<h;g++)d+=parseFloat(f.css(a,"padding"+e[g]))||0,c!=="padding"&&(d+=parseFloat(f.css(a,"border"+e[g]+"Width"))||0),c==="margin"&&(d+=parseFloat(f.css(a,c+e[g]))||0);return d+"px"}function bp(a,b){b.src?f.ajax({url:b.src,async:!1,dataType:"script"}):f.globalEval((b.text||b.textContent||b.innerHTML||"").replace(bf,"/*$0*/")),b.parentNode&&b.parentNode.removeChild(b)}function bo(a){var b=c.createElement("div");bh.appendChild(b),b.innerHTML=a.outerHTML;return b.firstChild}function bn(a){var 
b=(a.nodeName||"").toLowerCase();b==="input"?bm(a):b!=="script"&&typeof a.getElementsByTagName!="undefined"&&f.grep(a.getElementsByTagName("input"),bm)}function bm(a){if(a.type==="checkbox"||a.type==="radio")a.defaultChecked=a.checked}function bl(a){return typeof a.getElementsByTagName!="undefined"?a.getElementsByTagName("*"):typeof a.querySelectorAll!="undefined"?a.querySelectorAll("*"):[]}function bk(a,b){var c;if(b.nodeType===1){b.clearAttributes&&b.clearAttributes(),b.mergeAttributes&&b.mergeAttributes(a),c=b.nodeName.toLowerCase();if(c==="object")b.outerHTML=a.outerHTML;else if(c!=="input"||a.type!=="checkbox"&&a.type!=="radio"){if(c==="option")b.selected=a.defaultSelected;else if(c==="input"||c==="textarea")b.defaultValue=a.defaultValue}else a.checked&&(b.defaultChecked=b.checked=a.checked),b.value!==a.value&&(b.value=a.value);b.removeAttribute(f.expando)}}function bj(a,b){if(b.nodeType===1&&!!f.hasData(a)){var c,d,e,g=f._data(a),h=f._data(b,g),i=g.events;if(i){delete h.handle,h.events={};for(c in i)for(d=0,e=i[c].length;d<e;d++)f.event.add(b,c+(i[c][d].namespace?".":"")+i[c][d].namespace,i[c][d],i[c][d].data)}h.data&&(h.data=f.extend({},h.data))}}function bi(a,b){return f.nodeName(a,"table")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function U(a){var b=V.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}function T(a,b,c){b=b||0;if(f.isFunction(b))return f.grep(a,function(a,d){var e=!!b.call(a,d,a);return e===c});if(b.nodeType)return f.grep(a,function(a,d){return a===b===c});if(typeof b=="string"){var d=f.grep(a,function(a){return a.nodeType===1});if(O.test(b))return f.filter(b,d,!c);b=f.filter(b,d)}return f.grep(a,function(a,d){return f.inArray(a,b)>=0===c})}function S(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function K(){return!0}function J(){return!1}function n(a,b,c){var d=b+"defer",e=b+"queue",g=b+"mark",h=f._data(a,d);h&&(c==="queue"||!f._data(a,e))&&(c==="mark"||!f._data(a,g))&&setTimeout(function(){!f._data(a,e)&&!f._data(a,g)&&(f.removeData(a,d,!0),h.fire())},0)}function m(a){for(var b in a){if(b==="data"&&f.isEmptyObject(a[b]))continue;if(b!=="toJSON")return!1}return!0}function l(a,c,d){if(d===b&&a.nodeType===1){var e="data-"+c.replace(k,"-$1").toLowerCase();d=a.getAttribute(e);if(typeof d=="string"){try{d=d==="true"?!0:d==="false"?!1:d==="null"?null:f.isNumeric(d)?parseFloat(d):j.test(d)?f.parseJSON(d):d}catch(g){}f.data(a,c,d)}else d=b}return d}function h(a){var b=g[a]={},c,d;a=a.split(/\s+/);for(c=0,d=a.length;c<d;c++)b[a[c]]=!0;return b}var c=a.document,d=a.navigator,e=a.location,f=function(){function J(){if(!e.isReady){try{c.documentElement.doScroll("left")}catch(a){setTimeout(J,1);return}e.ready()}}var e=function(a,b){return new e.fn.init(a,b,h)},f=a.jQuery,g=a.$,h,i=/^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,j=/\S/,k=/^\s+/,l=/\s+$/,m=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,n=/^[\],:{}\s]*$/,o=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,p=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,q=/(?:^|:|,)(?:\s*\[)+/g,r=/(webkit)[ \/]([\w.]+)/,s=/(opera)(?:.*version)?[ \/]([\w.]+)/,t=/(msie) ([\w.]+)/,u=/(mozilla)(?:.*? 
rv:([\w.]+))?/,v=/-([a-z]|[0-9])/ig,w=/^-ms-/,x=function(a,b){return(b+"").toUpperCase()},y=d.userAgent,z,A,B,C=Object.prototype.toString,D=Object.prototype.hasOwnProperty,E=Array.prototype.push,F=Array.prototype.slice,G=String.prototype.trim,H=Array.prototype.indexOf,I={};e.fn=e.prototype={constructor:e,init:function(a,d,f){var g,h,j,k;if(!a)return this;if(a.nodeType){this.context=this[0]=a,this.length=1;return this}if(a==="body"&&!d&&c.body){this.context=c,this[0]=c.body,this.selector=a,this.length=1;return this}if(typeof a=="string"){a.charAt(0)!=="<"||a.charAt(a.length-1)!==">"||a.length<3?g=i.exec(a):g=[null,a,null];if(g&&(g[1]||!d)){if(g[1]){d=d instanceof e?d[0]:d,k=d?d.ownerDocument||d:c,j=m.exec(a),j?e.isPlainObject(d)?(a=[c.createElement(j[1])],e.fn.attr.call(a,d,!0)):a=[k.createElement(j[1])]:(j=e.buildFragment([g[1]],[k]),a=(j.cacheable?e.clone(j.fragment):j.fragment).childNodes);return e.merge(this,a)}h=c.getElementById(g[2]);if(h&&h.parentNode){if(h.id!==g[2])return f.find(a);this.length=1,this[0]=h}this.context=c,this.selector=a;return this}return!d||d.jquery?(d||f).find(a):this.constructor(d).find(a)}if(e.isFunction(a))return f.ready(a);a.selector!==b&&(this.selector=a.selector,this.context=a.context);return e.makeArray(a,this)},selector:"",jquery:"1.7.1",length:0,size:function(){return this.length},toArray:function(){return F.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this[this.length+a]:this[a]},pushStack:function(a,b,c){var d=this.constructor();e.isArray(a)?E.apply(d,a):e.merge(d,a),d.prevObject=this,d.context=this.context,b==="find"?d.selector=this.selector+(this.selector?" ":"")+c:b&&(d.selector=this.selector+"."+b+"("+c+")");return d},each:function(a,b){return e.each(this,a,b)},ready:function(a){e.bindReady(),A.add(a);return this},eq:function(a){a=+a;return a===-1?this.slice(a):this.slice(a,a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(F.apply(this,arguments),"slice",F.call(arguments).join(","))},map:function(a){return this.pushStack(e.map(this,function(b,c){return a.call(b,c,b)}))},end:function(){return this.prevObject||this.constructor(null)},push:E,sort:[].sort,splice:[].splice},e.fn.init.prototype=e.fn,e.extend=e.fn.extend=function(){var a,c,d,f,g,h,i=arguments[0]||{},j=1,k=arguments.length,l=!1;typeof i=="boolean"&&(l=i,i=arguments[1]||{},j=2),typeof i!="object"&&!e.isFunction(i)&&(i={}),k===j&&(i=this,--j);for(;j<k;j++)if((a=arguments[j])!=null)for(c in a){d=i[c],f=a[c];if(i===f)continue;l&&f&&(e.isPlainObject(f)||(g=e.isArray(f)))?(g?(g=!1,h=d&&e.isArray(d)?d:[]):h=d&&e.isPlainObject(d)?d:{},i[c]=e.extend(l,h,f)):f!==b&&(i[c]=f)}return i},e.extend({noConflict:function(b){a.$===e&&(a.$=g),b&&a.jQuery===e&&(a.jQuery=f);return e},isReady:!1,readyWait:1,holdReady:function(a){a?e.readyWait++:e.ready(!0)},ready:function(a){if(a===!0&&!--e.readyWait||a!==!0&&!e.isReady){if(!c.body)return setTimeout(e.ready,1);e.isReady=!0;if(a!==!0&&--e.readyWait>0)return;A.fireWith(c,[e]),e.fn.trigger&&e(c).trigger("ready").off("ready")}},bindReady:function(){if(!A){A=e.Callbacks("once memory");if(c.readyState==="complete")return setTimeout(e.ready,1);if(c.addEventListener)c.addEventListener("DOMContentLoaded",B,!1),a.addEventListener("load",e.ready,!1);else if(c.attachEvent){c.attachEvent("onreadystatechange",B),a.attachEvent("onload",e.ready);var b=!1;try{b=a.frameElement==null}catch(d){}c.documentElement.doScroll&&b&&J()}}},isFunction:function(a){return 
e.type(a)==="function"},isArray:Array.isArray||function(a){return e.type(a)==="array"},isWindow:function(a){return a&&typeof a=="object"&&"setInterval"in a},isNumeric:function(a){return!isNaN(parseFloat(a))&&isFinite(a)},type:function(a){return a==null?String(a):I[C.call(a)]||"object"},isPlainObject:function(a){if(!a||e.type(a)!=="object"||a.nodeType||e.isWindow(a))return!1;try{if(a.constructor&&!D.call(a,"constructor")&&!D.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}var d;for(d in a);return d===b||D.call(a,d)},isEmptyObject:function(a){for(var b in a)return!1;return!0},error:function(a){throw new Error(a)},parseJSON:function(b){if(typeof b!="string"||!b)return null;b=e.trim(b);if(a.JSON&&a.JSON.parse)return a.JSON.parse(b);if(n.test(b.replace(o,"@").replace(p,"]").replace(q,"")))return(new Function("return "+b))();e.error("Invalid JSON: "+b)},parseXML:function(c){var d,f;try{a.DOMParser?(f=new DOMParser,d=f.parseFromString(c,"text/xml")):(d=new ActiveXObject("Microsoft.XMLDOM"),d.async="false",d.loadXML(c))}catch(g){d=b}(!d||!d.documentElement||d.getElementsByTagName("parsererror").length)&&e.error("Invalid XML: "+c);return d},noop:function(){},globalEval:function(b){b&&j.test(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(w,"ms-").replace(v,x)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,c,d){var f,g=0,h=a.length,i=h===b||e.isFunction(a);if(d){if(i){for(f in a)if(c.apply(a[f],d)===!1)break}else for(;g<h;)if(c.apply(a[g++],d)===!1)break}else if(i){for(f in a)if(c.call(a[f],f,a[f])===!1)break}else for(;g<h;)if(c.call(a[g],g,a[g++])===!1)break;return a},trim:G?function(a){return a==null?"":G.call(a)}:function(a){return a==null?"":(a+"").replace(k,"").replace(l,"")},makeArray:function(a,b){var c=b||[];if(a!=null){var d=e.type(a);a.length==null||d==="string"||d==="function"||d==="regexp"||e.isWindow(a)?E.call(c,a):e.merge(c,a)}return c},inArray:function(a,b,c){var d;if(b){if(H)return H.call(b,a,c);d=b.length,c=c?c<0?Math.max(0,d+c):c:0;for(;c<d;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,c){var d=a.length,e=0;if(typeof c.length=="number")for(var f=c.length;e<f;e++)a[d++]=c[e];else while(c[e]!==b)a[d++]=c[e++];a.length=d;return a},grep:function(a,b,c){var d=[],e;c=!!c;for(var f=0,g=a.length;f<g;f++)e=!!b(a[f],f),c!==e&&d.push(a[f]);return d},map:function(a,c,d){var f,g,h=[],i=0,j=a.length,k=a instanceof e||j!==b&&typeof j=="number"&&(j>0&&a[0]&&a[j-1]||j===0||e.isArray(a));if(k)for(;i<j;i++)f=c(a[i],i,d),f!=null&&(h[h.length]=f);else for(g in a)f=c(a[g],g,d),f!=null&&(h[h.length]=f);return h.concat.apply([],h)},guid:1,proxy:function(a,c){if(typeof c=="string"){var d=a[c];c=a,a=d}if(!e.isFunction(a))return b;var f=F.call(arguments,2),g=function(){return a.apply(c,f.concat(F.call(arguments)))};g.guid=a.guid=a.guid||g.guid||e.guid++;return g},access:function(a,c,d,f,g,h){var i=a.length;if(typeof c=="object"){for(var j in c)e.access(a,j,c[j],f,g,d);return a}if(d!==b){f=!h&&f&&e.isFunction(d);for(var k=0;k<i;k++)g(a[k],c,f?d.call(a[k],k,g(a[k],c)):d,h);return a}return i?g(a[0],c):b},now:function(){return(new Date).getTime()},uaMatch:function(a){a=a.toLowerCase();var b=r.exec(a)||s.exec(a)||t.exec(a)||a.indexOf("compatible")<0&&u.exec(a)||[];return{browser:b[1]||"",version:b[2]||"0"}},sub:function(){function a(b,c){return new 
a.fn.init(b,c)}e.extend(!0,a,this),a.superclass=this,a.fn=a.prototype=this(),a.fn.constructor=a,a.sub=this.sub,a.fn.init=function(d,f){f&&f instanceof e&&!(f instanceof a)&&(f=a(f));return e.fn.init.call(this,d,f,b)},a.fn.init.prototype=a.fn;var b=a(c);return a},browser:{}}),e.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(a,b){I["[object "+b+"]"]=b.toLowerCase()}),z=e.uaMatch(y),z.browser&&(e.browser[z.browser]=!0,e.browser.version=z.version),e.browser.webkit&&(e.browser.safari=!0),j.test(" ")&&(k=/^[\s\xA0]+/,l=/[\s\xA0]+$/),h=e(c),c.addEventListener?B=function(){c.removeEventListener("DOMContentLoaded",B,!1),e.ready()}:c.attachEvent&&(B=function(){c.readyState==="complete"&&(c.detachEvent("onreadystatechange",B),e.ready())});return e}(),g={};f.Callbacks=function(a){a=a?g[a]||h(a):{};var c=[],d=[],e,i,j,k,l,m=function(b){var d,e,g,h,i;for(d=0,e=b.length;d<e;d++)g=b[d],h=f.type(g),h==="array"?m(g):h==="function"&&(!a.unique||!o.has(g))&&c.push(g)},n=function(b,f){f=f||[],e=!a.memory||[b,f],i=!0,l=j||0,j=0,k=c.length;for(;c&&l<k;l++)if(c[l].apply(b,f)===!1&&a.stopOnFalse){e=!0;break}i=!1,c&&(a.once?e===!0?o.disable():c=[]:d&&d.length&&(e=d.shift(),o.fireWith(e[0],e[1])))},o={add:function(){if(c){var a=c.length;m(arguments),i?k=c.length:e&&e!==!0&&(j=a,n(e[0],e[1]))}return this},remove:function(){if(c){var b=arguments,d=0,e=b.length;for(;d<e;d++)for(var f=0;f<c.length;f++)if(b[d]===c[f]){i&&f<=k&&(k--,f<=l&&l--),c.splice(f--,1);if(a.unique)break}}return this},has:function(a){if(c){var b=0,d=c.length;for(;b<d;b++)if(a===c[b])return!0}return!1},empty:function(){c=[];return this},disable:function(){c=d=e=b;return this},disabled:function(){return!c},lock:function(){d=b,(!e||e===!0)&&o.disable();return this},locked:function(){return!d},fireWith:function(b,c){d&&(i?a.once||d.push([b,c]):(!a.once||!e)&&n(b,c));return this},fire:function(){o.fireWith(this,arguments);return this},fired:function(){return!!e}};return o};var i=[].slice;f.extend({Deferred:function(a){var b=f.Callbacks("once memory"),c=f.Callbacks("once memory"),d=f.Callbacks("memory"),e="pending",g={resolve:b,reject:c,notify:d},h={done:b.add,fail:c.add,progress:d.add,state:function(){return e},isResolved:b.fired,isRejected:c.fired,then:function(a,b,c){i.done(a).fail(b).progress(c);return this},always:function(){i.done.apply(i,arguments).fail.apply(i,arguments);return this},pipe:function(a,b,c){return f.Deferred(function(d){f.each({done:[a,"resolve"],fail:[b,"reject"],progress:[c,"notify"]},function(a,b){var c=b[0],e=b[1],g;f.isFunction(c)?i[a](function(){g=c.apply(this,arguments),g&&f.isFunction(g.promise)?g.promise().then(d.resolve,d.reject,d.notify):d[e+"With"](this===i?d:this,[g])}):i[a](d[e])})}).promise()},promise:function(a){if(a==null)a=h;else for(var b in h)a[b]=h[b];return a}},i=h.promise({}),j;for(j in g)i[j]=g[j].fire,i[j+"With"]=g[j].fireWith;i.done(function(){e="resolved"},c.disable,d.lock).fail(function(){e="rejected"},b.disable,d.lock),a&&a.call(i,i);return i},when:function(a){function m(a){return function(b){e[a]=arguments.length>1?i.call(arguments,0):b,j.notifyWith(k,e)}}function l(a){return function(c){b[a]=arguments.length>1?i.call(arguments,0):c,--g||j.resolveWith(j,b)}}var b=i.call(arguments,0),c=0,d=b.length,e=Array(d),g=d,h=d,j=d<=1&&a&&f.isFunction(a.promise)?a:f.Deferred(),k=j.promise();if(d>1){for(;c<d;c++)b[c]&&b[c].promise&&f.isFunction(b[c].promise)?b[c].promise().then(l(c),j.reject,m(c)):--g;g||j.resolveWith(j,b)}else j!==a&&j.resolveWith(j,d?[a]:[]);return 
k}}),f.support=function(){var b,d,e,g,h,i,j,k,l,m,n,o,p,q=c.createElement("div"),r=c.documentElement;q.setAttribute("className","t"),q.innerHTML=" <link/><table></table><a href='/a' style='top:1px;float:left;opacity:.55;'>a</a><input type='checkbox'/>",d=q.getElementsByTagName("*"),e=q.getElementsByTagName("a")[0];if(!d||!d.length||!e)return{};g=c.createElement("select"),h=g.appendChild(c.createElement("option")),i=q.getElementsByTagName("input")[0],b={leadingWhitespace:q.firstChild.nodeType===3,tbody:!q.getElementsByTagName("tbody").length,htmlSerialize:!!q.getElementsByTagName("link").length,style:/top/.test(e.getAttribute("style")),hrefNormalized:e.getAttribute("href")==="/a",opacity:/^0.55/.test(e.style.opacity),cssFloat:!!e.style.cssFloat,checkOn:i.value==="on",optSelected:h.selected,getSetAttribute:q.className!=="t",enctype:!!c.createElement("form").enctype,html5Clone:c.createElement("nav").cloneNode(!0).outerHTML!=="<:nav></:nav>",submitBubbles:!0,changeBubbles:!0,focusinBubbles:!1,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0},i.checked=!0,b.noCloneChecked=i.cloneNode(!0).checked,g.disabled=!0,b.optDisabled=!h.disabled;try{delete q.test}catch(s){b.deleteExpando=!1}!q.addEventListener&&q.attachEvent&&q.fireEvent&&(q.attachEvent("onclick",function(){b.noCloneEvent=!1}),q.cloneNode(!0).fireEvent("onclick")),i=c.createElement("input"),i.value="t",i.setAttribute("type","radio"),b.radioValue=i.value==="t",i.setAttribute("checked","checked"),q.appendChild(i),k=c.createDocumentFragment(),k.appendChild(q.lastChild),b.checkClone=k.cloneNode(!0).cloneNode(!0).lastChild.checked,b.appendChecked=i.checked,k.removeChild(i),k.appendChild(q),q.innerHTML="",a.getComputedStyle&&(j=c.createElement("div"),j.style.width="0",j.style.marginRight="0",q.style.width="2px",q.appendChild(j),b.reliableMarginRight=(parseInt((a.getComputedStyle(j,null)||{marginRight:0}).marginRight,10)||0)===0);if(q.attachEvent)for(o in{submit:1,change:1,focusin:1})n="on"+o,p=n in q,p||(q.setAttribute(n,"return;"),p=typeof q[n]=="function"),b[o+"Bubbles"]=p;k.removeChild(q),k=g=h=j=q=i=null,f(function(){var a,d,e,g,h,i,j,k,m,n,o,r=c.getElementsByTagName("body")[0];!r||(j=1,k="position:absolute;top:0;left:0;width:1px;height:1px;margin:0;",m="visibility:hidden;border:0;",n="style='"+k+"border:5px solid #000;padding:0;'",o="<div "+n+"><div></div></div>"+"<table "+n+" cellpadding='0' cellspacing='0'>"+"<tr><td></td></tr></table>",a=c.createElement("div"),a.style.cssText=m+"width:0;height:0;position:static;top:0;margin-top:"+j+"px",r.insertBefore(a,r.firstChild),q=c.createElement("div"),a.appendChild(q),q.innerHTML="<table><tr><td style='padding:0;border:0;display:none'></td><td>t</td></tr></table>",l=q.getElementsByTagName("td"),p=l[0].offsetHeight===0,l[0].style.display="",l[1].style.display="none",b.reliableHiddenOffsets=p&&l[0].offsetHeight===0,q.innerHTML="",q.style.width=q.style.paddingLeft="1px",f.boxModel=b.boxModel=q.offsetWidth===2,typeof q.style.zoom!="undefined"&&(q.style.display="inline",q.style.zoom=1,b.inlineBlockNeedsLayout=q.offsetWidth===2,q.style.display="",q.innerHTML="<div 
style='width:4px;'></div>",b.shrinkWrapBlocks=q.offsetWidth!==2),q.style.cssText=k+m,q.innerHTML=o,d=q.firstChild,e=d.firstChild,h=d.nextSibling.firstChild.firstChild,i={doesNotAddBorder:e.offsetTop!==5,doesAddBorderForTableAndCells:h.offsetTop===5},e.style.position="fixed",e.style.top="20px",i.fixedPosition=e.offsetTop===20||e.offsetTop===15,e.style.position=e.style.top="",d.style.overflow="hidden",d.style.position="relative",i.subtractsBorderForOverflowNotVisible=e.offsetTop===-5,i.doesNotIncludeMarginInBodyOffset=r.offsetTop!==j,r.removeChild(a),q=a=null,f.extend(b,i))});return b}();var j=/^(?:\{.*\}|\[.*\])$/,k=/([A-Z])/g;f.extend({cache:{},uuid:0,expando:"jQuery"+(f.fn.jquery+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(a){a=a.nodeType?f.cache[a[f.expando]]:a[f.expando];return!!a&&!m(a)},data:function(a,c,d,e){if(!!f.acceptData(a)){var g,h,i,j=f.expando,k=typeof c=="string",l=a.nodeType,m=l?f.cache:a,n=l?a[j]:a[j]&&j,o=c==="events";if((!n||!m[n]||!o&&!e&&!m[n].data)&&k&&d===b)return;n||(l?a[j]=n=++f.uuid:n=j),m[n]||(m[n]={},l||(m[n].toJSON=f.noop));if(typeof c=="object"||typeof c=="function")e?m[n]=f.extend(m[n],c):m[n].data=f.extend(m[n].data,c);g=h=m[n],e||(h.data||(h.data={}),h=h.data),d!==b&&(h[f.camelCase(c)]=d);if(o&&!h[c])return g.events;k?(i=h[c],i==null&&(i=h[f.camelCase(c)])):i=h;return i}},removeData:function(a,b,c){if(!!f.acceptData(a)){var d,e,g,h=f.expando,i=a.nodeType,j=i?f.cache:a,k=i?a[h]:h;if(!j[k])return;if(b){d=c?j[k]:j[k].data;if(d){f.isArray(b)||(b in d?b=[b]:(b=f.camelCase(b),b in d?b=[b]:b=b.split(" ")));for(e=0,g=b.length;e<g;e++)delete d[b[e]];if(!(c?m:f.isEmptyObject)(d))return}}if(!c){delete j[k].data;if(!m(j[k]))return}f.support.deleteExpando||!j.setInterval?delete j[k]:j[k]=null,i&&(f.support.deleteExpando?delete a[h]:a.removeAttribute?a.removeAttribute(h):a[h]=null)}},_data:function(a,b,c){return f.data(a,b,c,!0)},acceptData:function(a){if(a.nodeName){var b=f.noData[a.nodeName.toLowerCase()];if(b)return b!==!0&&a.getAttribute("classid")===b}return!0}}),f.fn.extend({data:function(a,c){var d,e,g,h=null;if(typeof a=="undefined"){if(this.length){h=f.data(this[0]);if(this[0].nodeType===1&&!f._data(this[0],"parsedAttrs")){e=this[0].attributes;for(var i=0,j=e.length;i<j;i++)g=e[i].name,g.indexOf("data-")===0&&(g=f.camelCase(g.substring(5)),l(this[0],g,h[g]));f._data(this[0],"parsedAttrs",!0)}}return h}if(typeof a=="object")return this.each(function(){f.data(this,a)});d=a.split("."),d[1]=d[1]?"."+d[1]:"";if(c===b){h=this.triggerHandler("getData"+d[1]+"!",[d[0]]),h===b&&this.length&&(h=f.data(this[0],a),h=l(this[0],a,h));return h===b&&d[1]?this.data(d[0]):h}return this.each(function(){var b=f(this),e=[d[0],c];b.triggerHandler("setData"+d[1]+"!",e),f.data(this,a,c),b.triggerHandler("changeData"+d[1]+"!",e)})},removeData:function(a){return this.each(function(){f.removeData(this,a)})}}),f.extend({_mark:function(a,b){a&&(b=(b||"fx")+"mark",f._data(a,b,(f._data(a,b)||0)+1))},_unmark:function(a,b,c){a!==!0&&(c=b,b=a,a=!1);if(b){c=c||"fx";var d=c+"mark",e=a?0:(f._data(b,d)||1)-1;e?f._data(b,d,e):(f.removeData(b,d,!0),n(b,c,"mark"))}},queue:function(a,b,c){var d;if(a){b=(b||"fx")+"queue",d=f._data(a,b),c&&(!d||f.isArray(c)?d=f._data(a,b,f.makeArray(c)):d.push(c));return d||[]}},dequeue:function(a,b){b=b||"fx";var 
c=f.queue(a,b),d=c.shift(),e={};d==="inprogress"&&(d=c.shift()),d&&(b==="fx"&&c.unshift("inprogress"),f._data(a,b+".run",e),d.call(a,function(){f.dequeue(a,b)},e)),c.length||(f.removeData(a,b+"queue "+b+".run",!0),n(a,b,"queue"))}}),f.fn.extend({queue:function(a,c){typeof a!="string"&&(c=a,a="fx");if(c===b)return f.queue(this[0],a);return this.each(function(){var b=f.queue(this,a,c);a==="fx"&&b[0]!=="inprogress"&&f.dequeue(this,a)})},dequeue:function(a){return this.each(function(){f.dequeue(this,a)})},delay:function(a,b){a=f.fx?f.fx.speeds[a]||a:a,b=b||"fx";return this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,c){function m(){--h||d.resolveWith(e,[e])}typeof a!="string"&&(c=a,a=b),a=a||"fx";var d=f.Deferred(),e=this,g=e.length,h=1,i=a+"defer",j=a+"queue",k=a+"mark",l;while(g--)if(l=f.data(e[g],i,b,!0)||(f.data(e[g],j,b,!0)||f.data(e[g],k,b,!0))&&f.data(e[g],i,f.Callbacks("once memory"),!0))h++,l.add(m);m();return d.promise()}});var o=/[\n\t\r]/g,p=/\s+/,q=/\r/g,r=/^(?:button|input)$/i,s=/^(?:button|input|object|select|textarea)$/i,t=/^a(?:rea)?$/i,u=/^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,v=f.support.getSetAttribute,w,x,y;f.fn.extend({attr:function(a,b){return f.access(this,a,b,!0,f.attr)},removeAttr:function(a){return this.each(function(){f.removeAttr(this,a)})},prop:function(a,b){return f.access(this,a,b,!0,f.prop)},removeProp:function(a){a=f.propFix[a]||a;return this.each(function(){try{this[a]=b,delete this[a]}catch(c){}})},addClass:function(a){var b,c,d,e,g,h,i;if(f.isFunction(a))return this.each(function(b){f(this).addClass(a.call(this,b,this.className))});if(a&&typeof a=="string"){b=a.split(p);for(c=0,d=this.length;c<d;c++){e=this[c];if(e.nodeType===1)if(!e.className&&b.length===1)e.className=a;else{g=" "+e.className+" ";for(h=0,i=b.length;h<i;h++)~g.indexOf(" "+b[h]+" ")||(g+=b[h]+" ");e.className=f.trim(g)}}}return this},removeClass:function(a){var c,d,e,g,h,i,j;if(f.isFunction(a))return this.each(function(b){f(this).removeClass(a.call(this,b,this.className))});if(a&&typeof a=="string"||a===b){c=(a||"").split(p);for(d=0,e=this.length;d<e;d++){g=this[d];if(g.nodeType===1&&g.className)if(a){h=(" "+g.className+" ").replace(o," ");for(i=0,j=c.length;i<j;i++)h=h.replace(" "+c[i]+" "," ");g.className=f.trim(h)}else g.className=""}}return this},toggleClass:function(a,b){var c=typeof a,d=typeof b=="boolean";if(f.isFunction(a))return this.each(function(c){f(this).toggleClass(a.call(this,c,this.className,b),b)});return this.each(function(){if(c==="string"){var e,g=0,h=f(this),i=b,j=a.split(p);while(e=j[g++])i=d?i:!h.hasClass(e),h[i?"addClass":"removeClass"](e)}else if(c==="undefined"||c==="boolean")this.className&&f._data(this,"__className__",this.className),this.className=this.className||a===!1?"":f._data(this,"__className__")||""})},hasClass:function(a){var b=" "+a+" ",c=0,d=this.length;for(;c<d;c++)if(this[c].nodeType===1&&(" "+this[c].className+" ").replace(o," ").indexOf(b)>-1)return!0;return!1},val:function(a){var c,d,e,g=this[0];{if(!!arguments.length){e=f.isFunction(a);return this.each(function(d){var g=f(this),h;if(this.nodeType===1){e?h=a.call(this,d,g.val()):h=a,h==null?h="":typeof h=="number"?h+="":f.isArray(h)&&(h=f.map(h,function(a){return a==null?"":a+""})),c=f.valHooks[this.nodeName.toLowerCase()]||f.valHooks[this.type];if(!c||!("set"in 
c)||c.set(this,h,"value")===b)this.value=h}})}if(g){c=f.valHooks[g.nodeName.toLowerCase()]||f.valHooks[g.type];if(c&&"get"in c&&(d=c.get(g,"value"))!==b)return d;d=g.value;return typeof d=="string"?d.replace(q,""):d==null?"":d}}}}),f.extend({valHooks:{option:{get:function(a){var b=a.attributes.value;return!b||b.specified?a.value:a.text}},select:{get:function(a){var b,c,d,e,g=a.selectedIndex,h=[],i=a.options,j=a.type==="select-one";if(g<0)return null;c=j?g:0,d=j?g+1:i.length;for(;c<d;c++){e=i[c];if(e.selected&&(f.support.optDisabled?!e.disabled:e.getAttribute("disabled")===null)&&(!e.parentNode.disabled||!f.nodeName(e.parentNode,"optgroup"))){b=f(e).val();if(j)return b;h.push(b)}}if(j&&!h.length&&i.length)return f(i[g]).val();return h},set:function(a,b){var c=f.makeArray(b);f(a).find("option").each(function(){this.selected=f.inArray(f(this).val(),c)>=0}),c.length||(a.selectedIndex=-1);return c}}},attrFn:{val:!0,css:!0,html:!0,text:!0,data:!0,width:!0,height:!0,offset:!0},attr:function(a,c,d,e){var g,h,i,j=a.nodeType;if(!!a&&j!==3&&j!==8&&j!==2){if(e&&c in f.attrFn)return f(a)[c](d);if(typeof a.getAttribute=="undefined")return f.prop(a,c,d);i=j!==1||!f.isXMLDoc(a),i&&(c=c.toLowerCase(),h=f.attrHooks[c]||(u.test(c)?x:w));if(d!==b){if(d===null){f.removeAttr(a,c);return}if(h&&"set"in h&&i&&(g=h.set(a,d,c))!==b)return g;a.setAttribute(c,""+d);return d}if(h&&"get"in h&&i&&(g=h.get(a,c))!==null)return g;g=a.getAttribute(c);return g===null?b:g}},removeAttr:function(a,b){var c,d,e,g,h=0;if(b&&a.nodeType===1){d=b.toLowerCase().split(p),g=d.length;for(;h<g;h++)e=d[h],e&&(c=f.propFix[e]||e,f.attr(a,e,""),a.removeAttribute(v?e:c),u.test(e)&&c in a&&(a[c]=!1))}},attrHooks:{type:{set:function(a,b){if(r.test(a.nodeName)&&a.parentNode)f.error("type property can't be changed");else if(!f.support.radioValue&&b==="radio"&&f.nodeName(a,"input")){var c=a.value;a.setAttribute("type",b),c&&(a.value=c);return b}}},value:{get:function(a,b){if(w&&f.nodeName(a,"button"))return w.get(a,b);return b in a?a.value:null},set:function(a,b,c){if(w&&f.nodeName(a,"button"))return w.set(a,b,c);a.value=b}}},propFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(a,c,d){var e,g,h,i=a.nodeType;if(!!a&&i!==3&&i!==8&&i!==2){h=i!==1||!f.isXMLDoc(a),h&&(c=f.propFix[c]||c,g=f.propHooks[c]);return d!==b?g&&"set"in g&&(e=g.set(a,d,c))!==b?e:a[c]=d:g&&"get"in g&&(e=g.get(a,c))!==null?e:a[c]}},propHooks:{tabIndex:{get:function(a){var c=a.getAttributeNode("tabindex");return c&&c.specified?parseInt(c.value,10):s.test(a.nodeName)||t.test(a.nodeName)&&a.href?0:b}}}}),f.attrHooks.tabindex=f.propHooks.tabIndex,x={get:function(a,c){var d,e=f.prop(a,c);return e===!0||typeof e!="boolean"&&(d=a.getAttributeNode(c))&&d.nodeValue!==!1?c.toLowerCase():b},set:function(a,b,c){var d;b===!1?f.removeAttr(a,c):(d=f.propFix[c]||c,d in a&&(a[d]=!0),a.setAttribute(c,c.toLowerCase()));return c}},v||(y={name:!0,id:!0},w=f.valHooks.button={get:function(a,c){var d;d=a.getAttributeNode(c);return d&&(y[c]?d.nodeValue!=="":d.specified)?d.nodeValue:b},set:function(a,b,d){var e=a.getAttributeNode(d);e||(e=c.createAttribute(d),a.setAttributeNode(e));return e.nodeValue=b+""}},f.attrHooks.tabindex.set=w.set,f.each(["width","height"],function(a,b){f.attrHooks[b]=f.extend(f.attrHooks[b],{set:function(a,c){if(c===""){a.setAttribute(b,"auto");return 
c}}})}),f.attrHooks.contenteditable={get:w.get,set:function(a,b,c){b===""&&(b="false"),w.set(a,b,c)}}),f.support.hrefNormalized||f.each(["href","src","width","height"],function(a,c){f.attrHooks[c]=f.extend(f.attrHooks[c],{get:function(a){var d=a.getAttribute(c,2);return d===null?b:d}})}),f.support.style||(f.attrHooks.style={get:function(a){return a.style.cssText.toLowerCase()||b},set:function(a,b){return a.style.cssText=""+b}}),f.support.optSelected||(f.propHooks.selected=f.extend(f.propHooks.selected,{get:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex);return null}})),f.support.enctype||(f.propFix.enctype="encoding"),f.support.checkOn||f.each(["radio","checkbox"],function(){f.valHooks[this]={get:function(a){return a.getAttribute("value")===null?"on":a.value}}}),f.each(["radio","checkbox"],function(){f.valHooks[this]=f.extend(f.valHooks[this],{set:function(a,b){if(f.isArray(b))return a.checked=f.inArray(f(a).val(),b)>=0}})});var z=/^(?:textarea|input|select)$/i,A=/^([^\.]*)?(?:\.(.+))?$/,B=/\bhover(\.\S+)?\b/,C=/^key/,D=/^(?:mouse|contextmenu)|click/,E=/^(?:focusinfocus|focusoutblur)$/,F=/^(\w*)(?:#([\w\-]+))?(?:\.([\w\-]+))?$/,G=function(a){var b=F.exec(a);b&&(b[1]=(b[1]||"").toLowerCase(),b[3]=b[3]&&new RegExp("(?:^|\\s)"+b[3]+"(?:\\s|$)"));return b},H=function(a,b){var c=a.attributes||{};return(!b[1]||a.nodeName.toLowerCase()===b[1])&&(!b[2]||(c.id||{}).value===b[2])&&(!b[3]||b[3].test((c["class"]||{}).value))},I=function(a){return f.event.special.hover?a:a.replace(B,"mouseenter$1 mouseleave$1")};
f.event={add:function(a,c,d,e,g){var h,i,j,k,l,m,n,o,p,q,r,s;if(!(a.nodeType===3||a.nodeType===8||!c||!d||!(h=f._data(a)))){d.handler&&(p=d,d=p.handler),d.guid||(d.guid=f.guid++),j=h.events,j||(h.events=j={}),i=h.handle,i||(h.handle=i=function(a){return typeof f!="undefined"&&(!a||f.event.triggered!==a.type)?f.event.dispatch.apply(i.elem,arguments):b},i.elem=a),c=f.trim(I(c)).split(" ");for(k=0;k<c.length;k++){l=A.exec(c[k])||[],m=l[1],n=(l[2]||"").split(".").sort(),s=f.event.special[m]||{},m=(g?s.delegateType:s.bindType)||m,s=f.event.special[m]||{},o=f.extend({type:m,origType:l[1],data:e,handler:d,guid:d.guid,selector:g,quick:G(g),namespace:n.join(".")},p),r=j[m];if(!r){r=j[m]=[],r.delegateCount=0;if(!s.setup||s.setup.call(a,e,n,i)===!1)a.addEventListener?a.addEventListener(m,i,!1):a.attachEvent&&a.attachEvent("on"+m,i)}s.add&&(s.add.call(a,o),o.handler.guid||(o.handler.guid=d.guid)),g?r.splice(r.delegateCount++,0,o):r.push(o),f.event.global[m]=!0}a=null}},global:{},remove:function(a,b,c,d,e){var g=f.hasData(a)&&f._data(a),h,i,j,k,l,m,n,o,p,q,r,s;if(!!g&&!!(o=g.events)){b=f.trim(I(b||"")).split(" ");for(h=0;h<b.length;h++){i=A.exec(b[h])||[],j=k=i[1],l=i[2];if(!j){for(j in o)f.event.remove(a,j+b[h],c,d,!0);continue}p=f.event.special[j]||{},j=(d?p.delegateType:p.bindType)||j,r=o[j]||[],m=r.length,l=l?new RegExp("(^|\\.)"+l.split(".").sort().join("\\.(?:.*\\.)?")+"(\\.|$)"):null;for(n=0;n<r.length;n++)s=r[n],(e||k===s.origType)&&(!c||c.guid===s.guid)&&(!l||l.test(s.namespace))&&(!d||d===s.selector||d==="**"&&s.selector)&&(r.splice(n--,1),s.selector&&r.delegateCount--,p.remove&&p.remove.call(a,s));r.length===0&&m!==r.length&&((!p.teardown||p.teardown.call(a,l)===!1)&&f.removeEvent(a,j,g.handle),delete o[j])}f.isEmptyObject(o)&&(q=g.handle,q&&(q.elem=null),f.removeData(a,["events","handle"],!0))}},customEvent:{getData:!0,setData:!0,changeData:!0},trigger:function(c,d,e,g){if(!e||e.nodeType!==3&&e.nodeType!==8){var h=c.type||c,i=[],j,k,l,m,n,o,p,q,r,s;if(E.test(h+f.event.triggered))return;h.indexOf("!")>=0&&(h=h.slice(0,-1),k=!0),h.indexOf(".")>=0&&(i=h.split("."),h=i.shift(),i.sort());if((!e||f.event.customEvent[h])&&!f.event.global[h])return;c=typeof c=="object"?c[f.expando]?c:new f.Event(h,c):new f.Event(h),c.type=h,c.isTrigger=!0,c.exclusive=k,c.namespace=i.join("."),c.namespace_re=c.namespace?new RegExp("(^|\\.)"+i.join("\\.(?:.*\\.)?")+"(\\.|$)"):null,o=h.indexOf(":")<0?"on"+h:"";if(!e){j=f.cache;for(l in j)j[l].events&&j[l].events[h]&&f.event.trigger(c,d,j[l].handle.elem,!0);return}c.result=b,c.target||(c.target=e),d=d!=null?f.makeArray(d):[],d.unshift(c),p=f.event.special[h]||{};if(p.trigger&&p.trigger.apply(e,d)===!1)return;r=[[e,p.bindType||h]];if(!g&&!p.noBubble&&!f.isWindow(e)){s=p.delegateType||h,m=E.test(s+h)?e:e.parentNode,n=null;for(;m;m=m.parentNode)r.push([m,s]),n=m;n&&n===e.ownerDocument&&r.push([n.defaultView||n.parentWindow||a,s])}for(l=0;l<r.length&&!c.isPropagationStopped();l++)m=r[l][0],c.type=r[l][1],q=(f._data(m,"events")||{})[c.type]&&f._data(m,"handle"),q&&q.apply(m,d),q=o&&m[o],q&&f.acceptData(m)&&q.apply(m,d)===!1&&c.preventDefault();c.type=h,!g&&!c.isDefaultPrevented()&&(!p._default||p._default.apply(e.ownerDocument,d)===!1)&&(h!=="click"||!f.nodeName(e,"a"))&&f.acceptData(e)&&o&&e[h]&&(h!=="focus"&&h!=="blur"||c.target.offsetWidth!==0)&&!f.isWindow(e)&&(n=e[o],n&&(e[o]=null),f.event.triggered=h,e[h](),f.event.triggered=b,n&&(e[o]=n));return c.result}},dispatch:function(c){c=f.event.fix(c||a.event);var 
d=(f._data(this,"events")||{})[c.type]||[],e=d.delegateCount,g=[].slice.call(arguments,0),h=!c.exclusive&&!c.namespace,i=[],j,k,l,m,n,o,p,q,r,s,t;g[0]=c,c.delegateTarget=this;if(e&&!c.target.disabled&&(!c.button||c.type!=="click")){m=f(this),m.context=this.ownerDocument||this;for(l=c.target;l!=this;l=l.parentNode||this){o={},q=[],m[0]=l;for(j=0;j<e;j++)r=d[j],s=r.selector,o[s]===b&&(o[s]=r.quick?H(l,r.quick):m.is(s)),o[s]&&q.push(r);q.length&&i.push({elem:l,matches:q})}}d.length>e&&i.push({elem:this,matches:d.slice(e)});for(j=0;j<i.length&&!c.isPropagationStopped();j++){p=i[j],c.currentTarget=p.elem;for(k=0;k<p.matches.length&&!c.isImmediatePropagationStopped();k++){r=p.matches[k];if(h||!c.namespace&&!r.namespace||c.namespace_re&&c.namespace_re.test(r.namespace))c.data=r.data,c.handleObj=r,n=((f.event.special[r.origType]||{}).handle||r.handler).apply(p.elem,g),n!==b&&(c.result=n,n===!1&&(c.preventDefault(),c.stopPropagation()))}}return c.result},props:"attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){a.which==null&&(a.which=b.charCode!=null?b.charCode:b.keyCode);return a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,d){var e,f,g,h=d.button,i=d.fromElement;a.pageX==null&&d.clientX!=null&&(e=a.target.ownerDocument||c,f=e.documentElement,g=e.body,a.pageX=d.clientX+(f&&f.scrollLeft||g&&g.scrollLeft||0)-(f&&f.clientLeft||g&&g.clientLeft||0),a.pageY=d.clientY+(f&&f.scrollTop||g&&g.scrollTop||0)-(f&&f.clientTop||g&&g.clientTop||0)),!a.relatedTarget&&i&&(a.relatedTarget=i===a.target?d.toElement:i),!a.which&&h!==b&&(a.which=h&1?1:h&2?3:h&4?2:0);return a}},fix:function(a){if(a[f.expando])return a;var d,e,g=a,h=f.event.fixHooks[a.type]||{},i=h.props?this.props.concat(h.props):this.props;a=f.Event(g);for(d=i.length;d;)e=i[--d],a[e]=g[e];a.target||(a.target=g.srcElement||c),a.target.nodeType===3&&(a.target=a.target.parentNode),a.metaKey===b&&(a.metaKey=a.ctrlKey);return h.filter?h.filter(a,g):a},special:{ready:{setup:f.bindReady},load:{noBubble:!0},focus:{delegateType:"focusin"},blur:{delegateType:"focusout"},beforeunload:{setup:function(a,b,c){f.isWindow(this)&&(this.onbeforeunload=c)},teardown:function(a,b){this.onbeforeunload===b&&(this.onbeforeunload=null)}}},simulate:function(a,b,c,d){var e=f.extend(new f.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?f.event.trigger(e,null,b):f.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},f.event.handle=f.event.dispatch,f.removeEvent=c.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){a.detachEvent&&a.detachEvent("on"+b,c)},f.Event=function(a,b){if(!(this instanceof f.Event))return new f.Event(a,b);a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||a.returnValue===!1||a.getPreventDefault&&a.getPreventDefault()?K:J):this.type=a,b&&f.extend(this,b),this.timeStamp=a&&a.timeStamp||f.now(),this[f.expando]=!0},f.Event.prototype={preventDefault:function(){this.isDefaultPrevented=K;var a=this.originalEvent;!a||(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){this.isPropagationStopped=K;var 
a=this.originalEvent;!a||(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=K,this.stopPropagation()},isDefaultPrevented:J,isPropagationStopped:J,isImmediatePropagationStopped:J},f.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){f.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c=this,d=a.relatedTarget,e=a.handleObj,g=e.selector,h;if(!d||d!==c&&!f.contains(c,d))a.type=e.origType,h=e.handler.apply(this,arguments),a.type=b;return h}}}),f.support.submitBubbles||(f.event.special.submit={setup:function(){if(f.nodeName(this,"form"))return!1;f.event.add(this,"click._submit keypress._submit",function(a){var c=a.target,d=f.nodeName(c,"input")||f.nodeName(c,"button")?c.form:b;d&&!d._submit_attached&&(f.event.add(d,"submit._submit",function(a){this.parentNode&&!a.isTrigger&&f.event.simulate("submit",this.parentNode,a,!0)}),d._submit_attached=!0)})},teardown:function(){if(f.nodeName(this,"form"))return!1;f.event.remove(this,"._submit")}}),f.support.changeBubbles||(f.event.special.change={setup:function(){if(z.test(this.nodeName)){if(this.type==="checkbox"||this.type==="radio")f.event.add(this,"propertychange._change",function(a){a.originalEvent.propertyName==="checked"&&(this._just_changed=!0)}),f.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1,f.event.simulate("change",this,a,!0))});return!1}f.event.add(this,"beforeactivate._change",function(a){var b=a.target;z.test(b.nodeName)&&!b._change_attached&&(f.event.add(b,"change._change",function(a){this.parentNode&&!a.isSimulated&&!a.isTrigger&&f.event.simulate("change",this.parentNode,a,!0)}),b._change_attached=!0)})},handle:function(a){var b=a.target;if(this!==b||a.isSimulated||a.isTrigger||b.type!=="radio"&&b.type!=="checkbox")return a.handleObj.handler.apply(this,arguments)},teardown:function(){f.event.remove(this,"._change");return z.test(this.nodeName)}}),f.support.focusinBubbles||f.each({focus:"focusin",blur:"focusout"},function(a,b){var d=0,e=function(a){f.event.simulate(b,a.target,f.event.fix(a),!0)};f.event.special[b]={setup:function(){d++===0&&c.addEventListener(a,e,!0)},teardown:function(){--d===0&&c.removeEventListener(a,e,!0)}}}),f.fn.extend({on:function(a,c,d,e,g){var h,i;if(typeof a=="object"){typeof c!="string"&&(d=c,c=b);for(i in a)this.on(i,c,d,a[i],g);return this}d==null&&e==null?(e=c,d=c=b):e==null&&(typeof c=="string"?(e=d,d=b):(e=d,d=c,c=b));if(e===!1)e=J;else if(!e)return this;g===1&&(h=e,e=function(a){f().off(a);return h.apply(this,arguments)},e.guid=h.guid||(h.guid=f.guid++));return this.each(function(){f.event.add(this,a,e,d,c)})},one:function(a,b,c,d){return this.on.call(this,a,b,c,d,1)},off:function(a,c,d){if(a&&a.preventDefault&&a.handleObj){var e=a.handleObj;f(a.delegateTarget).off(e.namespace?e.type+"."+e.namespace:e.type,e.selector,e.handler);return this}if(typeof a=="object"){for(var g in a)this.off(g,c,a[g]);return this}if(c===!1||typeof c=="function")d=c,c=b;d===!1&&(d=J);return this.each(function(){f.event.remove(this,a,d,c)})},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},live:function(a,b,c){f(this.context).on(a,this.selector,b,c);return this},die:function(a,b){f(this.context).off(a,this.selector||"**",b);return this},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return arguments.length==1?this.off(a,"**"):this.off(b,a,c)},trigger:function(a,b){return 
this.each(function(){f.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0])return f.event.trigger(a,b,this[0],!0)},toggle:function(a){var b=arguments,c=a.guid||f.guid++,d=0,e=function(c){var e=(f._data(this,"lastToggle"+a.guid)||0)%d;f._data(this,"lastToggle"+a.guid,e+1),c.preventDefault();return b[e].apply(this,arguments)||!1};e.guid=c;while(d<b.length)b[d++].guid=c;return this.click(e)},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),f.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){f.fn[b]=function(a,c){c==null&&(c=a,a=null);return arguments.length>0?this.on(b,null,a,c):this.trigger(b)},f.attrFn&&(f.attrFn[b]=!0),C.test(b)&&(f.event.fixHooks[b]=f.event.keyHooks),D.test(b)&&(f.event.fixHooks[b]=f.event.mouseHooks)}),function(){function x(a,b,c,e,f,g){for(var h=0,i=e.length;h<i;h++){var j=e[h];if(j){var k=!1;j=j[a];while(j){if(j[d]===c){k=e[j.sizset];break}if(j.nodeType===1){g||(j[d]=c,j.sizset=h);if(typeof b!="string"){if(j===b){k=!0;break}}else if(m.filter(b,[j]).length>0){k=j;break}}j=j[a]}e[h]=k}}}function w(a,b,c,e,f,g){for(var h=0,i=e.length;h<i;h++){var j=e[h];if(j){var k=!1;j=j[a];while(j){if(j[d]===c){k=e[j.sizset];break}j.nodeType===1&&!g&&(j[d]=c,j.sizset=h);if(j.nodeName.toLowerCase()===b){k=j;break}j=j[a]}e[h]=k}}}var a=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,d="sizcache"+(Math.random()+"").replace(".",""),e=0,g=Object.prototype.toString,h=!1,i=!0,j=/\\/g,k=/\r\n/g,l=/\W/;[0,0].sort(function(){i=!1;return 0});var m=function(b,d,e,f){e=e||[],d=d||c;var h=d;if(d.nodeType!==1&&d.nodeType!==9)return[];if(!b||typeof b!="string")return e;var i,j,k,l,n,q,r,t,u=!0,v=m.isXML(d),w=[],x=b;do{a.exec(""),i=a.exec(x);if(i){x=i[3],w.push(i[1]);if(i[2]){l=i[3];break}}}while(i);if(w.length>1&&p.exec(b))if(w.length===2&&o.relative[w[0]])j=y(w[0]+w[1],d,f);else{j=o.relative[w[0]]?[d]:m(w.shift(),d);while(w.length)b=w.shift(),o.relative[b]&&(b+=w.shift()),j=y(b,j,f)}else{!f&&w.length>1&&d.nodeType===9&&!v&&o.match.ID.test(w[0])&&!o.match.ID.test(w[w.length-1])&&(n=m.find(w.shift(),d,v),d=n.expr?m.filter(n.expr,n.set)[0]:n.set[0]);if(d){n=f?{expr:w.pop(),set:s(f)}:m.find(w.pop(),w.length===1&&(w[0]==="~"||w[0]==="+")&&d.parentNode?d.parentNode:d,v),j=n.expr?m.filter(n.expr,n.set):n.set,w.length>0?k=s(j):u=!1;while(w.length)q=w.pop(),r=q,o.relative[q]?r=w.pop():q="",r==null&&(r=d),o.relative[q](k,r,v)}else k=w=[]}k||(k=j),k||m.error(q||b);if(g.call(k)==="[object Array]")if(!u)e.push.apply(e,k);else if(d&&d.nodeType===1)for(t=0;k[t]!=null;t++)k[t]&&(k[t]===!0||k[t].nodeType===1&&m.contains(d,k[t]))&&e.push(j[t]);else for(t=0;k[t]!=null;t++)k[t]&&k[t].nodeType===1&&e.push(j[t]);else s(k,e);l&&(m(l,h,e,f),m.uniqueSort(e));return e};m.uniqueSort=function(a){if(u){h=i,a.sort(u);if(h)for(var b=1;b<a.length;b++)a[b]===a[b-1]&&a.splice(b--,1)}return a},m.matches=function(a,b){return m(a,null,null,b)},m.matchesSelector=function(a,b){return m(b,null,null,[a]).length>0},m.find=function(a,b,c){var d,e,f,g,h,i;if(!a)return[];for(e=0,f=o.order.length;e<f;e++){h=o.order[e];if(g=o.leftMatch[h].exec(a)){i=g[1],g.splice(1,1);if(i.substr(i.length-1)!=="\\"){g[1]=(g[1]||"").replace(j,""),d=o.find[h](g,b,c);if(d!=null){a=a.replace(o.match[h],"");break}}}}d||(d=typeof 
b.getElementsByTagName!="undefined"?b.getElementsByTagName("*"):[]);return{set:d,expr:a}},m.filter=function(a,c,d,e){var f,g,h,i,j,k,l,n,p,q=a,r=[],s=c,t=c&&c[0]&&m.isXML(c[0]);while(a&&c.length){for(h in o.filter)if((f=o.leftMatch[h].exec(a))!=null&&f[2]){k=o.filter[h],l=f[1],g=!1,f.splice(1,1);if(l.substr(l.length-1)==="\\")continue;s===r&&(r=[]);if(o.preFilter[h]){f=o.preFilter[h](f,s,d,r,e,t);if(!f)g=i=!0;else if(f===!0)continue}if(f)for(n=0;(j=s[n])!=null;n++)j&&(i=k(j,f,n,s),p=e^i,d&&i!=null?p?g=!0:s[n]=!1:p&&(r.push(j),g=!0));if(i!==b){d||(s=r),a=a.replace(o.match[h],"");if(!g)return[];break}}if(a===q)if(g==null)m.error(a);else break;q=a}return s},m.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)};var n=m.getText=function(a){var b,c,d=a.nodeType,e="";if(d){if(d===1||d===9){if(typeof a.textContent=="string")return a.textContent;if(typeof a.innerText=="string")return a.innerText.replace(k,"");for(a=a.firstChild;a;a=a.nextSibling)e+=n(a)}else if(d===3||d===4)return a.nodeValue}else for(b=0;c=a[b];b++)c.nodeType!==8&&(e+=n(c));return e},o=m.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(a){return a.getAttribute("href")},type:function(a){return a.getAttribute("type")}},relative:{"+":function(a,b){var c=typeof b=="string",d=c&&!l.test(b),e=c&&!d;d&&(b=b.toLowerCase());for(var f=0,g=a.length,h;f<g;f++)if(h=a[f]){while((h=h.previousSibling)&&h.nodeType!==1);a[f]=e||h&&h.nodeName.toLowerCase()===b?h||!1:h===b}e&&m.filter(b,a,!0)},">":function(a,b){var c,d=typeof b=="string",e=0,f=a.length;if(d&&!l.test(b)){b=b.toLowerCase();for(;e<f;e++){c=a[e];if(c){var g=c.parentNode;a[e]=g.nodeName.toLowerCase()===b?g:!1}}}else{for(;e<f;e++)c=a[e],c&&(a[e]=d?c.parentNode:c.parentNode===b);d&&m.filter(b,a,!0)}},"":function(a,b,c){var d,f=e++,g=x;typeof b=="string"&&!l.test(b)&&(b=b.toLowerCase(),d=b,g=w),g("parentNode",b,f,a,d,c)},"~":function(a,b,c){var d,f=e++,g=x;typeof b=="string"&&!l.test(b)&&(b=b.toLowerCase(),d=b,g=w),g("previousSibling",b,f,a,d,c)}},find:{ID:function(a,b,c){if(typeof b.getElementById!="undefined"&&!c){var d=b.getElementById(a[1]);return d&&d.parentNode?[d]:[]}},NAME:function(a,b){if(typeof b.getElementsByName!="undefined"){var c=[],d=b.getElementsByName(a[1]);for(var e=0,f=d.length;e<f;e++)d[e].getAttribute("name")===a[1]&&c.push(d[e]);return c.length===0?null:c}},TAG:function(a,b){if(typeof b.getElementsByTagName!="undefined")return b.getElementsByTagName(a[1])}},preFilter:{CLASS:function(a,b,c,d,e,f){a=" "+a[1].replace(j,"")+" ";if(f)return a;for(var g=0,h;(h=b[g])!=null;g++)h&&(e^(h.className&&(" "+h.className+" ").replace(/[\t\n\r]/g," ").indexOf(a)>=0)?c||d.push(h):c&&(b[g]=!1));return!1},ID:function(a){return a[1].replace(j,"")},TAG:function(a,b){return a[1].replace(j,"").toLowerCase()},CHILD:function(a){if(a[1]==="nth"){a[2]||m.error(a[0]),a[2]=a[2].replace(/^\+|\s*/g,"");var 
b=/(-?)(\d*)(?:n([+\-]?\d*))?/.exec(a[2]==="even"&&"2n"||a[2]==="odd"&&"2n+1"||!/\D/.test(a[2])&&"0n+"+a[2]||a[2]);a[2]=b[1]+(b[2]||1)-0,a[3]=b[3]-0}else a[2]&&m.error(a[0]);a[0]=e++;return a},ATTR:function(a,b,c,d,e,f){var g=a[1]=a[1].replace(j,"");!f&&o.attrMap[g]&&(a[1]=o.attrMap[g]),a[4]=(a[4]||a[5]||"").replace(j,""),a[2]==="~="&&(a[4]=" "+a[4]+" ");return a},PSEUDO:function(b,c,d,e,f){if(b[1]==="not")if((a.exec(b[3])||"").length>1||/^\w/.test(b[3]))b[3]=m(b[3],null,null,c);else{var g=m.filter(b[3],c,d,!0^f);d||e.push.apply(e,g);return!1}else if(o.match.POS.test(b[0])||o.match.CHILD.test(b[0]))return!0;return b},POS:function(a){a.unshift(!0);return a}},filters:{enabled:function(a){return a.disabled===!1&&a.type!=="hidden"},disabled:function(a){return a.disabled===!0},checked:function(a){return a.checked===!0},selected:function(a){a.parentNode&&a.parentNode.selectedIndex;return a.selected===!0},parent:function(a){return!!a.firstChild},empty:function(a){return!a.firstChild},has:function(a,b,c){return!!m(c[3],a).length},header:function(a){return/h\d/i.test(a.nodeName)},text:function(a){var b=a.getAttribute("type"),c=a.type;return a.nodeName.toLowerCase()==="input"&&"text"===c&&(b===c||b===null)},radio:function(a){return a.nodeName.toLowerCase()==="input"&&"radio"===a.type},checkbox:function(a){return a.nodeName.toLowerCase()==="input"&&"checkbox"===a.type},file:function(a){return a.nodeName.toLowerCase()==="input"&&"file"===a.type},password:function(a){return a.nodeName.toLowerCase()==="input"&&"password"===a.type},submit:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"submit"===a.type},image:function(a){return a.nodeName.toLowerCase()==="input"&&"image"===a.type},reset:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"reset"===a.type},button:function(a){var b=a.nodeName.toLowerCase();return b==="input"&&"button"===a.type||b==="button"},input:function(a){return/input|select|textarea|button/i.test(a.nodeName)},focus:function(a){return a===a.ownerDocument.activeElement}},setFilters:{first:function(a,b){return b===0},last:function(a,b,c,d){return b===d.length-1},even:function(a,b){return b%2===0},odd:function(a,b){return b%2===1},lt:function(a,b,c){return b<c[3]-0},gt:function(a,b,c){return b>c[3]-0},nth:function(a,b,c){return c[3]-0===b},eq:function(a,b,c){return c[3]-0===b}},filter:{PSEUDO:function(a,b,c,d){var e=b[1],f=o.filters[e];if(f)return f(a,c,b,d);if(e==="contains")return(a.textContent||a.innerText||n([a])||"").indexOf(b[3])>=0;if(e==="not"){var g=b[3];for(var h=0,i=g.length;h<i;h++)if(g[h]===a)return!1;return!0}m.error(e)},CHILD:function(a,b){var c,e,f,g,h,i,j,k=b[1],l=a;switch(k){case"only":case"first":while(l=l.previousSibling)if(l.nodeType===1)return!1;if(k==="first")return!0;l=a;case"last":while(l=l.nextSibling)if(l.nodeType===1)return!1;return!0;case"nth":c=b[2],e=b[3];if(c===1&&e===0)return!0;f=b[0],g=a.parentNode;if(g&&(g[d]!==f||!a.nodeIndex)){i=0;for(l=g.firstChild;l;l=l.nextSibling)l.nodeType===1&&(l.nodeIndex=++i);g[d]=f}j=a.nodeIndex-e;return c===0?j===0:j%c===0&&j/c>=0}},ID:function(a,b){return a.nodeType===1&&a.getAttribute("id")===b},TAG:function(a,b){return b==="*"&&a.nodeType===1||!!a.nodeName&&a.nodeName.toLowerCase()===b},CLASS:function(a,b){return(" "+(a.className||a.getAttribute("class"))+" ").indexOf(b)>-1},ATTR:function(a,b){var c=b[1],d=m.attr?m.attr(a,c):o.attrHandle[c]?o.attrHandle[c](a):a[c]!=null?a[c]:a.getAttribute(c),e=d+"",f=b[2],g=b[4];return 
d==null?f==="!=":!f&&m.attr?d!=null:f==="="?e===g:f==="*="?e.indexOf(g)>=0:f==="~="?(" "+e+" ").indexOf(g)>=0:g?f==="!="?e!==g:f==="^="?e.indexOf(g)===0:f==="$="?e.substr(e.length-g.length)===g:f==="|="?e===g||e.substr(0,g.length+1)===g+"-":!1:e&&d!==!1},POS:function(a,b,c,d){var e=b[2],f=o.setFilters[e];if(f)return f(a,c,b,d)}}},p=o.match.POS,q=function(a,b){return"\\"+(b-0+1)};for(var r in o.match)o.match[r]=new RegExp(o.match[r].source+/(?![^\[]*\])(?![^\(]*\))/.source),o.leftMatch[r]=new RegExp(/(^(?:.|\r|\n)*?)/.source+o.match[r].source.replace(/\\(\d+)/g,q));var s=function(a,b){a=Array.prototype.slice.call(a,0);if(b){b.push.apply(b,a);return b}return a};try{Array.prototype.slice.call(c.documentElement.childNodes,0)[0].nodeType}catch(t){s=function(a,b){var c=0,d=b||[];if(g.call(a)==="[object Array]")Array.prototype.push.apply(d,a);else if(typeof a.length=="number")for(var e=a.length;c<e;c++)d.push(a[c]);else for(;a[c];c++)d.push(a[c]);return d}}var u,v;c.documentElement.compareDocumentPosition?u=function(a,b){if(a===b){h=!0;return 0}if(!a.compareDocumentPosition||!b.compareDocumentPosition)return a.compareDocumentPosition?-1:1;return a.compareDocumentPosition(b)&4?-1:1}:(u=function(a,b){if(a===b){h=!0;return 0}if(a.sourceIndex&&b.sourceIndex)return a.sourceIndex-b.sourceIndex;var c,d,e=[],f=[],g=a.parentNode,i=b.parentNode,j=g;if(g===i)return v(a,b);if(!g)return-1;if(!i)return 1;while(j)e.unshift(j),j=j.parentNode;j=i;while(j)f.unshift(j),j=j.parentNode;c=e.length,d=f.length;for(var k=0;k<c&&k<d;k++)if(e[k]!==f[k])return v(e[k],f[k]);return k===c?v(a,f[k],-1):v(e[k],b,1)},v=function(a,b,c){if(a===b)return c;var d=a.nextSibling;while(d){if(d===b)return-1;d=d.nextSibling}return 1}),function(){var a=c.createElement("div"),d="script"+(new Date).getTime(),e=c.documentElement;a.innerHTML="<a name='"+d+"'/>",e.insertBefore(a,e.firstChild),c.getElementById(d)&&(o.find.ID=function(a,c,d){if(typeof c.getElementById!="undefined"&&!d){var e=c.getElementById(a[1]);return e?e.id===a[1]||typeof e.getAttributeNode!="undefined"&&e.getAttributeNode("id").nodeValue===a[1]?[e]:b:[]}},o.filter.ID=function(a,b){var c=typeof a.getAttributeNode!="undefined"&&a.getAttributeNode("id");return a.nodeType===1&&c&&c.nodeValue===b}),e.removeChild(a),e=a=null}(),function(){var a=c.createElement("div");a.appendChild(c.createComment("")),a.getElementsByTagName("*").length>0&&(o.find.TAG=function(a,b){var c=b.getElementsByTagName(a[1]);if(a[1]==="*"){var d=[];for(var e=0;c[e];e++)c[e].nodeType===1&&d.push(c[e]);c=d}return c}),a.innerHTML="<a href='#'></a>",a.firstChild&&typeof a.firstChild.getAttribute!="undefined"&&a.firstChild.getAttribute("href")!=="#"&&(o.attrHandle.href=function(a){return a.getAttribute("href",2)}),a=null}(),c.querySelectorAll&&function(){var a=m,b=c.createElement("div"),d="__sizzle__";b.innerHTML="<p class='TEST'></p>";if(!b.querySelectorAll||b.querySelectorAll(".TEST").length!==0){m=function(b,e,f,g){e=e||c;if(!g&&!m.isXML(e)){var h=/^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec(b);if(h&&(e.nodeType===1||e.nodeType===9)){if(h[1])return s(e.getElementsByTagName(b),f);if(h[2]&&o.find.CLASS&&e.getElementsByClassName)return s(e.getElementsByClassName(h[2]),f)}if(e.nodeType===9){if(b==="body"&&e.body)return s([e.body],f);if(h&&h[3]){var i=e.getElementById(h[3]);if(!i||!i.parentNode)return s([],f);if(i.id===h[3])return s([i],f)}try{return s(e.querySelectorAll(b),f)}catch(j){}}else if(e.nodeType===1&&e.nodeName.toLowerCase()!=="object"){var 
k=e,l=e.getAttribute("id"),n=l||d,p=e.parentNode,q=/^\s*[+~]/.test(b);l?n=n.replace(/'/g,"\\$&"):e.setAttribute("id",n),q&&p&&(e=e.parentNode);try{if(!q||p)return s(e.querySelectorAll("[id='"+n+"'] "+b),f)}catch(r){}finally{l||k.removeAttribute("id")}}}return a(b,e,f,g)};for(var e in a)m[e]=a[e];b=null}}(),function(){var a=c.documentElement,b=a.matchesSelector||a.mozMatchesSelector||a.webkitMatchesSelector||a.msMatchesSelector;if(b){var d=!b.call(c.createElement("div"),"div"),e=!1;try{b.call(c.documentElement,"[test!='']:sizzle")}catch(f){e=!0}m.matchesSelector=function(a,c){c=c.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!m.isXML(a))try{if(e||!o.match.PSEUDO.test(c)&&!/!=/.test(c)){var f=b.call(a,c);if(f||!d||a.document&&a.document.nodeType!==11)return f}}catch(g){}return m(c,null,null,[a]).length>0}}}(),function(){var a=c.createElement("div");a.innerHTML="<div class='test e'></div><div class='test'></div>";if(!!a.getElementsByClassName&&a.getElementsByClassName("e").length!==0){a.lastChild.className="e";if(a.getElementsByClassName("e").length===1)return;o.order.splice(1,0,"CLASS"),o.find.CLASS=function(a,b,c){if(typeof b.getElementsByClassName!="undefined"&&!c)return b.getElementsByClassName(a[1])},a=null}}(),c.documentElement.contains?m.contains=function(a,b){return a!==b&&(a.contains?a.contains(b):!0)}:c.documentElement.compareDocumentPosition?m.contains=function(a,b){return!!(a.compareDocumentPosition(b)&16)}:m.contains=function(){return!1},m.isXML=function(a){var b=(a?a.ownerDocument||a:0).documentElement;return b?b.nodeName!=="HTML":!1};var y=function(a,b,c){var d,e=[],f="",g=b.nodeType?[b]:b;while(d=o.match.PSEUDO.exec(a))f+=d[0],a=a.replace(o.match.PSEUDO,"");a=o.relative[a]?a+"*":a;for(var h=0,i=g.length;h<i;h++)m(a,g[h],e,c);return m.filter(f,e)};m.attr=f.attr,m.selectors.attrMap={},f.find=m,f.expr=m.selectors,f.expr[":"]=f.expr.filters,f.unique=m.uniqueSort,f.text=m.getText,f.isXMLDoc=m.isXML,f.contains=m.contains}();var L=/Until$/,M=/^(?:parents|prevUntil|prevAll)/,N=/,/,O=/^.[^:#\[\.,]*$/,P=Array.prototype.slice,Q=f.expr.match.POS,R={children:!0,contents:!0,next:!0,prev:!0};f.fn.extend({find:function(a){var b=this,c,d;if(typeof a!="string")return f(a).filter(function(){for(c=0,d=b.length;c<d;c++)if(f.contains(b[c],this))return!0});var e=this.pushStack("","find",a),g,h,i;for(c=0,d=this.length;c<d;c++){g=e.length,f.find(a,this[c],e);if(c>0)for(h=g;h<e.length;h++)for(i=0;i<g;i++)if(e[i]===e[h]){e.splice(h--,1);break}}return e},has:function(a){var b=f(a);return this.filter(function(){for(var a=0,c=b.length;a<c;a++)if(f.contains(this,b[a]))return!0})},not:function(a){return this.pushStack(T(this,a,!1),"not",a)},filter:function(a){return this.pushStack(T(this,a,!0),"filter",a)},is:function(a){return!!a&&(typeof a=="string"?Q.test(a)?f(a,this.context).index(this[0])>=0:f.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c=[],d,e,g=this[0];if(f.isArray(a)){var h=1;while(g&&g.ownerDocument&&g!==b){for(d=0;d<a.length;d++)f(g).is(a[d])&&c.push({selector:a[d],elem:g,level:h});g=g.parentNode,h++}return c}var i=Q.test(a)||typeof a!="string"?f(a,b||this.context):0;for(d=0,e=this.length;d<e;d++){g=this[d];while(g){if(i?i.index(g)>-1:f.find.matchesSelector(g,a)){c.push(g);break}g=g.parentNode;if(!g||!g.ownerDocument||g===b||g.nodeType===11)break}}c=c.length>1?f.unique(c):c;return this.pushStack(c,"closest",a)},index:function(a){if(!a)return this[0]&&this[0].parentNode?this.prevAll().length:-1;if(typeof a=="string")return f.inArray(this[0],f(a));return 
f.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var c=typeof a=="string"?f(a,b):f.makeArray(a&&a.nodeType?[a]:a),d=f.merge(this.get(),c);return this.pushStack(S(c[0])||S(d[0])?d:f.unique(d))},andSelf:function(){return this.add(this.prevObject)}}),f.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return f.dir(a,"parentNode")},parentsUntil:function(a,b,c){return f.dir(a,"parentNode",c)},next:function(a){return f.nth(a,2,"nextSibling")},prev:function(a){return f.nth(a,2,"previousSibling")},nextAll:function(a){return f.dir(a,"nextSibling")},prevAll:function(a){return f.dir(a,"previousSibling")},nextUntil:function(a,b,c){return f.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return f.dir(a,"previousSibling",c)},siblings:function(a){return f.sibling(a.parentNode.firstChild,a)},children:function(a){return f.sibling(a.firstChild)},contents:function(a){return f.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:f.makeArray(a.childNodes)}},function(a,b){f.fn[a]=function(c,d){var e=f.map(this,b,c);L.test(a)||(d=c),d&&typeof d=="string"&&(e=f.filter(d,e)),e=this.length>1&&!R[a]?f.unique(e):e,(this.length>1||N.test(d))&&M.test(a)&&(e=e.reverse());return this.pushStack(e,a,P.call(arguments).join(","))}}),f.extend({filter:function(a,b,c){c&&(a=":not("+a+")");return b.length===1?f.find.matchesSelector(b[0],a)?[b[0]]:[]:f.find.matches(a,b)},dir:function(a,c,d){var e=[],g=a[c];while(g&&g.nodeType!==9&&(d===b||g.nodeType!==1||!f(g).is(d)))g.nodeType===1&&e.push(g),g=g[c];return e},nth:function(a,b,c,d){b=b||1;var e=0;for(;a;a=a[c])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var V="abbr|article|aside|audio|canvas|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",W=/ jQuery\d+="(?:\d+|null)"/g,X=/^\s+/,Y=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Z=/<([\w:]+)/,$=/<tbody/i,_=/<|&#?\w+;/,ba=/<(?:script|style)/i,bb=/<(?:script|object|embed|option|style)/i,bc=new RegExp("<(?:"+V+")","i"),bd=/checked\s*(?:[^=]|=\s*.checked.)/i,be=/\/(java|ecma)script/i,bf=/^\s*<!(?:\[CDATA\[|\-\-)/,bg={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]},bh=U(c);bg.optgroup=bg.option,bg.tbody=bg.tfoot=bg.colgroup=bg.caption=bg.thead,bg.th=bg.td,f.support.htmlSerialize||(bg._default=[1,"div<div>","</div>"]),f.fn.extend({text:function(a){if(f.isFunction(a))return this.each(function(b){var c=f(this);c.text(a.call(this,b,c.text()))});if(typeof a!="object"&&a!==b)return this.empty().append((this[0]&&this[0].ownerDocument||c).createTextNode(a));return f.text(this)},wrapAll:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapAll(a.call(this,b))});if(this[0]){var b=f(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapInner(a.call(this,b))});return this.each(function(){var 
b=f(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=f.isFunction(a);return this.each(function(c){f(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){f.nodeName(this,"body")||f(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=f.clean(arguments);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,f.clean(arguments));return a}},remove:function(a,b){for(var c=0,d;(d=this[c])!=null;c++)if(!a||f.filter(a,[d]).length)!b&&d.nodeType===1&&(f.cleanData(d.getElementsByTagName("*")),f.cleanData([d])),d.parentNode&&d.parentNode.removeChild(d);return this},empty:function()
{for(var a=0,b;(b=this[a])!=null;a++){b.nodeType===1&&f.cleanData(b.getElementsByTagName("*"));while(b.firstChild)b.removeChild(b.firstChild)}return this},clone:function(a,b){a=a==null?!1:a,b=b==null?a:b;return this.map(function(){return f.clone(this,a,b)})},html:function(a){if(a===b)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(W,""):null;if(typeof a=="string"&&!ba.test(a)&&(f.support.leadingWhitespace||!X.test(a))&&!bg[(Z.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Y,"<$1></$2>");try{for(var c=0,d=this.length;c<d;c++)this[c].nodeType===1&&(f.cleanData(this[c].getElementsByTagName("*")),this[c].innerHTML=a)}catch(e){this.empty().append(a)}}else f.isFunction(a)?this.each(function(b){var c=f(this);c.html(a.call(this,b,c.html()))}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(f.isFunction(a))return this.each(function(b){var c=f(this),d=c.html();c.replaceWith(a.call(this,b,d))});typeof a!="string"&&(a=f(a).detach());return this.each(function(){var b=this.nextSibling,c=this.parentNode;f(this).remove(),b?f(b).before(a):f(c).append(a)})}return this.length?this.pushStack(f(f.isFunction(a)?a():a),"replaceWith",a):this},detach:function(a){return this.remove(a,!0)},domManip:function(a,c,d){var e,g,h,i,j=a[0],k=[];if(!f.support.checkClone&&arguments.length===3&&typeof j=="string"&&bd.test(j))return this.each(function(){f(this).domManip(a,c,d,!0)});if(f.isFunction(j))return this.each(function(e){var g=f(this);a[0]=j.call(this,e,c?g.html():b),g.domManip(a,c,d)});if(this[0]){i=j&&j.parentNode,f.support.parentNode&&i&&i.nodeType===11&&i.childNodes.length===this.length?e={fragment:i}:e=f.buildFragment(a,this,k),h=e.fragment,h.childNodes.length===1?g=h=h.firstChild:g=h.firstChild;if(g){c=c&&f.nodeName(g,"tr");for(var l=0,m=this.length,n=m-1;l<m;l++)d.call(c?bi(this[l],g):this[l],e.cacheable||m>1&&l<n?f.clone(h,!0,!0):h)}k.length&&f.each(k,bp)}return this}}),f.buildFragment=function(a,b,d){var e,g,h,i,j=a[0];b&&b[0]&&(i=b[0].ownerDocument||b[0]),i.createDocumentFragment||(i=c),a.length===1&&typeof j=="string"&&j.length<512&&i===c&&j.charAt(0)==="<"&&!bb.test(j)&&(f.support.checkClone||!bd.test(j))&&(f.support.html5Clone||!bc.test(j))&&(g=!0,h=f.fragments[j],h&&h!==1&&(e=h)),e||(e=i.createDocumentFragment(),f.clean(a,i,e,d)),g&&(f.fragments[j]=h?e:1);return{fragment:e,cacheable:g}},f.fragments={},f.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){f.fn[a]=function(c){var d=[],e=f(c),g=this.length===1&&this[0].parentNode;if(g&&g.nodeType===11&&g.childNodes.length===1&&e.length===1){e[b](this[0]);return this}for(var h=0,i=e.length;h<i;h++){var j=(h>0?this.clone(!0):this).get();f(e[h])[b](j),d=d.concat(j)}return this.pushStack(d,a,e.selector)}}),f.extend({clone:function(a,b,c){var d,e,g,h=f.support.html5Clone||!bc.test("<"+a.nodeName)?a.cloneNode(!0):bo(a);if((!f.support.noCloneEvent||!f.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!f.isXMLDoc(a)){bk(a,h),d=bl(a),e=bl(h);for(g=0;d[g];++g)e[g]&&bk(d[g],e[g])}if(b){bj(a,h);if(c){d=bl(a),e=bl(h);for(g=0;d[g];++g)bj(d[g],e[g])}}d=e=null;return h},clean:function(a,b,d,e){var g;b=b||c,typeof b.createElement=="undefined"&&(b=b.ownerDocument||b[0]&&b[0].ownerDocument||c);var h=[],i;for(var j=0,k;(k=a[j])!=null;j++){typeof k=="number"&&(k+="");if(!k)continue;if(typeof k=="string")if(!_.test(k))k=b.createTextNode(k);else{k=k.replace(Y,"<$1></$2>");var 
l=(Z.exec(k)||["",""])[1].toLowerCase(),m=bg[l]||bg._default,n=m[0],o=b.createElement("div");b===c?bh.appendChild(o):U(b).appendChild(o),o.innerHTML=m[1]+k+m[2];while(n--)o=o.lastChild;if(!f.support.tbody){var p=$.test(k),q=l==="table"&&!p?o.firstChild&&o.firstChild.childNodes:m[1]==="<table>"&&!p?o.childNodes:[];for(i=q.length-1;i>=0;--i)f.nodeName(q[i],"tbody")&&!q[i].childNodes.length&&q[i].parentNode.removeChild(q[i])}!f.support.leadingWhitespace&&X.test(k)&&o.insertBefore(b.createTextNode(X.exec(k)[0]),o.firstChild),k=o.childNodes}var r;if(!f.support.appendChecked)if(k[0]&&typeof (r=k.length)=="number")for(i=0;i<r;i++)bn(k[i]);else bn(k);k.nodeType?h.push(k):h=f.merge(h,k)}if(d){g=function(a){return!a.type||be.test(a.type)};for(j=0;h[j];j++)if(e&&f.nodeName(h[j],"script")&&(!h[j].type||h[j].type.toLowerCase()==="text/javascript"))e.push(h[j].parentNode?h[j].parentNode.removeChild(h[j]):h[j]);else{if(h[j].nodeType===1){var s=f.grep(h[j].getElementsByTagName("script"),g);h.splice.apply(h,[j+1,0].concat(s))}d.appendChild(h[j])}}return h},cleanData:function(a){var b,c,d=f.cache,e=f.event.special,g=f.support.deleteExpando;for(var h=0,i;(i=a[h])!=null;h++){if(i.nodeName&&f.noData[i.nodeName.toLowerCase()])continue;c=i[f.expando];if(c){b=d[c];if(b&&b.events){for(var j in b.events)e[j]?f.event.remove(i,j):f.removeEvent(i,j,b.handle);b.handle&&(b.handle.elem=null)}g?delete i[f.expando]:i.removeAttribute&&i.removeAttribute(f.expando),delete d[c]}}}});var bq=/alpha\([^)]*\)/i,br=/opacity=([^)]*)/,bs=/([A-Z]|^ms)/g,bt=/^-?\d+(?:px)?$/i,bu=/^-?\d/,bv=/^([\-+])=([\-+.\de]+)/,bw={position:"absolute",visibility:"hidden",display:"block"},bx=["Left","Right"],by=["Top","Bottom"],bz,bA,bB;f.fn.css=function(a,c){if(arguments.length===2&&c===b)return this;return f.access(this,a,c,!0,function(a,c,d){return d!==b?f.style(a,c,d):f.css(a,c)})},f.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=bz(a,"opacity","opacity");return c===""?"1":c}return a.style.opacity}}},cssNumber:{fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":f.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,c,d,e){if(!!a&&a.nodeType!==3&&a.nodeType!==8&&!!a.style){var g,h,i=f.camelCase(c),j=a.style,k=f.cssHooks[i];c=f.cssProps[i]||i;if(d===b){if(k&&"get"in k&&(g=k.get(a,!1,e))!==b)return g;return j[c]}h=typeof d,h==="string"&&(g=bv.exec(d))&&(d=+(g[1]+1)*+g[2]+parseFloat(f.css(a,c)),h="number");if(d==null||h==="number"&&isNaN(d))return;h==="number"&&!f.cssNumber[i]&&(d+="px");if(!k||!("set"in k)||(d=k.set(a,d))!==b)try{j[c]=d}catch(l){}}},css:function(a,c,d){var e,g;c=f.camelCase(c),g=f.cssHooks[c],c=f.cssProps[c]||c,c==="cssFloat"&&(c="float");if(g&&"get"in g&&(e=g.get(a,!0,d))!==b)return e;if(bz)return bz(a,c)},swap:function(a,b,c){var d={};for(var e in b)d[e]=a.style[e],a.style[e]=b[e];c.call(a);for(e in b)a.style[e]=d[e]}}),f.curCSS=f.css,f.each(["height","width"],function(a,b){f.cssHooks[b]={get:function(a,c,d){var e;if(c){if(a.offsetWidth!==0)return bC(a,b,d);f.swap(a,bw,function(){e=bC(a,b,d)});return e}},set:function(a,b){if(!bt.test(b))return b;b=parseFloat(b);if(b>=0)return b+"px"}}}),f.support.opacity||(f.cssHooks.opacity={get:function(a,b){return br.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var 
c=a.style,d=a.currentStyle,e=f.isNumeric(b)?"alpha(opacity="+b*100+")":"",g=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&f.trim(g.replace(bq,""))===""){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bq.test(g)?g.replace(bq,e):g+" "+e}}),f(function(){f.support.reliableMarginRight||(f.cssHooks.marginRight={get:function(a,b){var c;f.swap(a,{display:"inline-block"},function(){b?c=bz(a,"margin-right","marginRight"):c=a.style.marginRight});return c}})}),c.defaultView&&c.defaultView.getComputedStyle&&(bA=function(a,b){var c,d,e;b=b.replace(bs,"-$1").toLowerCase(),(d=a.ownerDocument.defaultView)&&(e=d.getComputedStyle(a,null))&&(c=e.getPropertyValue(b),c===""&&!f.contains(a.ownerDocument.documentElement,a)&&(c=f.style(a,b)));return c}),c.documentElement.currentStyle&&(bB=function(a,b){var c,d,e,f=a.currentStyle&&a.currentStyle[b],g=a.style;f===null&&g&&(e=g[b])&&(f=e),!bt.test(f)&&bu.test(f)&&(c=g.left,d=a.runtimeStyle&&a.runtimeStyle.left,d&&(a.runtimeStyle.left=a.currentStyle.left),g.left=b==="fontSize"?"1em":f||0,f=g.pixelLeft+"px",g.left=c,d&&(a.runtimeStyle.left=d));return f===""?"auto":f}),bz=bA||bB,f.expr&&f.expr.filters&&(f.expr.filters.hidden=function(a){var b=a.offsetWidth,c=a.offsetHeight;return b===0&&c===0||!f.support.reliableHiddenOffsets&&(a.style&&a.style.display||f.css(a,"display"))==="none"},f.expr.filters.visible=function(a){return!f.expr.filters.hidden(a)});var bD=/%20/g,bE=/\[\]$/,bF=/\r?\n/g,bG=/#.*$/,bH=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,bI=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,bJ=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,bK=/^(?:GET|HEAD)$/,bL=/^\/\//,bM=/\?/,bN=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,bO=/^(?:select|textarea)/i,bP=/\s+/,bQ=/([?&])_=[^&]*/,bR=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,bS=f.fn.load,bT={},bU={},bV,bW,bX=["*/"]+["*"];try{bV=e.href}catch(bY){bV=c.createElement("a"),bV.href="",bV=bV.href}bW=bR.exec(bV.toLowerCase())||[],f.fn.extend({load:function(a,c,d){if(typeof a!="string"&&bS)return bS.apply(this,arguments);if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var g=a.slice(e,a.length);a=a.slice(0,e)}var h="GET";c&&(f.isFunction(c)?(d=c,c=b):typeof c=="object"&&(c=f.param(c,f.ajaxSettings.traditional),h="POST"));var i=this;f.ajax({url:a,type:h,dataType:"html",data:c,complete:function(a,b,c){c=a.responseText,a.isResolved()&&(a.done(function(a){c=a}),i.html(g?f("<div>").append(c.replace(bN,"")).find(g):c)),d&&i.each(d,[c,b,a])}});return this},serialize:function(){return f.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?f.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||bO.test(this.nodeName)||bI.test(this.type))}).map(function(a,b){var c=f(this).val();return c==null?null:f.isArray(c)?f.map(c,function(a,c){return{name:b.name,value:a.replace(bF,"\r\n")}}):{name:b.name,value:c.replace(bF,"\r\n")}}).get()}}),f.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){f.fn[b]=function(a){return this.on(b,a)}}),f.each(["get","post"],function(a,c){f[c]=function(a,d,e,g){f.isFunction(d)&&(g=g||e,e=d,d=b);return f.ajax({type:c,url:a,data:d,success:e,dataType:g})}}),f.extend({getScript:function(a,c){return f.get(a,b,c,"script")},getJSON:function(a,b,c){return f.get(a,b,c,"json")},ajaxSetup:function(a,b){b?b_(a,f.ajaxSettings):(b=a,a=f.ajaxSettings),b_(a,b);return 
a},ajaxSettings:{url:bV,isLocal:bJ.test(bW[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":bX},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":f.parseJSON,"text xml":f.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:bZ(bT),ajaxTransport:bZ(bU),ajax:function(a,c){function w(a,c,l,m){if(s!==2){s=2,q&&clearTimeout(q),p=b,n=m||"",v.readyState=a>0?4:0;var o,r,u,w=c,x=l?cb(d,v,l):b,y,z;if(a>=200&&a<300||a===304){if(d.ifModified){if(y=v.getResponseHeader("Last-Modified"))f.lastModified[k]=y;if(z=v.getResponseHeader("Etag"))f.etag[k]=z}if(a===304)w="notmodified",o=!0;else try{r=cc(d,x),w="success",o=!0}catch(A){w="parsererror",u=A}}else{u=w;if(!w||a)w="error",a<0&&(a=0)}v.status=a,v.statusText=""+(c||w),o?h.resolveWith(e,[r,w,v]):h.rejectWith(e,[v,w,u]),v.statusCode(j),j=b,t&&g.trigger("ajax"+(o?"Success":"Error"),[v,d,o?r:u]),i.fireWith(e,[v,w]),t&&(g.trigger("ajaxComplete",[v,d]),--f.active||f.event.trigger("ajaxStop"))}}typeof a=="object"&&(c=a,a=b),c=c||{};var d=f.ajaxSetup({},c),e=d.context||d,g=e!==d&&(e.nodeType||e instanceof f)?f(e):f.event,h=f.Deferred(),i=f.Callbacks("once memory"),j=d.statusCode||{},k,l={},m={},n,o,p,q,r,s=0,t,u,v={readyState:0,setRequestHeader:function(a,b){if(!s){var c=a.toLowerCase();a=m[c]=m[c]||a,l[a]=b}return this},getAllResponseHeaders:function(){return s===2?n:null},getResponseHeader:function(a){var c;if(s===2){if(!o){o={};while(c=bH.exec(n))o[c[1].toLowerCase()]=c[2]}c=o[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){s||(d.mimeType=a);return this},abort:function(a){a=a||"abort",p&&p.abort(a),w(0,a);return this}};h.promise(v),v.success=v.done,v.error=v.fail,v.complete=i.add,v.statusCode=function(a){if(a){var b;if(s<2)for(b in a)j[b]=[j[b],a[b]];else b=a[v.status],v.then(b,b)}return this},d.url=((a||d.url)+"").replace(bG,"").replace(bL,bW[1]+"//"),d.dataTypes=f.trim(d.dataType||"*").toLowerCase().split(bP),d.crossDomain==null&&(r=bR.exec(d.url.toLowerCase()),d.crossDomain=!(!r||r[1]==bW[1]&&r[2]==bW[2]&&(r[3]||(r[1]==="http:"?80:443))==(bW[3]||(bW[1]==="http:"?80:443)))),d.data&&d.processData&&typeof d.data!="string"&&(d.data=f.param(d.data,d.traditional)),b$(bT,d,c,v);if(s===2)return!1;t=d.global,d.type=d.type.toUpperCase(),d.hasContent=!bK.test(d.type),t&&f.active++===0&&f.event.trigger("ajaxStart");if(!d.hasContent){d.data&&(d.url+=(bM.test(d.url)?"&":"?")+d.data,delete d.data),k=d.url;if(d.cache===!1){var x=f.now(),y=d.url.replace(bQ,"$1_="+x);d.url=y+(y===d.url?(bM.test(d.url)?"&":"?")+"_="+x:"")}}(d.data&&d.hasContent&&d.contentType!==!1||c.contentType)&&v.setRequestHeader("Content-Type",d.contentType),d.ifModified&&(k=k||d.url,f.lastModified[k]&&v.setRequestHeader("If-Modified-Since",f.lastModified[k]),f.etag[k]&&v.setRequestHeader("If-None-Match",f.etag[k])),v.setRequestHeader("Accept",d.dataTypes[0]&&d.accepts[d.dataTypes[0]]?d.accepts[d.dataTypes[0]]+(d.dataTypes[0]!=="*"?", "+bX+"; q=0.01":""):d.accepts["*"]);for(u in d.headers)v.setRequestHeader(u,d.headers[u]);if(d.beforeSend&&(d.beforeSend.call(e,v,d)===!1||s===2)){v.abort();return!1}for(u in{success:1,error:1,complete:1})v[u](d[u]);p=b$(bU,d,c,v);if(!p)w(-1,"No 
Transport");else{v.readyState=1,t&&g.trigger("ajaxSend",[v,d]),d.async&&d.timeout>0&&(q=setTimeout(function(){v.abort("timeout")},d.timeout));try{s=1,p.send(l,w)}catch(z){if(s<2)w(-1,z);else throw z}}return v},param:function(a,c){var d=[],e=function(a,b){b=f.isFunction(b)?b():b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=f.ajaxSettings.traditional);if(f.isArray(a)||a.jquery&&!f.isPlainObject(a))f.each(a,function(){e(this.name,this.value)});else for(var g in a)ca(g,a[g],c,e);return d.join("&").replace(bD,"+")}}),f.extend({active:0,lastModified:{},etag:{}});var cd=f.now(),ce=/(\=)\?(&|$)|\?\?/i;f.ajaxSetup({jsonp:"callback",jsonpCallback:function(){return f.expando+"_"+cd++}}),f.ajaxPrefilter("json jsonp",function(b,c,d){var e=b.contentType==="application/x-www-form-urlencoded"&&typeof b.data=="string";if(b.dataTypes[0]==="jsonp"||b.jsonp!==!1&&(ce.test(b.url)||e&&ce.test(b.data))){var g,h=b.jsonpCallback=f.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,i=a[h],j=b.url,k=b.data,l="$1"+h+"$2";b.jsonp!==!1&&(j=j.replace(ce,l),b.url===j&&(e&&(k=k.replace(ce,l)),b.data===k&&(j+=(/\?/.test(j)?"&":"?")+b.jsonp+"="+h))),b.url=j,b.data=k,a[h]=function(a){g=[a]},d.always(function(){a[h]=i,g&&f.isFunction(i)&&a[h](g[0])}),b.converters["script json"]=function(){g||f.error(h+" was not called");return g[0]},b.dataTypes[0]="json";return"script"}}),f.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){f.globalEval(a);return a}}}),f.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),f.ajaxTransport("script",function(a){if(a.crossDomain){var d,e=c.head||c.getElementsByTagName("head")[0]||c.documentElement;return{send:function(f,g){d=c.createElement("script"),d.async="async",a.scriptCharset&&(d.charset=a.scriptCharset),d.src=a.url,d.onload=d.onreadystatechange=function(a,c){if(c||!d.readyState||/loaded|complete/.test(d.readyState))d.onload=d.onreadystatechange=null,e&&d.parentNode&&e.removeChild(d),d=b,c||g(200,"success")},e.insertBefore(d,e.firstChild)},abort:function(){d&&d.onload(0,1)}}}});var cf=a.ActiveXObject?function(){for(var a in ch)ch[a](0,1)}:!1,cg=0,ch;f.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&ci()||cj()}:ci,function(a){f.extend(f.support,{ajax:!!a,cors:!!a&&"withCredentials"in a})}(f.ajaxSettings.xhr()),f.support.ajax&&f.ajaxTransport(function(c){if(!c.crossDomain||f.support.cors){var d;return{send:function(e,g){var h=c.xhr(),i,j;c.username?h.open(c.type,c.url,c.async,c.username,c.password):h.open(c.type,c.url,c.async);if(c.xhrFields)for(j in c.xhrFields)h[j]=c.xhrFields[j];c.mimeType&&h.overrideMimeType&&h.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(j in e)h.setRequestHeader(j,e[j])}catch(k){}h.send(c.hasContent&&c.data||null),d=function(a,e){var j,k,l,m,n;try{if(d&&(e||h.readyState===4)){d=b,i&&(h.onreadystatechange=f.noop,cf&&delete 
ch[i]);if(e)h.readyState!==4&&h.abort();else{j=h.status,l=h.getAllResponseHeaders(),m={},n=h.responseXML,n&&n.documentElement&&(m.xml=n),m.text=h.responseText;try{k=h.statusText}catch(o){k=""}!j&&c.isLocal&&!c.crossDomain?j=m.text?200:404:j===1223&&(j=204)}}}catch(p){e||g(-1,p)}m&&g(j,k,m,l)},!c.async||h.readyState===4?d():(i=++cg,cf&&(ch||(ch={},f(a).unload(cf)),ch[i]=d),h.onreadystatechange=d)},abort:function(){d&&d(0,1)}}}});var ck={},cl,cm,cn=/^(?:toggle|show|hide)$/,co=/^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,cp,cq=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]],cr;f.fn.extend({show:function(a,b,c){var d,e;if(a||a===0)return this.animate(cu("show",3),a,b,c);for(var g=0,h=this.length;g<h;g++)d=this[g],d.style&&(e=d.style.display,!f._data(d,"olddisplay")&&e==="none"&&(e=d.style.display=""),e===""&&f.css(d,"display")==="none"&&f._data(d,"olddisplay",cv(d.nodeName)));for(g=0;g<h;g++){d=this[g];if(d.style){e=d.style.display;if(e===""||e==="none")d.style.display=f._data(d,"olddisplay")||""}}return this},hide:function(a,b,c){if(a||a===0)return this.animate(cu("hide",3),a,b,c);var d,e,g=0,h=this.length;for(;g<h;g++)d=this[g],d.style&&(e=f.css(d,"display"),e!=="none"&&!f._data(d,"olddisplay")&&f._data(d,"olddisplay",e));for(g=0;g<h;g++)this[g].style&&(this[g].style.display="none");return this},_toggle:f.fn.toggle,toggle:function(a,b,c){var d=typeof a=="boolean";f.isFunction(a)&&f.isFunction(b)?this._toggle.apply(this,arguments):a==null||d?this.each(function(){var b=d?a:f(this).is(":hidden");f(this)[b?"show":"hide"]()}):this.animate(cu("toggle",3),a,b,c);return this},fadeTo:function(a,b,c,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){function g(){e.queue===!1&&f._mark(this);var b=f.extend({},e),c=this.nodeType===1,d=c&&f(this).is(":hidden"),g,h,i,j,k,l,m,n,o;b.animatedProperties={};for(i in a){g=f.camelCase(i),i!==g&&(a[g]=a[i],delete a[i]),h=a[g],f.isArray(h)?(b.animatedProperties[g]=h[1],h=a[g]=h[0]):b.animatedProperties[g]=b.specialEasing&&b.specialEasing[g]||b.easing||"swing";if(h==="hide"&&d||h==="show"&&!d)return b.complete.call(this);c&&(g==="height"||g==="width")&&(b.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY],f.css(this,"display")==="inline"&&f.css(this,"float")==="none"&&(!f.support.inlineBlockNeedsLayout||cv(this.nodeName)==="inline"?this.style.display="inline-block":this.style.zoom=1))}b.overflow!=null&&(this.style.overflow="hidden");for(i in a)j=new f.fx(this,b,i),h=a[i],cn.test(h)?(o=f._data(this,"toggle"+i)||(h==="toggle"?d?"show":"hide":0),o?(f._data(this,"toggle"+i,o==="show"?"hide":"show"),j[o]()):j[h]()):(k=co.exec(h),l=j.cur(),k?(m=parseFloat(k[2]),n=k[3]||(f.cssNumber[i]?"":"px"),n!=="px"&&(f.style(this,i,(m||1)+n),l=(m||1)/j.cur()*l,f.style(this,i,l+n)),k[1]&&(m=(k[1]==="-="?-1:1)*m+l),j.custom(l,m,n)):j.custom(l,h,""));return!0}var e=f.speed(b,c,d);if(f.isEmptyObject(a))return this.each(e.complete,[!1]);a=f.extend({},a);return e.queue===!1?this.each(g):this.queue(e.queue,g)},stop:function(a,c,d){typeof a!="string"&&(d=c,c=a,a=b),c&&a!==!1&&this.queue(a||"fx",[]);return this.each(function(){function h(a,b,c){var e=b[c];f.removeData(a,c,!0),e.stop(d)}var b,c=!1,e=f.timers,g=f._data(this);d||f._unmark(!0,this);if(a==null)for(b in g)g[b]&&g[b].stop&&b.indexOf(".run")===b.length-4&&h(this,g,b);else 
g[b=a+".run"]&&g[b].stop&&h(this,g,b);for(b=e.length;b--;)e[b].elem===this&&(a==null||e[b].queue===a)&&(d?e[b](!0):e[b].saveState(),c=!0,e.splice(b,1));(!d||!c)&&f.dequeue(this,a)})}}),f.each({slideDown:cu("show",1),slideUp:cu("hide",1),slideToggle:cu("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){f.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),f.extend({speed:function(a,b,c){var d=a&&typeof a=="object"?f.extend({},a):{complete:c||!c&&b||f.isFunction(a)&&a,duration:a,easing:c&&b||b&&!f.isFunction(b)&&b};d.duration=f.fx.off?0:typeof d.duration=="number"?d.duration:d.duration in f.fx.speeds?f.fx.speeds[d.duration]:f.fx.speeds._default;if(d.queue==null||d.queue===!0)d.queue="fx";d.old=d.complete,d.complete=function(a){f.isFunction(d.old)&&d.old.call(this),d.queue?f.dequeue(this,d.queue):a!==!1&&f._unmark(this)};return d},easing:{linear:function(a,b,c,d){return c+d*a},swing:function(a,b,c,d){return(-Math.cos(a*Math.PI)/2+.5)*d+c}},timers:[],fx:function(a,b,c){this.options=b,this.elem=a,this.prop=c,b.orig=b.orig||{}}}),f.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this),(f.fx.step[this.prop]||f.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a,b=f.css(this.elem,this.prop);return isNaN(a=parseFloat(b))?!b||b==="auto"?0:b:a},custom:function(a,c,d){function h(a){return e.step(a)}var e=this,g=f.fx;this.startTime=cr||cs(),this.end=c,this.now=this.start=a,this.pos=this.state=0,this.unit=d||this.unit||(f.cssNumber[this.prop]?"":"px"),h.queue=this.options.queue,h.elem=this.elem,h.saveState=function(){e.options.hide&&f._data(e.elem,"fxshow"+e.prop)===b&&f._data(e.elem,"fxshow"+e.prop,e.start)},h()&&f.timers.push(h)&&!cp&&(cp=setInterval(g.tick,g.interval))},show:function(){var a=f._data(this.elem,"fxshow"+this.prop);this.options.orig[this.prop]=a||f.style(this.elem,this.prop),this.options.show=!0,a!==b?this.custom(this.cur(),a):this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur()),f(this.elem).show()},hide:function(){this.options.orig[this.prop]=f._data(this.elem,"fxshow"+this.prop)||f.style(this.elem,this.prop),this.options.hide=!0,this.custom(this.cur(),0)},step:function(a){var b,c,d,e=cr||cs(),g=!0,h=this.elem,i=this.options;if(a||e>=i.duration+this.startTime){this.now=this.end,this.pos=this.state=1,this.update(),i.animatedProperties[this.prop]=!0;for(b in i.animatedProperties)i.animatedProperties[b]!==!0&&(g=!1);if(g){i.overflow!=null&&!f.support.shrinkWrapBlocks&&f.each(["","X","Y"],function(a,b){h.style["overflow"+b]=i.overflow[a]}),i.hide&&f(h).hide();if(i.hide||i.show)for(b in i.animatedProperties)f.style(h,b,i.orig[b]),f.removeData(h,"fxshow"+b,!0),f.removeData(h,"toggle"+b,!0);d=i.complete,d&&(i.complete=!1,d.call(h))}return!1}i.duration==Infinity?this.now=e:(c=e-this.startTime,this.state=c/i.duration,this.pos=f.easing[i.animatedProperties[this.prop]](this.state,c,0,1,i.duration),this.now=this.start+(this.end-this.start)*this.pos),this.update();return!0}},f.extend(f.fx,{tick:function(){var 
a,b=f.timers,c=0;for(;c<b.length;c++)a=b[c],!a()&&b[c]===a&&b.splice(c--,1);b.length||f.fx.stop()},interval:13,stop:function(){clearInterval(cp),cp=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){f.style(a.elem,"opacity",a.now)},_default:function(a){a.elem.style&&a.elem.style[a.prop]!=null?a.elem.style[a.prop]=a.now+a.unit:a.elem[a.prop]=a.now}}}),f.each(["width","height"],function(a,b){f.fx.step[b]=function(a){f.style(a.elem,b,Math.max(0,a.now)+a.unit)}}),f.expr&&f.expr.filters&&(f.expr.filters.animated=function(a){return f.grep(f.timers,function(b){return a===b.elem}).length});var cw=/^t(?:able|d|h)$/i,cx=/^(?:body|html)$/i;"getBoundingClientRect"in c.documentElement?f.fn.offset=function(a){var b=this[0],c;if(a)return this.each(function(b){f.offset.setOffset(this,a,b)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return f.offset.bodyOffset(b);try{c=b.getBoundingClientRect()}catch(d){}var e=b.ownerDocument,g=e.documentElement;if(!c||!f.contains(g,b))return c?{top:c.top,left:c.left}:{top:0,left:0};var h=e.body,i=cy(e),j=g.clientTop||h.clientTop||0,k=g.clientLeft||h.clientLeft||0,l=i.pageYOffset||f.support.boxModel&&g.scrollTop||h.scrollTop,m=i.pageXOffset||f.support.boxModel&&g.scrollLeft||h.scrollLeft,n=c.top+l-j,o=c.left+m-k;return{top:n,left:o}}:f.fn.offset=function(a){var b=this[0];if(a)return this.each(function(b){f.offset.setOffset(this,a,b)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return f.offset.bodyOffset(b);var c,d=b.offsetParent,e=b,g=b.ownerDocument,h=g.documentElement,i=g.body,j=g.defaultView,k=j?j.getComputedStyle(b,null):b.currentStyle,l=b.offsetTop,m=b.offsetLeft;while((b=b.parentNode)&&b!==i&&b!==h){if(f.support.fixedPosition&&k.position==="fixed")break;c=j?j.getComputedStyle(b,null):b.currentStyle,l-=b.scrollTop,m-=b.scrollLeft,b===d&&(l+=b.offsetTop,m+=b.offsetLeft,f.support.doesNotAddBorder&&(!f.support.doesAddBorderForTableAndCells||!cw.test(b.nodeName))&&(l+=parseFloat(c.borderTopWidth)||0,m+=parseFloat(c.borderLeftWidth)||0),e=d,d=b.offsetParent),f.support.subtractsBorderForOverflowNotVisible&&c.overflow!=="visible"&&(l+=parseFloat(c.borderTopWidth)||0,m+=parseFloat(c.borderLeftWidth)||0),k=c}if(k.position==="relative"||k.position==="static")l+=i.offsetTop,m+=i.offsetLeft;f.support.fixedPosition&&k.position==="fixed"&&(l+=Math.max(h.scrollTop,i.scrollTop),m+=Math.max(h.scrollLeft,i.scrollLeft));return{top:l,left:m}},f.offset={bodyOffset:function(a){var b=a.offsetTop,c=a.offsetLeft;f.support.doesNotIncludeMarginInBodyOffset&&(b+=parseFloat(f.css(a,"marginTop"))||0,c+=parseFloat(f.css(a,"marginLeft"))||0);return{top:b,left:c}},setOffset:function(a,b,c){var d=f.css(a,"position");d==="static"&&(a.style.position="relative");var e=f(a),g=e.offset(),h=f.css(a,"top"),i=f.css(a,"left"),j=(d==="absolute"||d==="fixed")&&f.inArray("auto",[h,i])>-1,k={},l={},m,n;j?(l=e.position(),m=l.top,n=l.left):(m=parseFloat(h)||0,n=parseFloat(i)||0),f.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):e.css(k)}},f.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),c=this.offset(),d=cx.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(f.css(a,"marginTop"))||0,c.left-=parseFloat(f.css(a,"marginLeft"))||0,d.top+=parseFloat(f.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(f.css(b[0],"borderLeftWidth"))||0;return{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return 
this.map(function(){var a=this.offsetParent||c.body;while(a&&!cx.test(a.nodeName)&&f.css(a,"position")==="static")a=a.offsetParent;return a})}}),f.each(["Left","Top"],function(a,c){var d="scroll"+c;f.fn[d]=function(c){var e,g;if(c===b){e=this[0];if(!e)return null;g=cy(e);return g?"pageXOffset"in g?g[a?"pageYOffset":"pageXOffset"]:f.support.boxModel&&g.document.documentElement[d]||g.document.body[d]:e[d]}return this.each(function(){g=cy(this),g?g.scrollTo(a?f(g).scrollLeft():c,a?c:f(g).scrollTop()):this[d]=c})}}),f.each(["Height","Width"],function(a,c){var d=c.toLowerCase();f.fn["inner"+c]=function(){var a=this[0];return a?a.style?parseFloat(f.css(a,d,"padding")):this[d]():null},f.fn["outer"+c]=function(a){var b=this[0];return b?b.style?parseFloat(f.css(b,d,a?"margin":"border")):this[d]():null},f.fn[d]=function(a){var e=this[0];if(!e)return a==null?null:this;if(f.isFunction(a))return this.each(function(b){var c=f(this);c[d](a.call(this,b,c[d]()))});if(f.isWindow(e)){var g=e.document.documentElement["client"+c],h=e.document.body;return e.document.compatMode==="CSS1Compat"&&g||h&&h["client"+c]||g}if(e.nodeType===9)return Math.max(e.documentElement["client"+c],e.body["scroll"+c],e.documentElement["scroll"+c],e.body["offset"+c],e.documentElement["offset"+c]);if(a===b){var i=f.css(e,d),j=parseFloat(i);return f.isNumeric(j)?j:i}return this.css(d,typeof a=="string"?a:a+"px")}}),a.jQuery=a.$=f,typeof define=="function"&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return f})})(window); | amandashi/ashoka-networkdirectory | sites/all/modules/jquery_update/replace/jquery/1.7/jquery.min.js | JavaScript | gpl-2.0 | 93,868 |
void foo(void)
{
if ((a != 0)
&& (b == 0)
&& (c < 0)
&& (d > 0))
{
printf("hi");
}
if (flag1
#ifdef FLAG2
|| flag2
#endif
)
{
printf("yar");
}
if (flag1 &&
#ifdef FLAG2
flag2 &&
#endif
flag3)
{
printf("bo");
}
if ((a != 0)
&& (b == 0)
&& (c < 0))
{
printf("hi");
}
if ((a != 0)
&&
(b == 0)
&&
(c < 0))
{
printf("hi");
}
if (!this->writeOwiFile () // comment1
|| broken ()
|| !saveArchiveData () /* comment2 */
|| broken ()
|| !deleteCentralArchive () // comment3
|| broken ()
|| !copyArchivFiles () // comment4
|| broken ()
|| !appendToPlanetDb ()) // comment5
{
;
}
foobar(param1
, param2
, param3
, param4);
foobar2(param1
, param2
, param3
, param4);
}
| uncrustify/uncrustify | tests/expected/c/00511-bool-pos.c | C | gpl-2.0 | 1,053 |
/*
* Copyright 1994, 1995 Massachusetts Institute of Technology
*
* Permission to use, copy, modify, and distribute this software and
* its documentation for any purpose and without fee is hereby
* granted, provided that both the above copyright notice and this
* permission notice appear in all copies, that both the above
* copyright notice and this permission notice appear in all
* supporting documentation, and that the name of M.I.T. not be used
* in advertising or publicity pertaining to distribution of the
* software without specific, written prior permission. M.I.T. makes
* no representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied
* warranty.
*
* THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
* ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
* SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This code does two things necessary for the enhanced TCP metrics to
* function in a useful manner:
* 1) It marks all non-host routes as `cloning', thus ensuring that
* every actual reference to such a route actually gets turned
* into a reference to a host route to the specific destination
* requested.
* 2) When such routes lose all their references, it arranges for them
* to be deleted in some random collection of circumstances, so that
* a large quantity of stale routing data is not kept in kernel memory
* indefinitely. See in_rtqtimo() below for the exact mechanism.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
extern int in_inithead(void **head, int off);
#define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */
/*
* Do what we need to do when inserting a route.
*/
static struct radix_node *
in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
struct radix_node *treenodes)
{
struct rtentry *rt = (struct rtentry *)treenodes;
struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
struct radix_node *ret;
/*
* For IP, all unicast non-host routes are automatically cloning.
*/
if(IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
rt->rt_flags |= RTF_MULTICAST;
if(!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
rt->rt_flags |= RTF_PRCLONING;
}
/*
* A little bit of help for both IP output and input:
* For host routes, we make sure that RTF_BROADCAST
* is set for anything that looks like a broadcast address.
* This way, we can avoid an expensive call to in_broadcast()
* in ip_output() most of the time (because the route passed
* to ip_output() is almost always a host route).
*
* We also do the same for local addresses, with the thought
* that this might one day be used to speed up ip_input().
*
* We also mark routes to multicast addresses as such, because
* it's easy to do and might be useful (but this is much more
* dubious since it's so easy to inspect the address). (This
* is done above.)
*/
if (rt->rt_flags & RTF_HOST) {
if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
rt->rt_flags |= RTF_BROADCAST;
} else {
#define satosin(sa) ((struct sockaddr_in *)sa)
if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr
== sin->sin_addr.s_addr)
rt->rt_flags |= RTF_LOCAL;
#undef satosin
}
}
/*
* We also specify a send and receive pipe size for every
* route added, to help TCP a bit. TCP doesn't actually
* want a true pipe size, which would be prohibitive in memory
* costs and is hard to compute anyway; it simply uses these
* values to size its buffers. So, we fill them in with the
* same values that TCP would have used anyway, and allow the
* installing program or the link layer to override these values
* as it sees fit. This will hopefully allow TCP more
* opportunities to save its ssthresh value.
*/
if (!rt->rt_rmx.rmx_sendpipe && !(rt->rt_rmx.rmx_locks & RTV_SPIPE))
rt->rt_rmx.rmx_sendpipe = tcp_sendspace;
if (!rt->rt_rmx.rmx_recvpipe && !(rt->rt_rmx.rmx_locks & RTV_RPIPE))
rt->rt_rmx.rmx_recvpipe = tcp_recvspace;
if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU)
&& rt->rt_ifp)
rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
ret = rn_addroute(v_arg, n_arg, head, treenodes);
if (ret == NULL && rt->rt_flags & RTF_HOST) {
struct rtentry *rt2;
/*
* We are trying to add a host route, but can't.
* Find out if it is because of an
* ARP entry and delete it if so.
*/
rt2 = rtalloc1((struct sockaddr *)sin, 0,
RTF_CLONING | RTF_PRCLONING);
if (rt2) {
if (rt2->rt_flags & RTF_LLINFO &&
rt2->rt_flags & RTF_HOST &&
rt2->rt_gateway &&
rt2->rt_gateway->sa_family == AF_LINK) {
rtrequest(RTM_DELETE,
(struct sockaddr *)rt_key(rt2),
rt2->rt_gateway,
rt_mask(rt2), rt2->rt_flags, 0);
ret = rn_addroute(v_arg, n_arg, head,
treenodes);
}
RTFREE(rt2);
}
}
return ret;
}
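/*
 * The branch above seeds rmx_sendpipe, rmx_recvpipe and rmx_mtu only when
 * the installer has neither supplied a value nor locked the metric through
 * the RTV_* lock bits.  A minimal stand-alone sketch of that "default
 * unless locked or already set" rule follows, kept under #if 0; the struct,
 * flag values and 16384-byte defaults are illustrative stand-ins rather
 * than the kernel definitions used in this file.
 */
#if 0
#include <stdio.h>

#define EX_RTV_SPIPE 0x1        /* sendpipe value locked by the installer */
#define EX_RTV_RPIPE 0x2        /* recvpipe value locked by the installer */

struct ex_metrics {
	unsigned locks;         /* EX_RTV_* bits that must not be touched */
	unsigned long sendpipe; /* 0 means "not set yet" */
	unsigned long recvpipe;
};

static void
ex_seed_defaults(struct ex_metrics *m, unsigned long def_send,
    unsigned long def_recv)
{
	if (!m->sendpipe && !(m->locks & EX_RTV_SPIPE))
		m->sendpipe = def_send;
	if (!m->recvpipe && !(m->locks & EX_RTV_RPIPE))
		m->recvpipe = def_recv;
}

int
main(void)
{
	struct ex_metrics m = { EX_RTV_RPIPE, 0, 0 };   /* recvpipe locked */

	ex_seed_defaults(&m, 16384, 16384);
	printf("sendpipe=%lu recvpipe=%lu\n", m.sendpipe, m.recvpipe);
	return 0;
}
#endif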
/*
* This code is the inverse of in_clsroute: on first reference, if we
* were managing the route, stop doing so and set the expiration timer
* back off again.
*/
static struct radix_node *
in_matroute(void *v_arg, struct radix_node_head *head)
{
struct radix_node *rn = rn_match(v_arg, head);
struct rtentry *rt = (struct rtentry *)rn;
if(rt && rt->rt_refcnt == 0) { /* this is first reference */
if(rt->rt_flags & RTPRF_OURS) {
rt->rt_flags &= ~RTPRF_OURS;
rt->rt_rmx.rmx_expire = 0;
}
}
return rn;
}
static int rtq_reallyold = 60*60;
/* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire,
CTLFLAG_RW, &rtq_reallyold , 0, "");
static int rtq_minreallyold = 10;
/* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire,
CTLFLAG_RW, &rtq_minreallyold , 0, "");
static int rtq_toomany = 128;
/* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache,
CTLFLAG_RW, &rtq_toomany , 0, "");
/*
* On last reference drop, mark the route as belong to us so that it can be
* timed out.
*/
static void
in_clsroute(struct radix_node *rn, struct radix_node_head *head)
{
struct rtentry *rt = (struct rtentry *)rn;
if(!(rt->rt_flags & RTF_UP))
return; /* prophylactic measures */
if((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
return;
if((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS))
!= RTF_WASCLONED)
return;
/*
* As requested by David Greenman:
* If rtq_reallyold is 0, just delete the route without
* waiting for a timeout cycle to kill it.
*/
if(rtq_reallyold != 0) {
rt->rt_flags |= RTPRF_OURS;
rt->rt_rmx.rmx_expire = rtems_bsdnet_seconds_since_boot() + rtq_reallyold;
} else {
rtrequest(RTM_DELETE,
(struct sockaddr *)rt_key(rt),
rt->rt_gateway, rt_mask(rt),
rt->rt_flags, 0);
}
}
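/*
 * in_clsroute() above and in_matroute() earlier in this file form a small
 * handshake: the last reference drop stamps RTPRF_OURS plus an expiry time,
 * and a later lookup cancels both so an in-use route is never reaped.  The
 * stand-alone sketch below (under #if 0) shows only that bookkeeping; the
 * RTF_UP / RTF_HOST / RTF_WASCLONED filtering done above is omitted and the
 * types are illustrative, not the kernel ones.
 */
#if 0
#include <stdio.h>
#include <time.h>

#define EX_PRF_OURS 0x1

struct ex_entry {
	int refcnt;
	unsigned flags;
	time_t expire;          /* 0 means "no expiry scheduled" */
};

static void
ex_release(struct ex_entry *e, time_t now, time_t reallyold)
{
	if (--e->refcnt == 0) {         /* last reference dropped */
		e->flags |= EX_PRF_OURS;
		e->expire = now + reallyold;
	}
}

static void
ex_acquire(struct ex_entry *e)
{
	if (e->refcnt++ == 0 && (e->flags & EX_PRF_OURS)) {
		e->flags &= ~EX_PRF_OURS;   /* back in use: cancel expiry */
		e->expire = 0;
	}
}

int
main(void)
{
	struct ex_entry e = { 1, 0, 0 };

	ex_release(&e, time(NULL), 3600);
	printf("released: flags=%#x expire=%ld\n", e.flags, (long)e.expire);
	ex_acquire(&e);
	printf("reused:   flags=%#x expire=%ld\n", e.flags, (long)e.expire);
	return 0;
}
#endif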
struct rtqk_arg {
struct radix_node_head *rnh;
int draining;
int killed;
int found;
int updating;
time_t nextstop;
};
/*
* Get rid of old routes. When draining, this deletes everything, even when
* the timeout is not expired yet. When updating, this makes sure that
* nothing has a timeout longer than the current value of rtq_reallyold.
*/
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
struct rtqk_arg *ap = rock;
struct rtentry *rt = (struct rtentry *)rn;
int err;
if(rt->rt_flags & RTPRF_OURS) {
ap->found++;
if(ap->draining || rt->rt_rmx.rmx_expire <= rtems_bsdnet_seconds_since_boot()) {
if(rt->rt_refcnt > 0)
panic("rtqkill route really not free");
err = rtrequest(RTM_DELETE,
(struct sockaddr *)rt_key(rt),
rt->rt_gateway, rt_mask(rt),
rt->rt_flags, 0);
if(err) {
log(LOG_WARNING, "in_rtqkill: error %d\n", err);
} else {
ap->killed++;
}
} else {
if(ap->updating
&& (rt->rt_rmx.rmx_expire - rtems_bsdnet_seconds_since_boot()
> rtq_reallyold)) {
rt->rt_rmx.rmx_expire = rtems_bsdnet_seconds_since_boot()
+ rtq_reallyold;
}
ap->nextstop = lmin(ap->nextstop,
rt->rt_rmx.rmx_expire);
}
}
return 0;
}
#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;
static void
in_rtqtimo(void *rock)
{
struct radix_node_head *rnh = rock;
struct rtqk_arg arg;
struct timeval atv;
static time_t last_adjusted_timeout = 0;
int s;
arg.found = arg.killed = 0;
arg.rnh = rnh;
arg.nextstop = rtems_bsdnet_seconds_since_boot() + rtq_timeout;
arg.draining = arg.updating = 0;
s = splnet();
rnh->rnh_walktree(rnh, in_rtqkill, &arg);
splx(s);
/*
* Attempt to be somewhat dynamic about this:
* If there are ``too many'' routes sitting around taking up space,
* then crank down the timeout, and see if we can't make some more
* go away. However, we make sure that we will never adjust more
* than once in rtq_timeout seconds, to keep from cranking down too
* hard.
*/
if((arg.found - arg.killed > rtq_toomany)
&& (rtems_bsdnet_seconds_since_boot() - last_adjusted_timeout >= rtq_timeout)
&& rtq_reallyold > rtq_minreallyold) {
rtq_reallyold = 2*rtq_reallyold / 3;
if(rtq_reallyold < rtq_minreallyold) {
rtq_reallyold = rtq_minreallyold;
}
last_adjusted_timeout = rtems_bsdnet_seconds_since_boot();
#ifdef DIAGNOSTIC
log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
rtq_reallyold);
#endif
arg.found = arg.killed = 0;
arg.updating = 1;
s = splnet();
rnh->rnh_walktree(rnh, in_rtqkill, &arg);
splx(s);
}
atv.tv_usec = 0;
atv.tv_sec = arg.nextstop;
timeout(in_rtqtimo, rock, hzto(&atv));
}
void
in_rtqdrain(void)
{
struct radix_node_head *rnh = rt_tables[AF_INET];
struct rtqk_arg arg;
int s;
arg.found = arg.killed = 0;
arg.rnh = rnh;
arg.nextstop = 0;
arg.draining = 1;
arg.updating = 0;
s = splnet();
rnh->rnh_walktree(rnh, in_rtqkill, &arg);
splx(s);
}
/*
* Initialize our routing tree.
*/
int
in_inithead(void **head, int off)
{
struct radix_node_head *rnh;
if(!rn_inithead(head, off))
return 0;
if(head != (void **)&rt_tables[AF_INET]) /* BOGUS! */
return 1; /* only do this for the real routing table */
rnh = *head;
rnh->rnh_addaddr = in_addroute;
rnh->rnh_matchaddr = in_matroute;
rnh->rnh_close = in_clsroute;
in_rtqtimo(rnh); /* kick off timeout first time */
return 1;
}
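/*
 * A minimal sketch (not part of the original file) of how the hooks wired up
 * above cooperate, assuming the standard 4.4BSD routing call paths that live
 * outside this file:
 *
 *   rtrequest(RTM_ADD, ...)          -> rnh_addaddr   -> in_addroute()
 *   rtalloc1() / route lookup        -> rnh_matchaddr -> in_matroute()
 *   rtfree() dropping the last ref   -> rnh_close     -> in_clsroute()
 *
 * in_clsroute() marks cloned host routes with RTPRF_OURS and an expiry time;
 * in_rtqtimo() then periodically walks the tree and in_rtqkill() removes the
 * entries whose expiry has passed.
 */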
| heshamelmatary/rtems-rumpkernel | cpukit/libnetworking/netinet/in_rmx.c | C | gpl-2.0 | 11,385 |
/* packet-coseventcomm.c
*
* Routines for IDL dissection
*
* Autogenerated from idl2wrs
* Copyright 2001 Frank Singleton <frank.singleton@ericsson.com>
*/
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs
* Copyright 1999 - 2012 Gerald Combs
*/
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <gmodule.h>
#include <string.h>
#include <glib.h>
#include <epan/packet.h>
#include <epan/proto.h>
#include <epan/dissectors/packet-giop.h>
#include <epan/expert.h>
#ifdef _MSC_VER
/* disable warning: "unreference local variable" */
#pragma warning(disable:4101)
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
void proto_register_giop_coseventcomm(void);
void proto_reg_handoff_giop_coseventcomm(void);
/* Initialise the protocol and subtree pointers */
static int proto_coseventcomm = -1;
static gint ett_coseventcomm = -1;
/* Initialise the initial Alignment */
static guint32 boundary = GIOP_HEADER_SIZE; /* initial value */
static int hf_operationrequest = -1;/* Request_Operation field */
/* Operation filters */
static int hf_CosEventComm_PullSupplier_try_pull_has_event = -1;
/* User exception filters */
/* Expert info filters */
static expert_field ei_coseventcomm_unknown_giop_msg = EI_INIT;
static expert_field ei_coseventcomm_unknown_exception = EI_INIT;
static expert_field ei_coseventcomm_unknown_reply_status = EI_INIT;
static proto_tree *start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset);
/* Begin Exception Helper Functions */
/* End Exception Helper Functions */
/*
* Main delegator for exception handling
*
*/
static gboolean
decode_user_exception(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *ptree _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_tree *tree _U_;
if (!header->exception_id)
return FALSE;
return FALSE; /* user exception not found */
}
/*
* IDL:omg.org/CosEventComm/PushConsumer/push:1.0
*/
static void
decode_CosEventComm_PushConsumer_push(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
switch(header->message_type) {
case Request:
get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
break;
case Reply:
switch(header->rep_status) {
case NO_EXCEPTION:
/* Function returns void */
break;
case USER_EXCEPTION:
break;
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_exception, "Unknown exception %d", header->rep_status);
break;
} /* switch(header->rep_status) */
break;
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);
break;
} /* switch(header->message_type) */
}
/*
* IDL:omg.org/CosEventComm/PushConsumer/disconnect_push_consumer:1.0
*/
static void
decode_CosEventComm_PushConsumer_disconnect_push_consumer(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
switch(header->message_type) {
case Request:
break;
case Reply:
switch(header->rep_status) {
case NO_EXCEPTION:
/* Function returns void */
break;
case USER_EXCEPTION:
break;
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_exception, "Unknown exception %d", header->rep_status);
break;
} /* switch(header->rep_status) */
break;
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);
break;
} /* switch(header->message_type) */
}
/*
* IDL:omg.org/CosEventComm/PushSupplier/disconnect_push_supplier:1.0
*/
static void
decode_CosEventComm_PushSupplier_disconnect_push_supplier(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
switch(header->message_type) {
case Request:
break;
case Reply:
switch(header->rep_status) {
case NO_EXCEPTION:
/* Function returns void */
break;
case USER_EXCEPTION:
break;
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_exception, "Unknown exception %d", header->rep_status);
break;
} /* switch(header->rep_status) */
break;
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);
break;
} /* switch(header->message_type) */
}
/*
* IDL:omg.org/CosEventComm/PullSupplier/pull:1.0
*/
static void
decode_CosEventComm_PullSupplier_pull(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
switch(header->message_type) {
case Request:
break;
case Reply:
switch(header->rep_status) {
case NO_EXCEPTION:
get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
break;
case USER_EXCEPTION:
break;
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_exception, "Unknown exception %d", header->rep_status);
break;
} /* switch(header->rep_status) */
break;
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);
break;
} /* switch(header->message_type) */
}
/*
* IDL:omg.org/CosEventComm/PullSupplier/try_pull:1.0
*/
static void
decode_CosEventComm_PullSupplier_try_pull(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
switch(header->message_type) {
case Request:
break;
case Reply:
switch(header->rep_status) {
case NO_EXCEPTION:
get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
proto_tree_add_boolean(tree, hf_CosEventComm_PullSupplier_try_pull_has_event, tvb, *offset-1, 1, get_CDR_boolean(tvb,offset));
break;
case USER_EXCEPTION:
break;
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_exception, "Unknown exception %d", header->rep_status);
break;
} /* switch(header->rep_status) */
break;
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);
break;
} /* switch(header->message_type) */
}
/*
* IDL:omg.org/CosEventComm/PullSupplier/disconnect_pull_supplier:1.0
*/
static void
decode_CosEventComm_PullSupplier_disconnect_pull_supplier(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
switch(header->message_type) {
case Request:
break;
case Reply:
switch(header->rep_status) {
case NO_EXCEPTION:
/* Function returns void */
break;
case USER_EXCEPTION:
break;
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_exception, "Unknown exception %d", header->rep_status);
break;
} /* switch(header->rep_status) */
break;
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);
break;
} /* switch(header->message_type) */
}
/*
* IDL:omg.org/CosEventComm/PullConsumer/disconnect_pull_consumer:1.0
*/
static void
decode_CosEventComm_PullConsumer_disconnect_pull_consumer(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
switch(header->message_type) {
case Request:
break;
case Reply:
switch(header->rep_status) {
case NO_EXCEPTION:
/* Function returns void */
break;
case USER_EXCEPTION:
break;
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_exception, "Unknown exception %d", header->rep_status);
break;
} /* switch(header->rep_status) */
break;
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_coseventcomm_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);
break;
} /* switch(header->message_type) */
}
/*
* Called once we accept the packet as being for us; it sets the
* Protocol and Info columns and creates the top-level protocol
* tree item.
*/
static proto_tree *
start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset)
{
proto_item *ti = NULL;
proto_tree *tree = NULL; /* init later, inside if(tree) */
col_set_str(pinfo->cinfo, COL_PROTOCOL, "COSEVENTCOMM");
/*
* Do not clear COL_INFO, as nothing is being written there by
* this dissector yet. So leave it as is from the GIOP dissector.
* TODO: add something useful to COL_INFO
* col_clear(pinfo->cinfo, COL_INFO);
*/
if (ptree) {
ti = proto_tree_add_item(ptree, proto_coseventcomm, tvb, *offset, tvb_reported_length_remaining(tvb, *offset), ENC_NA);
tree = proto_item_add_subtree(ti, ett_coseventcomm);
}
return tree;
}
static proto_item*
process_RequestOperation(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, MessageHeader *header, const gchar *operation)
{
proto_item *pi;
if(header->message_type == Reply) {
/* fill-up info column */
col_append_fstr(pinfo->cinfo, COL_INFO, " op = %s",operation);
}
/* fill-up the field */
pi=proto_tree_add_string(ptree, hf_operationrequest, tvb, 0, 0, operation);
PROTO_ITEM_SET_GENERATED(pi);
return pi;
}
static gboolean
dissect_coseventcomm(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset, MessageHeader *header, const gchar *operation, gchar *idlname)
{
proto_item *item _U_;
proto_tree *tree _U_;
gboolean stream_is_big_endian = is_big_endian(header); /* get endianess */
/* If we have a USER Exception, then decode it and return */
if ((header->message_type == Reply) && (header->rep_status == USER_EXCEPTION)) {
return decode_user_exception(tvb, pinfo, ptree, offset, header, operation, stream_is_big_endian);
}
switch(header->message_type) {
case Request:
case Reply:
if (strcmp(operation, "push") == 0
&& (!idlname || strcmp(idlname, "CosEventComm/PushConsumer") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_CosEventComm_PushConsumer_push(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
if (strcmp(operation, "disconnect_push_consumer") == 0
&& (!idlname || strcmp(idlname, "CosEventComm/PushConsumer") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_CosEventComm_PushConsumer_disconnect_push_consumer(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
if (strcmp(operation, "disconnect_push_supplier") == 0
&& (!idlname || strcmp(idlname, "CosEventComm/PushSupplier") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_CosEventComm_PushSupplier_disconnect_push_supplier(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
if (strcmp(operation, "pull") == 0
&& (!idlname || strcmp(idlname, "CosEventComm/PullSupplier") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_CosEventComm_PullSupplier_pull(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
if (strcmp(operation, "try_pull") == 0
&& (!idlname || strcmp(idlname, "CosEventComm/PullSupplier") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_CosEventComm_PullSupplier_try_pull(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
if (strcmp(operation, "disconnect_pull_supplier") == 0
&& (!idlname || strcmp(idlname, "CosEventComm/PullSupplier") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_CosEventComm_PullSupplier_disconnect_pull_supplier(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
if (strcmp(operation, "disconnect_pull_consumer") == 0
&& (!idlname || strcmp(idlname, "CosEventComm/PullConsumer") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_CosEventComm_PullConsumer_disconnect_pull_consumer(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
break;
case CancelRequest:
case LocateRequest:
case LocateReply:
case CloseConnection:
case MessageError:
case Fragment:
return FALSE; /* not handled yet */
default:
return FALSE; /* not handled yet */
} /* switch */
return FALSE;
} /* End of main dissector */
/* Register the protocol with Wireshark */
void proto_register_giop_coseventcomm(void)
{
/* setup list of header fields */
static hf_register_info hf[] = {
/* field that indicates the currently ongoing request/reply exchange */
{&hf_operationrequest, {"Request_Operation","giop-coseventcomm.Request_Operation",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},
/* Operation filters */
{&hf_CosEventComm_PullSupplier_try_pull_has_event, {"has_event","giop-coseventcomm.PullSupplier.try_pull.has_event",FT_BOOLEAN,8,NULL,0x01,NULL,HFILL}},
/* Attribute filters */
/* Struct filters */
/* User exception filters */
/* Union filters */
};
static ei_register_info ei[] = {
{ &ei_coseventcomm_unknown_giop_msg, { "giop-coseventcomm.unknown_giop_msg", PI_PROTOCOL, PI_WARN, "Unknown GIOP message", EXPFILL }},
{ &ei_coseventcomm_unknown_exception, { "giop-coseventcomm.unknown_exception", PI_PROTOCOL, PI_WARN, "Unknown exception", EXPFILL }},
{ &ei_coseventcomm_unknown_reply_status, { "giop-coseventcomm.unknown_reply_status", PI_PROTOCOL, PI_WARN, "Unknown reply status", EXPFILL }},
};
/* setup protocol subtree array */
static gint *ett[] = {
&ett_coseventcomm,
};
expert_module_t* expert_coseventcomm;
/* Register the protocol name and description */
proto_coseventcomm = proto_register_protocol("Coseventcomm Dissector Using GIOP API" , "COSEVENTCOMM", "giop-coseventcomm" );
proto_register_field_array(proto_coseventcomm, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
expert_coseventcomm = expert_register_protocol(proto_coseventcomm);
expert_register_field_array(expert_coseventcomm, ei, array_length(ei));
}
/* register me as handler for these interfaces */
void proto_reg_handoff_giop_coseventcomm(void)
{
/* Register for Explicit Dissection */
register_giop_user_module(dissect_coseventcomm, "COSEVENTCOMM", "CosEventComm/PullConsumer", proto_coseventcomm ); /* explicit dissector */
/* Register for Explicit Dissection */
register_giop_user_module(dissect_coseventcomm, "COSEVENTCOMM", "CosEventComm/PullSupplier", proto_coseventcomm ); /* explicit dissector */
/* Register for Explicit Dissection */
register_giop_user_module(dissect_coseventcomm, "COSEVENTCOMM", "CosEventComm/PushConsumer", proto_coseventcomm ); /* explicit dissector */
/* Register for Explicit Dissection */
register_giop_user_module(dissect_coseventcomm, "COSEVENTCOMM", "CosEventComm/PushSupplier", proto_coseventcomm ); /* explicit dissector */
/* Register for Heuristic Dissection */
register_giop_user(dissect_coseventcomm, "COSEVENTCOMM" ,proto_coseventcomm); /* heuristic dissector */
}
/*
* Editor modelines
*
* Local Variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* ex: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/
| koppa/wireshark | epan/dissectors/packet-coseventcomm.c | C | gpl-2.0 | 19,613 |
/* Copyright (C) 1998-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <endian.h>
#include "localeinfo.h"
/* This table's entries are taken from ISO 14652, the table in section
4.11 "LC_TELEPHONE". */
const struct __locale_data _nl_C_LC_TELEPHONE attribute_hidden =
{
_nl_C_name,
NULL, 0, 0, /* no file mapped */
{ NULL, }, /* no cached data */
UNDELETABLE,
0,
5,
{
{ .string = "+%c %a %l" },
{ .string = "" },
{ .string = "" },
{ .string = "" },
{ .string = _nl_C_codeset }
}
};
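/* Note (not part of the original file): the five values above correspond, in
   the order used by glibc's locale categories, to tel_int_fmt, tel_dom_fmt,
   int_select, int_prefix and the codeset name.  In "+%c %a %l" the %c, %a and
   %l escapes stand for the country code, the area code and the local number,
   per ISO 14652.  */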
| xb446909/personalprojects | ARMToolChain/source/glibc-2.21/locale/C-telephone.c | C | gpl-2.0 | 1,315 |
/*****************************************************************************
* image.c: Image demuxer
*****************************************************************************
* Copyright (C) 2010 Laurent Aimar
* $Id$
*
* Authors: Laurent Aimar <fenrir _AT_ videolan _DOT_ org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/*****************************************************************************
* Preamble
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_demux.h>
#include <vlc_image.h>
#include "mxpeg_helper.h"
/*****************************************************************************
* Module descriptor
*****************************************************************************/
static int Open (vlc_object_t *);
static void Close(vlc_object_t *);
#define ID_TEXT N_("ES ID")
#define ID_LONGTEXT N_( \
"Set the ID of the elementary stream")
#define GROUP_TEXT N_("Group")
#define GROUP_LONGTEXT N_(\
"Set the group of the elementary stream")
#define DECODE_TEXT N_("Decode")
#define DECODE_LONGTEXT N_( \
"Decode at the demuxer stage")
#define CHROMA_TEXT N_("Forced chroma")
#define CHROMA_LONGTEXT N_( \
"If non empty and image-decode is true, the image will be " \
"converted to the specified chroma.")
#define DURATION_TEXT N_("Duration in seconds")
#define DURATION_LONGTEXT N_( \
"Duration in seconds before simulating an end of file. " \
"A negative value means an unlimited play time.")
#define FPS_TEXT N_("Frame rate")
#define FPS_LONGTEXT N_( \
"Frame rate of the elementary stream produced.")
#define RT_TEXT N_("Real-time")
#define RT_LONGTEXT N_( \
"Use real-time mode suitable for being used as a master input and " \
"real-time input slaves.")
vlc_module_begin()
set_description(N_("Image demuxer"))
set_shortname(N_("Image"))
set_category(CAT_INPUT)
set_subcategory(SUBCAT_INPUT_DEMUX)
add_integer("image-id", -1, ID_TEXT, ID_LONGTEXT, true)
change_safe()
add_integer("image-group", 0, GROUP_TEXT, GROUP_LONGTEXT, true)
change_safe()
add_bool("image-decode", true, DECODE_TEXT, DECODE_LONGTEXT, true)
change_safe()
add_string("image-chroma", "", CHROMA_TEXT, CHROMA_LONGTEXT, true)
change_safe()
add_float("image-duration", 10, DURATION_TEXT, DURATION_LONGTEXT, false)
change_safe()
add_string("image-fps", "10/1", FPS_TEXT, FPS_LONGTEXT, true)
change_safe()
add_bool("image-realtime", false, RT_TEXT, RT_LONGTEXT, true)
change_safe()
set_capability("demux", 10)
set_callbacks(Open, Close)
vlc_module_end()
/*****************************************************************************
* Local prototypes
*****************************************************************************/
struct demux_sys_t
{
block_t *data;
es_out_id_t *es;
mtime_t duration;
bool is_realtime;
mtime_t pts_origin;
mtime_t pts_next;
date_t pts;
};
static block_t *Load(demux_t *demux)
{
const int max_size = 4096 * 4096 * 8;
const int64_t size = stream_Size(demux->s);
if (size < 0 || size > max_size) {
msg_Err(demux, "Rejecting image based on its size (%"PRId64" > %d)", size, max_size);
return NULL;
}
if (size > 0)
return stream_Block(demux->s, size);
/* TODO */
return NULL;
}
static block_t *Decode(demux_t *demux,
video_format_t *fmt, vlc_fourcc_t chroma, block_t *data)
{
image_handler_t *handler = image_HandlerCreate(demux);
if (!handler) {
block_Release(data);
return NULL;
}
video_format_t decoded;
video_format_Init(&decoded, chroma);
picture_t *image = image_Read(handler, data, fmt, &decoded);
image_HandlerDelete(handler);
if (!image)
return NULL;
video_format_Clean(fmt);
*fmt = decoded;
size_t size = 0;
for (int i = 0; i < image->i_planes; i++)
size += image->p[i].i_pitch * image->p[i].i_lines;
data = block_Alloc(size);
if (!data) {
picture_Release(image);
return NULL;
}
size_t offset = 0;
for (int i = 0; i < image->i_planes; i++) {
const plane_t *src = &image->p[i];
for (int y = 0; y < src->i_visible_lines; y++) {
memcpy(&data->p_buffer[offset],
&src->p_pixels[y * src->i_pitch],
src->i_visible_pitch);
offset += src->i_visible_pitch;
}
}
picture_Release(image);
return data;
}
static int Demux(demux_t *demux)
{
demux_sys_t *sys = demux->p_sys;
if (!sys->data)
return 0;
mtime_t deadline;
const mtime_t pts_first = sys->pts_origin + date_Get(&sys->pts);
if (sys->pts_next > VLC_TS_INVALID) {
deadline = sys->pts_next;
} else if (sys->is_realtime) {
deadline = mdate();
const mtime_t max_wait = CLOCK_FREQ / 50;
if (deadline + max_wait < pts_first) {
es_out_Control(demux->out, ES_OUT_SET_PCR, deadline);
/* That's ugly, but not yet easily fixable */
mwait(deadline + max_wait);
return 1;
}
} else {
deadline = 1 + pts_first;
}
for (;;) {
const mtime_t pts = sys->pts_origin + date_Get(&sys->pts);
if (sys->duration >= 0 && pts >= sys->pts_origin + sys->duration)
return 0;
if (pts >= deadline)
return 1;
block_t *data = block_Duplicate(sys->data);
if (!data)
return -1;
data->i_dts =
data->i_pts = VLC_TS_0 + pts;
es_out_Control(demux->out, ES_OUT_SET_PCR, data->i_pts);
es_out_Send(demux->out, sys->es, data);
date_Increment(&sys->pts, 1);
}
}
static int Control(demux_t *demux, int query, va_list args)
{
demux_sys_t *sys = demux->p_sys;
switch (query) {
case DEMUX_GET_POSITION: {
double *position = va_arg(args, double *);
if (sys->duration > 0)
*position = date_Get(&sys->pts) / (double)sys->duration;
else
*position = 0;
return VLC_SUCCESS;
}
case DEMUX_SET_POSITION: {
if (sys->duration < 0 || sys->is_realtime)
return VLC_EGENERIC;
double position = va_arg(args, double);
date_Set(&sys->pts, position * sys->duration);
return VLC_SUCCESS;
}
case DEMUX_GET_TIME: {
int64_t *time = va_arg(args, int64_t *);
*time = sys->pts_origin + date_Get(&sys->pts);
return VLC_SUCCESS;
}
case DEMUX_SET_TIME: {
if (sys->duration < 0 || sys->is_realtime)
return VLC_EGENERIC;
int64_t time = va_arg(args, int64_t);
date_Set(&sys->pts, VLC_CLIP(time - sys->pts_origin, 0, sys->duration));
return VLC_SUCCESS;
}
case DEMUX_SET_NEXT_DEMUX_TIME: {
int64_t pts_next = VLC_TS_0 + va_arg(args, int64_t);
if (sys->pts_next <= VLC_TS_INVALID)
sys->pts_origin = pts_next;
sys->pts_next = pts_next;
return VLC_SUCCESS;
}
case DEMUX_GET_LENGTH: {
int64_t *length = va_arg(args, int64_t *);
*length = __MAX(sys->duration, 0);
return VLC_SUCCESS;
}
case DEMUX_GET_FPS: {
double *fps = va_arg(args, double *);
*fps = (double)sys->pts.i_divider_num / sys->pts.i_divider_den;
return VLC_SUCCESS;
}
case DEMUX_GET_META:
case DEMUX_HAS_UNSUPPORTED_META:
case DEMUX_GET_ATTACHMENTS:
default:
return VLC_EGENERIC;
}
}
static bool IsBmp(stream_t *s)
{
const uint8_t *header;
if (stream_Peek(s, &header, 18) < 18)
return false;
if (memcmp(header, "BM", 2) &&
memcmp(header, "BA", 2) &&
memcmp(header, "CI", 2) &&
memcmp(header, "CP", 2) &&
memcmp(header, "IC", 2) &&
memcmp(header, "PT", 2))
return false;
uint32_t file_size = GetDWLE(&header[2]);
uint32_t data_offset = GetDWLE(&header[10]);
uint32_t header_size = GetDWLE(&header[14]);
if (file_size != 14 && file_size != 14 + header_size &&
file_size <= data_offset)
return false;
if (data_offset < header_size + 14)
return false;
if (header_size != 12 && header_size < 40)
return false;
return true;
}
static bool IsPcx(stream_t *s)
{
const uint8_t *header;
if (stream_Peek(s, &header, 66) < 66)
return false;
if (header[0] != 0x0A || /* marker */
(header[1] != 0x00 && header[1] != 0x02 &&
header[1] != 0x03 && header[1] != 0x05) || /* version */
(header[2] != 0 && header[2] != 1) || /* encoding */
(header[3] != 1 && header[3] != 2 &&
header[3] != 4 && header[3] != 8) || /* bits per pixel per plane */
header[64] != 0 || /* reserved */
header[65] == 0 || header[65] > 4) /* plane count */
return false;
if (GetWLE(&header[4]) > GetWLE(&header[8]) || /* xmin vs xmax */
GetWLE(&header[6]) > GetWLE(&header[10])) /* ymin vs ymax */
return false;
return true;
}
static bool IsLbm(stream_t *s)
{
const uint8_t *header;
if (stream_Peek(s, &header, 12) < 12)
return false;
if (memcmp(&header[0], "FORM", 4) ||
GetDWBE(&header[4]) <= 4 ||
(memcmp(&header[8], "ILBM", 4) && memcmp(&header[8], "PBM ", 4)))
return false;
return true;
}
static bool IsPnmBlank(uint8_t v)
{
return v == ' ' || v == '\t' || v == '\r' || v == '\n';
}
static bool IsPnm(stream_t *s)
{
const uint8_t *header;
int size = stream_Peek(s, &header, 256);
if (size < 3)
return false;
if (header[0] != 'P' ||
header[1] < '1' || header[1] > '6' ||
!IsPnmBlank(header[2]))
return false;
int number_count = 0;
for (int i = 3, parsing_number = 0; i < size && number_count < 2; i++) {
if (IsPnmBlank(header[i])) {
if (parsing_number) {
parsing_number = 0;
number_count++;
}
} else {
if (header[i] < '0' || header[i] > '9')
break;
parsing_number = 1;
}
}
if (number_count < 2)
return false;
return true;
}
static uint8_t FindJpegMarker(int *position, const uint8_t *data, int size)
{
for (int i = *position; i + 1 < size; i++) {
if (data[i + 0] != 0xff || data[i + 1] == 0x00)
return 0xff;
if (data[i + 1] != 0xff) {
*position = i + 2;
return data[i + 1];
}
}
return 0xff;
}
static bool IsJfif(stream_t *s)
{
const uint8_t *header;
int size = stream_Peek(s, &header, 256);
int position = 0;
if (FindJpegMarker(&position, header, size) != 0xd8)
return false;
if (FindJpegMarker(&position, header, size) != 0xe0)
return false;
position += 2; /* Skip size */
if (position + 5 > size)
return false;
if (memcmp(&header[position], "JFIF\0", 5))
return false;
return true;
}
static bool IsSpiff(stream_t *s)
{
const uint8_t *header;
if (stream_Peek(s, &header, 36) < 36) /* SPIFF header size */
return false;
if (header[0] != 0xff || header[1] != 0xd8 ||
header[2] != 0xff || header[3] != 0xe8)
return false;
if (memcmp(&header[6], "SPIFF\0", 6))
return false;
return true;
}
static bool IsExif(stream_t *s)
{
const uint8_t *header;
int size = stream_Peek(s, &header, 256);
int position = 0;
if (FindJpegMarker(&position, header, size) != 0xd8)
return false;
if (FindJpegMarker(&position, header, size) != 0xe1)
return false;
position += 2; /* Skip size */
if (position + 5 > size)
return false;
if (memcmp(&header[position], "Exif\0", 5))
return false;
return true;
}
static bool FindSVGmarker(int *position, const uint8_t *data, const int size, const char *marker)
{
for( int i = *position; i < size; i++)
{
if (memcmp(&data[i], marker, strlen(marker)) == 0)
{
*position = i;
return true;
}
}
return false;
}
static bool IsSVG(stream_t *s)
{
if (s->psz_url == NULL)
return false;
char *ext = strstr(s->psz_url, ".svg");
if (!ext) return false;
const uint8_t *header;
int size = stream_Peek(s, &header, 4096);
int position = 0;
const char xml[] = "<?xml version=\"";
if (!FindSVGmarker(&position, header, size, xml))
return false;
if (position != 0)
return false;
const char endxml[] = ">\0";
if (!FindSVGmarker(&position, header, size, endxml))
return false;
if (position <= 15)
return false;
const char svg[] = "<svg";
if (!FindSVGmarker(&position, header, size, svg))
return false;
if (position < 19)
return false;
/* SVG Scalable Vector Graphics image */
/* NOTE: some SVG images have the mimetype set in a meta data section
* and some do not */
return true;
}
static bool IsTarga(stream_t *s)
{
/* The header is not enough to ensure proper detection; we need
* to have a look at the footer. But doing so can be slow. So
* try to avoid it when possible */
const uint8_t *header;
if (stream_Peek(s, &header, 18) < 18) /* Targa fixed header */
return false;
if (header[1] > 1) /* Color Map Type */
return false;
if ((header[1] != 0 || header[3 + 4] != 0) &&
header[3 + 4] != 8 &&
header[3 + 4] != 15 && header[3 + 4] != 16 &&
header[3 + 4] != 24 && header[3 + 4] != 32)
return false;
if ((header[2] > 3 && header[2] < 9) || header[2] > 11) /* Image Type */
return false;
if (GetWLE(&header[8 + 4]) <= 0 || /* Width */
GetWLE(&header[8 + 6]) <= 0) /* Height */
return false;
if (header[8 + 8] != 8 &&
header[8 + 8] != 15 && header[8 + 8] != 16 &&
header[8 + 8] != 24 && header[8 + 8] != 32)
return false;
if (header[8 + 9] & 0xc0) /* Reserved bits */
return false;
const int64_t size = stream_Size(s);
if (size <= 18 + 26)
return false;
bool can_seek;
if (stream_Control(s, STREAM_CAN_SEEK, &can_seek) || !can_seek)
return false;
const int64_t position = stream_Tell(s);
if (stream_Seek(s, size - 26))
return false;
const uint8_t *footer;
bool is_targa = stream_Peek(s, &footer, 26) >= 26 &&
!memcmp(&footer[8], "TRUEVISION-XFILE.\x00", 18);
stream_Seek(s, position);
return is_targa;
}
typedef struct {
vlc_fourcc_t codec;
int marker_size;
const uint8_t marker[14];
bool (*detect)(stream_t *s);
} image_format_t;
#define VLC_CODEC_XCF VLC_FOURCC('X', 'C', 'F', ' ')
#define VLC_CODEC_LBM VLC_FOURCC('L', 'B', 'M', ' ')
static const image_format_t formats[] = {
{ .codec = VLC_CODEC_XCF,
.marker_size = 9 + 4 + 1,
.marker = { 'g', 'i', 'm', 'p', ' ', 'x', 'c', 'f', ' ',
'f', 'i', 'l', 'e', '\0' }
},
{ .codec = VLC_CODEC_XCF,
.marker_size = 9 + 4 + 1,
.marker = { 'g', 'i', 'm', 'p', ' ', 'x', 'c', 'f', ' ',
'v', '0', '0', '1', '\0' }
},
{ .codec = VLC_CODEC_XCF,
.marker_size = 9 + 4 + 1,
.marker = { 'g', 'i', 'm', 'p', ' ', 'x', 'c', 'f', ' ',
'v', '0', '0', '2', '\0' }
},
{ .codec = VLC_CODEC_PNG,
.marker_size = 8,
.marker = { 0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A }
},
{ .codec = VLC_CODEC_GIF,
.marker_size = 6,
.marker = { 'G', 'I', 'F', '8', '7', 'a' }
},
{ .codec = VLC_CODEC_GIF,
.marker_size = 6,
.marker = { 'G', 'I', 'F', '8', '9', 'a' }
},
/* XXX TIFF detection may be a bit weak */
{ .codec = VLC_CODEC_TIFF,
.marker_size = 4,
.marker = { 'I', 'I', 0x2a, 0x00 },
},
{ .codec = VLC_CODEC_TIFF,
.marker_size = 4,
.marker = { 'M', 'M', 0x00, 0x2a },
},
{ .codec = VLC_CODEC_BMP,
.detect = IsBmp,
},
{ .codec = VLC_CODEC_PCX,
.detect = IsPcx,
},
{ .codec = VLC_CODEC_LBM,
.detect = IsLbm,
},
{ .codec = VLC_CODEC_PNM,
.detect = IsPnm,
},
{ .codec = VLC_CODEC_MXPEG,
.detect = IsMxpeg,
},
{ .codec = VLC_CODEC_JPEG,
.detect = IsJfif,
},
{ .codec = VLC_CODEC_JPEG,
.detect = IsSpiff,
},
{ .codec = VLC_CODEC_JPEG,
.detect = IsExif,
},
{ .codec = VLC_CODEC_BPG,
.marker_size = 4,
.marker = { 'B', 'P', 'G', 0xFB },
},
{ .codec = VLC_CODEC_SVG,
.detect = IsSVG,
},
{ .codec = VLC_CODEC_TARGA,
.detect = IsTarga,
},
{ .codec = 0 }
};
static int Open(vlc_object_t *object)
{
demux_t *demux = (demux_t*)object;
/* Detect the image type */
const image_format_t *img;
const uint8_t *peek;
int peek_size = 0;
for (int i = 0; ; i++) {
img = &formats[i];
if (!img->codec)
return VLC_EGENERIC;
if (img->detect) {
if (img->detect(demux->s))
break;
} else {
if (peek_size < img->marker_size)
peek_size = stream_Peek(demux->s, &peek, img->marker_size);
if (peek_size >= img->marker_size &&
!memcmp(peek, img->marker, img->marker_size))
break;
}
}
msg_Dbg(demux, "Detected image: %s",
vlc_fourcc_GetDescription(VIDEO_ES, img->codec));
if( img->codec == VLC_CODEC_MXPEG )
{
return VLC_EGENERIC; //let avformat demux this file
}
/* Load and if selected decode */
es_format_t fmt;
es_format_Init(&fmt, VIDEO_ES, img->codec);
fmt.video.i_chroma = fmt.i_codec;
block_t *data = Load(demux);
if (data && var_InheritBool(demux, "image-decode")) {
char *string = var_InheritString(demux, "image-chroma");
vlc_fourcc_t chroma = vlc_fourcc_GetCodecFromString(VIDEO_ES, string);
free(string);
data = Decode(demux, &fmt.video, chroma, data);
fmt.i_codec = fmt.video.i_chroma;
}
fmt.i_id = var_InheritInteger(demux, "image-id");
fmt.i_group = var_InheritInteger(demux, "image-group");
if (var_InheritURational(demux,
&fmt.video.i_frame_rate,
&fmt.video.i_frame_rate_base,
"image-fps") ||
fmt.video.i_frame_rate <= 0 || fmt.video.i_frame_rate_base <= 0) {
msg_Err(demux, "Invalid frame rate, using 10/1 instead");
fmt.video.i_frame_rate = 10;
fmt.video.i_frame_rate_base = 1;
}
/* If loading failed, we still continue to avoid mis-detection
* by other demuxers. */
if (!data)
msg_Err(demux, "Failed to load the image");
/* */
demux_sys_t *sys = malloc(sizeof(*sys));
if (!sys) {
if (data)
block_Release(data);
es_format_Clean(&fmt);
return VLC_ENOMEM;
}
sys->data = data;
sys->es = es_out_Add(demux->out, &fmt);
sys->duration = CLOCK_FREQ * var_InheritFloat(demux, "image-duration");
sys->is_realtime = var_InheritBool(demux, "image-realtime");
sys->pts_origin = sys->is_realtime ? mdate() : 0;
sys->pts_next = VLC_TS_INVALID;
date_Init(&sys->pts, fmt.video.i_frame_rate, fmt.video.i_frame_rate_base);
date_Set(&sys->pts, 0);
es_format_Clean(&fmt);
demux->pf_demux = Demux;
demux->pf_control = Control;
demux->p_sys = sys;
return VLC_SUCCESS;
}
static void Close(vlc_object_t *object)
{
demux_t *demux = (demux_t*)object;
demux_sys_t *sys = demux->p_sys;
if (sys->data)
block_Release(sys->data);
free(sys);
}
| keshwans/vlc | modules/demux/image.c | C | gpl-2.0 | 20,975 |
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.xml.internal.ws.addressing.policy;
import com.sun.xml.internal.ws.api.addressing.AddressingVersion;
import com.sun.xml.internal.ws.policy.AssertionSet;
import com.sun.xml.internal.ws.policy.NestedPolicy;
import com.sun.xml.internal.ws.policy.Policy;
import com.sun.xml.internal.ws.policy.PolicyAssertion;
import com.sun.xml.internal.ws.policy.PolicyException;
import com.sun.xml.internal.ws.policy.PolicyMap;
import com.sun.xml.internal.ws.policy.PolicyMapKey;
import com.sun.xml.internal.ws.policy.jaxws.spi.PolicyFeatureConfigurator;
import com.sun.xml.internal.ws.policy.privateutil.PolicyLogger;
import com.sun.xml.internal.ws.addressing.W3CAddressingMetadataConstants;
import com.sun.xml.internal.ws.resources.ModelerMessages;
import com.sun.xml.internal.bind.util.Which;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.logging.Level;
import javax.xml.namespace.QName;
import javax.xml.ws.WebServiceFeature;
import javax.xml.ws.WebServiceException;
import javax.xml.ws.soap.AddressingFeature;
/**
* This Policy extension configures the WSDLModel with AddressingFeature when Addressing assertions are present in the
* PolicyMap.
*
* @author japod
* @author Rama Pulavarthi
*/
public class AddressingFeatureConfigurator implements PolicyFeatureConfigurator {
private static final PolicyLogger LOGGER = PolicyLogger.getLogger(AddressingFeatureConfigurator.class);
private static final QName[] ADDRESSING_ASSERTIONS = {
new QName(AddressingVersion.MEMBER.policyNsUri, "UsingAddressing")};
/**
* Creates a new instance of AddressingFeatureConfigurator
*/
public AddressingFeatureConfigurator() {
}
public Collection<WebServiceFeature> getFeatures(final PolicyMapKey key, final PolicyMap policyMap) throws PolicyException {
LOGGER.entering(key, policyMap);
final Collection<WebServiceFeature> features = new LinkedList<WebServiceFeature>();
if ((key != null) && (policyMap != null)) {
final Policy policy = policyMap.getEndpointEffectivePolicy(key);
for (QName addressingAssertionQName : ADDRESSING_ASSERTIONS) {
if ((policy != null) && policy.contains(addressingAssertionQName)) {
final Iterator <AssertionSet> assertions = policy.iterator();
while(assertions.hasNext()){
final AssertionSet assertionSet = assertions.next();
final Iterator<PolicyAssertion> policyAssertion = assertionSet.iterator();
while(policyAssertion.hasNext()){
final PolicyAssertion assertion = policyAssertion.next();
if(assertion.getName().equals(addressingAssertionQName)){
final WebServiceFeature feature = AddressingVersion.getFeature(addressingAssertionQName.getNamespaceURI(), true, !assertion.isOptional());
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("Added addressing feature \"" + feature + "\" for element \"" + key + "\"");
}
features.add(feature);
} // end-if non optional wsa assertion found
} // next assertion
} // next alternative
} // end-if policy contains wsa assertion
} //end foreach addr assertion
// Deal with WS-Addressing 1.0 Metadata assertions
if (policy != null && policy.contains(W3CAddressingMetadataConstants.WSAM_ADDRESSING_ASSERTION)) {
for (AssertionSet assertions : policy) {
for (PolicyAssertion assertion : assertions) {
if (assertion.getName().equals(W3CAddressingMetadataConstants.WSAM_ADDRESSING_ASSERTION)) {
NestedPolicy nestedPolicy = assertion.getNestedPolicy();
boolean requiresAnonymousResponses = false;
boolean requiresNonAnonymousResponses = false;
if (nestedPolicy != null) {
requiresAnonymousResponses = nestedPolicy.contains(W3CAddressingMetadataConstants.WSAM_ANONYMOUS_NESTED_ASSERTION);
requiresNonAnonymousResponses = nestedPolicy.contains(W3CAddressingMetadataConstants.WSAM_NONANONYMOUS_NESTED_ASSERTION);
}
if(requiresAnonymousResponses && requiresNonAnonymousResponses) {
throw new WebServiceException("Only one among AnonymousResponses and NonAnonymousResponses can be nested in an Addressing assertion");
}
final WebServiceFeature feature;
try {
if (requiresAnonymousResponses) {
feature = new AddressingFeature(true, !assertion.isOptional(), AddressingFeature.Responses.ANONYMOUS);
} else if (requiresNonAnonymousResponses) {
feature = new AddressingFeature(true, !assertion.isOptional(), AddressingFeature.Responses.NON_ANONYMOUS);
} else {
feature = new AddressingFeature(true, !assertion.isOptional());
}
} catch (NoSuchMethodError e) {
throw LOGGER.logSevereException(new PolicyException(ModelerMessages.RUNTIME_MODELER_ADDRESSING_RESPONSES_NOSUCHMETHOD(toJar(Which.which(AddressingFeature.class))), e));
}
if (LOGGER.isLoggable(Level.FINE)) {
LOGGER.fine("Added addressing feature \"" + feature + "\" for element \"" + key + "\"");
}
features.add(feature);
}
}
}
}
}
LOGGER.exiting(features);
return features;
}
/**
* Given the URL String inside jar, returns the URL to the jar itself.
*/
private static String toJar(String url) {
if(!url.startsWith("jar:"))
return url;
url = url.substring(4); // cut off jar:
return url.substring(0,url.lastIndexOf('!')); // cut off everything after '!'
}
}
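// A minimal sketch (not taken from this class) of a WS-Policy fragment that the
// configurator above would map to an enabled, required AddressingFeature with
// Responses.ANONYMOUS.  The prefix bindings are assumptions for this example:
//   wsp  -> http://www.w3.org/ns/ws-policy
//   wsam -> http://www.w3.org/2007/05/addressing/metadata
//
//   <wsp:Policy>
//     <wsam:Addressing>
//       <wsp:Policy>
//         <wsam:AnonymousResponses/>
//       </wsp:Policy>
//     </wsam:Addressing>
//   </wsp:Policy>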
| upenn-acg/REMIX | jvm-remix/openjdk/jaxws/src/share/jaxws_classes/com/sun/xml/internal/ws/addressing/policy/AddressingFeatureConfigurator.java | Java | gpl-2.0 | 7,812 |
/*
* Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <net/llc.h>
#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
static struct vport_ops ovs_netdev_vport_ops;
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
if (unlikely(!vport))
goto error;
if (unlikely(skb_warn_if_lro(skb)))
goto error;
/* Make our own copy of the packet. Otherwise we will mangle the
* packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
*/
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return;
skb_push(skb, ETH_HLEN);
ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
ovs_vport_receive(vport, skb, NULL);
return;
error:
kfree_skb(skb);
}
/* Called with rcu_read_lock and bottom-halves disabled. */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct vport *vport;
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
vport = ovs_netdev_get_vport(skb->dev);
netdev_port_receive(vport, skb);
return RX_HANDLER_CONSUMED;
}
static struct net_device *get_dpdev(const struct datapath *dp)
{
struct vport *local;
local = ovs_vport_ovsl(dp, OVSP_LOCAL);
BUG_ON(!local);
return netdev_vport_priv(local)->dev;
}
static struct vport *netdev_create(const struct vport_parms *parms)
{
struct vport *vport;
struct netdev_vport *netdev_vport;
int err;
vport = ovs_vport_alloc(sizeof(struct netdev_vport),
&ovs_netdev_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
}
netdev_vport = netdev_vport_priv(vport);
netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
if (!netdev_vport->dev) {
err = -ENODEV;
goto error_free_vport;
}
if (netdev_vport->dev->flags & IFF_LOOPBACK ||
netdev_vport->dev->type != ARPHRD_ETHER ||
ovs_is_internal_dev(netdev_vport->dev)) {
err = -EINVAL;
goto error_put;
}
rtnl_lock();
err = netdev_master_upper_dev_link(netdev_vport->dev,
get_dpdev(vport->dp));
if (err)
goto error_unlock;
err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
vport);
if (err)
goto error_master_upper_dev_unlink;
dev_set_promiscuity(netdev_vport->dev, 1);
netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
rtnl_unlock();
return vport;
error_master_upper_dev_unlink:
netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
error_unlock:
rtnl_unlock();
error_put:
dev_put(netdev_vport->dev);
error_free_vport:
ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
static void free_port_rcu(struct rcu_head *rcu)
{
struct netdev_vport *netdev_vport = container_of(rcu,
struct netdev_vport, rcu);
dev_put(netdev_vport->dev);
ovs_vport_free(vport_from_priv(netdev_vport));
}
void ovs_netdev_detach_dev(struct vport *vport)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
ASSERT_RTNL();
netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev);
netdev_upper_dev_unlink(netdev_vport->dev,
netdev_master_upper_dev_get(netdev_vport->dev));
dev_set_promiscuity(netdev_vport->dev, -1);
}
static void netdev_destroy(struct vport *vport)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
rtnl_lock();
if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
ovs_netdev_detach_dev(vport);
rtnl_unlock();
call_rcu(&netdev_vport->rcu, free_port_rcu);
}
const char *ovs_netdev_get_name(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->name;
}
static unsigned int packet_length(const struct sk_buff *skb)
{
unsigned int length = skb->len - ETH_HLEN;
if (skb->protocol == htons(ETH_P_8021Q))
length -= VLAN_HLEN;
return length;
}
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
int mtu = netdev_vport->dev->mtu;
int len;
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
netdev_vport->dev->name,
packet_length(skb), mtu);
goto drop;
}
skb->dev = netdev_vport->dev;
len = skb->len;
dev_queue_xmit(skb);
return len;
drop:
kfree_skb(skb);
return 0;
}
/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
return (struct vport *)
rcu_dereference_rtnl(dev->rx_handler_data);
else
return NULL;
}
static struct vport_ops ovs_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_NETDEV,
.create = netdev_create,
.destroy = netdev_destroy,
.get_name = ovs_netdev_get_name,
.send = netdev_send,
};
int __init ovs_netdev_init(void)
{
return ovs_vport_ops_register(&ovs_netdev_vport_ops);
}
void ovs_netdev_exit(void)
{
ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
}
| andreamerello/linux-analogdevices | net/openvswitch/vport-netdev.c | C | gpl-2.0 | 5,951 |
<?php
namespace Drupal\Tests\link\Unit\Plugin\Validation\Constraint;
use Drupal\Component\Utility\UrlHelper;
use Drupal\Core\Url;
use Drupal\link\Plugin\Validation\Constraint\LinkExternalProtocolsConstraint;
use Drupal\link\Plugin\Validation\Constraint\LinkExternalProtocolsConstraintValidator;
use Drupal\Tests\UnitTestCase;
/**
* @coversDefaultClass \Drupal\link\Plugin\Validation\Constraint\LinkExternalProtocolsConstraintValidator
* @group Link
*/
class LinkExternalProtocolsConstraintValidatorTest extends UnitTestCase {
/**
* @covers ::validate
* @dataProvider providerValidate
*/
public function testValidate($value, $valid) {
$context = $this->getMock('Symfony\Component\Validator\ExecutionContextInterface');
if ($valid) {
$context->expects($this->never())
->method('addViolation');
}
else {
$context->expects($this->once())
->method('addViolation');
}
// Setup some more allowed protocols.
UrlHelper::setAllowedProtocols(['http', 'https', 'magnet']);
$constraint = new LinkExternalProtocolsConstraint();
$validator = new LinkExternalProtocolsConstraintValidator();
$validator->initialize($context);
$validator->validate($value, $constraint);
}
/**
* Data provider for ::testValidate
*/
public function providerValidate() {
$data = [];
// Test allowed protocols.
$data[] = ['http://www.drupal.org', TRUE];
$data[] = ['https://www.drupal.org', TRUE];
$data[] = ['magnet:?xt=urn:sha1:YNCKHTQCWBTRNJIV4WNAE52SJUQCZO5C', TRUE];
// Invalid protocols.
$data[] = ['ftp://ftp.funet.fi/pub/standards/RFC/rfc959.txt', FALSE];
foreach ($data as &$single_data) {
$url = Url::fromUri($single_data[0]);
$link = $this->getMock('Drupal\link\LinkItemInterface');
$link->expects($this->any())
->method('getUrl')
->willReturn($url);
$single_data[0] = $link;
}
return $data;
}
/**
* @covers ::validate
*
* @see \Drupal\Core\Url::fromUri
*/
public function testValidateWithMalformedUri() {
$link = $this->getMock('Drupal\link\LinkItemInterface');
$link->expects($this->any())
->method('getUrl')
->willThrowException(new \InvalidArgumentException());
$context = $this->getMock('Symfony\Component\Validator\ExecutionContextInterface');
$context->expects($this->never())
->method('addViolation');
$constraint = new LinkExternalProtocolsConstraint();
$validator = new LinkExternalProtocolsConstraintValidator();
$validator->initialize($context);
$validator->validate($link, $constraint);
}
/**
* @covers ::validate
*/
public function testValidateIgnoresInternalUrls() {
$link = $this->getMock('Drupal\link\LinkItemInterface');
$link->expects($this->any())
->method('getUrl')
->willReturn(Url::fromRoute('example.test'));
$context = $this->getMock('Symfony\Component\Validator\ExecutionContextInterface');
$context->expects($this->never())
->method('addViolation');
$constraint = new LinkExternalProtocolsConstraint();
$validator = new LinkExternalProtocolsConstraintValidator();
$validator->initialize($context);
$validator->validate($link, $constraint);
}
}
| willykaram/drupaltv | core/modules/link/tests/src/Unit/Plugin/Validation/Constraint/LinkExternalProtocolsConstraintValidatorTest.php | PHP | gpl-2.0 | 3,277 |
<?php
namespace Drupal\Core\FileTransfer;
/**
* The SSH connection class for the update module.
*/
class SSH extends FileTransfer implements ChmodInterface {
/**
* {@inheritdoc}
*/
public function __construct($jail, $username, $password, $hostname = "localhost", $port = 22) {
$this->username = $username;
$this->password = $password;
$this->hostname = $hostname;
$this->port = $port;
parent::__construct($jail);
}
/**
* {@inheritdoc}
*/
public function connect() {
$this->connection = @ssh2_connect($this->hostname, $this->port);
if (!$this->connection) {
throw new FileTransferException('SSH Connection failed to @host:@port', NULL, ['@host' => $this->hostname, '@port' => $this->port]);
}
if (!@ssh2_auth_password($this->connection, $this->username, $this->password)) {
throw new FileTransferException('The supplied username/password combination was not accepted.');
}
}
/**
* {@inheritdoc}
*/
public static function factory($jail, $settings) {
$username = empty($settings['username']) ? '' : $settings['username'];
$password = empty($settings['password']) ? '' : $settings['password'];
$hostname = empty($settings['advanced']['hostname']) ? 'localhost' : $settings['advanced']['hostname'];
$port = empty($settings['advanced']['port']) ? 22 : $settings['advanced']['port'];
return new SSH($jail, $username, $password, $hostname, $port);
}
/**
* {@inheritdoc}
*/
protected function copyFileJailed($source, $destination) {
if (!@ssh2_scp_send($this->connection, $source, $destination)) {
throw new FileTransferException('Cannot copy @source_file to @destination_file.', NULL, ['@source' => $source, '@destination' => $destination]);
}
}
/**
* {@inheritdoc}
*/
protected function copyDirectoryJailed($source, $destination) {
if (@!ssh2_exec($this->connection, 'cp -Rp ' . escapeshellarg($source) . ' ' . escapeshellarg($destination))) {
throw new FileTransferException('Cannot copy directory @directory.', NULL, ['@directory' => $source]);
}
}
/**
* {@inheritdoc}
*/
protected function createDirectoryJailed($directory) {
if (@!ssh2_exec($this->connection, 'mkdir ' . escapeshellarg($directory))) {
throw new FileTransferException('Cannot create directory @directory.', NULL, ['@directory' => $directory]);
}
}
/**
* {@inheritdoc}
*/
protected function removeDirectoryJailed($directory) {
if (@!ssh2_exec($this->connection, 'rm -Rf ' . escapeshellarg($directory))) {
throw new FileTransferException('Cannot remove @directory.', NULL, ['@directory' => $directory]);
}
}
/**
* {@inheritdoc}
*/
protected function removeFileJailed($destination) {
if (!@ssh2_exec($this->connection, 'rm ' . escapeshellarg($destination))) {
throw new FileTransferException('Cannot remove @directory.', NULL, ['@directory' => $destination]);
}
}
/**
* Implements Drupal\Core\FileTransfer\FileTransfer::isDirectory().
*
* WARNING: This is untested. It is not currently used, but should do the
* trick.
*/
public function isDirectory($path) {
$directory = escapeshellarg($path);
$cmd = "[ -d {$directory} ] && echo 'yes'";
if ($output = @ssh2_exec($this->connection, $cmd)) {
if ($output == 'yes') {
return TRUE;
}
return FALSE;
}
else {
throw new FileTransferException('Cannot check @path.', NULL, ['@path' => $path]);
}
}
/**
* {@inheritdoc}
*/
public function isFile($path) {
$file = escapeshellarg($path);
$cmd = "[ -f {$file} ] && echo 'yes'";
if ($output = @ssh2_exec($this->connection, $cmd)) {
if ($output == 'yes') {
return TRUE;
}
return FALSE;
}
else {
throw new FileTransferException('Cannot check @path.', NULL, ['@path' => $path]);
}
}
/**
* {@inheritdoc}
*/
public function chmodJailed($path, $mode, $recursive) {
$cmd = sprintf("chmod %s%o %s", $recursive ? '-R ' : '', $mode, escapeshellarg($path));
if (@!ssh2_exec($this->connection, $cmd)) {
throw new FileTransferException('Cannot change permissions of @path.', NULL, ['@path' => $path]);
}
}
/**
* {@inheritdoc}
*/
public function getSettingsForm() {
$form = parent::getSettingsForm();
$form['advanced']['port']['#default_value'] = 22;
return $form;
}
}
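// Illustrative sketch only (not part of Drupal core): how this class might be
// used through the generic FileTransfer API. The credentials and paths below
// are placeholders, and the sketch assumes the ssh2 PECL extension is loaded
// and that the parent FileTransfer class exposes createDirectory().
//
// $settings = [
//   'username' => 'deploy',
//   'password' => 'secret',
//   'advanced' => ['hostname' => 'example.com', 'port' => 22],
// ];
// $transfer = SSH::factory('/var/www', $settings);
// $transfer->createDirectory('/var/www/sites/default/files/tmp');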
| Jay5066/drupalDockerCompose | web/core/lib/Drupal/Core/FileTransfer/SSH.php | PHP | gpl-2.0 | 4,455 |
(function(nacl) {
'use strict';
// Ported in 2014 by Dmitry Chestnykh and Devi Mandiri.
// Public domain.
//
// Implementation derived from TweetNaCl version 20140427.
// See for details: http://tweetnacl.cr.yp.to/
var gf = function(init) {
var i, r = new Float64Array(16);
if (init) for (i = 0; i < init.length; i++) r[i] = init[i];
return r;
};
// Pluggable, initialized in high-level API below.
var randombytes = function(/* x, n */) { throw new Error('no PRNG'); };
var _0 = new Uint8Array(16);
var _9 = new Uint8Array(32); _9[0] = 9;
var gf0 = gf(),
gf1 = gf([1]),
_121665 = gf([0xdb41, 1]),
D = gf([0x78a3, 0x1359, 0x4dca, 0x75eb, 0xd8ab, 0x4141, 0x0a4d, 0x0070, 0xe898, 0x7779, 0x4079, 0x8cc7, 0xfe73, 0x2b6f, 0x6cee, 0x5203]),
D2 = gf([0xf159, 0x26b2, 0x9b94, 0xebd6, 0xb156, 0x8283, 0x149a, 0x00e0, 0xd130, 0xeef3, 0x80f2, 0x198e, 0xfce7, 0x56df, 0xd9dc, 0x2406]),
X = gf([0xd51a, 0x8f25, 0x2d60, 0xc956, 0xa7b2, 0x9525, 0xc760, 0x692c, 0xdc5c, 0xfdd6, 0xe231, 0xc0a4, 0x53fe, 0xcd6e, 0x36d3, 0x2169]),
Y = gf([0x6658, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666]),
I = gf([0xa0b0, 0x4a0e, 0x1b27, 0xc4ee, 0xe478, 0xad2f, 0x1806, 0x2f43, 0xd7a7, 0x3dfb, 0x0099, 0x2b4d, 0xdf0b, 0x4fc1, 0x2480, 0x2b83]);
function ts64(x, i, h, l) {
x[i] = (h >> 24) & 0xff;
x[i+1] = (h >> 16) & 0xff;
x[i+2] = (h >> 8) & 0xff;
x[i+3] = h & 0xff;
x[i+4] = (l >> 24) & 0xff;
x[i+5] = (l >> 16) & 0xff;
x[i+6] = (l >> 8) & 0xff;
x[i+7] = l & 0xff;
}
function vn(x, xi, y, yi, n) {
var i,d = 0;
for (i = 0; i < n; i++) d |= x[xi+i]^y[yi+i];
return (1 & ((d - 1) >>> 8)) - 1;
}
function crypto_verify_16(x, xi, y, yi) {
return vn(x,xi,y,yi,16);
}
function crypto_verify_32(x, xi, y, yi) {
return vn(x,xi,y,yi,32);
}
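// Salsa20 core: expands the 32-byte key k, 16-byte input p (nonce + block counter)
// and 16-byte constant c into a 64-byte keystream block o (20 rounds, then a
// feed-forward addition of the initial state).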
function core_salsa20(o, p, k, c) {
var j0 = c[ 0] & 0xff | (c[ 1] & 0xff)<<8 | (c[ 2] & 0xff)<<16 | (c[ 3] & 0xff)<<24,
j1 = k[ 0] & 0xff | (k[ 1] & 0xff)<<8 | (k[ 2] & 0xff)<<16 | (k[ 3] & 0xff)<<24,
j2 = k[ 4] & 0xff | (k[ 5] & 0xff)<<8 | (k[ 6] & 0xff)<<16 | (k[ 7] & 0xff)<<24,
j3 = k[ 8] & 0xff | (k[ 9] & 0xff)<<8 | (k[10] & 0xff)<<16 | (k[11] & 0xff)<<24,
j4 = k[12] & 0xff | (k[13] & 0xff)<<8 | (k[14] & 0xff)<<16 | (k[15] & 0xff)<<24,
j5 = c[ 4] & 0xff | (c[ 5] & 0xff)<<8 | (c[ 6] & 0xff)<<16 | (c[ 7] & 0xff)<<24,
j6 = p[ 0] & 0xff | (p[ 1] & 0xff)<<8 | (p[ 2] & 0xff)<<16 | (p[ 3] & 0xff)<<24,
j7 = p[ 4] & 0xff | (p[ 5] & 0xff)<<8 | (p[ 6] & 0xff)<<16 | (p[ 7] & 0xff)<<24,
j8 = p[ 8] & 0xff | (p[ 9] & 0xff)<<8 | (p[10] & 0xff)<<16 | (p[11] & 0xff)<<24,
j9 = p[12] & 0xff | (p[13] & 0xff)<<8 | (p[14] & 0xff)<<16 | (p[15] & 0xff)<<24,
j10 = c[ 8] & 0xff | (c[ 9] & 0xff)<<8 | (c[10] & 0xff)<<16 | (c[11] & 0xff)<<24,
j11 = k[16] & 0xff | (k[17] & 0xff)<<8 | (k[18] & 0xff)<<16 | (k[19] & 0xff)<<24,
j12 = k[20] & 0xff | (k[21] & 0xff)<<8 | (k[22] & 0xff)<<16 | (k[23] & 0xff)<<24,
j13 = k[24] & 0xff | (k[25] & 0xff)<<8 | (k[26] & 0xff)<<16 | (k[27] & 0xff)<<24,
j14 = k[28] & 0xff | (k[29] & 0xff)<<8 | (k[30] & 0xff)<<16 | (k[31] & 0xff)<<24,
j15 = c[12] & 0xff | (c[13] & 0xff)<<8 | (c[14] & 0xff)<<16 | (c[15] & 0xff)<<24;
var x0 = j0, x1 = j1, x2 = j2, x3 = j3, x4 = j4, x5 = j5, x6 = j6, x7 = j7,
x8 = j8, x9 = j9, x10 = j10, x11 = j11, x12 = j12, x13 = j13, x14 = j14,
x15 = j15, u;
for (var i = 0; i < 20; i += 2) {
u = x0 + x12 | 0;
x4 ^= u<<7 | u>>>(32-7);
u = x4 + x0 | 0;
x8 ^= u<<9 | u>>>(32-9);
u = x8 + x4 | 0;
x12 ^= u<<13 | u>>>(32-13);
u = x12 + x8 | 0;
x0 ^= u<<18 | u>>>(32-18);
u = x5 + x1 | 0;
x9 ^= u<<7 | u>>>(32-7);
u = x9 + x5 | 0;
x13 ^= u<<9 | u>>>(32-9);
u = x13 + x9 | 0;
x1 ^= u<<13 | u>>>(32-13);
u = x1 + x13 | 0;
x5 ^= u<<18 | u>>>(32-18);
u = x10 + x6 | 0;
x14 ^= u<<7 | u>>>(32-7);
u = x14 + x10 | 0;
x2 ^= u<<9 | u>>>(32-9);
u = x2 + x14 | 0;
x6 ^= u<<13 | u>>>(32-13);
u = x6 + x2 | 0;
x10 ^= u<<18 | u>>>(32-18);
u = x15 + x11 | 0;
x3 ^= u<<7 | u>>>(32-7);
u = x3 + x15 | 0;
x7 ^= u<<9 | u>>>(32-9);
u = x7 + x3 | 0;
x11 ^= u<<13 | u>>>(32-13);
u = x11 + x7 | 0;
x15 ^= u<<18 | u>>>(32-18);
u = x0 + x3 | 0;
x1 ^= u<<7 | u>>>(32-7);
u = x1 + x0 | 0;
x2 ^= u<<9 | u>>>(32-9);
u = x2 + x1 | 0;
x3 ^= u<<13 | u>>>(32-13);
u = x3 + x2 | 0;
x0 ^= u<<18 | u>>>(32-18);
u = x5 + x4 | 0;
x6 ^= u<<7 | u>>>(32-7);
u = x6 + x5 | 0;
x7 ^= u<<9 | u>>>(32-9);
u = x7 + x6 | 0;
x4 ^= u<<13 | u>>>(32-13);
u = x4 + x7 | 0;
x5 ^= u<<18 | u>>>(32-18);
u = x10 + x9 | 0;
x11 ^= u<<7 | u>>>(32-7);
u = x11 + x10 | 0;
x8 ^= u<<9 | u>>>(32-9);
u = x8 + x11 | 0;
x9 ^= u<<13 | u>>>(32-13);
u = x9 + x8 | 0;
x10 ^= u<<18 | u>>>(32-18);
u = x15 + x14 | 0;
x12 ^= u<<7 | u>>>(32-7);
u = x12 + x15 | 0;
x13 ^= u<<9 | u>>>(32-9);
u = x13 + x12 | 0;
x14 ^= u<<13 | u>>>(32-13);
u = x14 + x13 | 0;
x15 ^= u<<18 | u>>>(32-18);
}
x0 = x0 + j0 | 0;
x1 = x1 + j1 | 0;
x2 = x2 + j2 | 0;
x3 = x3 + j3 | 0;
x4 = x4 + j4 | 0;
x5 = x5 + j5 | 0;
x6 = x6 + j6 | 0;
x7 = x7 + j7 | 0;
x8 = x8 + j8 | 0;
x9 = x9 + j9 | 0;
x10 = x10 + j10 | 0;
x11 = x11 + j11 | 0;
x12 = x12 + j12 | 0;
x13 = x13 + j13 | 0;
x14 = x14 + j14 | 0;
x15 = x15 + j15 | 0;
o[ 0] = x0 >>> 0 & 0xff;
o[ 1] = x0 >>> 8 & 0xff;
o[ 2] = x0 >>> 16 & 0xff;
o[ 3] = x0 >>> 24 & 0xff;
o[ 4] = x1 >>> 0 & 0xff;
o[ 5] = x1 >>> 8 & 0xff;
o[ 6] = x1 >>> 16 & 0xff;
o[ 7] = x1 >>> 24 & 0xff;
o[ 8] = x2 >>> 0 & 0xff;
o[ 9] = x2 >>> 8 & 0xff;
o[10] = x2 >>> 16 & 0xff;
o[11] = x2 >>> 24 & 0xff;
o[12] = x3 >>> 0 & 0xff;
o[13] = x3 >>> 8 & 0xff;
o[14] = x3 >>> 16 & 0xff;
o[15] = x3 >>> 24 & 0xff;
o[16] = x4 >>> 0 & 0xff;
o[17] = x4 >>> 8 & 0xff;
o[18] = x4 >>> 16 & 0xff;
o[19] = x4 >>> 24 & 0xff;
o[20] = x5 >>> 0 & 0xff;
o[21] = x5 >>> 8 & 0xff;
o[22] = x5 >>> 16 & 0xff;
o[23] = x5 >>> 24 & 0xff;
o[24] = x6 >>> 0 & 0xff;
o[25] = x6 >>> 8 & 0xff;
o[26] = x6 >>> 16 & 0xff;
o[27] = x6 >>> 24 & 0xff;
o[28] = x7 >>> 0 & 0xff;
o[29] = x7 >>> 8 & 0xff;
o[30] = x7 >>> 16 & 0xff;
o[31] = x7 >>> 24 & 0xff;
o[32] = x8 >>> 0 & 0xff;
o[33] = x8 >>> 8 & 0xff;
o[34] = x8 >>> 16 & 0xff;
o[35] = x8 >>> 24 & 0xff;
o[36] = x9 >>> 0 & 0xff;
o[37] = x9 >>> 8 & 0xff;
o[38] = x9 >>> 16 & 0xff;
o[39] = x9 >>> 24 & 0xff;
o[40] = x10 >>> 0 & 0xff;
o[41] = x10 >>> 8 & 0xff;
o[42] = x10 >>> 16 & 0xff;
o[43] = x10 >>> 24 & 0xff;
o[44] = x11 >>> 0 & 0xff;
o[45] = x11 >>> 8 & 0xff;
o[46] = x11 >>> 16 & 0xff;
o[47] = x11 >>> 24 & 0xff;
o[48] = x12 >>> 0 & 0xff;
o[49] = x12 >>> 8 & 0xff;
o[50] = x12 >>> 16 & 0xff;
o[51] = x12 >>> 24 & 0xff;
o[52] = x13 >>> 0 & 0xff;
o[53] = x13 >>> 8 & 0xff;
o[54] = x13 >>> 16 & 0xff;
o[55] = x13 >>> 24 & 0xff;
o[56] = x14 >>> 0 & 0xff;
o[57] = x14 >>> 8 & 0xff;
o[58] = x14 >>> 16 & 0xff;
o[59] = x14 >>> 24 & 0xff;
o[60] = x15 >>> 0 & 0xff;
o[61] = x15 >>> 8 & 0xff;
o[62] = x15 >>> 16 & 0xff;
o[63] = x15 >>> 24 & 0xff;
}
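// HSalsa20: same rounds as above, but the 32-byte output is taken straight from
// the state words with no feed-forward addition; used below to derive the
// XSalsa20 subkey from the first 16 bytes of the 24-byte nonce.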
function core_hsalsa20(o,p,k,c) {
var j0 = c[ 0] & 0xff | (c[ 1] & 0xff)<<8 | (c[ 2] & 0xff)<<16 | (c[ 3] & 0xff)<<24,
j1 = k[ 0] & 0xff | (k[ 1] & 0xff)<<8 | (k[ 2] & 0xff)<<16 | (k[ 3] & 0xff)<<24,
j2 = k[ 4] & 0xff | (k[ 5] & 0xff)<<8 | (k[ 6] & 0xff)<<16 | (k[ 7] & 0xff)<<24,
j3 = k[ 8] & 0xff | (k[ 9] & 0xff)<<8 | (k[10] & 0xff)<<16 | (k[11] & 0xff)<<24,
j4 = k[12] & 0xff | (k[13] & 0xff)<<8 | (k[14] & 0xff)<<16 | (k[15] & 0xff)<<24,
j5 = c[ 4] & 0xff | (c[ 5] & 0xff)<<8 | (c[ 6] & 0xff)<<16 | (c[ 7] & 0xff)<<24,
j6 = p[ 0] & 0xff | (p[ 1] & 0xff)<<8 | (p[ 2] & 0xff)<<16 | (p[ 3] & 0xff)<<24,
j7 = p[ 4] & 0xff | (p[ 5] & 0xff)<<8 | (p[ 6] & 0xff)<<16 | (p[ 7] & 0xff)<<24,
j8 = p[ 8] & 0xff | (p[ 9] & 0xff)<<8 | (p[10] & 0xff)<<16 | (p[11] & 0xff)<<24,
j9 = p[12] & 0xff | (p[13] & 0xff)<<8 | (p[14] & 0xff)<<16 | (p[15] & 0xff)<<24,
j10 = c[ 8] & 0xff | (c[ 9] & 0xff)<<8 | (c[10] & 0xff)<<16 | (c[11] & 0xff)<<24,
j11 = k[16] & 0xff | (k[17] & 0xff)<<8 | (k[18] & 0xff)<<16 | (k[19] & 0xff)<<24,
j12 = k[20] & 0xff | (k[21] & 0xff)<<8 | (k[22] & 0xff)<<16 | (k[23] & 0xff)<<24,
j13 = k[24] & 0xff | (k[25] & 0xff)<<8 | (k[26] & 0xff)<<16 | (k[27] & 0xff)<<24,
j14 = k[28] & 0xff | (k[29] & 0xff)<<8 | (k[30] & 0xff)<<16 | (k[31] & 0xff)<<24,
j15 = c[12] & 0xff | (c[13] & 0xff)<<8 | (c[14] & 0xff)<<16 | (c[15] & 0xff)<<24;
var x0 = j0, x1 = j1, x2 = j2, x3 = j3, x4 = j4, x5 = j5, x6 = j6, x7 = j7,
x8 = j8, x9 = j9, x10 = j10, x11 = j11, x12 = j12, x13 = j13, x14 = j14,
x15 = j15, u;
for (var i = 0; i < 20; i += 2) {
u = x0 + x12 | 0;
x4 ^= u<<7 | u>>>(32-7);
u = x4 + x0 | 0;
x8 ^= u<<9 | u>>>(32-9);
u = x8 + x4 | 0;
x12 ^= u<<13 | u>>>(32-13);
u = x12 + x8 | 0;
x0 ^= u<<18 | u>>>(32-18);
u = x5 + x1 | 0;
x9 ^= u<<7 | u>>>(32-7);
u = x9 + x5 | 0;
x13 ^= u<<9 | u>>>(32-9);
u = x13 + x9 | 0;
x1 ^= u<<13 | u>>>(32-13);
u = x1 + x13 | 0;
x5 ^= u<<18 | u>>>(32-18);
u = x10 + x6 | 0;
x14 ^= u<<7 | u>>>(32-7);
u = x14 + x10 | 0;
x2 ^= u<<9 | u>>>(32-9);
u = x2 + x14 | 0;
x6 ^= u<<13 | u>>>(32-13);
u = x6 + x2 | 0;
x10 ^= u<<18 | u>>>(32-18);
u = x15 + x11 | 0;
x3 ^= u<<7 | u>>>(32-7);
u = x3 + x15 | 0;
x7 ^= u<<9 | u>>>(32-9);
u = x7 + x3 | 0;
x11 ^= u<<13 | u>>>(32-13);
u = x11 + x7 | 0;
x15 ^= u<<18 | u>>>(32-18);
u = x0 + x3 | 0;
x1 ^= u<<7 | u>>>(32-7);
u = x1 + x0 | 0;
x2 ^= u<<9 | u>>>(32-9);
u = x2 + x1 | 0;
x3 ^= u<<13 | u>>>(32-13);
u = x3 + x2 | 0;
x0 ^= u<<18 | u>>>(32-18);
u = x5 + x4 | 0;
x6 ^= u<<7 | u>>>(32-7);
u = x6 + x5 | 0;
x7 ^= u<<9 | u>>>(32-9);
u = x7 + x6 | 0;
x4 ^= u<<13 | u>>>(32-13);
u = x4 + x7 | 0;
x5 ^= u<<18 | u>>>(32-18);
u = x10 + x9 | 0;
x11 ^= u<<7 | u>>>(32-7);
u = x11 + x10 | 0;
x8 ^= u<<9 | u>>>(32-9);
u = x8 + x11 | 0;
x9 ^= u<<13 | u>>>(32-13);
u = x9 + x8 | 0;
x10 ^= u<<18 | u>>>(32-18);
u = x15 + x14 | 0;
x12 ^= u<<7 | u>>>(32-7);
u = x12 + x15 | 0;
x13 ^= u<<9 | u>>>(32-9);
u = x13 + x12 | 0;
x14 ^= u<<13 | u>>>(32-13);
u = x14 + x13 | 0;
x15 ^= u<<18 | u>>>(32-18);
}
o[ 0] = x0 >>> 0 & 0xff;
o[ 1] = x0 >>> 8 & 0xff;
o[ 2] = x0 >>> 16 & 0xff;
o[ 3] = x0 >>> 24 & 0xff;
o[ 4] = x5 >>> 0 & 0xff;
o[ 5] = x5 >>> 8 & 0xff;
o[ 6] = x5 >>> 16 & 0xff;
o[ 7] = x5 >>> 24 & 0xff;
o[ 8] = x10 >>> 0 & 0xff;
o[ 9] = x10 >>> 8 & 0xff;
o[10] = x10 >>> 16 & 0xff;
o[11] = x10 >>> 24 & 0xff;
o[12] = x15 >>> 0 & 0xff;
o[13] = x15 >>> 8 & 0xff;
o[14] = x15 >>> 16 & 0xff;
o[15] = x15 >>> 24 & 0xff;
o[16] = x6 >>> 0 & 0xff;
o[17] = x6 >>> 8 & 0xff;
o[18] = x6 >>> 16 & 0xff;
o[19] = x6 >>> 24 & 0xff;
o[20] = x7 >>> 0 & 0xff;
o[21] = x7 >>> 8 & 0xff;
o[22] = x7 >>> 16 & 0xff;
o[23] = x7 >>> 24 & 0xff;
o[24] = x8 >>> 0 & 0xff;
o[25] = x8 >>> 8 & 0xff;
o[26] = x8 >>> 16 & 0xff;
o[27] = x8 >>> 24 & 0xff;
o[28] = x9 >>> 0 & 0xff;
o[29] = x9 >>> 8 & 0xff;
o[30] = x9 >>> 16 & 0xff;
o[31] = x9 >>> 24 & 0xff;
}
function crypto_core_salsa20(out,inp,k,c) {
core_salsa20(out,inp,k,c);
}
function crypto_core_hsalsa20(out,inp,k,c) {
core_hsalsa20(out,inp,k,c);
}
var sigma = new Uint8Array([101, 120, 112, 97, 110, 100, 32, 51, 50, 45, 98, 121, 116, 101, 32, 107]);
// "expand 32-byte k"
function crypto_stream_salsa20_xor(c,cpos,m,mpos,b,n,k) {
var z = new Uint8Array(16), x = new Uint8Array(64);
var u, i;
for (i = 0; i < 16; i++) z[i] = 0;
for (i = 0; i < 8; i++) z[i] = n[i];
while (b >= 64) {
crypto_core_salsa20(x,z,k,sigma);
for (i = 0; i < 64; i++) c[cpos+i] = m[mpos+i] ^ x[i];
u = 1;
for (i = 8; i < 16; i++) {
u = u + (z[i] & 0xff) | 0;
z[i] = u & 0xff;
u >>>= 8;
}
b -= 64;
cpos += 64;
mpos += 64;
}
if (b > 0) {
crypto_core_salsa20(x,z,k,sigma);
for (i = 0; i < b; i++) c[cpos+i] = m[mpos+i] ^ x[i];
}
return 0;
}
function crypto_stream_salsa20(c,cpos,b,n,k) {
var z = new Uint8Array(16), x = new Uint8Array(64);
var u, i;
for (i = 0; i < 16; i++) z[i] = 0;
for (i = 0; i < 8; i++) z[i] = n[i];
while (b >= 64) {
crypto_core_salsa20(x,z,k,sigma);
for (i = 0; i < 64; i++) c[cpos+i] = x[i];
u = 1;
for (i = 8; i < 16; i++) {
u = u + (z[i] & 0xff) | 0;
z[i] = u & 0xff;
u >>>= 8;
}
b -= 64;
cpos += 64;
}
if (b > 0) {
crypto_core_salsa20(x,z,k,sigma);
for (i = 0; i < b; i++) c[cpos+i] = x[i];
}
return 0;
}
function crypto_stream(c,cpos,d,n,k) {
var s = new Uint8Array(32);
crypto_core_hsalsa20(s,n,k,sigma);
var sn = new Uint8Array(8);
for (var i = 0; i < 8; i++) sn[i] = n[i+16];
return crypto_stream_salsa20(c,cpos,d,sn,s);
}
function crypto_stream_xor(c,cpos,m,mpos,d,n,k) {
var s = new Uint8Array(32);
crypto_core_hsalsa20(s,n,k,sigma);
var sn = new Uint8Array(8);
for (var i = 0; i < 8; i++) sn[i] = n[i+16];
return crypto_stream_salsa20_xor(c,cpos,m,mpos,d,sn,s);
}
/*
* Port of Andrew Moon's Poly1305-donna-16. Public domain.
* https://github.com/floodyberry/poly1305-donna
*/
var poly1305 = function(key) {
this.buffer = new Uint8Array(16);
this.r = new Uint16Array(10);
this.h = new Uint16Array(10);
this.pad = new Uint16Array(8);
this.leftover = 0;
this.fin = 0;
var t0, t1, t2, t3, t4, t5, t6, t7;
t0 = key[ 0] & 0xff | (key[ 1] & 0xff) << 8; this.r[0] = ( t0 ) & 0x1fff;
t1 = key[ 2] & 0xff | (key[ 3] & 0xff) << 8; this.r[1] = ((t0 >>> 13) | (t1 << 3)) & 0x1fff;
t2 = key[ 4] & 0xff | (key[ 5] & 0xff) << 8; this.r[2] = ((t1 >>> 10) | (t2 << 6)) & 0x1f03;
t3 = key[ 6] & 0xff | (key[ 7] & 0xff) << 8; this.r[3] = ((t2 >>> 7) | (t3 << 9)) & 0x1fff;
t4 = key[ 8] & 0xff | (key[ 9] & 0xff) << 8; this.r[4] = ((t3 >>> 4) | (t4 << 12)) & 0x00ff;
this.r[5] = ((t4 >>> 1)) & 0x1ffe;
t5 = key[10] & 0xff | (key[11] & 0xff) << 8; this.r[6] = ((t4 >>> 14) | (t5 << 2)) & 0x1fff;
t6 = key[12] & 0xff | (key[13] & 0xff) << 8; this.r[7] = ((t5 >>> 11) | (t6 << 5)) & 0x1f81;
t7 = key[14] & 0xff | (key[15] & 0xff) << 8; this.r[8] = ((t6 >>> 8) | (t7 << 8)) & 0x1fff;
this.r[9] = ((t7 >>> 5)) & 0x007f;
this.pad[0] = key[16] & 0xff | (key[17] & 0xff) << 8;
this.pad[1] = key[18] & 0xff | (key[19] & 0xff) << 8;
this.pad[2] = key[20] & 0xff | (key[21] & 0xff) << 8;
this.pad[3] = key[22] & 0xff | (key[23] & 0xff) << 8;
this.pad[4] = key[24] & 0xff | (key[25] & 0xff) << 8;
this.pad[5] = key[26] & 0xff | (key[27] & 0xff) << 8;
this.pad[6] = key[28] & 0xff | (key[29] & 0xff) << 8;
this.pad[7] = key[30] & 0xff | (key[31] & 0xff) << 8;
};
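// Absorb full 16-byte blocks: h = (h + block + 2^128) * r mod 2^130 - 5, where the
// 2^128 padding bit is omitted once fin is set; arithmetic uses 13-bit limbs.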
poly1305.prototype.blocks = function(m, mpos, bytes) {
var hibit = this.fin ? 0 : (1 << 11);
var t0, t1, t2, t3, t4, t5, t6, t7, c;
var d0, d1, d2, d3, d4, d5, d6, d7, d8, d9;
var h0 = this.h[0],
h1 = this.h[1],
h2 = this.h[2],
h3 = this.h[3],
h4 = this.h[4],
h5 = this.h[5],
h6 = this.h[6],
h7 = this.h[7],
h8 = this.h[8],
h9 = this.h[9];
var r0 = this.r[0],
r1 = this.r[1],
r2 = this.r[2],
r3 = this.r[3],
r4 = this.r[4],
r5 = this.r[5],
r6 = this.r[6],
r7 = this.r[7],
r8 = this.r[8],
r9 = this.r[9];
while (bytes >= 16) {
t0 = m[mpos+ 0] & 0xff | (m[mpos+ 1] & 0xff) << 8; h0 += ( t0 ) & 0x1fff;
t1 = m[mpos+ 2] & 0xff | (m[mpos+ 3] & 0xff) << 8; h1 += ((t0 >>> 13) | (t1 << 3)) & 0x1fff;
t2 = m[mpos+ 4] & 0xff | (m[mpos+ 5] & 0xff) << 8; h2 += ((t1 >>> 10) | (t2 << 6)) & 0x1fff;
t3 = m[mpos+ 6] & 0xff | (m[mpos+ 7] & 0xff) << 8; h3 += ((t2 >>> 7) | (t3 << 9)) & 0x1fff;
t4 = m[mpos+ 8] & 0xff | (m[mpos+ 9] & 0xff) << 8; h4 += ((t3 >>> 4) | (t4 << 12)) & 0x1fff;
h5 += ((t4 >>> 1)) & 0x1fff;
t5 = m[mpos+10] & 0xff | (m[mpos+11] & 0xff) << 8; h6 += ((t4 >>> 14) | (t5 << 2)) & 0x1fff;
t6 = m[mpos+12] & 0xff | (m[mpos+13] & 0xff) << 8; h7 += ((t5 >>> 11) | (t6 << 5)) & 0x1fff;
t7 = m[mpos+14] & 0xff | (m[mpos+15] & 0xff) << 8; h8 += ((t6 >>> 8) | (t7 << 8)) & 0x1fff;
h9 += ((t7 >>> 5)) | hibit;
c = 0;
d0 = c;
d0 += h0 * r0;
d0 += h1 * (5 * r9);
d0 += h2 * (5 * r8);
d0 += h3 * (5 * r7);
d0 += h4 * (5 * r6);
c = (d0 >>> 13); d0 &= 0x1fff;
d0 += h5 * (5 * r5);
d0 += h6 * (5 * r4);
d0 += h7 * (5 * r3);
d0 += h8 * (5 * r2);
d0 += h9 * (5 * r1);
c += (d0 >>> 13); d0 &= 0x1fff;
d1 = c;
d1 += h0 * r1;
d1 += h1 * r0;
d1 += h2 * (5 * r9);
d1 += h3 * (5 * r8);
d1 += h4 * (5 * r7);
c = (d1 >>> 13); d1 &= 0x1fff;
d1 += h5 * (5 * r6);
d1 += h6 * (5 * r5);
d1 += h7 * (5 * r4);
d1 += h8 * (5 * r3);
d1 += h9 * (5 * r2);
c += (d1 >>> 13); d1 &= 0x1fff;
d2 = c;
d2 += h0 * r2;
d2 += h1 * r1;
d2 += h2 * r0;
d2 += h3 * (5 * r9);
d2 += h4 * (5 * r8);
c = (d2 >>> 13); d2 &= 0x1fff;
d2 += h5 * (5 * r7);
d2 += h6 * (5 * r6);
d2 += h7 * (5 * r5);
d2 += h8 * (5 * r4);
d2 += h9 * (5 * r3);
c += (d2 >>> 13); d2 &= 0x1fff;
d3 = c;
d3 += h0 * r3;
d3 += h1 * r2;
d3 += h2 * r1;
d3 += h3 * r0;
d3 += h4 * (5 * r9);
c = (d3 >>> 13); d3 &= 0x1fff;
d3 += h5 * (5 * r8);
d3 += h6 * (5 * r7);
d3 += h7 * (5 * r6);
d3 += h8 * (5 * r5);
d3 += h9 * (5 * r4);
c += (d3 >>> 13); d3 &= 0x1fff;
d4 = c;
d4 += h0 * r4;
d4 += h1 * r3;
d4 += h2 * r2;
d4 += h3 * r1;
d4 += h4 * r0;
c = (d4 >>> 13); d4 &= 0x1fff;
d4 += h5 * (5 * r9);
d4 += h6 * (5 * r8);
d4 += h7 * (5 * r7);
d4 += h8 * (5 * r6);
d4 += h9 * (5 * r5);
c += (d4 >>> 13); d4 &= 0x1fff;
d5 = c;
d5 += h0 * r5;
d5 += h1 * r4;
d5 += h2 * r3;
d5 += h3 * r2;
d5 += h4 * r1;
c = (d5 >>> 13); d5 &= 0x1fff;
d5 += h5 * r0;
d5 += h6 * (5 * r9);
d5 += h7 * (5 * r8);
d5 += h8 * (5 * r7);
d5 += h9 * (5 * r6);
c += (d5 >>> 13); d5 &= 0x1fff;
d6 = c;
d6 += h0 * r6;
d6 += h1 * r5;
d6 += h2 * r4;
d6 += h3 * r3;
d6 += h4 * r2;
c = (d6 >>> 13); d6 &= 0x1fff;
d6 += h5 * r1;
d6 += h6 * r0;
d6 += h7 * (5 * r9);
d6 += h8 * (5 * r8);
d6 += h9 * (5 * r7);
c += (d6 >>> 13); d6 &= 0x1fff;
d7 = c;
d7 += h0 * r7;
d7 += h1 * r6;
d7 += h2 * r5;
d7 += h3 * r4;
d7 += h4 * r3;
c = (d7 >>> 13); d7 &= 0x1fff;
d7 += h5 * r2;
d7 += h6 * r1;
d7 += h7 * r0;
d7 += h8 * (5 * r9);
d7 += h9 * (5 * r8);
c += (d7 >>> 13); d7 &= 0x1fff;
d8 = c;
d8 += h0 * r8;
d8 += h1 * r7;
d8 += h2 * r6;
d8 += h3 * r5;
d8 += h4 * r4;
c = (d8 >>> 13); d8 &= 0x1fff;
d8 += h5 * r3;
d8 += h6 * r2;
d8 += h7 * r1;
d8 += h8 * r0;
d8 += h9 * (5 * r9);
c += (d8 >>> 13); d8 &= 0x1fff;
d9 = c;
d9 += h0 * r9;
d9 += h1 * r8;
d9 += h2 * r7;
d9 += h3 * r6;
d9 += h4 * r5;
c = (d9 >>> 13); d9 &= 0x1fff;
d9 += h5 * r4;
d9 += h6 * r3;
d9 += h7 * r2;
d9 += h8 * r1;
d9 += h9 * r0;
c += (d9 >>> 13); d9 &= 0x1fff;
c = (((c << 2) + c)) | 0;
c = (c + d0) | 0;
d0 = c & 0x1fff;
c = (c >>> 13);
d1 += c;
h0 = d0;
h1 = d1;
h2 = d2;
h3 = d3;
h4 = d4;
h5 = d5;
h6 = d6;
h7 = d7;
h8 = d8;
h9 = d9;
mpos += 16;
bytes -= 16;
}
this.h[0] = h0;
this.h[1] = h1;
this.h[2] = h2;
this.h[3] = h3;
this.h[4] = h4;
this.h[5] = h5;
this.h[6] = h6;
this.h[7] = h7;
this.h[8] = h8;
this.h[9] = h9;
};
poly1305.prototype.finish = function(mac, macpos) {
var g = new Uint16Array(10);
var c, mask, f, i;
if (this.leftover) {
i = this.leftover;
this.buffer[i++] = 1;
for (; i < 16; i++) this.buffer[i] = 0;
this.fin = 1;
this.blocks(this.buffer, 0, 16);
}
c = this.h[1] >>> 13;
this.h[1] &= 0x1fff;
for (i = 2; i < 10; i++) {
this.h[i] += c;
c = this.h[i] >>> 13;
this.h[i] &= 0x1fff;
}
this.h[0] += (c * 5);
c = this.h[0] >>> 13;
this.h[0] &= 0x1fff;
this.h[1] += c;
c = this.h[1] >>> 13;
this.h[1] &= 0x1fff;
this.h[2] += c;
g[0] = this.h[0] + 5;
c = g[0] >>> 13;
g[0] &= 0x1fff;
for (i = 1; i < 10; i++) {
g[i] = this.h[i] + c;
c = g[i] >>> 13;
g[i] &= 0x1fff;
}
g[9] -= (1 << 13);
mask = (g[9] >>> ((2 * 8) - 1)) - 1;
for (i = 0; i < 10; i++) g[i] &= mask;
mask = ~mask;
for (i = 0; i < 10; i++) this.h[i] = (this.h[i] & mask) | g[i];
this.h[0] = ((this.h[0] ) | (this.h[1] << 13) ) & 0xffff;
this.h[1] = ((this.h[1] >>> 3) | (this.h[2] << 10) ) & 0xffff;
this.h[2] = ((this.h[2] >>> 6) | (this.h[3] << 7) ) & 0xffff;
this.h[3] = ((this.h[3] >>> 9) | (this.h[4] << 4) ) & 0xffff;
this.h[4] = ((this.h[4] >>> 12) | (this.h[5] << 1) | (this.h[6] << 14)) & 0xffff;
this.h[5] = ((this.h[6] >>> 2) | (this.h[7] << 11) ) & 0xffff;
this.h[6] = ((this.h[7] >>> 5) | (this.h[8] << 8) ) & 0xffff;
this.h[7] = ((this.h[8] >>> 8) | (this.h[9] << 5) ) & 0xffff;
f = this.h[0] + this.pad[0];
this.h[0] = f & 0xffff;
for (i = 1; i < 8; i++) {
f = (((this.h[i] + this.pad[i]) | 0) + (f >>> 16)) | 0;
this.h[i] = f & 0xffff;
}
mac[macpos+ 0] = (this.h[0] >>> 0) & 0xff;
mac[macpos+ 1] = (this.h[0] >>> 8) & 0xff;
mac[macpos+ 2] = (this.h[1] >>> 0) & 0xff;
mac[macpos+ 3] = (this.h[1] >>> 8) & 0xff;
mac[macpos+ 4] = (this.h[2] >>> 0) & 0xff;
mac[macpos+ 5] = (this.h[2] >>> 8) & 0xff;
mac[macpos+ 6] = (this.h[3] >>> 0) & 0xff;
mac[macpos+ 7] = (this.h[3] >>> 8) & 0xff;
mac[macpos+ 8] = (this.h[4] >>> 0) & 0xff;
mac[macpos+ 9] = (this.h[4] >>> 8) & 0xff;
mac[macpos+10] = (this.h[5] >>> 0) & 0xff;
mac[macpos+11] = (this.h[5] >>> 8) & 0xff;
mac[macpos+12] = (this.h[6] >>> 0) & 0xff;
mac[macpos+13] = (this.h[6] >>> 8) & 0xff;
mac[macpos+14] = (this.h[7] >>> 0) & 0xff;
mac[macpos+15] = (this.h[7] >>> 8) & 0xff;
};
poly1305.prototype.update = function(m, mpos, bytes) {
var i, want;
if (this.leftover) {
want = (16 - this.leftover);
if (want > bytes)
want = bytes;
for (i = 0; i < want; i++)
this.buffer[this.leftover + i] = m[mpos+i];
bytes -= want;
mpos += want;
this.leftover += want;
if (this.leftover < 16)
return;
this.blocks(this.buffer, 0, 16);
this.leftover = 0;
}
if (bytes >= 16) {
want = bytes - (bytes % 16);
this.blocks(m, mpos, want);
mpos += want;
bytes -= want;
}
if (bytes) {
for (i = 0; i < bytes; i++)
this.buffer[this.leftover + i] = m[mpos+i];
this.leftover += bytes;
}
};
function crypto_onetimeauth(out, outpos, m, mpos, n, k) {
var s = new poly1305(k);
s.update(m, mpos, n);
s.finish(out, outpos);
return 0;
}
function crypto_onetimeauth_verify(h, hpos, m, mpos, n, k) {
var x = new Uint8Array(16);
crypto_onetimeauth(x,0,m,mpos,n,k);
return crypto_verify_16(h,hpos,x,0);
}
function crypto_secretbox(c,m,d,n,k) {
var i;
if (d < 32) return -1;
crypto_stream_xor(c,0,m,0,d,n,k);
crypto_onetimeauth(c, 16, c, 32, d - 32, c);
for (i = 0; i < 16; i++) c[i] = 0;
return 0;
}
function crypto_secretbox_open(m,c,d,n,k) {
var i;
var x = new Uint8Array(32);
if (d < 32) return -1;
crypto_stream(x,0,32,n,k);
if (crypto_onetimeauth_verify(c, 16,c, 32,d - 32,x) !== 0) return -1;
crypto_stream_xor(m,0,c,0,d,n,k);
for (i = 0; i < 32; i++) m[i] = 0;
return 0;
}
function set25519(r, a) {
var i;
for (i = 0; i < 16; i++) r[i] = a[i]|0;
}
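// Carry propagation across the 16 limbs of a field element; the carry out of the
// top limb wraps around into limb 0 times 38, since 2^256 = 38 (mod 2^255 - 19).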
function car25519(o) {
var i, v, c = 1;
for (i = 0; i < 16; i++) {
v = o[i] + c + 65535;
c = Math.floor(v / 65536);
o[i] = v - c * 65536;
}
o[0] += c-1 + 37 * (c-1);
}
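// Constant-time conditional swap: exchanges p and q when b is 1, leaves both
// unchanged when b is 0.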
function sel25519(p, q, b) {
var t, c = ~(b-1);
for (var i = 0; i < 16; i++) {
t = c & (p[i] ^ q[i]);
p[i] ^= t;
q[i] ^= t;
}
}
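// Fully reduce a field element modulo 2^255 - 19 and serialize it as 32
// little-endian bytes.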
function pack25519(o, n) {
var i, j, b;
var m = gf(), t = gf();
for (i = 0; i < 16; i++) t[i] = n[i];
car25519(t);
car25519(t);
car25519(t);
for (j = 0; j < 2; j++) {
m[0] = t[0] - 0xffed;
for (i = 1; i < 15; i++) {
m[i] = t[i] - 0xffff - ((m[i-1]>>16) & 1);
m[i-1] &= 0xffff;
}
m[15] = t[15] - 0x7fff - ((m[14]>>16) & 1);
b = (m[15]>>16) & 1;
m[14] &= 0xffff;
sel25519(t, m, 1-b);
}
for (i = 0; i < 16; i++) {
o[2*i] = t[i] & 0xff;
o[2*i+1] = t[i]>>8;
}
}
function neq25519(a, b) {
var c = new Uint8Array(32), d = new Uint8Array(32);
pack25519(c, a);
pack25519(d, b);
return crypto_verify_32(c, 0, d, 0);
}
function par25519(a) {
var d = new Uint8Array(32);
pack25519(d, a);
return d[0] & 1;
}
function unpack25519(o, n) {
var i;
for (i = 0; i < 16; i++) o[i] = n[2*i] + (n[2*i+1] << 8);
o[15] &= 0x7fff;
}
function A(o, a, b) {
for (var i = 0; i < 16; i++) o[i] = a[i] + b[i];
}
function Z(o, a, b) {
for (var i = 0; i < 16; i++) o[i] = a[i] - b[i];
}
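// Field multiplication o = a * b mod 2^255 - 19: schoolbook product into 31
// coefficients, fold the upper half back in times 38, then two carry passes.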
function M(o, a, b) {
var v, c,
t0 = 0, t1 = 0, t2 = 0, t3 = 0, t4 = 0, t5 = 0, t6 = 0, t7 = 0,
t8 = 0, t9 = 0, t10 = 0, t11 = 0, t12 = 0, t13 = 0, t14 = 0, t15 = 0,
t16 = 0, t17 = 0, t18 = 0, t19 = 0, t20 = 0, t21 = 0, t22 = 0, t23 = 0,
t24 = 0, t25 = 0, t26 = 0, t27 = 0, t28 = 0, t29 = 0, t30 = 0,
b0 = b[0],
b1 = b[1],
b2 = b[2],
b3 = b[3],
b4 = b[4],
b5 = b[5],
b6 = b[6],
b7 = b[7],
b8 = b[8],
b9 = b[9],
b10 = b[10],
b11 = b[11],
b12 = b[12],
b13 = b[13],
b14 = b[14],
b15 = b[15];
v = a[0];
t0 += v * b0;
t1 += v * b1;
t2 += v * b2;
t3 += v * b3;
t4 += v * b4;
t5 += v * b5;
t6 += v * b6;
t7 += v * b7;
t8 += v * b8;
t9 += v * b9;
t10 += v * b10;
t11 += v * b11;
t12 += v * b12;
t13 += v * b13;
t14 += v * b14;
t15 += v * b15;
v = a[1];
t1 += v * b0;
t2 += v * b1;
t3 += v * b2;
t4 += v * b3;
t5 += v * b4;
t6 += v * b5;
t7 += v * b6;
t8 += v * b7;
t9 += v * b8;
t10 += v * b9;
t11 += v * b10;
t12 += v * b11;
t13 += v * b12;
t14 += v * b13;
t15 += v * b14;
t16 += v * b15;
v = a[2];
t2 += v * b0;
t3 += v * b1;
t4 += v * b2;
t5 += v * b3;
t6 += v * b4;
t7 += v * b5;
t8 += v * b6;
t9 += v * b7;
t10 += v * b8;
t11 += v * b9;
t12 += v * b10;
t13 += v * b11;
t14 += v * b12;
t15 += v * b13;
t16 += v * b14;
t17 += v * b15;
v = a[3];
t3 += v * b0;
t4 += v * b1;
t5 += v * b2;
t6 += v * b3;
t7 += v * b4;
t8 += v * b5;
t9 += v * b6;
t10 += v * b7;
t11 += v * b8;
t12 += v * b9;
t13 += v * b10;
t14 += v * b11;
t15 += v * b12;
t16 += v * b13;
t17 += v * b14;
t18 += v * b15;
v = a[4];
t4 += v * b0;
t5 += v * b1;
t6 += v * b2;
t7 += v * b3;
t8 += v * b4;
t9 += v * b5;
t10 += v * b6;
t11 += v * b7;
t12 += v * b8;
t13 += v * b9;
t14 += v * b10;
t15 += v * b11;
t16 += v * b12;
t17 += v * b13;
t18 += v * b14;
t19 += v * b15;
v = a[5];
t5 += v * b0;
t6 += v * b1;
t7 += v * b2;
t8 += v * b3;
t9 += v * b4;
t10 += v * b5;
t11 += v * b6;
t12 += v * b7;
t13 += v * b8;
t14 += v * b9;
t15 += v * b10;
t16 += v * b11;
t17 += v * b12;
t18 += v * b13;
t19 += v * b14;
t20 += v * b15;
v = a[6];
t6 += v * b0;
t7 += v * b1;
t8 += v * b2;
t9 += v * b3;
t10 += v * b4;
t11 += v * b5;
t12 += v * b6;
t13 += v * b7;
t14 += v * b8;
t15 += v * b9;
t16 += v * b10;
t17 += v * b11;
t18 += v * b12;
t19 += v * b13;
t20 += v * b14;
t21 += v * b15;
v = a[7];
t7 += v * b0;
t8 += v * b1;
t9 += v * b2;
t10 += v * b3;
t11 += v * b4;
t12 += v * b5;
t13 += v * b6;
t14 += v * b7;
t15 += v * b8;
t16 += v * b9;
t17 += v * b10;
t18 += v * b11;
t19 += v * b12;
t20 += v * b13;
t21 += v * b14;
t22 += v * b15;
v = a[8];
t8 += v * b0;
t9 += v * b1;
t10 += v * b2;
t11 += v * b3;
t12 += v * b4;
t13 += v * b5;
t14 += v * b6;
t15 += v * b7;
t16 += v * b8;
t17 += v * b9;
t18 += v * b10;
t19 += v * b11;
t20 += v * b12;
t21 += v * b13;
t22 += v * b14;
t23 += v * b15;
v = a[9];
t9 += v * b0;
t10 += v * b1;
t11 += v * b2;
t12 += v * b3;
t13 += v * b4;
t14 += v * b5;
t15 += v * b6;
t16 += v * b7;
t17 += v * b8;
t18 += v * b9;
t19 += v * b10;
t20 += v * b11;
t21 += v * b12;
t22 += v * b13;
t23 += v * b14;
t24 += v * b15;
v = a[10];
t10 += v * b0;
t11 += v * b1;
t12 += v * b2;
t13 += v * b3;
t14 += v * b4;
t15 += v * b5;
t16 += v * b6;
t17 += v * b7;
t18 += v * b8;
t19 += v * b9;
t20 += v * b10;
t21 += v * b11;
t22 += v * b12;
t23 += v * b13;
t24 += v * b14;
t25 += v * b15;
v = a[11];
t11 += v * b0;
t12 += v * b1;
t13 += v * b2;
t14 += v * b3;
t15 += v * b4;
t16 += v * b5;
t17 += v * b6;
t18 += v * b7;
t19 += v * b8;
t20 += v * b9;
t21 += v * b10;
t22 += v * b11;
t23 += v * b12;
t24 += v * b13;
t25 += v * b14;
t26 += v * b15;
v = a[12];
t12 += v * b0;
t13 += v * b1;
t14 += v * b2;
t15 += v * b3;
t16 += v * b4;
t17 += v * b5;
t18 += v * b6;
t19 += v * b7;
t20 += v * b8;
t21 += v * b9;
t22 += v * b10;
t23 += v * b11;
t24 += v * b12;
t25 += v * b13;
t26 += v * b14;
t27 += v * b15;
v = a[13];
t13 += v * b0;
t14 += v * b1;
t15 += v * b2;
t16 += v * b3;
t17 += v * b4;
t18 += v * b5;
t19 += v * b6;
t20 += v * b7;
t21 += v * b8;
t22 += v * b9;
t23 += v * b10;
t24 += v * b11;
t25 += v * b12;
t26 += v * b13;
t27 += v * b14;
t28 += v * b15;
v = a[14];
t14 += v * b0;
t15 += v * b1;
t16 += v * b2;
t17 += v * b3;
t18 += v * b4;
t19 += v * b5;
t20 += v * b6;
t21 += v * b7;
t22 += v * b8;
t23 += v * b9;
t24 += v * b10;
t25 += v * b11;
t26 += v * b12;
t27 += v * b13;
t28 += v * b14;
t29 += v * b15;
v = a[15];
t15 += v * b0;
t16 += v * b1;
t17 += v * b2;
t18 += v * b3;
t19 += v * b4;
t20 += v * b5;
t21 += v * b6;
t22 += v * b7;
t23 += v * b8;
t24 += v * b9;
t25 += v * b10;
t26 += v * b11;
t27 += v * b12;
t28 += v * b13;
t29 += v * b14;
t30 += v * b15;
t0 += 38 * t16;
t1 += 38 * t17;
t2 += 38 * t18;
t3 += 38 * t19;
t4 += 38 * t20;
t5 += 38 * t21;
t6 += 38 * t22;
t7 += 38 * t23;
t8 += 38 * t24;
t9 += 38 * t25;
t10 += 38 * t26;
t11 += 38 * t27;
t12 += 38 * t28;
t13 += 38 * t29;
t14 += 38 * t30;
// t15 left as is
// first car
c = 1;
v = t0 + c + 65535; c = Math.floor(v / 65536); t0 = v - c * 65536;
v = t1 + c + 65535; c = Math.floor(v / 65536); t1 = v - c * 65536;
v = t2 + c + 65535; c = Math.floor(v / 65536); t2 = v - c * 65536;
v = t3 + c + 65535; c = Math.floor(v / 65536); t3 = v - c * 65536;
v = t4 + c + 65535; c = Math.floor(v / 65536); t4 = v - c * 65536;
v = t5 + c + 65535; c = Math.floor(v / 65536); t5 = v - c * 65536;
v = t6 + c + 65535; c = Math.floor(v / 65536); t6 = v - c * 65536;
v = t7 + c + 65535; c = Math.floor(v / 65536); t7 = v - c * 65536;
v = t8 + c + 65535; c = Math.floor(v / 65536); t8 = v - c * 65536;
v = t9 + c + 65535; c = Math.floor(v / 65536); t9 = v - c * 65536;
v = t10 + c + 65535; c = Math.floor(v / 65536); t10 = v - c * 65536;
v = t11 + c + 65535; c = Math.floor(v / 65536); t11 = v - c * 65536;
v = t12 + c + 65535; c = Math.floor(v / 65536); t12 = v - c * 65536;
v = t13 + c + 65535; c = Math.floor(v / 65536); t13 = v - c * 65536;
v = t14 + c + 65535; c = Math.floor(v / 65536); t14 = v - c * 65536;
v = t15 + c + 65535; c = Math.floor(v / 65536); t15 = v - c * 65536;
t0 += c-1 + 37 * (c-1);
// second car
c = 1;
v = t0 + c + 65535; c = Math.floor(v / 65536); t0 = v - c * 65536;
v = t1 + c + 65535; c = Math.floor(v / 65536); t1 = v - c * 65536;
v = t2 + c + 65535; c = Math.floor(v / 65536); t2 = v - c * 65536;
v = t3 + c + 65535; c = Math.floor(v / 65536); t3 = v - c * 65536;
v = t4 + c + 65535; c = Math.floor(v / 65536); t4 = v - c * 65536;
v = t5 + c + 65535; c = Math.floor(v / 65536); t5 = v - c * 65536;
v = t6 + c + 65535; c = Math.floor(v / 65536); t6 = v - c * 65536;
v = t7 + c + 65535; c = Math.floor(v / 65536); t7 = v - c * 65536;
v = t8 + c + 65535; c = Math.floor(v / 65536); t8 = v - c * 65536;
v = t9 + c + 65535; c = Math.floor(v / 65536); t9 = v - c * 65536;
v = t10 + c + 65535; c = Math.floor(v / 65536); t10 = v - c * 65536;
v = t11 + c + 65535; c = Math.floor(v / 65536); t11 = v - c * 65536;
v = t12 + c + 65535; c = Math.floor(v / 65536); t12 = v - c * 65536;
v = t13 + c + 65535; c = Math.floor(v / 65536); t13 = v - c * 65536;
v = t14 + c + 65535; c = Math.floor(v / 65536); t14 = v - c * 65536;
v = t15 + c + 65535; c = Math.floor(v / 65536); t15 = v - c * 65536;
t0 += c-1 + 37 * (c-1);
o[ 0] = t0;
o[ 1] = t1;
o[ 2] = t2;
o[ 3] = t3;
o[ 4] = t4;
o[ 5] = t5;
o[ 6] = t6;
o[ 7] = t7;
o[ 8] = t8;
o[ 9] = t9;
o[10] = t10;
o[11] = t11;
o[12] = t12;
o[13] = t13;
o[14] = t14;
o[15] = t15;
}
function S(o, a) {
M(o, a, a);
}
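// Field inversion via Fermat's little theorem: raises i to p - 2 = 2^255 - 21
// (square-and-multiply; the exponent's only zero bits are at positions 2 and 4).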
function inv25519(o, i) {
var c = gf();
var a;
for (a = 0; a < 16; a++) c[a] = i[a];
for (a = 253; a >= 0; a--) {
S(c, c);
if(a !== 2 && a !== 4) M(c, c, i);
}
for (a = 0; a < 16; a++) o[a] = c[a];
}
function pow2523(o, i) {
var c = gf();
var a;
for (a = 0; a < 16; a++) c[a] = i[a];
for (a = 250; a >= 0; a--) {
S(c, c);
if(a !== 1) M(c, c, i);
}
for (a = 0; a < 16; a++) o[a] = c[a];
}
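// X25519 scalar multiplication: clamps the scalar n, then runs a constant-time
// Montgomery ladder over the x-coordinate of p.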
function crypto_scalarmult(q, n, p) {
var z = new Uint8Array(32);
var x = new Float64Array(80), r, i;
var a = gf(), b = gf(), c = gf(),
d = gf(), e = gf(), f = gf();
for (i = 0; i < 31; i++) z[i] = n[i];
z[31]=(n[31]&127)|64;
z[0]&=248;
unpack25519(x,p);
for (i = 0; i < 16; i++) {
b[i]=x[i];
d[i]=a[i]=c[i]=0;
}
a[0]=d[0]=1;
for (i=254; i>=0; --i) {
r=(z[i>>>3]>>>(i&7))&1;
sel25519(a,b,r);
sel25519(c,d,r);
A(e,a,c);
Z(a,a,c);
A(c,b,d);
Z(b,b,d);
S(d,e);
S(f,a);
M(a,c,a);
M(c,b,e);
A(e,a,c);
Z(a,a,c);
S(b,a);
Z(c,d,f);
M(a,c,_121665);
A(a,a,d);
M(c,c,a);
M(a,d,f);
M(d,b,x);
S(b,e);
sel25519(a,b,r);
sel25519(c,d,r);
}
for (i = 0; i < 16; i++) {
x[i+16]=a[i];
x[i+32]=c[i];
x[i+48]=b[i];
x[i+64]=d[i];
}
var x32 = x.subarray(32);
var x16 = x.subarray(16);
inv25519(x32,x32);
M(x16,x16,x32);
pack25519(q,x16);
return 0;
}
function crypto_scalarmult_base(q, n) {
return crypto_scalarmult(q, n, _9);
}
function crypto_box_keypair(y, x) {
randombytes(x, 32);
return crypto_scalarmult_base(y, x);
}
function crypto_box_beforenm(k, y, x) {
var s = new Uint8Array(32);
crypto_scalarmult(s, x, y);
return crypto_core_hsalsa20(k, _0, s, sigma);
}
var crypto_box_afternm = crypto_secretbox;
var crypto_box_open_afternm = crypto_secretbox_open;
function crypto_box(c, m, d, n, y, x) {
var k = new Uint8Array(32);
crypto_box_beforenm(k, y, x);
return crypto_box_afternm(c, m, d, n, k);
}
function crypto_box_open(m, c, d, n, y, x) {
var k = new Uint8Array(32);
crypto_box_beforenm(k, y, x);
return crypto_box_open_afternm(m, c, d, n, k);
}
var K = [
0x428a2f98, 0xd728ae22, 0x71374491, 0x23ef65cd,
0xb5c0fbcf, 0xec4d3b2f, 0xe9b5dba5, 0x8189dbbc,
0x3956c25b, 0xf348b538, 0x59f111f1, 0xb605d019,
0x923f82a4, 0xaf194f9b, 0xab1c5ed5, 0xda6d8118,
0xd807aa98, 0xa3030242, 0x12835b01, 0x45706fbe,
0x243185be, 0x4ee4b28c, 0x550c7dc3, 0xd5ffb4e2,
0x72be5d74, 0xf27b896f, 0x80deb1fe, 0x3b1696b1,
0x9bdc06a7, 0x25c71235, 0xc19bf174, 0xcf692694,
0xe49b69c1, 0x9ef14ad2, 0xefbe4786, 0x384f25e3,
0x0fc19dc6, 0x8b8cd5b5, 0x240ca1cc, 0x77ac9c65,
0x2de92c6f, 0x592b0275, 0x4a7484aa, 0x6ea6e483,
0x5cb0a9dc, 0xbd41fbd4, 0x76f988da, 0x831153b5,
0x983e5152, 0xee66dfab, 0xa831c66d, 0x2db43210,
0xb00327c8, 0x98fb213f, 0xbf597fc7, 0xbeef0ee4,
0xc6e00bf3, 0x3da88fc2, 0xd5a79147, 0x930aa725,
0x06ca6351, 0xe003826f, 0x14292967, 0x0a0e6e70,
0x27b70a85, 0x46d22ffc, 0x2e1b2138, 0x5c26c926,
0x4d2c6dfc, 0x5ac42aed, 0x53380d13, 0x9d95b3df,
0x650a7354, 0x8baf63de, 0x766a0abb, 0x3c77b2a8,
0x81c2c92e, 0x47edaee6, 0x92722c85, 0x1482353b,
0xa2bfe8a1, 0x4cf10364, 0xa81a664b, 0xbc423001,
0xc24b8b70, 0xd0f89791, 0xc76c51a3, 0x0654be30,
0xd192e819, 0xd6ef5218, 0xd6990624, 0x5565a910,
0xf40e3585, 0x5771202a, 0x106aa070, 0x32bbd1b8,
0x19a4c116, 0xb8d2d0c8, 0x1e376c08, 0x5141ab53,
0x2748774c, 0xdf8eeb99, 0x34b0bcb5, 0xe19b48a8,
0x391c0cb3, 0xc5c95a63, 0x4ed8aa4a, 0xe3418acb,
0x5b9cca4f, 0x7763e373, 0x682e6ff3, 0xd6b2b8a3,
0x748f82ee, 0x5defb2fc, 0x78a5636f, 0x43172f60,
0x84c87814, 0xa1f0ab72, 0x8cc70208, 0x1a6439ec,
0x90befffa, 0x23631e28, 0xa4506ceb, 0xde82bde9,
0xbef9a3f7, 0xb2c67915, 0xc67178f2, 0xe372532b,
0xca273ece, 0xea26619c, 0xd186b8c7, 0x21c0c207,
0xeada7dd6, 0xcde0eb1e, 0xf57d4f7f, 0xee6ed178,
0x06f067aa, 0x72176fba, 0x0a637dc5, 0xa2c898a6,
0x113f9804, 0xbef90dae, 0x1b710b35, 0x131c471b,
0x28db77f5, 0x23047d84, 0x32caab7b, 0x40c72493,
0x3c9ebe0a, 0x15c9bebc, 0x431d67c4, 0x9c100d4c,
0x4cc5d4be, 0xcb3e42b6, 0x597f299c, 0xfc657e2a,
0x5fcb6fab, 0x3ad6faec, 0x6c44198c, 0x4a475817
];
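// SHA-512 compression over 128-byte blocks; 64-bit words are emulated with
// high/low 32-bit halves (hh/hl, ah*/al*), and additions are split further into
// 16-bit pieces (a, b, c, d) to keep intermediate sums within 32-bit bitwise range.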
function crypto_hashblocks_hl(hh, hl, m, n) {
var wh = new Int32Array(16), wl = new Int32Array(16),
bh0, bh1, bh2, bh3, bh4, bh5, bh6, bh7,
bl0, bl1, bl2, bl3, bl4, bl5, bl6, bl7,
th, tl, i, j, h, l, a, b, c, d;
var ah0 = hh[0],
ah1 = hh[1],
ah2 = hh[2],
ah3 = hh[3],
ah4 = hh[4],
ah5 = hh[5],
ah6 = hh[6],
ah7 = hh[7],
al0 = hl[0],
al1 = hl[1],
al2 = hl[2],
al3 = hl[3],
al4 = hl[4],
al5 = hl[5],
al6 = hl[6],
al7 = hl[7];
var pos = 0;
while (n >= 128) {
for (i = 0; i < 16; i++) {
j = 8 * i + pos;
wh[i] = (m[j+0] << 24) | (m[j+1] << 16) | (m[j+2] << 8) | m[j+3];
wl[i] = (m[j+4] << 24) | (m[j+5] << 16) | (m[j+6] << 8) | m[j+7];
}
for (i = 0; i < 80; i++) {
bh0 = ah0;
bh1 = ah1;
bh2 = ah2;
bh3 = ah3;
bh4 = ah4;
bh5 = ah5;
bh6 = ah6;
bh7 = ah7;
bl0 = al0;
bl1 = al1;
bl2 = al2;
bl3 = al3;
bl4 = al4;
bl5 = al5;
bl6 = al6;
bl7 = al7;
// add
h = ah7;
l = al7;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
// Sigma1
h = ((ah4 >>> 14) | (al4 << (32-14))) ^ ((ah4 >>> 18) | (al4 << (32-18))) ^ ((al4 >>> (41-32)) | (ah4 << (32-(41-32))));
l = ((al4 >>> 14) | (ah4 << (32-14))) ^ ((al4 >>> 18) | (ah4 << (32-18))) ^ ((ah4 >>> (41-32)) | (al4 << (32-(41-32))));
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
// Ch
h = (ah4 & ah5) ^ (~ah4 & ah6);
l = (al4 & al5) ^ (~al4 & al6);
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
// K
h = K[i*2];
l = K[i*2+1];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
// w
h = wh[i%16];
l = wl[i%16];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
th = c & 0xffff | d << 16;
tl = a & 0xffff | b << 16;
// add
h = th;
l = tl;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
// Sigma0
h = ((ah0 >>> 28) | (al0 << (32-28))) ^ ((al0 >>> (34-32)) | (ah0 << (32-(34-32)))) ^ ((al0 >>> (39-32)) | (ah0 << (32-(39-32))));
l = ((al0 >>> 28) | (ah0 << (32-28))) ^ ((ah0 >>> (34-32)) | (al0 << (32-(34-32)))) ^ ((ah0 >>> (39-32)) | (al0 << (32-(39-32))));
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
// Maj
h = (ah0 & ah1) ^ (ah0 & ah2) ^ (ah1 & ah2);
l = (al0 & al1) ^ (al0 & al2) ^ (al1 & al2);
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
bh7 = (c & 0xffff) | (d << 16);
bl7 = (a & 0xffff) | (b << 16);
// add
h = bh3;
l = bl3;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = th;
l = tl;
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
bh3 = (c & 0xffff) | (d << 16);
bl3 = (a & 0xffff) | (b << 16);
ah1 = bh0;
ah2 = bh1;
ah3 = bh2;
ah4 = bh3;
ah5 = bh4;
ah6 = bh5;
ah7 = bh6;
ah0 = bh7;
al1 = bl0;
al2 = bl1;
al3 = bl2;
al4 = bl3;
al5 = bl4;
al6 = bl5;
al7 = bl6;
al0 = bl7;
if (i%16 === 15) {
for (j = 0; j < 16; j++) {
// add
h = wh[j];
l = wl[j];
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = wh[(j+9)%16];
l = wl[(j+9)%16];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
// sigma0
th = wh[(j+1)%16];
tl = wl[(j+1)%16];
h = ((th >>> 1) | (tl << (32-1))) ^ ((th >>> 8) | (tl << (32-8))) ^ (th >>> 7);
l = ((tl >>> 1) | (th << (32-1))) ^ ((tl >>> 8) | (th << (32-8))) ^ ((tl >>> 7) | (th << (32-7)));
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
// sigma1
th = wh[(j+14)%16];
tl = wl[(j+14)%16];
h = ((th >>> 19) | (tl << (32-19))) ^ ((tl >>> (61-32)) | (th << (32-(61-32)))) ^ (th >>> 6);
l = ((tl >>> 19) | (th << (32-19))) ^ ((th >>> (61-32)) | (tl << (32-(61-32)))) ^ ((tl >>> 6) | (th << (32-6)));
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
wh[j] = (c & 0xffff) | (d << 16);
wl[j] = (a & 0xffff) | (b << 16);
}
}
}
// add
h = ah0;
l = al0;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = hh[0];
l = hl[0];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
hh[0] = ah0 = (c & 0xffff) | (d << 16);
hl[0] = al0 = (a & 0xffff) | (b << 16);
h = ah1;
l = al1;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = hh[1];
l = hl[1];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
hh[1] = ah1 = (c & 0xffff) | (d << 16);
hl[1] = al1 = (a & 0xffff) | (b << 16);
h = ah2;
l = al2;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = hh[2];
l = hl[2];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
hh[2] = ah2 = (c & 0xffff) | (d << 16);
hl[2] = al2 = (a & 0xffff) | (b << 16);
h = ah3;
l = al3;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = hh[3];
l = hl[3];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
hh[3] = ah3 = (c & 0xffff) | (d << 16);
hl[3] = al3 = (a & 0xffff) | (b << 16);
h = ah4;
l = al4;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = hh[4];
l = hl[4];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
hh[4] = ah4 = (c & 0xffff) | (d << 16);
hl[4] = al4 = (a & 0xffff) | (b << 16);
h = ah5;
l = al5;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = hh[5];
l = hl[5];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
hh[5] = ah5 = (c & 0xffff) | (d << 16);
hl[5] = al5 = (a & 0xffff) | (b << 16);
h = ah6;
l = al6;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = hh[6];
l = hl[6];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
hh[6] = ah6 = (c & 0xffff) | (d << 16);
hl[6] = al6 = (a & 0xffff) | (b << 16);
h = ah7;
l = al7;
a = l & 0xffff; b = l >>> 16;
c = h & 0xffff; d = h >>> 16;
h = hh[7];
l = hl[7];
a += l & 0xffff; b += l >>> 16;
c += h & 0xffff; d += h >>> 16;
b += a >>> 16;
c += b >>> 16;
d += c >>> 16;
hh[7] = ah7 = (c & 0xffff) | (d << 16);
hl[7] = al7 = (a & 0xffff) | (b << 16);
pos += 128;
n -= 128;
}
return n;
}
function crypto_hash(out, m, n) {
var hh = new Int32Array(8),
hl = new Int32Array(8),
x = new Uint8Array(256),
i, b = n;
hh[0] = 0x6a09e667;
hh[1] = 0xbb67ae85;
hh[2] = 0x3c6ef372;
hh[3] = 0xa54ff53a;
hh[4] = 0x510e527f;
hh[5] = 0x9b05688c;
hh[6] = 0x1f83d9ab;
hh[7] = 0x5be0cd19;
hl[0] = 0xf3bcc908;
hl[1] = 0x84caa73b;
hl[2] = 0xfe94f82b;
hl[3] = 0x5f1d36f1;
hl[4] = 0xade682d1;
hl[5] = 0x2b3e6c1f;
hl[6] = 0xfb41bd6b;
hl[7] = 0x137e2179;
crypto_hashblocks_hl(hh, hl, m, n);
n %= 128;
for (i = 0; i < n; i++) x[i] = m[b-n+i];
x[n] = 128;
n = 256-128*(n<112?1:0);
x[n-9] = 0;
ts64(x, n-8, (b / 0x20000000) | 0, b << 3);
crypto_hashblocks_hl(hh, hl, x, n);
for (i = 0; i < 8; i++) ts64(out, 8*i, hh[i], hl[i]);
return 0;
}
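// Ed25519 point addition p += q in extended twisted Edwards coordinates (X, Y, Z, T).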
function add(p, q) {
var a = gf(), b = gf(), c = gf(),
d = gf(), e = gf(), f = gf(),
g = gf(), h = gf(), t = gf();
Z(a, p[1], p[0]);
Z(t, q[1], q[0]);
M(a, a, t);
A(b, p[0], p[1]);
A(t, q[0], q[1]);
M(b, b, t);
M(c, p[3], q[3]);
M(c, c, D2);
M(d, p[2], q[2]);
A(d, d, d);
Z(e, b, a);
Z(f, d, c);
A(g, d, c);
A(h, b, a);
M(p[0], e, f);
M(p[1], h, g);
M(p[2], g, f);
M(p[3], e, h);
}
function cswap(p, q, b) {
var i;
for (i = 0; i < 4; i++) {
sel25519(p[i], q[i], b);
}
}
function pack(r, p) {
var tx = gf(), ty = gf(), zi = gf();
inv25519(zi, p[2]);
M(tx, p[0], zi);
M(ty, p[1], zi);
pack25519(r, ty);
r[31] ^= par25519(tx) << 7;
}
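// Scalar multiplication p = s * q on the Edwards curve: a constant-time
// double-and-add loop driven by conditional swaps.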
function scalarmult(p, q, s) {
var b, i;
set25519(p[0], gf0);
set25519(p[1], gf1);
set25519(p[2], gf1);
set25519(p[3], gf0);
for (i = 255; i >= 0; --i) {
b = (s[(i/8)|0] >> (i&7)) & 1;
cswap(p, q, b);
add(q, p);
add(p, p);
cswap(p, q, b);
}
}
function scalarbase(p, s) {
var q = [gf(), gf(), gf(), gf()];
set25519(q[0], X);
set25519(q[1], Y);
set25519(q[2], gf1);
M(q[3], X, Y);
scalarmult(p, q, s);
}
function crypto_sign_keypair(pk, sk, seeded) {
var d = new Uint8Array(64);
var p = [gf(), gf(), gf(), gf()];
var i;
if (!seeded) randombytes(sk, 32);
crypto_hash(d, sk, 32);
d[0] &= 248;
d[31] &= 127;
d[31] |= 64;
scalarbase(p, d);
pack(pk, p);
for (i = 0; i < 32; i++) sk[i+32] = pk[i];
return 0;
}
var L = new Float64Array([0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x10]);
function modL(r, x) {
var carry, i, j, k;
for (i = 63; i >= 32; --i) {
carry = 0;
for (j = i - 32, k = i - 12; j < k; ++j) {
x[j] += carry - 16 * x[i] * L[j - (i - 32)];
carry = (x[j] + 128) >> 8;
x[j] -= carry * 256;
}
x[j] += carry;
x[i] = 0;
}
carry = 0;
for (j = 0; j < 32; j++) {
x[j] += carry - (x[31] >> 4) * L[j];
carry = x[j] >> 8;
x[j] &= 255;
}
for (j = 0; j < 32; j++) x[j] -= carry * L[j];
for (i = 0; i < 32; i++) {
x[i+1] += x[i] >> 8;
r[i] = x[i] & 255;
}
}
function reduce(r) {
var x = new Float64Array(64), i;
for (i = 0; i < 64; i++) x[i] = r[i];
for (i = 0; i < 64; i++) r[i] = 0;
modL(r, x);
}
// Note: difference from C - smlen returned, not passed as argument.
function crypto_sign(sm, m, n, sk) {
var d = new Uint8Array(64), h = new Uint8Array(64), r = new Uint8Array(64);
var i, j, x = new Float64Array(64);
var p = [gf(), gf(), gf(), gf()];
crypto_hash(d, sk, 32);
d[0] &= 248;
d[31] &= 127;
d[31] |= 64;
var smlen = n + 64;
for (i = 0; i < n; i++) sm[64 + i] = m[i];
for (i = 0; i < 32; i++) sm[32 + i] = d[32 + i];
crypto_hash(r, sm.subarray(32), n+32);
reduce(r);
scalarbase(p, r);
pack(sm, p);
for (i = 32; i < 64; i++) sm[i] = sk[i];
crypto_hash(h, sm, n + 64);
reduce(h);
for (i = 0; i < 64; i++) x[i] = 0;
for (i = 0; i < 32; i++) x[i] = r[i];
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
x[i+j] += h[i] * d[j];
}
}
modL(sm.subarray(32), x);
return smlen;
}
function unpackneg(r, p) {
var t = gf(), chk = gf(), num = gf(),
den = gf(), den2 = gf(), den4 = gf(),
den6 = gf();
set25519(r[2], gf1);
unpack25519(r[1], p);
S(num, r[1]);
M(den, num, D);
Z(num, num, r[2]);
A(den, r[2], den);
S(den2, den);
S(den4, den2);
M(den6, den4, den2);
M(t, den6, num);
M(t, t, den);
pow2523(t, t);
M(t, t, num);
M(t, t, den);
M(t, t, den);
M(r[0], t, den);
S(chk, r[0]);
M(chk, chk, den);
if (neq25519(chk, num)) M(r[0], r[0], I);
S(chk, r[0]);
M(chk, chk, den);
if (neq25519(chk, num)) return -1;
if (par25519(r[0]) === (p[31]>>7)) Z(r[0], gf0, r[0]);
M(r[3], r[0], r[1]);
return 0;
}
function crypto_sign_open(m, sm, n, pk) {
var i, mlen;
var t = new Uint8Array(32), h = new Uint8Array(64);
var p = [gf(), gf(), gf(), gf()],
q = [gf(), gf(), gf(), gf()];
mlen = -1;
if (n < 64) return -1;
if (unpackneg(q, pk)) return -1;
for (i = 0; i < n; i++) m[i] = sm[i];
for (i = 0; i < 32; i++) m[i+32] = pk[i];
crypto_hash(h, m, n);
reduce(h);
scalarmult(p, q, h);
scalarbase(q, sm.subarray(32));
add(p, q);
pack(t, p);
n -= 64;
if (crypto_verify_32(sm, 0, t, 0)) {
for (i = 0; i < n; i++) m[i] = 0;
return -1;
}
for (i = 0; i < n; i++) m[i] = sm[i + 64];
mlen = n;
return mlen;
}
var crypto_secretbox_KEYBYTES = 32,
crypto_secretbox_NONCEBYTES = 24,
crypto_secretbox_ZEROBYTES = 32,
crypto_secretbox_BOXZEROBYTES = 16,
crypto_scalarmult_BYTES = 32,
crypto_scalarmult_SCALARBYTES = 32,
crypto_box_PUBLICKEYBYTES = 32,
crypto_box_SECRETKEYBYTES = 32,
crypto_box_BEFORENMBYTES = 32,
crypto_box_NONCEBYTES = crypto_secretbox_NONCEBYTES,
crypto_box_ZEROBYTES = crypto_secretbox_ZEROBYTES,
crypto_box_BOXZEROBYTES = crypto_secretbox_BOXZEROBYTES,
crypto_sign_BYTES = 64,
crypto_sign_PUBLICKEYBYTES = 32,
crypto_sign_SECRETKEYBYTES = 64,
crypto_sign_SEEDBYTES = 32,
crypto_hash_BYTES = 64;
nacl.lowlevel = {
crypto_core_hsalsa20: crypto_core_hsalsa20,
crypto_stream_xor: crypto_stream_xor,
crypto_stream: crypto_stream,
crypto_stream_salsa20_xor: crypto_stream_salsa20_xor,
crypto_stream_salsa20: crypto_stream_salsa20,
crypto_onetimeauth: crypto_onetimeauth,
crypto_onetimeauth_verify: crypto_onetimeauth_verify,
crypto_verify_16: crypto_verify_16,
crypto_verify_32: crypto_verify_32,
crypto_secretbox: crypto_secretbox,
crypto_secretbox_open: crypto_secretbox_open,
crypto_scalarmult: crypto_scalarmult,
crypto_scalarmult_base: crypto_scalarmult_base,
crypto_box_beforenm: crypto_box_beforenm,
crypto_box_afternm: crypto_box_afternm,
crypto_box: crypto_box,
crypto_box_open: crypto_box_open,
crypto_box_keypair: crypto_box_keypair,
crypto_hash: crypto_hash,
crypto_sign: crypto_sign,
crypto_sign_keypair: crypto_sign_keypair,
crypto_sign_open: crypto_sign_open,
crypto_secretbox_KEYBYTES: crypto_secretbox_KEYBYTES,
crypto_secretbox_NONCEBYTES: crypto_secretbox_NONCEBYTES,
crypto_secretbox_ZEROBYTES: crypto_secretbox_ZEROBYTES,
crypto_secretbox_BOXZEROBYTES: crypto_secretbox_BOXZEROBYTES,
crypto_scalarmult_BYTES: crypto_scalarmult_BYTES,
crypto_scalarmult_SCALARBYTES: crypto_scalarmult_SCALARBYTES,
crypto_box_PUBLICKEYBYTES: crypto_box_PUBLICKEYBYTES,
crypto_box_SECRETKEYBYTES: crypto_box_SECRETKEYBYTES,
crypto_box_BEFORENMBYTES: crypto_box_BEFORENMBYTES,
crypto_box_NONCEBYTES: crypto_box_NONCEBYTES,
crypto_box_ZEROBYTES: crypto_box_ZEROBYTES,
crypto_box_BOXZEROBYTES: crypto_box_BOXZEROBYTES,
crypto_sign_BYTES: crypto_sign_BYTES,
crypto_sign_PUBLICKEYBYTES: crypto_sign_PUBLICKEYBYTES,
crypto_sign_SECRETKEYBYTES: crypto_sign_SECRETKEYBYTES,
crypto_sign_SEEDBYTES: crypto_sign_SEEDBYTES,
crypto_hash_BYTES: crypto_hash_BYTES
};
/* High-level API */
function checkLengths(k, n) {
if (k.length !== crypto_secretbox_KEYBYTES) throw new Error('bad key size');
if (n.length !== crypto_secretbox_NONCEBYTES) throw new Error('bad nonce size');
}
function checkBoxLengths(pk, sk) {
if (pk.length !== crypto_box_PUBLICKEYBYTES) throw new Error('bad public key size');
if (sk.length !== crypto_box_SECRETKEYBYTES) throw new Error('bad secret key size');
}
function checkArrayTypes() {
var t, i;
for (i = 0; i < arguments.length; i++) {
if ((t = Object.prototype.toString.call(arguments[i])) !== '[object Uint8Array]')
throw new TypeError('unexpected type ' + t + ', use Uint8Array');
}
}
function cleanup(arr) {
for (var i = 0; i < arr.length; i++) arr[i] = 0;
}
// TODO: Completely remove this in v0.15.
if (!nacl.util) {
nacl.util = {};
nacl.util.decodeUTF8 = nacl.util.encodeUTF8 = nacl.util.encodeBase64 = nacl.util.decodeBase64 = function() {
throw new Error('nacl.util moved into separate package: https://github.com/dchest/tweetnacl-util-js');
};
}
nacl.randomBytes = function(n) {
var b = new Uint8Array(n);
randombytes(b, n);
return b;
};
nacl.secretbox = function(msg, nonce, key) {
checkArrayTypes(msg, nonce, key);
checkLengths(key, nonce);
var m = new Uint8Array(crypto_secretbox_ZEROBYTES + msg.length);
var c = new Uint8Array(m.length);
for (var i = 0; i < msg.length; i++) m[i+crypto_secretbox_ZEROBYTES] = msg[i];
crypto_secretbox(c, m, m.length, nonce, key);
return c.subarray(crypto_secretbox_BOXZEROBYTES);
};
nacl.secretbox.open = function(box, nonce, key) {
checkArrayTypes(box, nonce, key);
checkLengths(key, nonce);
var c = new Uint8Array(crypto_secretbox_BOXZEROBYTES + box.length);
var m = new Uint8Array(c.length);
for (var i = 0; i < box.length; i++) c[i+crypto_secretbox_BOXZEROBYTES] = box[i];
if (c.length < 32) return false;
if (crypto_secretbox_open(m, c, c.length, nonce, key) !== 0) return false;
return m.subarray(crypto_secretbox_ZEROBYTES);
};
nacl.secretbox.keyLength = crypto_secretbox_KEYBYTES;
nacl.secretbox.nonceLength = crypto_secretbox_NONCEBYTES;
nacl.secretbox.overheadLength = crypto_secretbox_BOXZEROBYTES;
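// Illustrative only (not part of the original source): a minimal secretbox
// round trip, assuming `message` is already a Uint8Array and a CSPRNG is
// available for nacl.randomBytes.
//
//   var key = nacl.randomBytes(nacl.secretbox.keyLength);
//   var nonce = nacl.randomBytes(nacl.secretbox.nonceLength);
//   var box = nacl.secretbox(message, nonce, key);
//   var opened = nacl.secretbox.open(box, nonce, key); // Uint8Array, or false on failure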
nacl.scalarMult = function(n, p) {
checkArrayTypes(n, p);
if (n.length !== crypto_scalarmult_SCALARBYTES) throw new Error('bad n size');
if (p.length !== crypto_scalarmult_BYTES) throw new Error('bad p size');
var q = new Uint8Array(crypto_scalarmult_BYTES);
crypto_scalarmult(q, n, p);
return q;
};
nacl.scalarMult.base = function(n) {
checkArrayTypes(n);
if (n.length !== crypto_scalarmult_SCALARBYTES) throw new Error('bad n size');
var q = new Uint8Array(crypto_scalarmult_BYTES);
crypto_scalarmult_base(q, n);
return q;
};
nacl.scalarMult.scalarLength = crypto_scalarmult_SCALARBYTES;
nacl.scalarMult.groupElementLength = crypto_scalarmult_BYTES;
nacl.box = function(msg, nonce, publicKey, secretKey) {
var k = nacl.box.before(publicKey, secretKey);
return nacl.secretbox(msg, nonce, k);
};
nacl.box.before = function(publicKey, secretKey) {
checkArrayTypes(publicKey, secretKey);
checkBoxLengths(publicKey, secretKey);
var k = new Uint8Array(crypto_box_BEFORENMBYTES);
crypto_box_beforenm(k, publicKey, secretKey);
return k;
};
nacl.box.after = nacl.secretbox;
nacl.box.open = function(msg, nonce, publicKey, secretKey) {
var k = nacl.box.before(publicKey, secretKey);
return nacl.secretbox.open(msg, nonce, k);
};
nacl.box.open.after = nacl.secretbox.open;
nacl.box.keyPair = function() {
var pk = new Uint8Array(crypto_box_PUBLICKEYBYTES);
var sk = new Uint8Array(crypto_box_SECRETKEYBYTES);
crypto_box_keypair(pk, sk);
return {publicKey: pk, secretKey: sk};
};
nacl.box.keyPair.fromSecretKey = function(secretKey) {
checkArrayTypes(secretKey);
if (secretKey.length !== crypto_box_SECRETKEYBYTES)
throw new Error('bad secret key size');
var pk = new Uint8Array(crypto_box_PUBLICKEYBYTES);
crypto_scalarmult_base(pk, secretKey);
return {publicKey: pk, secretKey: new Uint8Array(secretKey)};
};
nacl.box.publicKeyLength = crypto_box_PUBLICKEYBYTES;
nacl.box.secretKeyLength = crypto_box_SECRETKEYBYTES;
nacl.box.sharedKeyLength = crypto_box_BEFORENMBYTES;
nacl.box.nonceLength = crypto_box_NONCEBYTES;
nacl.box.overheadLength = nacl.secretbox.overheadLength;
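// Illustrative only: authenticated public-key encryption between two parties;
// both key pairs are generated in one place purely for the sake of the sketch.
//
//   var alice = nacl.box.keyPair();
//   var bob = nacl.box.keyPair();
//   var nonce = nacl.randomBytes(nacl.box.nonceLength);
//   var cipher = nacl.box(message, nonce, bob.publicKey, alice.secretKey);
//   var plain = nacl.box.open(cipher, nonce, alice.publicKey, bob.secretKey);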
nacl.sign = function(msg, secretKey) {
checkArrayTypes(msg, secretKey);
if (secretKey.length !== crypto_sign_SECRETKEYBYTES)
throw new Error('bad secret key size');
var signedMsg = new Uint8Array(crypto_sign_BYTES+msg.length);
crypto_sign(signedMsg, msg, msg.length, secretKey);
return signedMsg;
};
nacl.sign.open = function(signedMsg, publicKey) {
if (arguments.length !== 2)
throw new Error('nacl.sign.open accepts 2 arguments; did you mean to use nacl.sign.detached.verify?');
checkArrayTypes(signedMsg, publicKey);
if (publicKey.length !== crypto_sign_PUBLICKEYBYTES)
throw new Error('bad public key size');
var tmp = new Uint8Array(signedMsg.length);
var mlen = crypto_sign_open(tmp, signedMsg, signedMsg.length, publicKey);
if (mlen < 0) return null;
var m = new Uint8Array(mlen);
for (var i = 0; i < m.length; i++) m[i] = tmp[i];
return m;
};
nacl.sign.detached = function(msg, secretKey) {
var signedMsg = nacl.sign(msg, secretKey);
var sig = new Uint8Array(crypto_sign_BYTES);
for (var i = 0; i < sig.length; i++) sig[i] = signedMsg[i];
return sig;
};
nacl.sign.detached.verify = function(msg, sig, publicKey) {
checkArrayTypes(msg, sig, publicKey);
if (sig.length !== crypto_sign_BYTES)
throw new Error('bad signature size');
if (publicKey.length !== crypto_sign_PUBLICKEYBYTES)
throw new Error('bad public key size');
var sm = new Uint8Array(crypto_sign_BYTES + msg.length);
var m = new Uint8Array(crypto_sign_BYTES + msg.length);
var i;
for (i = 0; i < crypto_sign_BYTES; i++) sm[i] = sig[i];
for (i = 0; i < msg.length; i++) sm[i+crypto_sign_BYTES] = msg[i];
return (crypto_sign_open(m, sm, sm.length, publicKey) >= 0);
};
nacl.sign.keyPair = function() {
var pk = new Uint8Array(crypto_sign_PUBLICKEYBYTES);
var sk = new Uint8Array(crypto_sign_SECRETKEYBYTES);
crypto_sign_keypair(pk, sk);
return {publicKey: pk, secretKey: sk};
};
nacl.sign.keyPair.fromSecretKey = function(secretKey) {
checkArrayTypes(secretKey);
if (secretKey.length !== crypto_sign_SECRETKEYBYTES)
throw new Error('bad secret key size');
var pk = new Uint8Array(crypto_sign_PUBLICKEYBYTES);
for (var i = 0; i < pk.length; i++) pk[i] = secretKey[32+i];
return {publicKey: pk, secretKey: new Uint8Array(secretKey)};
};
nacl.sign.keyPair.fromSeed = function(seed) {
checkArrayTypes(seed);
if (seed.length !== crypto_sign_SEEDBYTES)
throw new Error('bad seed size');
var pk = new Uint8Array(crypto_sign_PUBLICKEYBYTES);
var sk = new Uint8Array(crypto_sign_SECRETKEYBYTES);
for (var i = 0; i < 32; i++) sk[i] = seed[i];
crypto_sign_keypair(pk, sk, true);
return {publicKey: pk, secretKey: sk};
};
nacl.sign.publicKeyLength = crypto_sign_PUBLICKEYBYTES;
nacl.sign.secretKeyLength = crypto_sign_SECRETKEYBYTES;
nacl.sign.seedLength = crypto_sign_SEEDBYTES;
nacl.sign.signatureLength = crypto_sign_BYTES;
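// Illustrative only: detached signing and verification with a fresh key pair,
// assuming `message` is a Uint8Array.
//
//   var kp = nacl.sign.keyPair();
//   var sig = nacl.sign.detached(message, kp.secretKey);
//   var ok = nacl.sign.detached.verify(message, sig, kp.publicKey); // boolean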
nacl.hash = function(msg) {
checkArrayTypes(msg);
var h = new Uint8Array(crypto_hash_BYTES);
crypto_hash(h, msg, msg.length);
return h;
};
nacl.hash.hashLength = crypto_hash_BYTES;
nacl.verify = function(x, y) {
checkArrayTypes(x, y);
// Zero length arguments are considered not equal.
if (x.length === 0 || y.length === 0) return false;
if (x.length !== y.length) return false;
return (vn(x, 0, y, 0, x.length) === 0) ? true : false;
};
nacl.setPRNG = function(fn) {
randombytes = fn;
};
(function() {
// Initialize PRNG if environment provides CSPRNG.
// If not, methods calling randombytes will throw.
var crypto = typeof self !== 'undefined' ? (self.crypto || self.msCrypto) : null;
if (crypto && crypto.getRandomValues) {
// Browsers.
var QUOTA = 65536;
nacl.setPRNG(function(x, n) {
var i, v = new Uint8Array(n);
for (i = 0; i < n; i += QUOTA) {
crypto.getRandomValues(v.subarray(i, i + Math.min(n - i, QUOTA)));
}
for (i = 0; i < n; i++) x[i] = v[i];
cleanup(v);
});
} else if (typeof require !== 'undefined') {
// Node.js.
crypto = require('crypto');
if (crypto && crypto.randomBytes) {
nacl.setPRNG(function(x, n) {
var i, v = crypto.randomBytes(n);
for (i = 0; i < n; i++) x[i] = v[i];
cleanup(v);
});
}
}
})();
})(typeof module !== 'undefined' && module.exports ? module.exports : (self.nacl = self.nacl || {}));
| thehonestape/avt415 | wp-content/themes/portfolio/node_modules/tweetnacl/nacl-fast.js | JavaScript | gpl-2.0 | 62,274 |
/*
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* This file includes implementation of UBI character device operations.
*
* There are two kinds of character devices in UBI: UBI character devices and
* UBI volume character devices. UBI character devices allow users to
* manipulate whole volumes: create, remove, and re-size them. Volume character
* devices provide volume I/O capabilities.
*
* Major and minor numbers are assigned dynamically to both UBI and volume
* character devices.
*
* Well, there is the third kind of character devices - the UBI control
 * character device, which allows one to manipulate UBI devices themselves - create and
* delete them. In other words, it is used for attaching and detaching MTD
* devices.
*/
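/*
 * Illustrative sketch, not part of this driver: roughly how user-space is
 * expected to attach an MTD device through the UBI control device handled
 * by ctrl_cdev_ioctl() below. The device node path, the use of
 * UBI_DEV_NUM_AUTO and the missing error handling are assumptions made for
 * the example only; see include/mtd/ubi-user.h for the ioctl definitions.
 * On success the assigned UBI device number is written back into the first
 * field of the request.
 *
 *	struct ubi_attach_req req = {
 *		.ubi_num = UBI_DEV_NUM_AUTO,
 *		.mtd_num = 0,
 *		.vid_hdr_offset = 0,
 *	};
 *	int fd = open("/dev/ubi_ctrl", O_RDWR);
 *	if (ioctl(fd, UBI_IOCATT, &req) == 0)
 *		printf("attached as UBI device %d\n", req.ubi_num);
 */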
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"
/**
* get_exclusive - get exclusive access to an UBI volume.
* @desc: volume descriptor
*
* This function changes UBI volume open mode to "exclusive". Returns previous
* mode value (positive integer) in case of success and a negative error code
* in case of failure.
*/
static int get_exclusive(struct ubi_volume_desc *desc)
{
int users, err;
struct ubi_volume *vol = desc->vol;
spin_lock(&vol->ubi->volumes_lock);
users = vol->readers + vol->writers + vol->exclusive;
ubi_assert(users > 0);
if (users > 1) {
dbg_err("%d users for volume %d", users, vol->vol_id);
err = -EBUSY;
} else {
vol->readers = vol->writers = 0;
vol->exclusive = 1;
err = desc->mode;
desc->mode = UBI_EXCLUSIVE;
}
spin_unlock(&vol->ubi->volumes_lock);
return err;
}
/**
* revoke_exclusive - revoke exclusive mode.
* @desc: volume descriptor
* @mode: new mode to switch to
*/
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
struct ubi_volume *vol = desc->vol;
spin_lock(&vol->ubi->volumes_lock);
ubi_assert(vol->readers == 0 && vol->writers == 0);
ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
vol->exclusive = 0;
if (mode == UBI_READONLY)
vol->readers = 1;
else if (mode == UBI_READWRITE)
vol->writers = 1;
else
vol->exclusive = 1;
spin_unlock(&vol->ubi->volumes_lock);
desc->mode = mode;
}
static int vol_cdev_open(struct inode *inode, struct file *file)
{
struct ubi_volume_desc *desc;
int vol_id = iminor(inode) - 1, mode, ubi_num;
ubi_num = ubi_major2num(imajor(inode));
if (ubi_num < 0)
return ubi_num;
if (file->f_mode & FMODE_WRITE)
mode = UBI_READWRITE;
else
mode = UBI_READONLY;
dbg_gen("open device %d, volume %d, mode %d",
ubi_num, vol_id, mode);
desc = ubi_open_volume(ubi_num, vol_id, mode);
if (IS_ERR(desc))
return PTR_ERR(desc);
file->private_data = desc;
return 0;
}
static int vol_cdev_release(struct inode *inode, struct file *file)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
dbg_gen("release device %d, volume %d, mode %d",
vol->ubi->ubi_num, vol->vol_id, desc->mode);
if (vol->updating) {
ubi_warn("update of volume %d not finished, volume is damaged",
vol->vol_id);
ubi_assert(!vol->changing_leb);
vol->updating = 0;
vfree(vol->upd_buf);
} else if (vol->changing_leb) {
dbg_gen("only %lld of %lld bytes received for atomic LEB change"
" for volume %d:%d, cancel", vol->upd_received,
vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
vol->changing_leb = 0;
vfree(vol->upd_buf);
}
ubi_close_volume(desc);
return 0;
}
static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
loff_t new_offset;
if (vol->updating) {
/* Update is in progress, seeking is prohibited */
dbg_err("updating");
return -EBUSY;
}
switch (origin) {
case 0: /* SEEK_SET */
new_offset = offset;
break;
case 1: /* SEEK_CUR */
new_offset = file->f_pos + offset;
break;
case 2: /* SEEK_END */
new_offset = vol->used_bytes + offset;
break;
default:
return -EINVAL;
}
if (new_offset < 0 || new_offset > vol->used_bytes) {
dbg_err("bad seek %lld", new_offset);
return -EINVAL;
}
dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
vol->vol_id, offset, origin, new_offset);
file->f_pos = new_offset;
return new_offset;
}
static int vol_cdev_fsync(struct file *file, int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
return ubi_sync(ubi->ubi_num);
}
static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
loff_t *offp)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int err, lnum, off, len, tbuf_size;
size_t count_save = count;
void *tbuf;
dbg_gen("read %zd bytes from offset %lld of volume %d",
count, *offp, vol->vol_id);
if (vol->updating) {
dbg_err("updating");
return -EBUSY;
}
if (vol->upd_marker) {
dbg_err("damaged volume, update marker is set");
return -EBADF;
}
if (*offp == vol->used_bytes || count == 0)
return 0;
if (vol->corrupted)
dbg_gen("read from corrupted volume %d", vol->vol_id);
if (*offp + count > vol->used_bytes)
count_save = count = vol->used_bytes - *offp;
tbuf_size = vol->usable_leb_size;
if (count < tbuf_size)
tbuf_size = ALIGN(count, ubi->min_io_size);
tbuf = vmalloc(tbuf_size);
if (!tbuf)
return -ENOMEM;
len = count > tbuf_size ? tbuf_size : count;
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
do {
cond_resched();
if (off + len >= vol->usable_leb_size)
len = vol->usable_leb_size - off;
err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
if (err)
break;
off += len;
if (off == vol->usable_leb_size) {
lnum += 1;
off -= vol->usable_leb_size;
}
count -= len;
*offp += len;
err = copy_to_user(buf, tbuf, len);
if (err) {
err = -EFAULT;
break;
}
buf += len;
len = count > tbuf_size ? tbuf_size : count;
} while (count);
vfree(tbuf);
return err ? err : count_save - count;
}
/*
* This function allows to directly write to dynamic UBI volumes, without
* issuing the volume update operation.
*/
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
size_t count, loff_t *offp)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int lnum, off, len, tbuf_size, err = 0;
size_t count_save = count;
char *tbuf;
if (!vol->direct_writes)
return -EPERM;
dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
count, *offp, vol->vol_id);
if (vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
if (off & (ubi->min_io_size - 1)) {
dbg_err("unaligned position");
return -EINVAL;
}
if (*offp + count > vol->used_bytes)
count_save = count = vol->used_bytes - *offp;
/* We can write only in fractions of the minimum I/O unit */
if (count & (ubi->min_io_size - 1)) {
dbg_err("unaligned write length");
return -EINVAL;
}
tbuf_size = vol->usable_leb_size;
if (count < tbuf_size)
tbuf_size = ALIGN(count, ubi->min_io_size);
tbuf = vmalloc(tbuf_size);
if (!tbuf)
return -ENOMEM;
len = count > tbuf_size ? tbuf_size : count;
while (count) {
cond_resched();
if (off + len >= vol->usable_leb_size)
len = vol->usable_leb_size - off;
err = copy_from_user(tbuf, buf, len);
if (err) {
err = -EFAULT;
break;
}
err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
UBI_UNKNOWN);
if (err)
break;
off += len;
if (off == vol->usable_leb_size) {
lnum += 1;
off -= vol->usable_leb_size;
}
count -= len;
*offp += len;
buf += len;
len = count > tbuf_size ? tbuf_size : count;
}
vfree(tbuf);
return err ? err : count_save - count;
}
static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *offp)
{
int err = 0;
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
if (!vol->updating && !vol->changing_leb)
return vol_cdev_direct_write(file, buf, count, offp);
if (vol->updating)
err = ubi_more_update_data(ubi, vol, buf, count);
else
err = ubi_more_leb_change_data(ubi, vol, buf, count);
if (err < 0) {
ubi_err("cannot accept more %zd bytes of data, error %d",
count, err);
return err;
}
if (err) {
/*
* The operation is finished, @err contains number of actually
* written bytes.
*/
count = err;
if (vol->changing_leb) {
revoke_exclusive(desc, UBI_READWRITE);
return count;
}
err = ubi_check_volume(ubi, vol->vol_id);
if (err < 0)
return err;
if (err) {
ubi_warn("volume %d on UBI device %d is corrupted",
vol->vol_id, ubi->ubi_num);
vol->corrupted = 1;
}
vol->checked = 1;
ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
revoke_exclusive(desc, UBI_READWRITE);
}
return count;
}
static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err = 0;
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
void __user *argp = (void __user *)arg;
switch (cmd) {
/* Volume update command */
case UBI_IOCVOLUP:
{
int64_t bytes, rsvd_bytes;
if (!capable(CAP_SYS_RESOURCE)) {
err = -EPERM;
break;
}
err = copy_from_user(&bytes, argp, sizeof(int64_t));
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY) {
err = -EROFS;
break;
}
rsvd_bytes = (long long)vol->reserved_pebs *
ubi->leb_size-vol->data_pad;
if (bytes < 0 || bytes > rsvd_bytes) {
err = -EINVAL;
break;
}
err = get_exclusive(desc);
if (err < 0)
break;
err = ubi_start_update(ubi, vol, bytes);
if (bytes == 0)
revoke_exclusive(desc, UBI_READWRITE);
break;
}
/* Atomic logical eraseblock change command */
case UBI_IOCEBCH:
{
struct ubi_leb_change_req req;
err = copy_from_user(&req, argp,
sizeof(struct ubi_leb_change_req));
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY ||
vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
/* Validate the request */
err = -EINVAL;
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
break;
if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
req.dtype != UBI_UNKNOWN)
break;
err = get_exclusive(desc);
if (err < 0)
break;
err = ubi_start_leb_change(ubi, vol, &req);
if (req.bytes == 0)
revoke_exclusive(desc, UBI_READWRITE);
break;
}
/* Logical eraseblock erasure command */
case UBI_IOCEBER:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY ||
vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
if (lnum < 0 || lnum >= vol->reserved_pebs) {
err = -EINVAL;
break;
}
dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
break;
err = ubi_wl_flush(ubi);
break;
}
/* Logical eraseblock map command */
case UBI_IOCEBMAP:
{
struct ubi_map_req req;
err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
if (err) {
err = -EFAULT;
break;
}
err = ubi_leb_map(desc, req.lnum, req.dtype);
break;
}
/* Logical eraseblock un-map command */
case UBI_IOCEBUNMAP:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_leb_unmap(desc, lnum);
break;
}
/* Check if logical eraseblock is mapped command */
case UBI_IOCEBISMAP:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_is_mapped(desc, lnum);
break;
}
/* Set volume property command */
case UBI_IOCSETVOLPROP:
{
struct ubi_set_vol_prop_req req;
err = copy_from_user(&req, argp,
sizeof(struct ubi_set_vol_prop_req));
if (err) {
err = -EFAULT;
break;
}
switch (req.property) {
case UBI_VOL_PROP_DIRECT_WRITE:
mutex_lock(&ubi->device_mutex);
desc->vol->direct_writes = !!req.value;
mutex_unlock(&ubi->device_mutex);
break;
default:
err = -EINVAL;
break;
}
break;
}
default:
err = -ENOTTY;
break;
}
return err;
}
/**
* verify_mkvol_req - verify volume creation request.
* @ubi: UBI device description object
* @req: the request to check
*
 * This function returns zero if the request is correct, and %-EINVAL if not.
*/
static int verify_mkvol_req(const struct ubi_device *ubi,
const struct ubi_mkvol_req *req)
{
int n, err = -EINVAL;
if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
req->name_len < 0)
goto bad;
if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
req->vol_id != UBI_VOL_NUM_AUTO)
goto bad;
if (req->alignment == 0)
goto bad;
if (req->bytes == 0)
goto bad;
if (req->vol_type != UBI_DYNAMIC_VOLUME &&
req->vol_type != UBI_STATIC_VOLUME)
goto bad;
if (req->alignment > ubi->leb_size)
goto bad;
n = req->alignment & (ubi->min_io_size - 1);
if (req->alignment != 1 && n)
goto bad;
if (!req->name[0] || !req->name_len)
goto bad;
if (req->name_len > UBI_VOL_NAME_MAX) {
err = -ENAMETOOLONG;
goto bad;
}
n = strnlen(req->name, req->name_len + 1);
if (n != req->name_len)
goto bad;
return 0;
bad:
dbg_err("bad volume creation request");
ubi_dbg_dump_mkvol_req(req);
return err;
}
/**
* verify_rsvol_req - verify volume re-size request.
* @ubi: UBI device description object
* @req: the request to check
*
* This function returns zero if the request is correct, and %-EINVAL if not.
*/
static int verify_rsvol_req(const struct ubi_device *ubi,
const struct ubi_rsvol_req *req)
{
if (req->bytes <= 0)
return -EINVAL;
if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
return -EINVAL;
return 0;
}
/**
* rename_volumes - rename UBI volumes.
* @ubi: UBI device description object
* @req: volumes re-name request
*
 * This is a helper function for the volume re-name IOCTL which validates
 * the request, opens the volumes and calls the corresponding volume management
* function. Returns zero in case of success and a negative error code in case
* of failure.
*/
static int rename_volumes(struct ubi_device *ubi,
struct ubi_rnvol_req *req)
{
int i, n, err;
struct list_head rename_list;
struct ubi_rename_entry *re, *re1;
if (req->count < 0 || req->count > UBI_MAX_RNVOL)
return -EINVAL;
if (req->count == 0)
return 0;
/* Validate volume IDs and names in the request */
for (i = 0; i < req->count; i++) {
if (req->ents[i].vol_id < 0 ||
req->ents[i].vol_id >= ubi->vtbl_slots)
return -EINVAL;
if (req->ents[i].name_len < 0)
return -EINVAL;
if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
return -ENAMETOOLONG;
req->ents[i].name[req->ents[i].name_len] = '\0';
n = strlen(req->ents[i].name);
if (n != req->ents[i].name_len)
			return -EINVAL;
}
/* Make sure volume IDs and names are unique */
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
dbg_err("duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
dbg_err("duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
}
}
/* Create the re-name list */
INIT_LIST_HEAD(&rename_list);
for (i = 0; i < req->count; i++) {
int vol_id = req->ents[i].vol_id;
int name_len = req->ents[i].name_len;
const char *name = req->ents[i].name;
re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
if (!re) {
err = -ENOMEM;
goto out_free;
}
re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
dbg_err("cannot open volume %d, error %d", vol_id, err);
kfree(re);
goto out_free;
}
/* Skip this re-naming if the name does not really change */
if (re->desc->vol->name_len == name_len &&
!memcmp(re->desc->vol->name, name, name_len)) {
ubi_close_volume(re->desc);
kfree(re);
continue;
}
re->new_name_len = name_len;
memcpy(re->new_name, name, name_len);
list_add_tail(&re->list, &rename_list);
dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
vol_id, re->desc->vol->name, name);
}
if (list_empty(&rename_list))
return 0;
/* Find out the volumes which have to be removed */
list_for_each_entry(re, &rename_list, list) {
struct ubi_volume_desc *desc;
int no_remove_needed = 0;
/*
* Volume @re->vol_id is going to be re-named to
* @re->new_name, while its current name is @name. If a volume
* with name @re->new_name currently exists, it has to be
* removed, unless it is also re-named in the request (@req).
*/
list_for_each_entry(re1, &rename_list, list) {
if (re->new_name_len == re1->desc->vol->name_len &&
!memcmp(re->new_name, re1->desc->vol->name,
re1->desc->vol->name_len)) {
no_remove_needed = 1;
break;
}
}
if (no_remove_needed)
continue;
/*
* It seems we need to remove volume with name @re->new_name,
* if it exists.
*/
desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
if (err == -ENODEV)
/* Re-naming into a non-existing volume name */
continue;
/* The volume exists but busy, or an error occurred */
dbg_err("cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
if (!re1) {
err = -ENOMEM;
ubi_close_volume(desc);
goto out_free;
}
re1->remove = 1;
re1->desc = desc;
list_add(&re1->list, &rename_list);
dbg_msg("will remove volume %d, name \"%s\"",
re1->desc->vol->vol_id, re1->desc->vol->name);
}
mutex_lock(&ubi->device_mutex);
err = ubi_rename_volumes(ubi, &rename_list);
mutex_unlock(&ubi->device_mutex);
out_free:
list_for_each_entry_safe(re, re1, &rename_list, list) {
ubi_close_volume(re->desc);
list_del(&re->list);
kfree(re);
}
return err;
}
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err = 0;
struct ubi_device *ubi;
struct ubi_volume_desc *desc;
void __user *argp = (void __user *)arg;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
ubi = ubi_get_by_major(imajor(file->f_mapping->host));
if (!ubi)
return -ENODEV;
switch (cmd) {
/* Create volume command */
case UBI_IOCMKVOL:
{
struct ubi_mkvol_req req;
dbg_gen("create volume");
err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
if (err) {
err = -EFAULT;
break;
}
err = verify_mkvol_req(ubi, &req);
if (err)
break;
mutex_lock(&ubi->device_mutex);
err = ubi_create_volume(ubi, &req);
mutex_unlock(&ubi->device_mutex);
if (err)
break;
err = put_user(req.vol_id, (__user int32_t *)argp);
if (err)
err = -EFAULT;
break;
}
/* Remove volume command */
case UBI_IOCRMVOL:
{
int vol_id;
dbg_gen("remove volume");
err = get_user(vol_id, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
break;
}
mutex_lock(&ubi->device_mutex);
err = ubi_remove_volume(desc, 0);
mutex_unlock(&ubi->device_mutex);
/*
* The volume is deleted (unless an error occurred), and the
* 'struct ubi_volume' object will be freed when
* 'ubi_close_volume()' will call 'put_device()'.
*/
ubi_close_volume(desc);
break;
}
/* Re-size volume command */
case UBI_IOCRSVOL:
{
int pebs;
struct ubi_rsvol_req req;
dbg_gen("re-size volume");
err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
if (err) {
err = -EFAULT;
break;
}
err = verify_rsvol_req(ubi, &req);
if (err)
break;
desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
break;
}
pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
desc->vol->usable_leb_size);
mutex_lock(&ubi->device_mutex);
err = ubi_resize_volume(desc, pebs);
mutex_unlock(&ubi->device_mutex);
ubi_close_volume(desc);
break;
}
/* Re-name volumes command */
case UBI_IOCRNVOL:
{
struct ubi_rnvol_req *req;
dbg_msg("re-name volumes");
req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
if (!req) {
err = -ENOMEM;
break;
		}
err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
if (err) {
err = -EFAULT;
kfree(req);
break;
}
err = rename_volumes(ubi, req);
kfree(req);
break;
}
default:
err = -ENOTTY;
break;
}
ubi_put_device(ubi);
return err;
}
static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
switch (cmd) {
/* Attach an MTD device command */
case UBI_IOCATT:
{
struct ubi_attach_req req;
struct mtd_info *mtd;
dbg_gen("attach MTD device");
err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
if (err) {
err = -EFAULT;
break;
}
if (req.mtd_num < 0 ||
(req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
err = -EINVAL;
break;
}
mtd = get_mtd_device(NULL, req.mtd_num);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
break;
}
/*
* Note, further request verification is done by
* 'ubi_attach_mtd_dev()'.
*/
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
else
/* @err contains UBI device number */
err = put_user(err, (__user int32_t *)argp);
break;
}
/* Detach an MTD device command */
case UBI_IOCDET:
{
int ubi_num;
dbg_gen("dettach MTD device");
err = get_user(ubi_num, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
mutex_lock(&ubi_devices_mutex);
err = ubi_detach_mtd_dev(ubi_num, 0);
mutex_unlock(&ubi_devices_mutex);
break;
}
default:
err = -ENOTTY;
break;
}
return err;
}
#ifdef CONFIG_COMPAT
static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
unsigned long translated_arg = (unsigned long)compat_ptr(arg);
return vol_cdev_ioctl(file, cmd, translated_arg);
}
static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
unsigned long translated_arg = (unsigned long)compat_ptr(arg);
return ubi_cdev_ioctl(file, cmd, translated_arg);
}
static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
unsigned long translated_arg = (unsigned long)compat_ptr(arg);
return ctrl_cdev_ioctl(file, cmd, translated_arg);
}
#else
#define vol_cdev_compat_ioctl NULL
#define ubi_cdev_compat_ioctl NULL
#define ctrl_cdev_compat_ioctl NULL
#endif
/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
.owner = THIS_MODULE,
.open = vol_cdev_open,
.release = vol_cdev_release,
.llseek = vol_cdev_llseek,
.read = vol_cdev_read,
.write = vol_cdev_write,
.fsync = vol_cdev_fsync,
.unlocked_ioctl = vol_cdev_ioctl,
.compat_ioctl = vol_cdev_compat_ioctl,
};
/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = ubi_cdev_ioctl,
.compat_ioctl = ubi_cdev_compat_ioctl,
};
/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = ctrl_cdev_ioctl,
.compat_ioctl = ctrl_cdev_compat_ioctl,
.llseek = no_llseek,
};
| CyanogenMod/android_kernel_samsung_smdk4412 | drivers/mtd/ubi/cdev.c | C | gpl-2.0 | 25,191 |
/*
* wm8775 - driver version 0.0.1
*
* Copyright (C) 2004 Ulf Eklund <ivtv at eklund.to>
*
* Based on saa7115 driver
*
* Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
* - Cleanup
* - V4L2 API update
* - sound fixes
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/wm8775.h>
MODULE_DESCRIPTION("wm8775 driver");
MODULE_AUTHOR("Ulf Eklund, Hans Verkuil");
MODULE_LICENSE("GPL");
/* ----------------------------------------------------------------------- */
enum {
R7 = 7, R11 = 11,
R12, R13, R14, R15, R16, R17, R18, R19, R20, R21, R23 = 23,
TOT_REGS
};
#define ALC_HOLD 0x85 /* R17: use zero cross detection, ALC hold time 42.6 ms */
#define ALC_EN 0x100 /* R17: ALC enable */
struct wm8775_state {
struct v4l2_subdev sd;
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl *mute;
struct v4l2_ctrl *vol;
struct v4l2_ctrl *bal;
struct v4l2_ctrl *loud;
u8 input; /* Last selected input (0-0xf) */
};
static inline struct wm8775_state *to_state(struct v4l2_subdev *sd)
{
return container_of(sd, struct wm8775_state, sd);
}
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
return &container_of(ctrl->handler, struct wm8775_state, hdl)->sd;
}
static int wm8775_write(struct v4l2_subdev *sd, int reg, u16 val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
int i;
if (reg < 0 || reg >= TOT_REGS) {
v4l2_err(sd, "Invalid register R%d\n", reg);
return -1;
}
for (i = 0; i < 3; i++)
if (i2c_smbus_write_byte_data(client,
(reg << 1) | (val >> 8), val & 0xff) == 0)
return 0;
v4l2_err(sd, "I2C: cannot write %03x to register R%d\n", val, reg);
return -1;
}
static void wm8775_set_audio(struct v4l2_subdev *sd, int quietly)
{
struct wm8775_state *state = to_state(sd);
u8 vol_l, vol_r;
int muted = 0 != state->mute->val;
u16 volume = (u16)state->vol->val;
u16 balance = (u16)state->bal->val;
/* normalize ( 65535 to 0 -> 255 to 0 (+24dB to -103dB) ) */
vol_l = (min(65536 - balance, 32768) * volume) >> 23;
vol_r = (min(balance, (u16)32768) * volume) >> 23;
/* Mute */
if (muted || quietly)
wm8775_write(sd, R21, 0x0c0 | state->input);
wm8775_write(sd, R14, vol_l | 0x100); /* 0x100= Left channel ADC zero cross enable */
wm8775_write(sd, R15, vol_r | 0x100); /* 0x100= Right channel ADC zero cross enable */
/* Un-mute */
if (!muted)
wm8775_write(sd, R21, state->input);
}
static int wm8775_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct wm8775_state *state = to_state(sd);
/* There are 4 inputs and one output. Zero or more inputs
are multiplexed together to the output. Hence there are
16 combinations.
If only one input is active (the normal case) then the
input values 1, 2, 4 or 8 should be used. */
if (input > 15) {
v4l2_err(sd, "Invalid input %d.\n", input);
return -EINVAL;
}
state->input = input;
if (v4l2_ctrl_g_ctrl(state->mute))
return 0;
if (!v4l2_ctrl_g_ctrl(state->vol))
return 0;
wm8775_set_audio(sd, 1);
return 0;
}
static int wm8775_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
case V4L2_CID_AUDIO_VOLUME:
case V4L2_CID_AUDIO_BALANCE:
wm8775_set_audio(sd, 0);
return 0;
case V4L2_CID_AUDIO_LOUDNESS:
wm8775_write(sd, R17, (ctrl->val ? ALC_EN : 0) | ALC_HOLD);
return 0;
}
return -EINVAL;
}
static int wm8775_log_status(struct v4l2_subdev *sd)
{
struct wm8775_state *state = to_state(sd);
v4l2_info(sd, "Input: %d\n", state->input);
v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
return 0;
}
static int wm8775_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *freq)
{
wm8775_set_audio(sd, 0);
return 0;
}
/* ----------------------------------------------------------------------- */
static const struct v4l2_ctrl_ops wm8775_ctrl_ops = {
.s_ctrl = wm8775_s_ctrl,
};
static const struct v4l2_subdev_core_ops wm8775_core_ops = {
.log_status = wm8775_log_status,
.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
.g_ctrl = v4l2_subdev_g_ctrl,
.s_ctrl = v4l2_subdev_s_ctrl,
.queryctrl = v4l2_subdev_queryctrl,
.querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_tuner_ops wm8775_tuner_ops = {
.s_frequency = wm8775_s_frequency,
};
static const struct v4l2_subdev_audio_ops wm8775_audio_ops = {
.s_routing = wm8775_s_routing,
};
static const struct v4l2_subdev_ops wm8775_ops = {
.core = &wm8775_core_ops,
.tuner = &wm8775_tuner_ops,
.audio = &wm8775_audio_ops,
};
/* ----------------------------------------------------------------------- */
/* i2c implementation */
/*
* Generic i2c probe
* concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
*/
static int wm8775_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct wm8775_state *state;
struct v4l2_subdev *sd;
int err;
bool is_nova_s = false;
if (client->dev.platform_data) {
struct wm8775_platform_data *data = client->dev.platform_data;
is_nova_s = data->is_nova_s;
}
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &wm8775_ops);
state->input = 2;
v4l2_ctrl_handler_init(&state->hdl, 4);
state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
state->vol = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_VOLUME, 0, 65535, (65535+99)/100, 0xCF00); /* 0dB*/
state->bal = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_BALANCE, 0, 65535, (65535+99)/100, 32768);
state->loud = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_LOUDNESS, 0, 1, 1, 1);
sd->ctrl_handler = &state->hdl;
err = state->hdl.error;
if (err) {
v4l2_ctrl_handler_free(&state->hdl);
return err;
}
/* Initialize wm8775 */
/* RESET */
wm8775_write(sd, R23, 0x000);
/* Disable zero cross detect timeout */
wm8775_write(sd, R7, 0x000);
/* HPF enable, left justified, 24-bit (Philips) mode */
wm8775_write(sd, R11, 0x021);
/* Master mode, clock ratio 256fs */
wm8775_write(sd, R12, 0x102);
/* Powered up */
wm8775_write(sd, R13, 0x000);
if (!is_nova_s) {
/* ADC gain +2.5dB, enable zero cross */
wm8775_write(sd, R14, 0x1d4);
/* ADC gain +2.5dB, enable zero cross */
wm8775_write(sd, R15, 0x1d4);
/* ALC Stereo, ALC target level -1dB FS max gain +8dB */
wm8775_write(sd, R16, 0x1bf);
/* Enable gain control, use zero cross detection,
ALC hold time 42.6 ms */
wm8775_write(sd, R17, 0x185);
} else {
/* ALC stereo, ALC target level -5dB FS, ALC max gain +8dB */
wm8775_write(sd, R16, 0x1bb);
/* Set ALC mode and hold time */
wm8775_write(sd, R17, (state->loud->val ? ALC_EN : 0) | ALC_HOLD);
}
/* ALC gain ramp up delay 34 s, ALC gain ramp down delay 33 ms */
wm8775_write(sd, R18, 0x0a2);
/* Enable noise gate, threshold -72dBfs */
wm8775_write(sd, R19, 0x005);
if (!is_nova_s) {
/* Transient window 4ms, lower PGA gain limit -1dB */
wm8775_write(sd, R20, 0x07a);
/* LRBOTH = 1, use input 2. */
wm8775_write(sd, R21, 0x102);
} else {
/* Transient window 4ms, ALC min gain -5dB */
wm8775_write(sd, R20, 0x0fb);
wm8775_set_audio(sd, 1); /* set volume/mute/mux */
}
return 0;
}
static int wm8775_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct wm8775_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
return 0;
}
static const struct i2c_device_id wm8775_id[] = {
{ "wm8775", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8775_id);
static struct i2c_driver wm8775_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "wm8775",
},
.probe = wm8775_probe,
.remove = wm8775_remove,
.id_table = wm8775_id,
};
module_i2c_driver(wm8775_driver);
| codesnake/linux-amlogic | drivers/media/i2c/wm8775.c | C | gpl-2.0 | 9,093 |
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include "hyperv_vmbus.h"
struct vmbus_channel_message_table_entry {
enum vmbus_channel_message_type message_type;
void (*message_handler)(struct vmbus_channel_message_header *msg);
};
/**
* vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
* @icmsghdrp: Pointer to msg header structure
 * @negop: Pointer to negotiate message structure
 * @buf: Raw buffer channel data
 * @max_fw_version: Maximum framework version that we can support
 * @max_srv_version: Maximum service version that we can support
*
* @icmsghdrp is of type &struct icmsg_hdr.
* @negop is of type &struct icmsg_negotiate.
* Set up and fill in default negotiate response message.
*
* The max_fw_version specifies the maximum framework version that
 * we can support and max_srv_version specifies the maximum service
* version we can support. A special value MAX_SRV_VER can be
* specified to indicate that we can handle the maximum version
* exposed by the host.
*
* Mainly used by Hyper-V drivers.
*/
void vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
struct icmsg_negotiate *negop, u8 *buf,
int max_fw_version, int max_srv_version)
{
int icframe_vercnt;
int icmsg_vercnt;
int i;
icmsghdrp->icmsgsize = 0x10;
negop = (struct icmsg_negotiate *)&buf[
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
icframe_vercnt = negop->icframe_vercnt;
icmsg_vercnt = negop->icmsg_vercnt;
/*
* Select the framework version number we will
* support.
*/
for (i = 0; i < negop->icframe_vercnt; i++) {
if (negop->icversion_data[i].major <= max_fw_version)
icframe_vercnt = negop->icversion_data[i].major;
}
for (i = negop->icframe_vercnt;
(i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
if (negop->icversion_data[i].major <= max_srv_version)
icmsg_vercnt = negop->icversion_data[i].major;
}
/*
* Respond with the maximum framework and service
* version numbers we can support.
*/
negop->icframe_vercnt = 1;
negop->icmsg_vercnt = 1;
negop->icversion_data[0].major = icframe_vercnt;
negop->icversion_data[0].minor = 0;
negop->icversion_data[1].major = icmsg_vercnt;
negop->icversion_data[1].minor = 0;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
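/*
 * Illustrative sketch, not lifted from an in-tree driver: a utility service
 * (shutdown, timesync, KVP, ...) typically calls this helper from its
 * channel callback when a negotiate packet arrives and then sends the
 * prepared buffer back on the same channel. The buffer handling and the
 * MAX_SRV_VER limits shown here are assumptions for the example only.
 *
 *	icmsghdrp = (struct icmsg_hdr *)&recv_buf[sizeof(struct vmbuspipe_hdr)];
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buf,
 *					  MAX_SRV_VER, MAX_SRV_VER);
 *	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
 *	vmbus_sendpacket(channel, recv_buf, recvlen, requestid,
 *			 VM_PKT_DATA_INBAND, 0);
 */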
/*
* alloc_channel - Allocate and initialize a vmbus channel object
*/
static struct vmbus_channel *alloc_channel(void)
{
struct vmbus_channel *channel;
channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
if (!channel)
return NULL;
spin_lock_init(&channel->inbound_lock);
channel->controlwq = create_workqueue("hv_vmbus_ctl");
if (!channel->controlwq) {
kfree(channel);
return NULL;
}
return channel;
}
/*
 * release_channel - Release the vmbus channel object itself
*/
static void release_channel(struct work_struct *work)
{
struct vmbus_channel *channel = container_of(work,
struct vmbus_channel,
work);
destroy_workqueue(channel->controlwq);
kfree(channel);
}
/*
* free_channel - Release the resources used by the vmbus channel object
*/
static void free_channel(struct vmbus_channel *channel)
{
/*
* We have to release the channel's workqueue/thread in the vmbus's
* workqueue/thread context
* ie we can't destroy ourselves.
*/
INIT_WORK(&channel->work, release_channel);
queue_work(vmbus_connection.work_queue, &channel->work);
}
/*
* vmbus_process_rescind_offer -
* Rescind the offer by initiating a device removal
*/
static void vmbus_process_rescind_offer(struct work_struct *work)
{
struct vmbus_channel *channel = container_of(work,
struct vmbus_channel,
work);
unsigned long flags;
struct vmbus_channel_relid_released msg;
vmbus_device_unregister(channel->device_obj);
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = channel->offermsg.child_relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_del(&channel->listentry);
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
free_channel(channel);
}
void vmbus_free_channels(void)
{
struct vmbus_channel *channel;
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
vmbus_device_unregister(channel->device_obj);
kfree(channel->device_obj);
free_channel(channel);
}
}
/*
* vmbus_process_offer - Process the offer by creating a channel/device
* associated with this offer
*/
static void vmbus_process_offer(struct work_struct *work)
{
struct vmbus_channel *newchannel = container_of(work,
struct vmbus_channel,
work);
struct vmbus_channel *channel;
bool fnew = true;
int ret;
unsigned long flags;
/* The next possible work is rescind handling */
INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);
/* Make sure this is a new offer */
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (!uuid_le_cmp(channel->offermsg.offer.if_type,
newchannel->offermsg.offer.if_type) &&
!uuid_le_cmp(channel->offermsg.offer.if_instance,
newchannel->offermsg.offer.if_instance)) {
fnew = false;
break;
}
}
if (fnew)
list_add_tail(&newchannel->listentry,
&vmbus_connection.chn_list);
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
if (!fnew) {
free_channel(newchannel);
return;
}
/*
* Start the process of binding this offer to the driver
* We need to set the DeviceObject field before calling
* vmbus_child_dev_add()
*/
newchannel->device_obj = vmbus_device_create(
&newchannel->offermsg.offer.if_type,
&newchannel->offermsg.offer.if_instance,
newchannel);
/*
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
*/
ret = vmbus_device_register(newchannel->device_obj);
if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_del(&newchannel->listentry);
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
kfree(newchannel->device_obj);
free_channel(newchannel);
} else {
/*
* This state is used to indicate a successful open
* so that when we do close the channel normally, we
* can cleanup properly
*/
newchannel->state = CHANNEL_OPEN_STATE;
}
}
enum {
IDE = 0,
SCSI,
NIC,
MAX_PERF_CHN,
};
/*
* This is an array of device_ids (device types) that are performance critical.
* We attempt to distribute the interrupt load for these devices across
* all available CPUs.
*/
static const struct hv_vmbus_device_id hp_devs[] = {
/* IDE */
{ HV_IDE_GUID, },
/* Storage - SCSI */
{ HV_SCSI_GUID, },
/* Network */
{ HV_NIC_GUID, },
};
/*
* We use this state to statically distribute the channel interrupt load.
*/
static u32 next_vp;
/*
* Starting with Win8, we can statically distribute the incoming
* channel interrupt load by binding a channel to VCPU. We
* implement here a simple round robin scheme for distributing
* the interrupt load.
* We will bind channels that are not performance critical to cpu 0 and
* performance critical channels (IDE, SCSI and Network) will be uniformly
* distributed across all available CPUs.
*/
static u32 get_vp_index(uuid_le *type_guid)
{
u32 cur_cpu;
int i;
bool perf_chn = false;
u32 max_cpus = num_online_cpus();
for (i = IDE; i < MAX_PERF_CHN; i++) {
if (!memcmp(type_guid->b, hp_devs[i].guid,
sizeof(uuid_le))) {
perf_chn = true;
break;
}
}
if ((vmbus_proto_version == VERSION_WS2008) ||
(vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
/*
* Prior to win8, all channel interrupts are
* delivered on cpu 0.
* Also if the channel is not a performance critical
* channel, bind it to cpu 0.
*/
return 0;
}
cur_cpu = (++next_vp % max_cpus);
return hv_context.vp_index[cur_cpu];
}
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
*/
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_offer_channel *offer;
struct vmbus_channel *newchannel;
offer = (struct vmbus_channel_offer_channel *)hdr;
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
pr_err("Unable to allocate channel object\n");
return;
}
/*
* By default we setup state to enable batched
* reading. A specific service can choose to
* disable this prior to opening the channel.
*/
newchannel->batched_reading = true;
/*
* Setup state for signalling the host.
*/
newchannel->sig_event = (struct hv_input_signal_event *)
(ALIGN((unsigned long)
&newchannel->sig_buf,
HV_HYPERCALL_PARAM_ALIGN));
newchannel->sig_event->connectionid.asu32 = 0;
newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
newchannel->sig_event->flag_number = 0;
newchannel->sig_event->rsvdz = 0;
if (vmbus_proto_version != VERSION_WS2008) {
newchannel->is_dedicated_interrupt =
(offer->is_dedicated_interrupt != 0);
newchannel->sig_event->connectionid.u.id =
offer->connection_id;
}
newchannel->target_vp = get_vp_index(&offer->offer.if_type);
memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
newchannel->monitor_grp = (u8)offer->monitorid / 32;
newchannel->monitor_bit = (u8)offer->monitorid % 32;
INIT_WORK(&newchannel->work, vmbus_process_offer);
queue_work(newchannel->controlwq, &newchannel->work);
}
/*
* vmbus_onoffer_rescind - Rescind offer handler.
*
* We queue a work item to process this offer synchronously
*/
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
channel = relid2channel(rescind->child_relid);
if (channel == NULL)
/* Just return here, no channel found */
return;
/* work is initialized for vmbus_process_rescind_offer() from
* vmbus_process_offer() where the channel got created */
queue_work(channel->controlwq, &channel->work);
}
/*
* vmbus_onoffers_delivered -
* This is invoked when all offers have been delivered.
*
* Nothing to do here.
*/
static void vmbus_onoffers_delivered(
struct vmbus_channel_message_header *hdr)
{
}
/*
* vmbus_onopen_result - Open result handler.
*
* This is invoked when we received a response to our channel open request.
* Find the matching request, copy the response and signal the requesting
* thread.
*/
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_open_result *result;
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
struct vmbus_channel_open_channel *openmsg;
unsigned long flags;
result = (struct vmbus_channel_open_result *)hdr;
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
requestheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
openmsg =
(struct vmbus_channel_open_channel *)msginfo->msg;
if (openmsg->child_relid == result->child_relid &&
openmsg->openid == result->openid) {
memcpy(&msginfo->response.open_result,
result,
sizeof(
struct vmbus_channel_open_result));
complete(&msginfo->waitevent);
break;
}
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
* vmbus_ongpadl_created - GPADL created handler.
*
* This is invoked when we received a response to our gpadl create request.
* Find the matching request, copy the response and signal the requesting
* thread.
*/
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_gpadl_created *gpadlcreated;
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
struct vmbus_channel_gpadl_header *gpadlheader;
unsigned long flags;
gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
/*
* Find the establish msg, copy the result and signal/unblock the wait
* event
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
requestheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
gpadlheader =
(struct vmbus_channel_gpadl_header *)requestheader;
if ((gpadlcreated->child_relid ==
gpadlheader->child_relid) &&
(gpadlcreated->gpadl == gpadlheader->gpadl)) {
memcpy(&msginfo->response.gpadl_created,
gpadlcreated,
sizeof(
struct vmbus_channel_gpadl_created));
complete(&msginfo->waitevent);
break;
}
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
* vmbus_ongpadl_torndown - GPADL torndown handler.
*
* This is invoked when we received a response to our gpadl teardown request.
* Find the matching request, copy the response and signal the requesting
* thread.
*/
static void vmbus_ongpadl_torndown(
struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_gpadl_torndown *gpadl_torndown;
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
struct vmbus_channel_gpadl_teardown *gpadl_teardown;
unsigned long flags;
gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
requestheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
gpadl_teardown =
(struct vmbus_channel_gpadl_teardown *)requestheader;
if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
memcpy(&msginfo->response.gpadl_torndown,
gpadl_torndown,
sizeof(
struct vmbus_channel_gpadl_torndown));
complete(&msginfo->waitevent);
break;
}
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
* vmbus_onversion_response - Version response handler
*
* This is invoked when we received a response to our initiate contact request.
* Find the matching request, copy the response and signal the requesting
* thread.
*/
static void vmbus_onversion_response(
struct vmbus_channel_message_header *hdr)
{
struct vmbus_channel_msginfo *msginfo;
struct vmbus_channel_message_header *requestheader;
struct vmbus_channel_version_response *version_response;
unsigned long flags;
version_response = (struct vmbus_channel_version_response *)hdr;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) {
requestheader =
(struct vmbus_channel_message_header *)msginfo->msg;
if (requestheader->msgtype ==
CHANNELMSG_INITIATE_CONTACT) {
memcpy(&msginfo->response.version_response,
version_response,
sizeof(struct vmbus_channel_version_response));
complete(&msginfo->waitevent);
}
}
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
static struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
{CHANNELMSG_INVALID, NULL},
{CHANNELMSG_OFFERCHANNEL, vmbus_onoffer},
{CHANNELMSG_RESCIND_CHANNELOFFER, vmbus_onoffer_rescind},
{CHANNELMSG_REQUESTOFFERS, NULL},
{CHANNELMSG_ALLOFFERS_DELIVERED, vmbus_onoffers_delivered},
{CHANNELMSG_OPENCHANNEL, NULL},
{CHANNELMSG_OPENCHANNEL_RESULT, vmbus_onopen_result},
{CHANNELMSG_CLOSECHANNEL, NULL},
{CHANNELMSG_GPADL_HEADER, NULL},
{CHANNELMSG_GPADL_BODY, NULL},
{CHANNELMSG_GPADL_CREATED, vmbus_ongpadl_created},
{CHANNELMSG_GPADL_TEARDOWN, NULL},
{CHANNELMSG_GPADL_TORNDOWN, vmbus_ongpadl_torndown},
{CHANNELMSG_RELID_RELEASED, NULL},
{CHANNELMSG_INITIATE_CONTACT, NULL},
{CHANNELMSG_VERSION_RESPONSE, vmbus_onversion_response},
{CHANNELMSG_UNLOAD, NULL},
};
/*
* vmbus_onmessage - Handler for channel protocol messages.
*
* This is invoked in the vmbus worker thread context.
*/
void vmbus_onmessage(void *context)
{
struct hv_message *msg = context;
struct vmbus_channel_message_header *hdr;
int size;
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
size = msg->header.payload_size;
if (hdr->msgtype >= CHANNELMSG_COUNT) {
pr_err("Received invalid channel message type %d size %d\n",
hdr->msgtype, size);
print_hex_dump_bytes("", DUMP_PREFIX_NONE,
(unsigned char *)msg->u.payload, size);
return;
}
if (channel_message_table[hdr->msgtype].message_handler)
channel_message_table[hdr->msgtype].message_handler(hdr);
else
pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}
/*
* vmbus_request_offers - Send a request to get all our pending offers.
*/
int vmbus_request_offers(void)
{
struct vmbus_channel_message_header *msg;
struct vmbus_channel_msginfo *msginfo;
int ret, t;
msginfo = kmalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_message_header),
GFP_KERNEL);
if (!msginfo)
return -ENOMEM;
init_completion(&msginfo->waitevent);
msg = (struct vmbus_channel_message_header *)msginfo->msg;
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_message_header));
if (ret != 0) {
pr_err("Unable to request offers - %d\n", ret);
goto cleanup;
}
t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
cleanup:
kfree(msginfo);
return ret;
}
/* eof */
| OldDroid/android_kernel_samsung_tblte | drivers/hv/channel_mgmt.c | C | gpl-2.0 | 19,124 |
/*
* net/core/gen_stats.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Thomas Graf <tgraf@suug.ch>
* Jamal Hadi Salim
* Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* See Documentation/networking/gen_stats.txt
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
{
if (nla_put(d->skb, type, size, buf))
goto nla_put_failure;
return 0;
nla_put_failure:
spin_unlock_bh(d->lock);
return -1;
}
/**
* gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
* @skb: socket buffer to put statistics TLVs into
* @type: TLV type for top level statistic TLV
* @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
* @xstats_type: TLV type for backward compatibility xstats TLV
* @lock: statistics lock
* @d: dumping handle
*
* Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
*
* The dumping handle is marked to be in backward compatibility mode telling
* all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
*
* Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
*/
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
int xstats_type, spinlock_t *lock, struct gnet_dump *d)
__acquires(lock)
{
memset(d, 0, sizeof(*d));
spin_lock_bh(lock);
d->lock = lock;
if (type)
d->tail = (struct nlattr *)skb_tail_pointer(skb);
d->skb = skb;
d->compat_tc_stats = tc_stats_type;
d->compat_xstats = xstats_type;
if (d->tail)
return gnet_stats_copy(d, type, NULL, 0);
return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
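/*
 * Illustrative sketch, not copied from a specific caller: the qdisc dump
 * path in net/sched uses this API roughly as follows, with the copy and
 * finish helpers defined further down in this file. The attribute numbers,
 * the lock and the qdisc fields are assumptions for the example only.
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, lock, &d) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
 *	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
 *	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 */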
/**
 * gnet_stats_start_copy - start dumping procedure
* @skb: socket buffer to put statistics TLVs into
* @type: TLV type for top level statistic TLV
* @lock: statistics lock
* @d: dumping handle
*
* Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
*
* Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
*/
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
struct gnet_dump *d)
{
return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
/**
* gnet_stats_copy_basic - copy basic statistics into statistic TLV
* @d: dumping handle
* @b: basic statistics
*
* Appends the basic statistics to the top level TLV created by
* gnet_stats_start_copy().
*
* Returns 0 on success or -1 with the statistic lock released
* if the room in the socket buffer was not sufficient.
*/
int
gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
{
if (d->compat_tc_stats) {
d->tc_stats.bytes = b->bytes;
d->tc_stats.packets = b->packets;
}
if (d->tail) {
struct gnet_stats_basic sb;
memset(&sb, 0, sizeof(sb));
sb.bytes = b->bytes;
sb.packets = b->packets;
return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
}
return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_basic);
/**
* gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
* @d: dumping handle
* @b: basic statistics
* @r: rate estimator statistics
*
* Appends the rate estimator statistics to the top level TLV created by
* gnet_stats_start_copy().
*
* Returns 0 on success or -1 with the statistic lock released
* if the room in the socket buffer was not sufficient.
*/
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
const struct gnet_stats_basic_packed *b,
struct gnet_stats_rate_est *r)
{
if (b && !gen_estimator_active(b, r))
return 0;
if (d->compat_tc_stats) {
d->tc_stats.bps = r->bps;
d->tc_stats.pps = r->pps;
}
if (d->tail)
return gnet_stats_copy(d, TCA_STATS_RATE_EST, r, sizeof(*r));
return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
/**
* gnet_stats_copy_queue - copy queue statistics into statistics TLV
* @d: dumping handle
* @q: queue statistics
*
* Appends the queue statistics to the top level TLV created by
* gnet_stats_start_copy().
*
* Returns 0 on success or -1 with the statistic lock released
* if the room in the socket buffer was not sufficient.
*/
int
gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q)
{
if (d->compat_tc_stats) {
d->tc_stats.drops = q->drops;
d->tc_stats.qlen = q->qlen;
d->tc_stats.backlog = q->backlog;
d->tc_stats.overlimits = q->overlimits;
}
if (d->tail)
return gnet_stats_copy(d, TCA_STATS_QUEUE, q, sizeof(*q));
return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);
/**
* gnet_stats_copy_app - copy application specific statistics into statistics TLV
* @d: dumping handle
* @st: application specific statistics data
* @len: length of data
*
 * Appends the application specific statistics to the top level TLV created by
* gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
* handle is in backward compatibility mode.
*
* Returns 0 on success or -1 with the statistic lock released
* if the room in the socket buffer was not sufficient.
*/
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
if (d->compat_xstats) {
d->xstats = st;
d->xstats_len = len;
}
if (d->tail)
return gnet_stats_copy(d, TCA_STATS_APP, st, len);
return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
/**
* gnet_stats_finish_copy - finish dumping procedure
* @d: dumping handle
*
* Corrects the length of the top level TLV to include all TLVs added
* by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
* if gnet_stats_start_copy_compat() was used and releases the statistics
* lock.
*
* Returns 0 on success or -1 with the statistic lock released
* if the room in the socket buffer was not sufficient.
*/
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
if (d->tail)
d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;
if (d->compat_tc_stats)
if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
sizeof(d->tc_stats)) < 0)
return -1;
if (d->compat_xstats && d->xstats) {
if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
d->xstats_len) < 0)
return -1;
}
spin_unlock_bh(d->lock);
return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
| ztemt/Z9Max_NX510J_V1_kernel | net/core/gen_stats.c | C | gpl-2.0 | 6,900 |
/*
* Cryptographic API.
*
 * TEA, XTEA, and XETA crypto algorithms
*
* The TEA and Xtended TEA algorithms were developed by David Wheeler
* and Roger Needham at the Computer Laboratory of Cambridge University.
*
* Due to the order of evaluation in XTEA many people have incorrectly
 * implemented it. XETA (XTEA in the wrong order) exists for
* compatibility with these implementations.
*
* Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16
#define TEA_BLOCK_SIZE 8
#define TEA_ROUNDS 32
#define TEA_DELTA 0x9e3779b9
#define XTEA_KEY_SIZE 16
#define XTEA_BLOCK_SIZE 8
#define XTEA_ROUNDS 32
#define XTEA_DELTA 0x9e3779b9
struct tea_ctx {
u32 KEY[4];
};
struct xtea_ctx {
u32 KEY[4];
};
static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = le32_to_cpu(key[1]);
ctx->KEY[2] = le32_to_cpu(key[2]);
ctx->KEY[3] = le32_to_cpu(key[3]);
return 0;
}
static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, n, sum = 0;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
k2 = ctx->KEY[2];
k3 = ctx->KEY[3];
n = TEA_ROUNDS;
while (n-- > 0) {
sum += TEA_DELTA;
y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
k2 = ctx->KEY[2];
k3 = ctx->KEY[3];
sum = TEA_DELTA << 5;
n = TEA_ROUNDS;
while (n-- > 0) {
z -= ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
y -= ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
sum -= TEA_DELTA;
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = le32_to_cpu(key[1]);
ctx->KEY[2] = le32_to_cpu(key[2]);
ctx->KEY[3] = le32_to_cpu(key[3]);
return 0;
}
static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
while (sum != limit) {
y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
sum += XTEA_DELTA;
z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
sum = XTEA_DELTA * XTEA_ROUNDS;
while (sum) {
z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]);
sum -= XTEA_DELTA;
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
while (sum != limit) {
y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
sum += XTEA_DELTA;
z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
sum = XTEA_DELTA * XTEA_ROUNDS;
while (sum) {
z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3];
sum -= XTEA_DELTA;
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static struct crypto_alg tea_algs[3] = { {
.cra_name = "tea",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TEA_KEY_SIZE,
.cia_max_keysize = TEA_KEY_SIZE,
.cia_setkey = tea_setkey,
.cia_encrypt = tea_encrypt,
.cia_decrypt = tea_decrypt } }
}, {
.cra_name = "xtea",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
.cia_max_keysize = XTEA_KEY_SIZE,
.cia_setkey = xtea_setkey,
.cia_encrypt = xtea_encrypt,
.cia_decrypt = xtea_decrypt } }
}, {
.cra_name = "xeta",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
.cia_max_keysize = XTEA_KEY_SIZE,
.cia_setkey = xtea_setkey,
.cia_encrypt = xeta_encrypt,
.cia_decrypt = xeta_decrypt } }
} };
static int __init tea_mod_init(void)
{
return crypto_register_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
static void __exit tea_mod_fini(void)
{
crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
MODULE_ALIAS("xtea");
MODULE_ALIAS("xeta");
module_init(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TEA, XTEA & XETA Cryptographic Algorithms");
| aopp/android_kernel_htc_flounder | crypto/tea.c | C | gpl-2.0 | 6,708 |
/* hermes.c
*
* Driver core for the "Hermes" wireless MAC controller, as used in
* the Lucent Orinoco and Cabletron RoamAbout cards. It should also
* work on the hfa3841 and hfa3842 MAC controller chips used in the
* Prism II chipsets.
*
* This is not a complete driver, just low-level access routines for
* the MAC controller itself.
*
* Based on the prism2 driver from Absolute Value Systems' linux-wlan
* project, the Linux wvlan_cs driver, Lucent's HCF-Light
* (wvlan_hcf.c) library, and the NetBSD wireless driver (in no
* particular order).
*
* Copyright (C) 2000, David Gibson, Linuxcare Australia.
* (C) Copyright David Gibson, IBM Corp. 2001-2003.
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License
* at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and
* limitations under the License.
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU General Public License version 2 (the "GPL"), in
* which case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice and
* other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file
* under either the MPL or the GPL.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include "hermes.h"
/* These are maximum timeouts. Most often, the card will react much faster */
#define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */
#define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */
#define CMD_COMPL_TIMEOUT (20000) /* in iterations of ~10us */
#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
/*
* AUX port access. To unlock the AUX port write the access keys to the
* PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
* register. Then read it and make sure it's HERMES_AUX_ENABLED.
*/
#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
#define HERMES_AUX_DISABLE 0x4000 /* Disable auxiliary port access */
#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
#define HERMES_AUX_PW0 0xFE01
#define HERMES_AUX_PW1 0xDC23
#define HERMES_AUX_PW2 0xBA45
/* HERMES_CMD_DOWNLD */
#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
/*
* Debugging helpers
*/
#define DMSG(stuff...) do {printk(KERN_DEBUG "hermes @ %p: " , hw->iobase); \
printk(stuff); } while (0)
#undef HERMES_DEBUG
#ifdef HERMES_DEBUG
#include <stdarg.h>
#define DEBUG(lvl, stuff...) if ((lvl) <= HERMES_DEBUG) DMSG(stuff)
#else /* ! HERMES_DEBUG */
#define DEBUG(lvl, stuff...) do { } while (0)
#endif /* ! HERMES_DEBUG */
static const struct hermes_ops hermes_ops_local;
/*
* Internal functions
*/
/* Issue a command to the chip. Waiting for it to complete is the caller's
problem.
Returns -EBUSY if the command register is busy, 0 on success.
Callable from any context.
*/
static int hermes_issue_cmd(struct hermes *hw, u16 cmd, u16 param0,
u16 param1, u16 param2)
{
int k = CMD_BUSY_TIMEOUT;
u16 reg;
/* First wait for the command register to unbusy */
reg = hermes_read_regn(hw, CMD);
while ((reg & HERMES_CMD_BUSY) && k) {
k--;
udelay(1);
reg = hermes_read_regn(hw, CMD);
}
if (reg & HERMES_CMD_BUSY)
return -EBUSY;
hermes_write_regn(hw, PARAM2, param2);
hermes_write_regn(hw, PARAM1, param1);
hermes_write_regn(hw, PARAM0, param0);
hermes_write_regn(hw, CMD, cmd);
return 0;
}
/*
* Function definitions
*/
/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
static int hermes_doicmd_wait(struct hermes *hw, u16 cmd,
u16 parm0, u16 parm1, u16 parm2,
struct hermes_response *resp)
{
int err = 0;
int k;
u16 status, reg;
err = hermes_issue_cmd(hw, cmd, parm0, parm1, parm2);
if (err)
return err;
reg = hermes_read_regn(hw, EVSTAT);
k = CMD_INIT_TIMEOUT;
while ((!(reg & HERMES_EV_CMD)) && k) {
k--;
udelay(10);
reg = hermes_read_regn(hw, EVSTAT);
}
hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
if (!hermes_present(hw)) {
DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n",
hw->iobase);
err = -ENODEV;
goto out;
}
if (!(reg & HERMES_EV_CMD)) {
printk(KERN_ERR "hermes @ %p: "
"Timeout waiting for card to reset (reg=0x%04x)!\n",
hw->iobase, reg);
err = -ETIMEDOUT;
goto out;
}
status = hermes_read_regn(hw, STATUS);
if (resp) {
resp->status = status;
resp->resp0 = hermes_read_regn(hw, RESP0);
resp->resp1 = hermes_read_regn(hw, RESP1);
resp->resp2 = hermes_read_regn(hw, RESP2);
}
hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
if (status & HERMES_STATUS_RESULT)
err = -EIO;
out:
return err;
}
void hermes_struct_init(struct hermes *hw, void __iomem *address,
int reg_spacing)
{
hw->iobase = address;
hw->reg_spacing = reg_spacing;
hw->inten = 0x0;
hw->eeprom_pda = false;
hw->ops = &hermes_ops_local;
}
EXPORT_SYMBOL(hermes_struct_init);
static int hermes_init(struct hermes *hw)
{
u16 reg;
int err = 0;
int k;
/* We don't want to be interrupted while resetting the chipset */
hw->inten = 0x0;
hermes_write_regn(hw, INTEN, 0);
hermes_write_regn(hw, EVACK, 0xffff);
/* Normally it's a "can't happen" for the command register to
be busy when we go to issue a command because we are
serializing all commands. However we want to have some
chance of resetting the card even if it gets into a stupid
state, so we actually wait to see if the command register
will unbusy itself here. */
k = CMD_BUSY_TIMEOUT;
reg = hermes_read_regn(hw, CMD);
while (k && (reg & HERMES_CMD_BUSY)) {
if (reg == 0xffff) /* Special case - the card has probably been
removed, so don't wait for the timeout */
return -ENODEV;
k--;
udelay(1);
reg = hermes_read_regn(hw, CMD);
}
/* No need to explicitly handle the timeout - if we've timed
out hermes_issue_cmd() will probably return -EBUSY below */
/* According to the documentation, EVSTAT may contain
obsolete event occurrence information. We have to acknowledge
it by writing EVACK. */
reg = hermes_read_regn(hw, EVSTAT);
hermes_write_regn(hw, EVACK, reg);
/* We don't use hermes_docmd_wait here, because the reset wipes
the magic constant in SWSUPPORT0 away, and it gets confused */
err = hermes_doicmd_wait(hw, HERMES_CMD_INIT, 0, 0, 0, NULL);
return err;
}
/* Issue a command to the chip, and (busy!) wait for it to
* complete.
*
* Returns:
* < 0 on internal error
* 0 on success
* > 0 on error returned by the firmware
*
* Callable from any context, but locking is your problem. */
static int hermes_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
struct hermes_response *resp)
{
int err;
int k;
u16 reg;
u16 status;
err = hermes_issue_cmd(hw, cmd, parm0, 0, 0);
if (err) {
if (!hermes_present(hw)) {
if (net_ratelimit())
printk(KERN_WARNING "hermes @ %p: "
"Card removed while issuing command "
"0x%04x.\n", hw->iobase, cmd);
err = -ENODEV;
} else
if (net_ratelimit())
printk(KERN_ERR "hermes @ %p: "
"Error %d issuing command 0x%04x.\n",
hw->iobase, err, cmd);
goto out;
}
reg = hermes_read_regn(hw, EVSTAT);
k = CMD_COMPL_TIMEOUT;
while ((!(reg & HERMES_EV_CMD)) && k) {
k--;
udelay(10);
reg = hermes_read_regn(hw, EVSTAT);
}
if (!hermes_present(hw)) {
printk(KERN_WARNING "hermes @ %p: Card removed "
"while waiting for command 0x%04x completion.\n",
hw->iobase, cmd);
err = -ENODEV;
goto out;
}
if (!(reg & HERMES_EV_CMD)) {
printk(KERN_ERR "hermes @ %p: Timeout waiting for "
"command 0x%04x completion.\n", hw->iobase, cmd);
err = -ETIMEDOUT;
goto out;
}
status = hermes_read_regn(hw, STATUS);
if (resp) {
resp->status = status;
resp->resp0 = hermes_read_regn(hw, RESP0);
resp->resp1 = hermes_read_regn(hw, RESP1);
resp->resp2 = hermes_read_regn(hw, RESP2);
}
hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
if (status & HERMES_STATUS_RESULT)
err = -EIO;
out:
return err;
}
static int hermes_allocate(struct hermes *hw, u16 size, u16 *fid)
{
int err = 0;
int k;
u16 reg;
if ((size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX))
return -EINVAL;
err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL);
if (err)
return err;
reg = hermes_read_regn(hw, EVSTAT);
k = ALLOC_COMPL_TIMEOUT;
while ((!(reg & HERMES_EV_ALLOC)) && k) {
k--;
udelay(10);
reg = hermes_read_regn(hw, EVSTAT);
}
if (!hermes_present(hw)) {
printk(KERN_WARNING "hermes @ %p: "
"Card removed waiting for frame allocation.\n",
hw->iobase);
return -ENODEV;
}
if (!(reg & HERMES_EV_ALLOC)) {
printk(KERN_ERR "hermes @ %p: "
"Timeout waiting for frame allocation\n",
hw->iobase);
return -ETIMEDOUT;
}
*fid = hermes_read_regn(hw, ALLOCFID);
hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC);
return 0;
}
/* Set up a BAP to read a particular chunk of data from card's internal buffer.
*
* Returns:
* < 0 on internal failure (errno)
* 0 on success
* > 0 on error
* from firmware
*
* Callable from any context */
static int hermes_bap_seek(struct hermes *hw, int bap, u16 id, u16 offset)
{
int sreg = bap ? HERMES_SELECT1 : HERMES_SELECT0;
int oreg = bap ? HERMES_OFFSET1 : HERMES_OFFSET0;
int k;
u16 reg;
/* Paranoia.. */
if ((offset > HERMES_BAP_OFFSET_MAX) || (offset % 2))
return -EINVAL;
k = HERMES_BAP_BUSY_TIMEOUT;
reg = hermes_read_reg(hw, oreg);
while ((reg & HERMES_OFFSET_BUSY) && k) {
k--;
udelay(1);
reg = hermes_read_reg(hw, oreg);
}
if (reg & HERMES_OFFSET_BUSY)
return -ETIMEDOUT;
/* Now we actually set up the transfer */
hermes_write_reg(hw, sreg, id);
hermes_write_reg(hw, oreg, offset);
/* Wait for the BAP to be ready */
k = HERMES_BAP_BUSY_TIMEOUT;
reg = hermes_read_reg(hw, oreg);
while ((reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) {
k--;
udelay(1);
reg = hermes_read_reg(hw, oreg);
}
if (reg != offset) {
printk(KERN_ERR "hermes @ %p: BAP%d offset %s: "
"reg=0x%x id=0x%x offset=0x%x\n", hw->iobase, bap,
(reg & HERMES_OFFSET_BUSY) ? "timeout" : "error",
reg, id, offset);
if (reg & HERMES_OFFSET_BUSY)
return -ETIMEDOUT;
return -EIO; /* error or wrong offset */
}
return 0;
}
/* Read a block of data from the chip's buffer, via the
* BAP. Synchronization/serialization is the caller's problem. len
* must be even.
*
* Returns:
* < 0 on internal failure (errno)
* 0 on success
* > 0 on error from firmware
*/
static int hermes_bap_pread(struct hermes *hw, int bap, void *buf, int len,
u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
if ((len < 0) || (len % 2))
return -EINVAL;
err = hermes_bap_seek(hw, bap, id, offset);
if (err)
goto out;
/* Actually do the transfer */
hermes_read_words(hw, dreg, buf, len / 2);
out:
return err;
}
/* Write a block of data to the chip's buffer, via the
* BAP. Synchronization/serialization is the caller's problem.
*
* Returns:
* < 0 on internal failure (errno)
* 0 on success
* > 0 on error from firmware
*/
static int hermes_bap_pwrite(struct hermes *hw, int bap, const void *buf,
int len, u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
if (len < 0)
return -EINVAL;
err = hermes_bap_seek(hw, bap, id, offset);
if (err)
goto out;
/* Actually do the transfer */
hermes_write_bytes(hw, dreg, buf, len);
out:
return err;
}
/* Read a Length-Type-Value record from the card.
*
* If length is NULL, we ignore the length read from the card, and
* read the entire buffer regardless. This is useful because some of
* the configuration records appear to have incorrect lengths in
* practice.
*
* Callable from user or bh context. */
static int hermes_read_ltv(struct hermes *hw, int bap, u16 rid,
unsigned bufsize, u16 *length, void *buf)
{
int err = 0;
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
u16 rlength, rtype;
unsigned nwords;
if (bufsize % 2)
return -EINVAL;
err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL);
if (err)
return err;
err = hermes_bap_seek(hw, bap, rid, 0);
if (err)
return err;
rlength = hermes_read_reg(hw, dreg);
if (!rlength)
return -ENODATA;
rtype = hermes_read_reg(hw, dreg);
if (length)
*length = rlength;
if (rtype != rid)
printk(KERN_WARNING "hermes @ %p: %s(): "
"rid (0x%04x) does not match type (0x%04x)\n",
hw->iobase, __func__, rid, rtype);
if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
printk(KERN_WARNING "hermes @ %p: "
"Truncating LTV record from %d to %d bytes. "
"(rid=0x%04x, len=0x%04x)\n", hw->iobase,
HERMES_RECLEN_TO_BYTES(rlength), bufsize, rid, rlength);
nwords = min((unsigned)rlength - 1, bufsize / 2);
hermes_read_words(hw, dreg, buf, nwords);
return 0;
}
static int hermes_write_ltv(struct hermes *hw, int bap, u16 rid,
u16 length, const void *value)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
unsigned count;
if (length == 0)
return -EINVAL;
err = hermes_bap_seek(hw, bap, rid, 0);
if (err)
return err;
hermes_write_reg(hw, dreg, length);
hermes_write_reg(hw, dreg, rid);
count = length - 1;
hermes_write_bytes(hw, dreg, value, count << 1);
err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
rid, NULL);
return err;
}
/*** Hermes AUX control ***/
static inline void
hermes_aux_setaddr(struct hermes *hw, u32 addr)
{
hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
}
static inline int
hermes_aux_control(struct hermes *hw, int enabled)
{
int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
int i;
/* Already open? */
if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
return 0;
hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
hermes_write_reg(hw, HERMES_CONTROL, action);
for (i = 0; i < 20; i++) {
udelay(10);
if (hermes_read_reg(hw, HERMES_CONTROL) ==
desired_state)
return 0;
}
return -EBUSY;
}
/*** Hermes programming ***/
/* About to start programming data (Hermes I)
* offset is the entry point
*
* Spectrum_cs' Symbol fw does not require this
* wl_lkm Agere fw does
* Don't know about intersil
*/
static int hermesi_program_init(struct hermes *hw, u32 offset)
{
int err;
/* Disable interrupts?*/
/*hw->inten = 0x0;*/
/*hermes_write_regn(hw, INTEN, 0);*/
/*hermes_set_irqmask(hw, 0);*/
/* Acknowledge any outstanding command */
hermes_write_regn(hw, EVACK, 0xFFFF);
/* Using init_cmd_wait rather than cmd_wait */
err = hw->ops->init_cmd_wait(hw,
0x0100 | HERMES_CMD_INIT,
0, 0, 0, NULL);
if (err)
return err;
err = hw->ops->init_cmd_wait(hw,
0x0000 | HERMES_CMD_INIT,
0, 0, 0, NULL);
if (err)
return err;
err = hermes_aux_control(hw, 1);
pr_debug("AUX enable returned %d\n", err);
if (err)
return err;
pr_debug("Enabling volatile, EP 0x%08x\n", offset);
err = hw->ops->init_cmd_wait(hw,
HERMES_PROGRAM_ENABLE_VOLATILE,
offset & 0xFFFFu,
offset >> 16,
0,
NULL);
pr_debug("PROGRAM_ENABLE returned %d\n", err);
return err;
}
/* Done programming data (Hermes I)
*
* Spectrum_cs' Symbol fw does not require this
* wl_lkm Agere fw does
* Don't know about intersil
*/
static int hermesi_program_end(struct hermes *hw)
{
struct hermes_response resp;
int rc = 0;
int err;
rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
pr_debug("PROGRAM_DISABLE returned %d, "
"r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
rc, resp.resp0, resp.resp1, resp.resp2);
if ((rc == 0) &&
((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
rc = -EIO;
err = hermes_aux_control(hw, 0);
pr_debug("AUX disable returned %d\n", err);
/* Acknowledge any outstanding command */
hermes_write_regn(hw, EVACK, 0xFFFF);
/* Reinitialise, ignoring return */
(void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
0, 0, 0, NULL);
return rc ? rc : err;
}
static int hermes_program_bytes(struct hermes *hw, const char *data,
u32 addr, u32 len)
{
/* wl lkm splits the programming into chunks of 2000 bytes.
* This restriction appears to come from USB. The PCMCIA
* adapters can program the whole lot in one go */
hermes_aux_setaddr(hw, addr);
hermes_write_bytes(hw, HERMES_AUXDATA, data, len);
return 0;
}
/* Read PDA from the adapter */
static int hermes_read_pda(struct hermes *hw, __le16 *pda, u32 pda_addr,
u16 pda_len)
{
int ret;
u16 pda_size;
u16 data_len = pda_len;
__le16 *data = pda;
if (hw->eeprom_pda) {
/* PDA of spectrum symbol is in eeprom */
/* Issue command to read EEPROM */
ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
if (ret)
return ret;
} else {
/* wl_lkm does not include PDA size in the PDA area.
* We will pad the information into pda, so other routines
* don't have to be modified */
pda[0] = cpu_to_le16(pda_len - 2);
/* Includes CFG_PROD_DATA but not itself */
pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
data_len = pda_len - 4;
data = pda + 2;
}
/* Open auxiliary port */
ret = hermes_aux_control(hw, 1);
pr_debug("AUX enable returned %d\n", ret);
if (ret)
return ret;
/* Read PDA */
hermes_aux_setaddr(hw, pda_addr);
hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
/* Close aux port */
ret = hermes_aux_control(hw, 0);
pr_debug("AUX disable returned %d\n", ret);
/* Check PDA length */
pda_size = le16_to_cpu(pda[0]);
pr_debug("Actual PDA length %d, Max allowed %d\n",
pda_size, pda_len);
if (pda_size > pda_len)
return -EINVAL;
return 0;
}
static void hermes_lock_irqsave(spinlock_t *lock,
unsigned long *flags) __acquires(lock)
{
spin_lock_irqsave(lock, *flags);
}
static void hermes_unlock_irqrestore(spinlock_t *lock,
unsigned long *flags) __releases(lock)
{
spin_unlock_irqrestore(lock, *flags);
}
static void hermes_lock_irq(spinlock_t *lock) __acquires(lock)
{
spin_lock_irq(lock);
}
static void hermes_unlock_irq(spinlock_t *lock) __releases(lock)
{
spin_unlock_irq(lock);
}
/* Hermes operations for local buses */
static const struct hermes_ops hermes_ops_local = {
.init = hermes_init,
.cmd_wait = hermes_docmd_wait,
.init_cmd_wait = hermes_doicmd_wait,
.allocate = hermes_allocate,
.read_ltv = hermes_read_ltv,
.write_ltv = hermes_write_ltv,
.bap_pread = hermes_bap_pread,
.bap_pwrite = hermes_bap_pwrite,
.read_pda = hermes_read_pda,
.program_init = hermesi_program_init,
.program_end = hermesi_program_end,
.program = hermes_program_bytes,
.lock_irqsave = hermes_lock_irqsave,
.unlock_irqrestore = hermes_unlock_irqrestore,
.lock_irq = hermes_lock_irq,
.unlock_irq = hermes_unlock_irq,
};
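/*
 * Editor's note: minimal usage sketch (not part of the original driver)
 * showing how a caller is expected to drive the routines above: initialise
 * the handle, reset the MAC, then read one LTV record over BAP 0.  The
 * iobase, reg_spacing and rid arguments are placeholders supplied by the
 * caller; real users pick the RID constants from hermes_rid.h.
 */
static int __maybe_unused hermes_usage_example(void __iomem *iobase,
					       int reg_spacing, u16 rid)
{
	struct hermes hw;
	__le16 buf[16];
	u16 length;
	int err;
	hermes_struct_init(&hw, iobase, reg_spacing);
	err = hw.ops->init(&hw);	/* hermes_init() */
	if (err)
		return err;
	/* Read the record into buf via BAP 0 (USER_BAP). */
	err = hw.ops->read_ltv(&hw, USER_BAP, rid, sizeof(buf), &length, buf);
	if (err)
		return err;
	printk(KERN_DEBUG "hermes example: rid 0x%04x, %d words\n",
	       rid, length);
	return 0;
}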
| endocode/linux | drivers/net/wireless/intersil/orinoco/hermes.c | C | gpl-2.0 | 20,014 |
/******************************************************************************
* rtl8712_cmd.c
*
* Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192SU
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
* Contact information:
* WLAN FAE <wlanfae@realtek.com>.
* Larry Finger <Larry.Finger@lwfinger.net>
*
******************************************************************************/
#define _RTL8712_CMD_C_
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/circ_buf.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/semaphore.h>
#include <linux/rtnetlink.h>
#include "osdep_service.h"
#include "drv_types.h"
#include "recv_osdep.h"
#include "mlme_osdep.h"
#include "rtl871x_ioctl_set.h"
static void check_hw_pbc(struct _adapter *padapter)
{
u8 tmp1byte;
r8712_write8(padapter, MAC_PINMUX_CTRL, (GPIOMUX_EN | GPIOSEL_GPIO));
tmp1byte = r8712_read8(padapter, GPIO_IO_SEL);
tmp1byte &= ~(HAL_8192S_HW_GPIO_WPS_BIT);
r8712_write8(padapter, GPIO_IO_SEL, tmp1byte);
tmp1byte = r8712_read8(padapter, GPIO_CTRL);
if (tmp1byte == 0xff)
		return;
if (tmp1byte&HAL_8192S_HW_GPIO_WPS_BIT) {
		/* Here we only set bPbcPressed to true.
		 * After the PBC is triggered, the variable is set to false. */
DBG_8712("CheckPbcGPIO - PBC is pressed !!!!\n");
		/* 0 is the default value and it means the application that
		 * monitors the HW PBC has not provided its pid to the driver. */
if (padapter->pid == 0)
return;
kill_pid(find_vpid(padapter->pid), SIGUSR1, 1);
}
}
/* query rx phy status from fw.
* Adhoc mode: beacon.
 * Infrastructure mode: beacon, data. */
static void query_fw_rx_phy_status(struct _adapter *padapter)
{
u32 val32 = 0;
int pollingcnts = 50;
if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == true) {
r8712_write32(padapter, IOCMD_CTRL_REG, 0xf4000001);
msleep(100);
/* Wait FW complete IO Cmd */
while ((r8712_read32(padapter, IOCMD_CTRL_REG)) &&
(pollingcnts > 0)) {
pollingcnts--;
msleep(20);
}
if (pollingcnts != 0)
val32 = r8712_read32(padapter, IOCMD_DATA_REG);
else /* time out */
val32 = 0;
val32 = val32 >> 4;
padapter->recvpriv.fw_rssi =
(u8)r8712_signal_scale_mapping(val32);
}
}
/* check mlme, hw, phy, or dynamic algorithm status. */
static void StatusWatchdogCallback(struct _adapter *padapter)
{
check_hw_pbc(padapter);
query_fw_rx_phy_status(padapter);
}
static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct drvint_cmd_parm *pdrvcmd;
if (!pbuf)
return;
pdrvcmd = (struct drvint_cmd_parm *)pbuf;
switch (pdrvcmd->i_cid) {
case WDG_WK_CID:
StatusWatchdogCallback(padapter);
break;
default:
break;
}
kfree(pdrvcmd->pbuf);
}
static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
/* invoke cmd->callback function */
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
return H2C_SUCCESS;
}
static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
/* invoke cmd->callback function */
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
return H2C_SUCCESS;
}
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
u32 val;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct readBB_parm *prdbbparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
prdbbparm = (struct readBB_parm *)pcmd->parmbuf;
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
return H2C_SUCCESS;
}
static u8 write_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct writeBB_parm *pwritebbparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
pwritebbparm = (struct writeBB_parm *)pcmd->parmbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
return H2C_SUCCESS;
}
static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
u32 val;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct readRF_parm *prdrfparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
prdrfparm = (struct readRF_parm *)pcmd->parmbuf;
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
return H2C_SUCCESS;
}
static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct writeRF_parm *pwriterfparm;
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
pwriterfparm = (struct writeRF_parm *)pcmd->parmbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
if (pcmd_callback == NULL)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
return H2C_SUCCESS;
}
static u8 sys_suspend_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
struct usb_suspend_parm *psetusbsuspend;
psetusbsuspend = (struct usb_suspend_parm *)pcmd->parmbuf;
r8712_free_cmd_obj(pcmd);
return H2C_SUCCESS;
}
static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
struct cmd_obj *pcmd)
{
struct cmd_obj *pcmd_r;
if (pcmd == NULL)
return pcmd;
pcmd_r = NULL;
switch (pcmd->cmdcode) {
case GEN_CMD_CODE(_Read_MACREG):
read_macreg_hdl(padapter, (u8 *)pcmd);
pcmd_r = pcmd;
break;
case GEN_CMD_CODE(_Write_MACREG):
write_macreg_hdl(padapter, (u8 *)pcmd);
pcmd_r = pcmd;
break;
case GEN_CMD_CODE(_Read_BBREG):
read_bbreg_hdl(padapter, (u8 *)pcmd);
break;
case GEN_CMD_CODE(_Write_BBREG):
write_bbreg_hdl(padapter, (u8 *)pcmd);
break;
case GEN_CMD_CODE(_Read_RFREG):
read_rfreg_hdl(padapter, (u8 *)pcmd);
break;
case GEN_CMD_CODE(_Write_RFREG):
write_rfreg_hdl(padapter, (u8 *)pcmd);
break;
case GEN_CMD_CODE(_SetUsbSuspend):
sys_suspend_hdl(padapter, (u8 *)pcmd);
break;
case GEN_CMD_CODE(_JoinBss):
r8712_joinbss_reset(padapter);
		/* Before sending JoinBss_CMD to the FW, the driver must ensure
		 * the FW is in PS_MODE_ACTIVE. Directly write rpwm to turn the
		 * radio on and assign the new pwr_mode to the driver, instead
		 * of using a workitem to change the state. */
if (padapter->pwrctrlpriv.pwr_mode > PS_MODE_ACTIVE) {
padapter->pwrctrlpriv.pwr_mode = PS_MODE_ACTIVE;
_enter_pwrlock(&(padapter->pwrctrlpriv.lock));
r8712_set_rpwm(padapter, PS_STATE_S4);
up(&(padapter->pwrctrlpriv.lock));
}
pcmd_r = pcmd;
break;
case _DRV_INT_CMD_:
r871x_internal_cmd_hdl(padapter, pcmd->parmbuf);
r8712_free_cmd_obj(pcmd);
pcmd_r = NULL;
break;
default:
pcmd_r = pcmd;
break;
}
	return pcmd_r; /* if pcmd_r == NULL is returned, pcmd has already been freed */
}
static u8 check_cmd_fifo(struct _adapter *padapter, uint sz)
{
u8 res = _SUCCESS;
return res;
}
u8 r8712_fw_cmd(struct _adapter *pAdapter, u32 cmd)
{
int pollingcnts = 50;
r8712_write32(pAdapter, IOCMD_CTRL_REG, cmd);
msleep(100);
while ((0 != r8712_read32(pAdapter, IOCMD_CTRL_REG)) &&
(pollingcnts > 0)) {
pollingcnts--;
msleep(20);
}
if (pollingcnts == 0)
return false;
return true;
}
void r8712_fw_cmd_data(struct _adapter *pAdapter, u32 *value, u8 flag)
{
if (flag == 0) /* set */
r8712_write32(pAdapter, IOCMD_DATA_REG, *value);
else /* query */
*value = r8712_read32(pAdapter, IOCMD_DATA_REG);
}
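/*
 * Editor's note: illustrative sketch (not part of the original driver) of
 * the firmware IO-command handshake implemented by r8712_fw_cmd() and
 * r8712_fw_cmd_data() above: issue the command word, then fetch the result
 * from IOCMD_DATA_REG.  The command value reuses the "query rx phy status"
 * opcode already used in this file, and the scaling mirrors
 * query_fw_rx_phy_status().
 */
static u8 __maybe_unused r8712_query_fw_rssi_example(struct _adapter *padapter)
{
	u32 val = 0;
	if (r8712_fw_cmd(padapter, 0xf4000001))	/* true when the FW completed the cmd */
		r8712_fw_cmd_data(padapter, &val, 1);	/* flag 1 = query */
	return (u8)r8712_signal_scale_mapping(val >> 4);
}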
int r8712_cmd_thread(void *context)
{
struct cmd_obj *pcmd;
unsigned int cmdsz, wr_sz, *pcmdbuf, *prspbuf;
struct tx_desc *pdesc;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct _adapter *padapter = (struct _adapter *)context;
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
thread_enter(padapter);
while (1) {
if ((_down_sema(&(pcmdpriv->cmd_queue_sema))) == _FAIL)
break;
if ((padapter->bDriverStopped == true) ||
(padapter->bSurpriseRemoved == true))
break;
if (r8712_register_cmd_alive(padapter) != _SUCCESS)
continue;
_next:
pcmd = r8712_dequeue_cmd(&(pcmdpriv->cmd_queue));
if (!(pcmd)) {
r8712_unregister_cmd_alive(padapter);
continue;
}
pcmdbuf = (unsigned int *)pcmdpriv->cmd_buf;
prspbuf = (unsigned int *)pcmdpriv->rsp_buf;
pdesc = (struct tx_desc *)pcmdbuf;
memset(pdesc, 0, TXDESC_SIZE);
pcmd = cmd_hdl_filter(padapter, pcmd);
if (pcmd) { /* if pcmd != NULL, cmd will be handled by f/w */
struct dvobj_priv *pdvobj = (struct dvobj_priv *)
&padapter->dvobjpriv;
u8 blnPending = 0;
pcmdpriv->cmd_issued_cnt++;
cmdsz = _RND8((pcmd->cmdsz)); /* _RND8 */
wr_sz = TXDESC_SIZE + 8 + cmdsz;
pdesc->txdw0 |= cpu_to_le32((wr_sz-TXDESC_SIZE) &
0x0000ffff);
if (pdvobj->ishighspeed) {
if ((wr_sz % 512) == 0)
blnPending = 1;
} else {
if ((wr_sz % 64) == 0)
blnPending = 1;
}
if (blnPending) /* 32 bytes for TX Desc - 8 offset */
pdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE +
OFFSET_SZ + 8) << OFFSET_SHT) &
0x00ff0000);
else {
pdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE +
OFFSET_SZ) <<
OFFSET_SHT) &
0x00ff0000);
}
pdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
pdesc->txdw1 |= cpu_to_le32((0x13 << QSEL_SHT) &
0x00001f00);
pcmdbuf += (TXDESC_SIZE >> 2);
*pcmdbuf = cpu_to_le32((cmdsz & 0x0000ffff) |
(pcmd->cmdcode << 16) |
(pcmdpriv->cmd_seq << 24));
pcmdbuf += 2 ; /* 8 bytes alignment */
memcpy((u8 *)pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
while (check_cmd_fifo(padapter, wr_sz) == _FAIL) {
if ((padapter->bDriverStopped == true) ||
(padapter->bSurpriseRemoved == true))
break;
msleep(100);
continue;
}
if (blnPending)
wr_sz += 8; /* Append 8 bytes */
r8712_write_mem(padapter, RTL8712_DMA_H2CCMD, wr_sz,
(u8 *)pdesc);
pcmdpriv->cmd_seq++;
if (pcmd->cmdcode == GEN_CMD_CODE(_CreateBss)) {
pcmd->res = H2C_SUCCESS;
pcmd_callback = cmd_callback[pcmd->
cmdcode].callback;
if (pcmd_callback)
pcmd_callback(padapter, pcmd);
continue;
}
if (pcmd->cmdcode == GEN_CMD_CODE(_SetPwrMode)) {
if (padapter->pwrctrlpriv.bSleep) {
_enter_pwrlock(&(padapter->
pwrctrlpriv.lock));
r8712_set_rpwm(padapter, PS_STATE_S2);
up(&padapter->pwrctrlpriv.lock);
}
}
r8712_free_cmd_obj(pcmd);
if (_queue_empty(&(pcmdpriv->cmd_queue))) {
r8712_unregister_cmd_alive(padapter);
continue;
} else
goto _next;
} else
goto _next;
flush_signals_thread();
}
/* free all cmd_obj resources */
do {
pcmd = r8712_dequeue_cmd(&(pcmdpriv->cmd_queue));
if (pcmd == NULL)
break;
r8712_free_cmd_obj(pcmd);
} while (1);
up(&pcmdpriv->terminate_cmdthread_sema);
thread_exit();
}
void r8712_event_handle(struct _adapter *padapter, uint *peventbuf)
{
u8 evt_code, evt_seq;
u16 evt_sz;
void (*event_callback)(struct _adapter *dev, u8 *pbuf);
struct evt_priv *pevt_priv = &(padapter->evtpriv);
if (peventbuf == NULL)
goto _abort_event_;
evt_sz = (u16)(le32_to_cpu(*peventbuf) & 0xffff);
evt_seq = (u8)((le32_to_cpu(*peventbuf) >> 24) & 0x7f);
evt_code = (u8)((le32_to_cpu(*peventbuf) >> 16) & 0xff);
/* checking event sequence... */
if ((evt_seq & 0x7f) != pevt_priv->event_seq) {
pevt_priv->event_seq = ((evt_seq + 1) & 0x7f);
goto _abort_event_;
}
/* checking if event code is valid */
if (evt_code >= MAX_C2HEVT) {
pevt_priv->event_seq = ((evt_seq+1) & 0x7f);
goto _abort_event_;
} else if ((evt_code == GEN_EVT_CODE(_Survey)) &&
(evt_sz > sizeof(struct wlan_bssid_ex))) {
pevt_priv->event_seq = ((evt_seq+1)&0x7f);
goto _abort_event_;
}
/* checking if event size match the event parm size */
if ((wlanevents[evt_code].parmsize) &&
(wlanevents[evt_code].parmsize != evt_sz)) {
pevt_priv->event_seq = ((evt_seq+1)&0x7f);
goto _abort_event_;
} else if ((evt_sz == 0) && (evt_code != GEN_EVT_CODE(_WPS_PBC))) {
pevt_priv->event_seq = ((evt_seq+1)&0x7f);
goto _abort_event_;
}
pevt_priv->event_seq++; /* update evt_seq */
if (pevt_priv->event_seq > 127)
pevt_priv->event_seq = 0;
peventbuf = peventbuf + 2; /* move to event content, 8 bytes alignment */
if (peventbuf) {
event_callback = wlanevents[evt_code].event_callback;
if (event_callback)
event_callback(padapter, (u8 *)peventbuf);
}
pevt_priv->evt_done_cnt++;
_abort_event_:
return;
}
| Psycho666/Simplicity_trlte_kernel | drivers/staging/rtl8712/rtl8712_cmd.c | C | gpl-2.0 | 13,991 |
/*
* Copyright (C) 2005-2006 Micronas USA Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <asm/byteorder.h>
#include <media/tvaudio.h>
#include "go7007-priv.h"
#include "wis-i2c.h"
static unsigned int assume_endura;
module_param(assume_endura, int, 0644);
MODULE_PARM_DESC(assume_endura, "when probing fails, "
		"assume the hardware is a Pelco Endura");
/* #define GO7007_USB_DEBUG */
/* #define GO7007_I2C_DEBUG */ /* for debugging the EZ-USB I2C adapter */
#define HPI_STATUS_ADDR 0xFFF4
#define INT_PARAM_ADDR 0xFFF6
#define INT_INDEX_ADDR 0xFFF8
/*
* Pipes on EZ-USB interface:
* 0 snd - Control
* 0 rcv - Control
* 2 snd - Download firmware (control)
* 4 rcv - Read Interrupt (interrupt)
* 6 rcv - Read Video (bulk)
* 8 rcv - Read Audio (bulk)
*/
#define GO7007_USB_EZUSB (1<<0)
#define GO7007_USB_EZUSB_I2C (1<<1)
struct go7007_usb_board {
unsigned int flags;
struct go7007_board_info main_info;
};
struct go7007_usb {
struct go7007_usb_board *board;
struct mutex i2c_lock;
struct usb_device *usbdev;
struct urb *video_urbs[8];
struct urb *audio_urbs[8];
struct urb *intr_urb;
};
/*********************** Product specification data ***********************/
static struct go7007_usb_board board_matrix_ii = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.firmware = "go7007tv.bin",
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_VALID_ENABLE |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI |
GO7007_SENSOR_SCALING,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "wis_saa7115",
.id = I2C_DRIVERID_WIS_SAA7115,
.addr = 0x20,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
{
.video_input = 9,
.name = "S-Video",
},
},
},
};
static struct go7007_usb_board board_matrix_reload = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.firmware = "go7007tv.bin",
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "wis_saa7113",
.id = I2C_DRIVERID_WIS_SAA7113,
.addr = 0x25,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
{
.video_input = 9,
.name = "S-Video",
},
},
},
};
static struct go7007_usb_board board_star_trek = {
.flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C,
.main_info = {
.firmware = "go7007tv.bin",
.flags = GO7007_BOARD_HAS_AUDIO, /* |
GO7007_BOARD_HAS_TUNER, */
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_VALID_ENABLE |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI |
GO7007_SENSOR_SCALING,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_WORD_16,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "wis_saa7115",
.id = I2C_DRIVERID_WIS_SAA7115,
.addr = 0x20,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 1,
/* .audio_input = AUDIO_EXTERN, */
.name = "Composite",
},
{
.video_input = 8,
/* .audio_input = AUDIO_EXTERN, */
.name = "S-Video",
},
/* {
* .video_input = 3,
* .audio_input = AUDIO_TUNER,
* .name = "Tuner",
* },
*/
},
},
};
static struct go7007_usb_board board_px_tv402u = {
.flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C,
.main_info = {
.firmware = "go7007tv.bin",
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_HAS_TUNER,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_VALID_ENABLE |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI |
GO7007_SENSOR_SCALING,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_WORD_16,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.num_i2c_devs = 3,
.i2c_devs = {
{
.type = "wis_saa7115",
.id = I2C_DRIVERID_WIS_SAA7115,
.addr = 0x20,
},
{
.type = "wis_uda1342",
.id = I2C_DRIVERID_WIS_UDA1342,
.addr = 0x1a,
},
{
.type = "wis_sony_tuner",
.id = I2C_DRIVERID_WIS_SONY_TUNER,
.addr = 0x60,
},
},
.num_inputs = 3,
.inputs = {
{
.video_input = 1,
.audio_input = TVAUDIO_INPUT_EXTERN,
.name = "Composite",
},
{
.video_input = 8,
.audio_input = TVAUDIO_INPUT_EXTERN,
.name = "S-Video",
},
{
.video_input = 3,
.audio_input = TVAUDIO_INPUT_TUNER,
.name = "Tuner",
},
},
},
};
static struct go7007_usb_board board_xmen = {
.flags = 0,
.main_info = {
.firmware = "go7007tv.bin",
.flags = GO7007_BOARD_USE_ONBOARD_I2C,
.hpi_buffer_cap = 0,
.sensor_flags = GO7007_SENSOR_VREF_POLAR,
.sensor_width = 320,
.sensor_height = 240,
.sensor_framerate = 30030,
.audio_flags = GO7007_AUDIO_ONE_CHANNEL |
GO7007_AUDIO_I2S_MODE_3 |
GO7007_AUDIO_WORD_14 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_BCLK_POLAR |
GO7007_AUDIO_OKI_MODE,
.audio_rate = 8000,
.audio_bclk_div = 48,
.audio_main_div = 1,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "wis_ov7640",
.id = I2C_DRIVERID_WIS_OV7640,
.addr = 0x21,
},
},
.num_inputs = 1,
.inputs = {
{
.name = "Camera",
},
},
},
};
static struct go7007_usb_board board_matrix_revolution = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.firmware = "go7007tv.bin",
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "wis_tw9903",
.id = I2C_DRIVERID_WIS_TW9903,
.addr = 0x44,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 2,
.name = "Composite",
},
{
.video_input = 8,
.name = "S-Video",
},
},
},
};
static struct go7007_usb_board board_lifeview_lr192 = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.firmware = "go7007tv.bin",
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_VALID_ENABLE |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI |
GO7007_SENSOR_SCALING,
.num_i2c_devs = 0,
.num_inputs = 1,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
},
},
};
static struct go7007_usb_board board_endura = {
.flags = 0,
.main_info = {
.firmware = "go7007tv.bin",
.flags = 0,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 8000,
.audio_bclk_div = 48,
.audio_main_div = 8,
.hpi_buffer_cap = 0,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV,
.sensor_h_offset = 8,
.num_i2c_devs = 0,
.num_inputs = 1,
.inputs = {
{
.name = "Camera",
},
},
},
};
static struct go7007_usb_board board_adlink_mpg24 = {
.flags = 0,
.main_info = {
.firmware = "go7007tv.bin",
.flags = GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 0,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "wis_tw2804",
.id = I2C_DRIVERID_WIS_TW2804,
.addr = 0x00, /* yes, really */
},
},
.num_inputs = 1,
.inputs = {
{
.name = "Composite",
},
},
},
};
static struct go7007_usb_board board_sensoray_2250 = {
.flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C,
.main_info = {
.firmware = "go7007tv.bin",
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.flags = GO7007_BOARD_HAS_AUDIO,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "s2250",
.id = I2C_DRIVERID_S2250,
.addr = 0x43,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
{
.video_input = 1,
.name = "S-Video",
},
},
},
};
MODULE_FIRMWARE("go7007tv.bin");
static const struct usb_device_id go7007_usb_id_table[] = {
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x200, /* Revision number of XMen */
.bcdDevice_hi = 0x200,
.bInterfaceClass = 255,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 255,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x202, /* Revision number of Matrix II */
.bcdDevice_hi = 0x202,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_II,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x204, /* Revision number of Matrix */
.bcdDevice_hi = 0x204, /* Reloaded */
.driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_RELOAD,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x205, /* Revision number of XMen-II */
.bcdDevice_hi = 0x205,
.bInterfaceClass = 255,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 255,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN_II,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x208, /* Revision number of Star Trek */
.bcdDevice_hi = 0x208,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_STAR_TREK,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x209, /* Revision number of XMen-III */
.bcdDevice_hi = 0x209,
.bInterfaceClass = 255,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 255,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN_III,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x210, /* Revision number of Matrix */
.bcdDevice_hi = 0x210, /* Revolution */
.driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_REV,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x093b, /* Vendor ID of Plextor */
.idProduct = 0xa102, /* Product ID of M402U */
.bcdDevice_lo = 0x1, /* revision number of Blueberry */
.bcdDevice_hi = 0x1,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_PX_M402U,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x093b, /* Vendor ID of Plextor */
.idProduct = 0xa104, /* Product ID of TV402U */
.bcdDevice_lo = 0x1,
.bcdDevice_hi = 0x1,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_PX_TV402U_ANY,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x10fd, /* Vendor ID of Anubis Electronics */
.idProduct = 0xde00, /* Product ID of Lifeview LR192 */
.bcdDevice_lo = 0x1,
.bcdDevice_hi = 0x1,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_LIFEVIEW_LR192,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x1943, /* Vendor ID Sensoray */
.idProduct = 0x2250, /* Product ID of 2250/2251 */
.bcdDevice_lo = 0x1,
.bcdDevice_hi = 0x1,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_SENSORAY_2250,
},
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, go7007_usb_id_table);
/********************* Driver for EZ-USB HPI interface *********************/
static int go7007_usb_vendor_request(struct go7007 *go, int request,
int value, int index, void *transfer_buffer, int length, int in)
{
struct go7007_usb *usb = go->hpi_context;
int timeout = 5000;
if (in) {
return usb_control_msg(usb->usbdev,
usb_rcvctrlpipe(usb->usbdev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, transfer_buffer, length, timeout);
} else {
return usb_control_msg(usb->usbdev,
usb_sndctrlpipe(usb->usbdev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, transfer_buffer, length, timeout);
}
}
static int go7007_usb_interface_reset(struct go7007 *go)
{
struct go7007_usb *usb = go->hpi_context;
u16 intr_val, intr_data;
/* Reset encoder */
if (go7007_write_interrupt(go, 0x0001, 0x0001) < 0)
return -1;
msleep(100);
if (usb->board->flags & GO7007_USB_EZUSB) {
/* Reset buffer in EZ-USB */
#ifdef GO7007_USB_DEBUG
printk(KERN_DEBUG "go7007-usb: resetting EZ-USB buffers\n");
#endif
if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 ||
go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0)
return -1;
/* Reset encoder again */
if (go7007_write_interrupt(go, 0x0001, 0x0001) < 0)
return -1;
msleep(100);
}
/* Wait for an interrupt to indicate successful hardware reset */
if (go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
(intr_val & ~0x1) != 0x55aa) {
printk(KERN_ERR
"go7007-usb: unable to reset the USB interface\n");
return -1;
}
return 0;
}
static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
int addr, int data)
{
struct go7007_usb *usb = go->hpi_context;
int i, r;
u16 status_reg;
int timeout = 500;
#ifdef GO7007_USB_DEBUG
printk(KERN_DEBUG
"go7007-usb: WriteInterrupt: %04x %04x\n", addr, data);
#endif
for (i = 0; i < 100; ++i) {
r = usb_control_msg(usb->usbdev,
usb_rcvctrlpipe(usb->usbdev, 0), 0x14,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0, HPI_STATUS_ADDR, &status_reg,
sizeof(status_reg), timeout);
if (r < 0)
goto write_int_error;
__le16_to_cpus(&status_reg);
if (!(status_reg & 0x0010))
break;
msleep(10);
}
if (i == 100) {
printk(KERN_ERR
"go7007-usb: device is hung, status reg = 0x%04x\n",
status_reg);
return -1;
}
r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0), 0x12,
USB_TYPE_VENDOR | USB_RECIP_DEVICE, data,
INT_PARAM_ADDR, NULL, 0, timeout);
if (r < 0)
goto write_int_error;
r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0),
0x12, USB_TYPE_VENDOR | USB_RECIP_DEVICE, addr,
INT_INDEX_ADDR, NULL, 0, timeout);
if (r < 0)
goto write_int_error;
return 0;
write_int_error:
printk(KERN_ERR "go7007-usb: error in WriteInterrupt: %d\n", r);
return r;
}
static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
int addr, int data)
{
struct go7007_usb *usb = go->hpi_context;
u8 *tbuf;
int r;
int timeout = 500;
#ifdef GO7007_USB_DEBUG
printk(KERN_DEBUG
"go7007-usb: WriteInterrupt: %04x %04x\n", addr, data);
#endif
tbuf = kzalloc(8, GFP_KERNEL);
if (tbuf == NULL)
return -ENOMEM;
tbuf[0] = data & 0xff;
tbuf[1] = data >> 8;
tbuf[2] = addr & 0xff;
tbuf[3] = addr >> 8;
r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 2), 0x00,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0x55aa,
0xf0f0, tbuf, 8, timeout);
kfree(tbuf);
if (r < 0) {
printk(KERN_ERR "go7007-usb: error in WriteInterrupt: %d\n", r);
return r;
}
return 0;
}
static void go7007_usb_readinterrupt_complete(struct urb *urb)
{
struct go7007 *go = (struct go7007 *)urb->context;
u16 *regs = (u16 *)urb->transfer_buffer;
int status = urb->status;
if (status) {
if (status != -ESHUTDOWN &&
go->status != STATUS_SHUTDOWN) {
printk(KERN_ERR
"go7007-usb: error in read interrupt: %d\n",
urb->status);
} else {
wake_up(&go->interrupt_waitq);
return;
}
} else if (urb->actual_length != urb->transfer_buffer_length) {
printk(KERN_ERR "go7007-usb: short read in interrupt pipe!\n");
} else {
go->interrupt_available = 1;
go->interrupt_data = __le16_to_cpu(regs[0]);
go->interrupt_value = __le16_to_cpu(regs[1]);
#ifdef GO7007_USB_DEBUG
printk(KERN_DEBUG "go7007-usb: ReadInterrupt: %04x %04x\n",
go->interrupt_value, go->interrupt_data);
#endif
}
wake_up(&go->interrupt_waitq);
}
static int go7007_usb_read_interrupt(struct go7007 *go)
{
struct go7007_usb *usb = go->hpi_context;
int r;
r = usb_submit_urb(usb->intr_urb, GFP_KERNEL);
if (r < 0) {
printk(KERN_ERR
"go7007-usb: unable to submit interrupt urb: %d\n", r);
return r;
}
return 0;
}
static void go7007_usb_read_video_pipe_complete(struct urb *urb)
{
struct go7007 *go = (struct go7007 *)urb->context;
int r, status = urb->status;
if (!go->streaming) {
wake_up_interruptible(&go->frame_waitq);
return;
}
if (status) {
printk(KERN_ERR "go7007-usb: error in video pipe: %d\n",
status);
return;
}
if (urb->actual_length != urb->transfer_buffer_length) {
printk(KERN_ERR "go7007-usb: short read in video pipe!\n");
return;
}
go7007_parse_video_stream(go, urb->transfer_buffer, urb->actual_length);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r < 0)
printk(KERN_ERR "go7007-usb: error in video pipe: %d\n", r);
}
static void go7007_usb_read_audio_pipe_complete(struct urb *urb)
{
struct go7007 *go = (struct go7007 *)urb->context;
int r, status = urb->status;
if (!go->streaming)
return;
if (status) {
printk(KERN_ERR "go7007-usb: error in audio pipe: %d\n",
status);
return;
}
if (urb->actual_length != urb->transfer_buffer_length) {
printk(KERN_ERR "go7007-usb: short read in audio pipe!\n");
return;
}
if (go->audio_deliver != NULL)
go->audio_deliver(go, urb->transfer_buffer, urb->actual_length);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r < 0)
printk(KERN_ERR "go7007-usb: error in audio pipe: %d\n", r);
}
static int go7007_usb_stream_start(struct go7007 *go)
{
struct go7007_usb *usb = go->hpi_context;
int i, r;
for (i = 0; i < 8; ++i) {
r = usb_submit_urb(usb->video_urbs[i], GFP_KERNEL);
if (r < 0) {
printk(KERN_ERR "go7007-usb: error submitting video "
"urb %d: %d\n", i, r);
goto video_submit_failed;
}
}
if (!go->audio_enabled)
return 0;
for (i = 0; i < 8; ++i) {
r = usb_submit_urb(usb->audio_urbs[i], GFP_KERNEL);
if (r < 0) {
printk(KERN_ERR "go7007-usb: error submitting audio "
"urb %d: %d\n", i, r);
goto audio_submit_failed;
}
}
return 0;
audio_submit_failed:
for (i = 0; i < 7; ++i)
usb_kill_urb(usb->audio_urbs[i]);
video_submit_failed:
for (i = 0; i < 8; ++i)
usb_kill_urb(usb->video_urbs[i]);
return -1;
}
static int go7007_usb_stream_stop(struct go7007 *go)
{
struct go7007_usb *usb = go->hpi_context;
int i;
if (go->status == STATUS_SHUTDOWN)
return 0;
for (i = 0; i < 8; ++i)
usb_kill_urb(usb->video_urbs[i]);
if (go->audio_enabled)
for (i = 0; i < 8; ++i)
usb_kill_urb(usb->audio_urbs[i]);
return 0;
}
static int go7007_usb_send_firmware(struct go7007 *go, u8 *data, int len)
{
struct go7007_usb *usb = go->hpi_context;
int transferred, pipe;
int timeout = 500;
#ifdef GO7007_USB_DEBUG
printk(KERN_DEBUG "go7007-usb: DownloadBuffer sending %d bytes\n", len);
#endif
if (usb->board->flags & GO7007_USB_EZUSB)
pipe = usb_sndbulkpipe(usb->usbdev, 2);
else
pipe = usb_sndbulkpipe(usb->usbdev, 3);
return usb_bulk_msg(usb->usbdev, pipe, data, len,
&transferred, timeout);
}
static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
.interface_reset = go7007_usb_interface_reset,
.write_interrupt = go7007_usb_ezusb_write_interrupt,
.read_interrupt = go7007_usb_read_interrupt,
.stream_start = go7007_usb_stream_start,
.stream_stop = go7007_usb_stream_stop,
.send_firmware = go7007_usb_send_firmware,
};
static struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
.interface_reset = go7007_usb_interface_reset,
.write_interrupt = go7007_usb_onboard_write_interrupt,
.read_interrupt = go7007_usb_read_interrupt,
.stream_start = go7007_usb_stream_start,
.stream_stop = go7007_usb_stream_stop,
.send_firmware = go7007_usb_send_firmware,
};
/********************* Driver for EZ-USB I2C adapter *********************/
static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
struct i2c_msg msgs[], int num)
{
struct go7007 *go = i2c_get_adapdata(adapter);
struct go7007_usb *usb = go->hpi_context;
u8 buf[16];
int buf_len, i;
int ret = -1;
if (go->status == STATUS_SHUTDOWN)
return -1;
mutex_lock(&usb->i2c_lock);
for (i = 0; i < num; ++i) {
/* The hardware command is "write some bytes then read some
* bytes", so we try to coalesce a write followed by a read
* into a single USB transaction */
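		/*
		 * Rough illustration (hypothetical values): a 1-byte write
		 * to 7-bit address 0x21 followed by a 2-byte read is packed
		 * as { 0x01, 0x02, 0x42, <reg>, 0x02 }, i.e. 0x01 (a read
		 * follows), write length + 1, address << 1, the write
		 * payload, then the number of bytes to read back.
		 */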
if (i + 1 < num && msgs[i].addr == msgs[i + 1].addr &&
!(msgs[i].flags & I2C_M_RD) &&
(msgs[i + 1].flags & I2C_M_RD)) {
#ifdef GO7007_I2C_DEBUG
printk(KERN_DEBUG "go7007-usb: i2c write/read %d/%d "
"bytes on %02x\n", msgs[i].len,
msgs[i + 1].len, msgs[i].addr);
#endif
buf[0] = 0x01;
buf[1] = msgs[i].len + 1;
buf[2] = msgs[i].addr << 1;
memcpy(&buf[3], msgs[i].buf, msgs[i].len);
buf_len = msgs[i].len + 3;
buf[buf_len++] = msgs[++i].len;
} else if (msgs[i].flags & I2C_M_RD) {
#ifdef GO7007_I2C_DEBUG
printk(KERN_DEBUG "go7007-usb: i2c read %d "
"bytes on %02x\n", msgs[i].len,
msgs[i].addr);
#endif
buf[0] = 0x01;
buf[1] = 1;
buf[2] = msgs[i].addr << 1;
buf[3] = msgs[i].len;
buf_len = 4;
} else {
#ifdef GO7007_I2C_DEBUG
printk(KERN_DEBUG "go7007-usb: i2c write %d "
"bytes on %02x\n", msgs[i].len,
msgs[i].addr);
#endif
buf[0] = 0x00;
buf[1] = msgs[i].len + 1;
buf[2] = msgs[i].addr << 1;
memcpy(&buf[3], msgs[i].buf, msgs[i].len);
buf_len = msgs[i].len + 3;
buf[buf_len++] = 0;
}
if (go7007_usb_vendor_request(go, 0x24, 0, 0,
buf, buf_len, 0) < 0)
goto i2c_done;
if (msgs[i].flags & I2C_M_RD) {
memset(buf, 0, sizeof(buf));
if (go7007_usb_vendor_request(go, 0x25, 0, 0, buf,
msgs[i].len + 1, 1) < 0)
goto i2c_done;
memcpy(msgs[i].buf, buf + 1, msgs[i].len);
}
}
ret = 0;
i2c_done:
mutex_unlock(&usb->i2c_lock);
return ret;
}
static u32 go7007_usb_functionality(struct i2c_adapter *adapter)
{
/* No errors are reported by the hardware, so we don't bother
* supporting quick writes to avoid confusing probing */
return (I2C_FUNC_SMBUS_EMUL) & ~I2C_FUNC_SMBUS_QUICK;
}
static struct i2c_algorithm go7007_usb_algo = {
.master_xfer = go7007_usb_i2c_master_xfer,
.functionality = go7007_usb_functionality,
};
static struct i2c_adapter go7007_usb_adap_templ = {
.owner = THIS_MODULE,
.name = "WIS GO7007SB EZ-USB",
.algo = &go7007_usb_algo,
};
/********************* USB add/remove functions *********************/
static int go7007_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct go7007 *go;
struct go7007_usb *usb;
struct go7007_usb_board *board;
struct usb_device *usbdev = interface_to_usbdev(intf);
char *name;
int video_pipe, i, v_urb_len;
printk(KERN_DEBUG "go7007-usb: probing new GO7007 USB board\n");
switch (id->driver_info) {
case GO7007_BOARDID_MATRIX_II:
name = "WIS Matrix II or compatible";
board = &board_matrix_ii;
break;
case GO7007_BOARDID_MATRIX_RELOAD:
name = "WIS Matrix Reloaded or compatible";
board = &board_matrix_reload;
break;
case GO7007_BOARDID_MATRIX_REV:
name = "WIS Matrix Revolution or compatible";
board = &board_matrix_revolution;
break;
case GO7007_BOARDID_STAR_TREK:
name = "WIS Star Trek or compatible";
board = &board_star_trek;
break;
case GO7007_BOARDID_XMEN:
name = "WIS XMen or compatible";
board = &board_xmen;
break;
case GO7007_BOARDID_XMEN_II:
name = "WIS XMen II or compatible";
board = &board_xmen;
break;
case GO7007_BOARDID_XMEN_III:
name = "WIS XMen III or compatible";
board = &board_xmen;
break;
case GO7007_BOARDID_PX_M402U:
name = "Plextor PX-M402U";
board = &board_matrix_ii;
break;
case GO7007_BOARDID_PX_TV402U_ANY:
name = "Plextor PX-TV402U (unknown tuner)";
board = &board_px_tv402u;
break;
case GO7007_BOARDID_LIFEVIEW_LR192:
printk(KERN_ERR "go7007-usb: The Lifeview TV Walker Ultra "
"is not supported. Sorry!\n");
return 0;
name = "Lifeview TV Walker Ultra";
board = &board_lifeview_lr192;
break;
case GO7007_BOARDID_SENSORAY_2250:
printk(KERN_INFO "Sensoray 2250 found\n");
name = "Sensoray 2250/2251";
board = &board_sensoray_2250;
break;
default:
printk(KERN_ERR "go7007-usb: unknown board ID %d!\n",
(unsigned int)id->driver_info);
return 0;
}
usb = kzalloc(sizeof(struct go7007_usb), GFP_KERNEL);
if (usb == NULL)
return -ENOMEM;
/* Allocate the URB and buffer for receiving incoming interrupts */
usb->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (usb->intr_urb == NULL)
goto allocfail;
usb->intr_urb->transfer_buffer = kmalloc(2*sizeof(u16), GFP_KERNEL);
if (usb->intr_urb->transfer_buffer == NULL)
goto allocfail;
go = go7007_alloc(&board->main_info, &intf->dev);
if (go == NULL)
goto allocfail;
usb->board = board;
usb->usbdev = usbdev;
go->board_id = id->driver_info;
strncpy(go->name, name, sizeof(go->name));
if (board->flags & GO7007_USB_EZUSB)
go->hpi_ops = &go7007_usb_ezusb_hpi_ops;
else
go->hpi_ops = &go7007_usb_onboard_hpi_ops;
go->hpi_context = usb;
usb_fill_int_urb(usb->intr_urb, usb->usbdev,
usb_rcvintpipe(usb->usbdev, 4),
usb->intr_urb->transfer_buffer, 2*sizeof(u16),
go7007_usb_readinterrupt_complete, go, 8);
usb_set_intfdata(intf, &go->v4l2_dev);
/* Boot the GO7007 */
if (go7007_boot_encoder(go, go->board_info->flags &
GO7007_BOARD_USE_ONBOARD_I2C) < 0)
goto initfail;
/* Register the EZ-USB I2C adapter, if we're using it */
if (board->flags & GO7007_USB_EZUSB_I2C) {
memcpy(&go->i2c_adapter, &go7007_usb_adap_templ,
sizeof(go7007_usb_adap_templ));
mutex_init(&usb->i2c_lock);
go->i2c_adapter.dev.parent = go->dev;
i2c_set_adapdata(&go->i2c_adapter, go);
if (i2c_add_adapter(&go->i2c_adapter) < 0) {
printk(KERN_ERR
"go7007-usb: error: i2c_add_adapter failed\n");
goto initfail;
}
go->i2c_adapter_online = 1;
}
/* Pelco and Adlink reused the XMen and XMen-III vendor and product
* IDs for their own incompatible designs. We can detect XMen boards
* by probing the sensor, but there is no way to probe the sensors on
* the Pelco and Adlink designs so we default to the Adlink. If it
* is actually a Pelco, the user must set the assume_endura module
* parameter. */
if ((go->board_id == GO7007_BOARDID_XMEN ||
go->board_id == GO7007_BOARDID_XMEN_III) &&
go->i2c_adapter_online) {
union i2c_smbus_data data;
/* Check to see if register 0x0A is 0x76 */
i2c_smbus_xfer(&go->i2c_adapter, 0x21, I2C_CLIENT_SCCB,
I2C_SMBUS_READ, 0x0A, I2C_SMBUS_BYTE_DATA, &data);
if (data.byte != 0x76) {
if (assume_endura) {
go->board_id = GO7007_BOARDID_ENDURA;
usb->board = board = &board_endura;
go->board_info = &board->main_info;
strncpy(go->name, "Pelco Endura",
sizeof(go->name));
} else {
u16 channel;
/* set GPIO5 to be an output, currently low */
go7007_write_addr(go, 0x3c82, 0x0000);
go7007_write_addr(go, 0x3c80, 0x00df);
/* read channel number from GPIO[1:0] */
go7007_read_addr(go, 0x3c81, &channel);
channel &= 0x3;
go->board_id = GO7007_BOARDID_ADLINK_MPG24;
usb->board = board = &board_adlink_mpg24;
go->board_info = &board->main_info;
go->channel_number = channel;
snprintf(go->name, sizeof(go->name),
"Adlink PCI-MPG24, channel #%d",
channel);
}
}
}
/* Probe the tuner model on the TV402U */
if (go->board_id == GO7007_BOARDID_PX_TV402U_ANY) {
u8 data[3];
/* Board strapping indicates tuner model */
if (go7007_usb_vendor_request(go, 0x41, 0, 0, data, 3, 1) < 0) {
printk(KERN_ERR "go7007-usb: GPIO read failed!\n");
goto initfail;
}
switch (data[0] >> 6) {
case 1:
go->board_id = GO7007_BOARDID_PX_TV402U_EU;
go->tuner_type = TUNER_SONY_BTF_PG472Z;
strncpy(go->name, "Plextor PX-TV402U-EU",
sizeof(go->name));
break;
case 2:
go->board_id = GO7007_BOARDID_PX_TV402U_JP;
go->tuner_type = TUNER_SONY_BTF_PK467Z;
strncpy(go->name, "Plextor PX-TV402U-JP",
sizeof(go->name));
break;
case 3:
go->board_id = GO7007_BOARDID_PX_TV402U_NA;
go->tuner_type = TUNER_SONY_BTF_PB463Z;
strncpy(go->name, "Plextor PX-TV402U-NA",
sizeof(go->name));
break;
default:
printk(KERN_DEBUG "go7007-usb: unable to detect "
"tuner type!\n");
break;
}
/* Configure tuner mode selection inputs connected
* to the EZ-USB GPIO output pins */
if (go7007_usb_vendor_request(go, 0x40, 0x7f02, 0,
NULL, 0, 0) < 0) {
printk(KERN_ERR "go7007-usb: GPIO write failed!\n");
goto initfail;
}
}
/* Print a nasty message if the user attempts to use a USB2.0 device in
* a USB1.1 port. There will be silent corruption of the stream. */
if ((board->flags & GO7007_USB_EZUSB) &&
usbdev->speed != USB_SPEED_HIGH)
printk(KERN_ERR "go7007-usb: *** WARNING *** This device "
"must be connected to a USB 2.0 port! "
"Attempting to capture video through a USB 1.1 "
"port will result in stream corruption, even "
"at low bitrates!\n");
/* Do any final GO7007 initialization, then register the
* V4L2 and ALSA interfaces */
if (go7007_register_encoder(go) < 0)
goto initfail;
/* Allocate the URBs and buffers for receiving the video stream */
if (board->flags & GO7007_USB_EZUSB) {
v_urb_len = 1024;
video_pipe = usb_rcvbulkpipe(usb->usbdev, 6);
} else {
v_urb_len = 512;
video_pipe = usb_rcvbulkpipe(usb->usbdev, 1);
}
for (i = 0; i < 8; ++i) {
usb->video_urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (usb->video_urbs[i] == NULL)
goto initfail;
usb->video_urbs[i]->transfer_buffer =
kmalloc(v_urb_len, GFP_KERNEL);
if (usb->video_urbs[i]->transfer_buffer == NULL)
goto initfail;
usb_fill_bulk_urb(usb->video_urbs[i], usb->usbdev, video_pipe,
usb->video_urbs[i]->transfer_buffer, v_urb_len,
go7007_usb_read_video_pipe_complete, go);
}
/* Allocate the URBs and buffers for receiving the audio stream */
if ((board->flags & GO7007_USB_EZUSB) && go->audio_enabled)
for (i = 0; i < 8; ++i) {
usb->audio_urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (usb->audio_urbs[i] == NULL)
goto initfail;
usb->audio_urbs[i]->transfer_buffer = kmalloc(4096,
GFP_KERNEL);
if (usb->audio_urbs[i]->transfer_buffer == NULL)
goto initfail;
usb_fill_bulk_urb(usb->audio_urbs[i], usb->usbdev,
usb_rcvbulkpipe(usb->usbdev, 8),
usb->audio_urbs[i]->transfer_buffer, 4096,
go7007_usb_read_audio_pipe_complete, go);
}
go->status = STATUS_ONLINE;
return 0;
initfail:
go->status = STATUS_SHUTDOWN;
return 0;
allocfail:
if (usb->intr_urb) {
kfree(usb->intr_urb->transfer_buffer);
usb_free_urb(usb->intr_urb);
}
kfree(usb);
return -ENOMEM;
}
static void go7007_usb_disconnect(struct usb_interface *intf)
{
struct go7007 *go = to_go7007(usb_get_intfdata(intf));
struct go7007_usb *usb = go->hpi_context;
struct urb *vurb, *aurb;
int i;
go->status = STATUS_SHUTDOWN;
usb_kill_urb(usb->intr_urb);
/* Free USB-related structs */
for (i = 0; i < 8; ++i) {
vurb = usb->video_urbs[i];
if (vurb) {
usb_kill_urb(vurb);
kfree(vurb->transfer_buffer);
usb_free_urb(vurb);
}
aurb = usb->audio_urbs[i];
if (aurb) {
usb_kill_urb(aurb);
kfree(aurb->transfer_buffer);
usb_free_urb(aurb);
}
}
kfree(usb->intr_urb->transfer_buffer);
usb_free_urb(usb->intr_urb);
kfree(go->hpi_context);
go7007_remove(go);
}
static struct usb_driver go7007_usb_driver = {
.name = "go7007",
.probe = go7007_usb_probe,
.disconnect = go7007_usb_disconnect,
.id_table = go7007_usb_id_table,
};
static int __init go7007_usb_init(void)
{
return usb_register(&go7007_usb_driver);
}
static void __exit go7007_usb_cleanup(void)
{
usb_deregister(&go7007_usb_driver);
}
module_init(go7007_usb_init);
module_exit(go7007_usb_cleanup);
MODULE_LICENSE("GPL v2");
| craig0r/StockPlusKernel | drivers/staging/go7007/go7007-usb.c | C | gpl-2.0 | 34,290 |
/*
* saa7191.c - Philips SAA7191 video decoder driver
*
* Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
* Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include "saa7191.h"
#define SAA7191_MODULE_VERSION "0.0.5"
MODULE_DESCRIPTION("Philips SAA7191 video decoder driver");
MODULE_VERSION(SAA7191_MODULE_VERSION);
MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
MODULE_LICENSE("GPL");
// #define SAA7191_DEBUG
#ifdef SAA7191_DEBUG
#define dprintk(x...) printk("SAA7191: " x);
#else
#define dprintk(x...)
#endif
#define SAA7191_SYNC_COUNT 30
#define SAA7191_SYNC_DELAY 100 /* milliseconds */
struct saa7191 {
struct v4l2_subdev sd;
/* the register values are stored here as the actual
* I2C-registers are write-only */
u8 reg[25];
int input;
v4l2_std_id norm;
};
static inline struct saa7191 *to_saa7191(struct v4l2_subdev *sd)
{
return container_of(sd, struct saa7191, sd);
}
static const u8 initseq[] = {
0, /* Subaddress */
0x50, /* (0x50) SAA7191_REG_IDEL */
/* 50 Hz signal timing */
0x30, /* (0x30) SAA7191_REG_HSYB */
0x00, /* (0x00) SAA7191_REG_HSYS */
0xe8, /* (0xe8) SAA7191_REG_HCLB */
0xb6, /* (0xb6) SAA7191_REG_HCLS */
0xf4, /* (0xf4) SAA7191_REG_HPHI */
/* control */
SAA7191_LUMA_APER_1, /* (0x01) SAA7191_REG_LUMA - CVBS mode */
0x00, /* (0x00) SAA7191_REG_HUEC */
0xf8, /* (0xf8) SAA7191_REG_CKTQ */
0xf8, /* (0xf8) SAA7191_REG_CKTS */
0x90, /* (0x90) SAA7191_REG_PLSE */
0x90, /* (0x90) SAA7191_REG_SESE */
0x00, /* (0x00) SAA7191_REG_GAIN */
SAA7191_STDC_NFEN | SAA7191_STDC_HRMV, /* (0x0c) SAA7191_REG_STDC
* - not SECAM,
* slow time constant */
SAA7191_IOCK_OEDC | SAA7191_IOCK_OEHS | SAA7191_IOCK_OEVS
| SAA7191_IOCK_OEDY, /* (0x78) SAA7191_REG_IOCK
* - chroma from CVBS, GPSW1 & 2 off */
SAA7191_CTL3_AUFD | SAA7191_CTL3_SCEN | SAA7191_CTL3_OFTS
| SAA7191_CTL3_YDEL0, /* (0x99) SAA7191_REG_CTL3
* - automatic field detection */
0x00, /* (0x00) SAA7191_REG_CTL4 */
0x2c, /* (0x2c) SAA7191_REG_CHCV - PAL nominal value */
0x00, /* unused */
0x00, /* unused */
/* 60 Hz signal timing */
0x34, /* (0x34) SAA7191_REG_HS6B */
0x0a, /* (0x0a) SAA7191_REG_HS6S */
0xf4, /* (0xf4) SAA7191_REG_HC6B */
0xce, /* (0xce) SAA7191_REG_HC6S */
0xf4, /* (0xf4) SAA7191_REG_HP6I */
};
/* SAA7191 register handling */
static u8 saa7191_read_reg(struct v4l2_subdev *sd, u8 reg)
{
return to_saa7191(sd)->reg[reg];
}
static int saa7191_read_status(struct v4l2_subdev *sd, u8 *value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret;
ret = i2c_master_recv(client, value, 1);
if (ret < 0) {
printk(KERN_ERR "SAA7191: saa7191_read_status(): read failed\n");
return ret;
}
return 0;
}
static int saa7191_write_reg(struct v4l2_subdev *sd, u8 reg, u8 value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
to_saa7191(sd)->reg[reg] = value;
return i2c_smbus_write_byte_data(client, reg, value);
}
/* the first byte of data must be the first subaddress number (register) */
static int saa7191_write_block(struct v4l2_subdev *sd,
u8 length, const u8 *data)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct saa7191 *decoder = to_saa7191(sd);
int i;
int ret;
for (i = 0; i < (length - 1); i++) {
decoder->reg[data[0] + i] = data[i + 1];
}
ret = i2c_master_send(client, data, length);
if (ret < 0) {
printk(KERN_ERR "SAA7191: saa7191_write_block(): "
"write failed\n");
return ret;
}
return 0;
}
/* Helper functions */
static int saa7191_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct saa7191 *decoder = to_saa7191(sd);
u8 luma = saa7191_read_reg(sd, SAA7191_REG_LUMA);
u8 iock = saa7191_read_reg(sd, SAA7191_REG_IOCK);
int err;
switch (input) {
case SAA7191_INPUT_COMPOSITE: /* Set Composite input */
iock &= ~(SAA7191_IOCK_CHRS | SAA7191_IOCK_GPSW1
| SAA7191_IOCK_GPSW2);
/* Chrominance trap active */
luma &= ~SAA7191_LUMA_BYPS;
break;
case SAA7191_INPUT_SVIDEO: /* Set S-Video input */
iock |= SAA7191_IOCK_CHRS | SAA7191_IOCK_GPSW2;
/* Chrominance trap bypassed */
luma |= SAA7191_LUMA_BYPS;
break;
default:
return -EINVAL;
}
err = saa7191_write_reg(sd, SAA7191_REG_LUMA, luma);
if (err)
return -EIO;
err = saa7191_write_reg(sd, SAA7191_REG_IOCK, iock);
if (err)
return -EIO;
decoder->input = input;
return 0;
}
static int saa7191_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
struct saa7191 *decoder = to_saa7191(sd);
u8 stdc = saa7191_read_reg(sd, SAA7191_REG_STDC);
u8 ctl3 = saa7191_read_reg(sd, SAA7191_REG_CTL3);
u8 chcv = saa7191_read_reg(sd, SAA7191_REG_CHCV);
int err;
if (norm & V4L2_STD_PAL) {
stdc &= ~SAA7191_STDC_SECS;
ctl3 &= ~(SAA7191_CTL3_AUFD | SAA7191_CTL3_FSEL);
chcv = SAA7191_CHCV_PAL;
} else if (norm & V4L2_STD_NTSC) {
stdc &= ~SAA7191_STDC_SECS;
ctl3 &= ~SAA7191_CTL3_AUFD;
ctl3 |= SAA7191_CTL3_FSEL;
chcv = SAA7191_CHCV_NTSC;
} else if (norm & V4L2_STD_SECAM) {
stdc |= SAA7191_STDC_SECS;
ctl3 &= ~(SAA7191_CTL3_AUFD | SAA7191_CTL3_FSEL);
chcv = SAA7191_CHCV_PAL;
} else {
return -EINVAL;
}
err = saa7191_write_reg(sd, SAA7191_REG_CTL3, ctl3);
if (err)
return -EIO;
err = saa7191_write_reg(sd, SAA7191_REG_STDC, stdc);
if (err)
return -EIO;
err = saa7191_write_reg(sd, SAA7191_REG_CHCV, chcv);
if (err)
return -EIO;
decoder->norm = norm;
dprintk("ctl3: %02x stdc: %02x chcv: %02x\n", ctl3,
stdc, chcv);
dprintk("norm: %llx\n", norm);
return 0;
}
static int saa7191_wait_for_signal(struct v4l2_subdev *sd, u8 *status)
{
int i = 0;
dprintk("Checking for signal...\n");
for (i = 0; i < SAA7191_SYNC_COUNT; i++) {
if (saa7191_read_status(sd, status))
return -EIO;
if (((*status) & SAA7191_STATUS_HLCK) == 0) {
dprintk("Signal found\n");
return 0;
}
msleep(SAA7191_SYNC_DELAY);
}
dprintk("No signal\n");
return -EBUSY;
}
static int saa7191_querystd(struct v4l2_subdev *sd, v4l2_std_id *norm)
{
struct saa7191 *decoder = to_saa7191(sd);
u8 stdc = saa7191_read_reg(sd, SAA7191_REG_STDC);
u8 ctl3 = saa7191_read_reg(sd, SAA7191_REG_CTL3);
u8 status;
v4l2_std_id old_norm = decoder->norm;
int err = 0;
dprintk("SAA7191 extended signal auto-detection...\n");
*norm = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
stdc &= ~SAA7191_STDC_SECS;
ctl3 &= ~(SAA7191_CTL3_FSEL);
err = saa7191_write_reg(sd, SAA7191_REG_STDC, stdc);
if (err) {
err = -EIO;
goto out;
}
err = saa7191_write_reg(sd, SAA7191_REG_CTL3, ctl3);
if (err) {
err = -EIO;
goto out;
}
ctl3 |= SAA7191_CTL3_AUFD;
err = saa7191_write_reg(sd, SAA7191_REG_CTL3, ctl3);
if (err) {
err = -EIO;
goto out;
}
msleep(SAA7191_SYNC_DELAY);
err = saa7191_wait_for_signal(sd, &status);
if (err)
goto out;
if (status & SAA7191_STATUS_FIDT) {
/* 60Hz signal -> NTSC */
dprintk("60Hz signal: NTSC\n");
*norm = V4L2_STD_NTSC;
return 0;
}
/* 50Hz signal */
dprintk("50Hz signal: Trying PAL...\n");
/* try PAL first */
err = saa7191_s_std(sd, V4L2_STD_PAL);
if (err)
goto out;
msleep(SAA7191_SYNC_DELAY);
err = saa7191_wait_for_signal(sd, &status);
if (err)
goto out;
/* not 50Hz ? */
if (status & SAA7191_STATUS_FIDT) {
dprintk("No 50Hz signal\n");
saa7191_s_std(sd, old_norm);
return -EAGAIN;
}
if (status & SAA7191_STATUS_CODE) {
dprintk("PAL\n");
*norm = V4L2_STD_PAL;
return saa7191_s_std(sd, old_norm);
}
dprintk("No color detected with PAL - Trying SECAM...\n");
/* no color detected ? -> try SECAM */
err = saa7191_s_std(sd, V4L2_STD_SECAM);
if (err)
goto out;
msleep(SAA7191_SYNC_DELAY);
err = saa7191_wait_for_signal(sd, &status);
if (err)
goto out;
/* not 50Hz ? */
if (status & SAA7191_STATUS_FIDT) {
dprintk("No 50Hz signal\n");
err = -EAGAIN;
goto out;
}
if (status & SAA7191_STATUS_CODE) {
/* Color detected -> SECAM */
dprintk("SECAM\n");
*norm = V4L2_STD_SECAM;
return saa7191_s_std(sd, old_norm);
}
dprintk("No color detected with SECAM - Going back to PAL.\n");
out:
return saa7191_s_std(sd, old_norm);
}
static int saa7191_autodetect_norm(struct v4l2_subdev *sd)
{
u8 status;
dprintk("SAA7191 signal auto-detection...\n");
dprintk("Reading status...\n");
if (saa7191_read_status(sd, &status))
return -EIO;
dprintk("Checking for signal...\n");
/* no signal ? */
if (status & SAA7191_STATUS_HLCK) {
dprintk("No signal\n");
return -EBUSY;
}
dprintk("Signal found\n");
if (status & SAA7191_STATUS_FIDT) {
/* 60hz signal -> NTSC */
dprintk("NTSC\n");
return saa7191_s_std(sd, V4L2_STD_NTSC);
} else {
/* 50hz signal -> PAL */
dprintk("PAL\n");
return saa7191_s_std(sd, V4L2_STD_PAL);
}
}
static int saa7191_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
u8 reg;
int ret = 0;
switch (ctrl->id) {
case SAA7191_CONTROL_BANDPASS:
case SAA7191_CONTROL_BANDPASS_WEIGHT:
case SAA7191_CONTROL_CORING:
reg = saa7191_read_reg(sd, SAA7191_REG_LUMA);
switch (ctrl->id) {
case SAA7191_CONTROL_BANDPASS:
ctrl->value = ((s32)reg & SAA7191_LUMA_BPSS_MASK)
>> SAA7191_LUMA_BPSS_SHIFT;
break;
case SAA7191_CONTROL_BANDPASS_WEIGHT:
ctrl->value = ((s32)reg & SAA7191_LUMA_APER_MASK)
>> SAA7191_LUMA_APER_SHIFT;
break;
case SAA7191_CONTROL_CORING:
ctrl->value = ((s32)reg & SAA7191_LUMA_CORI_MASK)
>> SAA7191_LUMA_CORI_SHIFT;
break;
}
break;
case SAA7191_CONTROL_FORCE_COLOUR:
case SAA7191_CONTROL_CHROMA_GAIN:
reg = saa7191_read_reg(sd, SAA7191_REG_GAIN);
if (ctrl->id == SAA7191_CONTROL_FORCE_COLOUR)
ctrl->value = ((s32)reg & SAA7191_GAIN_COLO) ? 1 : 0;
else
ctrl->value = ((s32)reg & SAA7191_GAIN_LFIS_MASK)
>> SAA7191_GAIN_LFIS_SHIFT;
break;
case V4L2_CID_HUE:
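		/*
		 * The hue register appears to hold a signed (two's-complement)
		 * value; adding or subtracting 0x80 simply flips the top bit,
		 * converting to/from the 0..255 control range used here.
		 */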
reg = saa7191_read_reg(sd, SAA7191_REG_HUEC);
if (reg < 0x80)
reg += 0x80;
else
reg -= 0x80;
ctrl->value = (s32)reg;
break;
case SAA7191_CONTROL_VTRC:
reg = saa7191_read_reg(sd, SAA7191_REG_STDC);
ctrl->value = ((s32)reg & SAA7191_STDC_VTRC) ? 1 : 0;
break;
case SAA7191_CONTROL_LUMA_DELAY:
reg = saa7191_read_reg(sd, SAA7191_REG_CTL3);
ctrl->value = ((s32)reg & SAA7191_CTL3_YDEL_MASK)
>> SAA7191_CTL3_YDEL_SHIFT;
if (ctrl->value >= 4)
ctrl->value -= 8;
break;
case SAA7191_CONTROL_VNR:
reg = saa7191_read_reg(sd, SAA7191_REG_CTL4);
ctrl->value = ((s32)reg & SAA7191_CTL4_VNOI_MASK)
>> SAA7191_CTL4_VNOI_SHIFT;
break;
default:
ret = -EINVAL;
}
return ret;
}
static int saa7191_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
u8 reg;
int ret = 0;
switch (ctrl->id) {
case SAA7191_CONTROL_BANDPASS:
case SAA7191_CONTROL_BANDPASS_WEIGHT:
case SAA7191_CONTROL_CORING:
reg = saa7191_read_reg(sd, SAA7191_REG_LUMA);
switch (ctrl->id) {
case SAA7191_CONTROL_BANDPASS:
reg &= ~SAA7191_LUMA_BPSS_MASK;
reg |= (ctrl->value << SAA7191_LUMA_BPSS_SHIFT)
& SAA7191_LUMA_BPSS_MASK;
break;
case SAA7191_CONTROL_BANDPASS_WEIGHT:
reg &= ~SAA7191_LUMA_APER_MASK;
reg |= (ctrl->value << SAA7191_LUMA_APER_SHIFT)
& SAA7191_LUMA_APER_MASK;
break;
case SAA7191_CONTROL_CORING:
reg &= ~SAA7191_LUMA_CORI_MASK;
reg |= (ctrl->value << SAA7191_LUMA_CORI_SHIFT)
& SAA7191_LUMA_CORI_MASK;
break;
}
ret = saa7191_write_reg(sd, SAA7191_REG_LUMA, reg);
break;
case SAA7191_CONTROL_FORCE_COLOUR:
case SAA7191_CONTROL_CHROMA_GAIN:
reg = saa7191_read_reg(sd, SAA7191_REG_GAIN);
if (ctrl->id == SAA7191_CONTROL_FORCE_COLOUR) {
if (ctrl->value)
reg |= SAA7191_GAIN_COLO;
else
reg &= ~SAA7191_GAIN_COLO;
} else {
reg &= ~SAA7191_GAIN_LFIS_MASK;
reg |= (ctrl->value << SAA7191_GAIN_LFIS_SHIFT)
& SAA7191_GAIN_LFIS_MASK;
}
ret = saa7191_write_reg(sd, SAA7191_REG_GAIN, reg);
break;
case V4L2_CID_HUE:
reg = ctrl->value & 0xff;
if (reg < 0x80)
reg += 0x80;
else
reg -= 0x80;
ret = saa7191_write_reg(sd, SAA7191_REG_HUEC, reg);
break;
case SAA7191_CONTROL_VTRC:
reg = saa7191_read_reg(sd, SAA7191_REG_STDC);
if (ctrl->value)
reg |= SAA7191_STDC_VTRC;
else
reg &= ~SAA7191_STDC_VTRC;
ret = saa7191_write_reg(sd, SAA7191_REG_STDC, reg);
break;
case SAA7191_CONTROL_LUMA_DELAY: {
s32 value = ctrl->value;
if (value < 0)
value += 8;
reg = saa7191_read_reg(sd, SAA7191_REG_CTL3);
reg &= ~SAA7191_CTL3_YDEL_MASK;
reg |= (value << SAA7191_CTL3_YDEL_SHIFT)
& SAA7191_CTL3_YDEL_MASK;
ret = saa7191_write_reg(sd, SAA7191_REG_CTL3, reg);
break;
}
case SAA7191_CONTROL_VNR:
reg = saa7191_read_reg(sd, SAA7191_REG_CTL4);
reg &= ~SAA7191_CTL4_VNOI_MASK;
reg |= (ctrl->value << SAA7191_CTL4_VNOI_SHIFT)
& SAA7191_CTL4_VNOI_MASK;
ret = saa7191_write_reg(sd, SAA7191_REG_CTL4, reg);
break;
default:
ret = -EINVAL;
}
return ret;
}
/* I2C-interface */
static int saa7191_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
u8 status_reg;
int res = V4L2_IN_ST_NO_SIGNAL;
if (saa7191_read_status(sd, &status_reg))
return -EIO;
if ((status_reg & SAA7191_STATUS_HLCK) == 0)
res = 0;
if (!(status_reg & SAA7191_STATUS_CODE))
res |= V4L2_IN_ST_NO_COLOR;
*status = res;
return 0;
}
static int saa7191_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA7191, 0);
}
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops saa7191_core_ops = {
.g_chip_ident = saa7191_g_chip_ident,
.g_ctrl = saa7191_g_ctrl,
.s_ctrl = saa7191_s_ctrl,
.s_std = saa7191_s_std,
};
static const struct v4l2_subdev_video_ops saa7191_video_ops = {
.s_routing = saa7191_s_routing,
.querystd = saa7191_querystd,
.g_input_status = saa7191_g_input_status,
};
static const struct v4l2_subdev_ops saa7191_ops = {
.core = &saa7191_core_ops,
.video = &saa7191_video_ops,
};
static int saa7191_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int err = 0;
struct saa7191 *decoder;
struct v4l2_subdev *sd;
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
decoder = kzalloc(sizeof(*decoder), GFP_KERNEL);
if (!decoder)
return -ENOMEM;
sd = &decoder->sd;
v4l2_i2c_subdev_init(sd, client, &saa7191_ops);
err = saa7191_write_block(sd, sizeof(initseq), initseq);
if (err) {
printk(KERN_ERR "SAA7191 initialization failed\n");
kfree(decoder);
return err;
}
printk(KERN_INFO "SAA7191 initialized\n");
decoder->input = SAA7191_INPUT_COMPOSITE;
decoder->norm = V4L2_STD_PAL;
err = saa7191_autodetect_norm(sd);
if (err && (err != -EBUSY))
printk(KERN_ERR "SAA7191: Signal auto-detection failed\n");
return 0;
}
static int saa7191_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(to_saa7191(sd));
return 0;
}
static const struct i2c_device_id saa7191_id[] = {
{ "saa7191", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa7191_id);
static struct i2c_driver saa7191_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "saa7191",
},
.probe = saa7191_probe,
.remove = saa7191_remove,
.id_table = saa7191_id,
};
static __init int init_saa7191(void)
{
return i2c_add_driver(&saa7191_driver);
}
static __exit void exit_saa7191(void)
{
i2c_del_driver(&saa7191_driver);
}
module_init(init_saa7191);
module_exit(exit_saa7191);
| vartanjean/yuandao-n90-window-dual-core-2 | drivers/media/video/saa7191.c | C | gpl-2.0 | 15,981 |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 - 2012 Cavium, Inc.
*/
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/of.h>
#define PHY_ID_BCM8706 0x0143bdc1
#define PHY_ID_BCM8727 0x0143bff0
#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a)
#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020)
#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018)
#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002)
#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005)
#if IS_ENABLED(CONFIG_OF_MDIO)
/* Set and/or override some configuration registers based on the
* broadcom,c45-reg-init property stored in the of_node for the phydev.
*
* broadcom,c45-reg-init = <devid reg mask value>,...;
*
* There may be one or more sets of <devid reg mask value>:
*
* devid: which sub-device to use.
* reg: the register.
* mask: if non-zero, ANDed with existing register value.
 * value: ORed with the masked value and written to the register.
*
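 * A hypothetical example (values made up for illustration):
 *
 *   broadcom,c45-reg-init = <1 0x8000 0xffff 0x0001>;
 *
 * would read register 0x8000 of MMD 1, AND it with 0xffff and OR in
 * 0x0001 before writing the result back.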
*/
static int bcm87xx_of_reg_init(struct phy_device *phydev)
{
const __be32 *paddr;
const __be32 *paddr_end;
int len, ret;
if (!phydev->dev.of_node)
return 0;
paddr = of_get_property(phydev->dev.of_node,
"broadcom,c45-reg-init", &len);
if (!paddr)
return 0;
paddr_end = paddr + (len /= sizeof(*paddr));
ret = 0;
while (paddr + 3 < paddr_end) {
u16 devid = be32_to_cpup(paddr++);
u16 reg = be32_to_cpup(paddr++);
u16 mask = be32_to_cpup(paddr++);
u16 val_bits = be32_to_cpup(paddr++);
int val;
u32 regnum = MII_ADDR_C45 | (devid << 16) | reg;
val = 0;
if (mask) {
val = phy_read(phydev, regnum);
if (val < 0) {
ret = val;
goto err;
}
val &= mask;
}
val |= val_bits;
ret = phy_write(phydev, regnum, val);
if (ret < 0)
goto err;
}
err:
return ret;
}
#else
static int bcm87xx_of_reg_init(struct phy_device *phydev)
{
return 0;
}
#endif /* CONFIG_OF_MDIO */
static int bcm87xx_config_init(struct phy_device *phydev)
{
phydev->supported = SUPPORTED_10000baseR_FEC;
phydev->advertising = ADVERTISED_10000baseR_FEC;
phydev->state = PHY_NOLINK;
phydev->autoneg = AUTONEG_DISABLE;
bcm87xx_of_reg_init(phydev);
return 0;
}
static int bcm87xx_config_aneg(struct phy_device *phydev)
{
return -EINVAL;
}
static int bcm87xx_read_status(struct phy_device *phydev)
{
int rx_signal_detect;
int pcs_status;
int xgxs_lane_status;
rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT);
if (rx_signal_detect < 0)
return rx_signal_detect;
if ((rx_signal_detect & 1) == 0)
goto no_link;
pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS);
if (pcs_status < 0)
return pcs_status;
if ((pcs_status & 1) == 0)
goto no_link;
xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS);
if (xgxs_lane_status < 0)
return xgxs_lane_status;
if ((xgxs_lane_status & 0x1000) == 0)
goto no_link;
phydev->speed = 10000;
phydev->link = 1;
phydev->duplex = 1;
return 0;
no_link:
phydev->link = 0;
return 0;
}
static int bcm87xx_config_intr(struct phy_device *phydev)
{
int reg, err;
reg = phy_read(phydev, BCM87XX_LASI_CONTROL);
if (reg < 0)
return reg;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
reg |= 1;
else
reg &= ~1;
err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
return err;
}
static int bcm87xx_did_interrupt(struct phy_device *phydev)
{
int reg;
reg = phy_read(phydev, BCM87XX_LASI_STATUS);
if (reg < 0) {
dev_err(&phydev->dev,
"Error: Read of BCM87XX_LASI_STATUS failed: %d\n", reg);
return 0;
}
return (reg & 1) != 0;
}
static int bcm87xx_ack_interrupt(struct phy_device *phydev)
{
/* Reading the LASI status clears it. */
bcm87xx_did_interrupt(phydev);
return 0;
}
static int bcm8706_match_phy_device(struct phy_device *phydev)
{
return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
}
static int bcm8727_match_phy_device(struct phy_device *phydev)
{
return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
}
static struct phy_driver bcm87xx_driver[] = {
{
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
.ack_interrupt = bcm87xx_ack_interrupt,
.config_intr = bcm87xx_config_intr,
.did_interrupt = bcm87xx_did_interrupt,
.match_phy_device = bcm8706_match_phy_device,
.driver = { .owner = THIS_MODULE },
}, {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
.ack_interrupt = bcm87xx_ack_interrupt,
.config_intr = bcm87xx_config_intr,
.did_interrupt = bcm87xx_did_interrupt,
.match_phy_device = bcm8727_match_phy_device,
.driver = { .owner = THIS_MODULE },
} };
static int __init bcm87xx_init(void)
{
return phy_drivers_register(bcm87xx_driver,
ARRAY_SIZE(bcm87xx_driver));
}
module_init(bcm87xx_init);
static void __exit bcm87xx_exit(void)
{
phy_drivers_unregister(bcm87xx_driver,
ARRAY_SIZE(bcm87xx_driver));
}
module_exit(bcm87xx_exit);
MODULE_LICENSE("GPL");
| c0d3x42/P8000-Kernel | drivers/net/phy/bcm87xx.c | C | gpl-2.0 | 5,345 |
/*
* Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com)
* Original version:
* Copyright (C) 2006
* Simon Schulz (ark3116_driver <at> auctionant.de)
*
* ark3116
* - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547,
* productid=0x0232) (used in a datacable called KQ-U8A)
*
* Supports full modem status lines, break, hardware flow control. Does not
* support software flow control, since I do not know how to enable it in hw.
*
 * This driver is an essentially new implementation. I initially dug
* into the old ark3116.c driver and suddenly realized the ark3116 is
* a 16450 with a USB interface glued to it. See comments at the
* bottom of this file.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
static bool debug;
/*
* Version information
*/
#define DRIVER_VERSION "v0.7"
#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
#define DRIVER_NAME "ark3116"
/* usb timeout of 1 second */
#define ARK_TIMEOUT (1*HZ)
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x6547, 0x0232) },
{ USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static int is_irda(struct usb_serial *serial)
{
struct usb_device *dev = serial->dev;
if (le16_to_cpu(dev->descriptor.idVendor) == 0x18ec &&
le16_to_cpu(dev->descriptor.idProduct) == 0x3118)
return 1;
return 0;
}
struct ark3116_private {
wait_queue_head_t delta_msr_wait;
struct async_icount icount;
int irda; /* 1 for irda device */
/* protects hw register updates */
struct mutex hw_lock;
int quot; /* baudrate divisor */
__u32 lcr; /* line control register value */
__u32 hcr; /* handshake control register (0x8)
* value */
	__u32 mcr; /* modem control register value */
/* protects the status values below */
spinlock_t status_lock;
__u32 msr; /* modem status register value */
__u32 lsr; /* line status register value */
};
static int ark3116_write_reg(struct usb_serial *serial,
unsigned reg, __u8 val)
{
int result;
/* 0xfe 0x40 are magic values taken from original driver */
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
0xfe, 0x40, val, reg,
NULL, 0, ARK_TIMEOUT);
return result;
}
static int ark3116_read_reg(struct usb_serial *serial,
unsigned reg, unsigned char *buf)
{
int result;
/* 0xfe 0xc0 are magic values taken from original driver */
result = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
0xfe, 0xc0, 0, reg,
buf, 1, ARK_TIMEOUT);
if (result < 0)
return result;
else
return buf[0];
}
static inline int calc_divisor(int bps)
{
/* Original ark3116 made some exceptions in rounding here
* because windows did the same. Assume that is not really
* necessary.
* Crystal is 12MHz, probably because of USB, but we divide by 4?
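	 * For example, at 9600 bps this gives (12000000 + 19200) / 38400
	 * = 313 (the "+ 2*bps" term rounds to the nearest divisor); the
	 * resulting rate, assuming the 12 MHz / 4 clocking described at
	 * the end of this file, is about 9585 bps (~0.16% low).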
*/
return (12000000 + 2*bps) / (4*bps);
}
static int ark3116_attach(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct ark3116_private *priv;
/* make sure we have our end-points */
if ((serial->num_bulk_in == 0) ||
(serial->num_bulk_out == 0) ||
(serial->num_interrupt_in == 0)) {
dev_err(&serial->dev->dev,
"%s - missing endpoint - "
"bulk in: %d, bulk out: %d, int in %d\n",
KBUILD_MODNAME,
serial->num_bulk_in,
serial->num_bulk_out,
serial->num_interrupt_in);
return -EINVAL;
}
priv = kzalloc(sizeof(struct ark3116_private),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
init_waitqueue_head(&priv->delta_msr_wait);
mutex_init(&priv->hw_lock);
spin_lock_init(&priv->status_lock);
priv->irda = is_irda(serial);
usb_set_serial_port_data(port, priv);
/* setup the hardware */
ark3116_write_reg(serial, UART_IER, 0);
/* disable DMA */
ark3116_write_reg(serial, UART_FCR, 0);
/* handshake control */
priv->hcr = 0;
ark3116_write_reg(serial, 0x8 , 0);
/* modem control */
priv->mcr = 0;
ark3116_write_reg(serial, UART_MCR, 0);
if (!(priv->irda)) {
ark3116_write_reg(serial, 0xb , 0);
} else {
ark3116_write_reg(serial, 0xb , 1);
ark3116_write_reg(serial, 0xc , 0);
ark3116_write_reg(serial, 0xd , 0x41);
ark3116_write_reg(serial, 0xa , 1);
}
/* setup baudrate */
ark3116_write_reg(serial, UART_LCR, UART_LCR_DLAB);
/* setup for 9600 8N1 */
priv->quot = calc_divisor(9600);
ark3116_write_reg(serial, UART_DLL, priv->quot & 0xff);
ark3116_write_reg(serial, UART_DLM, (priv->quot>>8) & 0xff);
priv->lcr = UART_LCR_WLEN8;
ark3116_write_reg(serial, UART_LCR, UART_LCR_WLEN8);
ark3116_write_reg(serial, 0xe, 0);
if (priv->irda)
ark3116_write_reg(serial, 0x9, 0);
dev_info(&serial->dev->dev,
"%s using %s mode\n",
KBUILD_MODNAME,
priv->irda ? "IrDA" : "RS232");
return 0;
}
static void ark3116_release(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* device is closed, so URBs and DMA should be down */
usb_set_serial_port_data(port, NULL);
mutex_destroy(&priv->hw_lock);
kfree(priv);
}
static void ark3116_init_termios(struct tty_struct *tty)
{
struct ktermios *termios = tty->termios;
*termios = tty_std_termios;
termios->c_cflag = B9600 | CS8
| CREAD | HUPCL | CLOCAL;
termios->c_ispeed = 9600;
termios->c_ospeed = 9600;
}
static void ark3116_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = tty->termios;
unsigned int cflag = termios->c_cflag;
int bps = tty_get_baud_rate(tty);
int quot;
__u8 lcr, hcr, eval;
/* set data bit count */
switch (cflag & CSIZE) {
case CS5:
lcr = UART_LCR_WLEN5;
break;
case CS6:
lcr = UART_LCR_WLEN6;
break;
case CS7:
lcr = UART_LCR_WLEN7;
break;
default:
case CS8:
lcr = UART_LCR_WLEN8;
break;
}
if (cflag & CSTOPB)
lcr |= UART_LCR_STOP;
if (cflag & PARENB)
lcr |= UART_LCR_PARITY;
if (!(cflag & PARODD))
lcr |= UART_LCR_EPAR;
#ifdef CMSPAR
if (cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
#endif
/* handshake control */
hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
/* calc baudrate */
dbg("%s - setting bps to %d", __func__, bps);
eval = 0;
switch (bps) {
case 0:
quot = calc_divisor(9600);
break;
default:
if ((bps < 75) || (bps > 3000000))
bps = 9600;
quot = calc_divisor(bps);
break;
case 460800:
eval = 1;
quot = calc_divisor(bps);
break;
case 921600:
eval = 2;
quot = calc_divisor(bps);
break;
}
/* Update state: synchronize */
mutex_lock(&priv->hw_lock);
/* keep old LCR_SBC bit */
lcr |= (priv->lcr & UART_LCR_SBC);
dbg("%s - setting hcr:0x%02x,lcr:0x%02x,quot:%d",
__func__, hcr, lcr, quot);
/* handshake control */
if (priv->hcr != hcr) {
priv->hcr = hcr;
ark3116_write_reg(serial, 0x8, hcr);
}
/* baudrate */
if (priv->quot != quot) {
priv->quot = quot;
priv->lcr = lcr; /* need to write lcr anyway */
/* disable DMA since transmit/receive is
* shadowed by UART_DLL
*/
ark3116_write_reg(serial, UART_FCR, 0);
ark3116_write_reg(serial, UART_LCR,
lcr|UART_LCR_DLAB);
ark3116_write_reg(serial, UART_DLL, quot & 0xff);
ark3116_write_reg(serial, UART_DLM, (quot>>8) & 0xff);
/* restore lcr */
ark3116_write_reg(serial, UART_LCR, lcr);
/* magic baudrate thingy: not sure what it does,
* but windows does this as well.
*/
ark3116_write_reg(serial, 0xe, eval);
/* enable DMA */
ark3116_write_reg(serial, UART_FCR, UART_FCR_DMA_SELECT);
} else if (priv->lcr != lcr) {
priv->lcr = lcr;
ark3116_write_reg(serial, UART_LCR, lcr);
}
mutex_unlock(&priv->hw_lock);
/* check for software flow control */
if (I_IXOFF(tty) || I_IXON(tty)) {
dev_warn(&serial->dev->dev,
"%s: don't know how to do software flow control\n",
KBUILD_MODNAME);
}
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, bps, bps);
}
static void ark3116_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
if (serial->dev) {
/* disable DMA */
ark3116_write_reg(serial, UART_FCR, 0);
/* deactivate interrupts */
ark3116_write_reg(serial, UART_IER, 0);
usb_serial_generic_close(port);
if (serial->num_interrupt_in)
usb_kill_urb(port->interrupt_in_urb);
}
}
static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
unsigned char *buf;
int result;
buf = kmalloc(1, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
result = usb_serial_generic_open(tty, port);
if (result) {
dbg("%s - usb_serial_generic_open failed: %d",
__func__, result);
goto err_out;
}
/* remove any data still left: also clears error state */
ark3116_read_reg(serial, UART_RX, buf);
/* read modem status */
priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
/* read line status */
priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "submit irq_in urb failed %d\n",
result);
ark3116_close(port);
goto err_out;
}
/* activate interrupts */
ark3116_write_reg(port->serial, UART_IER, UART_IER_MSI|UART_IER_RLSI);
/* enable DMA */
ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
/* setup termios */
if (tty)
ark3116_set_termios(tty, port, NULL);
err_out:
kfree(buf);
return result;
}
static int ark3116_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct async_icount cnow = priv->icount;
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
static int ark3116_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct serial_struct serstruct;
void __user *user_arg = (void __user *)arg;
switch (cmd) {
case TIOCGSERIAL:
/* XXX: Some of these values are probably wrong. */
memset(&serstruct, 0, sizeof(serstruct));
serstruct.type = PORT_16654;
serstruct.line = port->serial->minor;
serstruct.port = port->number;
serstruct.custom_divisor = 0;
serstruct.baud_base = 460800;
if (copy_to_user(user_arg, &serstruct, sizeof(serstruct)))
return -EFAULT;
return 0;
case TIOCSSERIAL:
if (copy_from_user(&serstruct, user_arg, sizeof(serstruct)))
return -EFAULT;
return 0;
case TIOCMIWAIT:
for (;;) {
struct async_icount prev = priv->icount;
interruptible_sleep_on(&priv->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
if ((prev.rng == priv->icount.rng) &&
(prev.dsr == priv->icount.dsr) &&
(prev.dcd == priv->icount.dcd) &&
(prev.cts == priv->icount.cts))
return -EIO;
if ((arg & TIOCM_RNG &&
(prev.rng != priv->icount.rng)) ||
(arg & TIOCM_DSR &&
(prev.dsr != priv->icount.dsr)) ||
(arg & TIOCM_CD &&
(prev.dcd != priv->icount.dcd)) ||
(arg & TIOCM_CTS &&
(prev.cts != priv->icount.cts)))
return 0;
}
break;
}
return -ENOIOCTLCMD;
}
static int ark3116_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
__u32 status;
__u32 ctrl;
unsigned long flags;
mutex_lock(&priv->hw_lock);
ctrl = priv->mcr;
mutex_unlock(&priv->hw_lock);
spin_lock_irqsave(&priv->status_lock, flags);
status = priv->msr;
spin_unlock_irqrestore(&priv->status_lock, flags);
return (status & UART_MSR_DSR ? TIOCM_DSR : 0) |
(status & UART_MSR_CTS ? TIOCM_CTS : 0) |
(status & UART_MSR_RI ? TIOCM_RI : 0) |
(status & UART_MSR_DCD ? TIOCM_CD : 0) |
(ctrl & UART_MCR_DTR ? TIOCM_DTR : 0) |
(ctrl & UART_MCR_RTS ? TIOCM_RTS : 0) |
(ctrl & UART_MCR_OUT1 ? TIOCM_OUT1 : 0) |
(ctrl & UART_MCR_OUT2 ? TIOCM_OUT2 : 0);
}
static int ark3116_tiocmset(struct tty_struct *tty,
unsigned set, unsigned clr)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* we need to take the mutex here, to make sure that the value
* in priv->mcr is actually the one that is in the hardware
*/
mutex_lock(&priv->hw_lock);
if (set & TIOCM_RTS)
priv->mcr |= UART_MCR_RTS;
if (set & TIOCM_DTR)
priv->mcr |= UART_MCR_DTR;
if (set & TIOCM_OUT1)
priv->mcr |= UART_MCR_OUT1;
if (set & TIOCM_OUT2)
priv->mcr |= UART_MCR_OUT2;
if (clr & TIOCM_RTS)
priv->mcr &= ~UART_MCR_RTS;
if (clr & TIOCM_DTR)
priv->mcr &= ~UART_MCR_DTR;
if (clr & TIOCM_OUT1)
priv->mcr &= ~UART_MCR_OUT1;
if (clr & TIOCM_OUT2)
priv->mcr &= ~UART_MCR_OUT2;
ark3116_write_reg(port->serial, UART_MCR, priv->mcr);
mutex_unlock(&priv->hw_lock);
return 0;
}
static void ark3116_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ark3116_private *priv = usb_get_serial_port_data(port);
/* LCR is also used for other things: protect access */
mutex_lock(&priv->hw_lock);
if (break_state)
priv->lcr |= UART_LCR_SBC;
else
priv->lcr &= ~UART_LCR_SBC;
ark3116_write_reg(port->serial, UART_LCR, priv->lcr);
mutex_unlock(&priv->hw_lock);
}
static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->status_lock, flags);
priv->msr = msr;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (msr & UART_MSR_ANY_DELTA) {
/* update input line counters */
if (msr & UART_MSR_DCTS)
priv->icount.cts++;
if (msr & UART_MSR_DDSR)
priv->icount.dsr++;
if (msr & UART_MSR_DDCD)
priv->icount.dcd++;
if (msr & UART_MSR_TERI)
priv->icount.rng++;
wake_up_interruptible(&priv->delta_msr_wait);
}
}
static void ark3116_update_lsr(struct usb_serial_port *port, __u8 lsr)
{
struct ark3116_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&priv->status_lock, flags);
/* combine bits */
priv->lsr |= lsr;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (lsr&UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI)
priv->icount.brk++;
if (lsr & UART_LSR_FE)
priv->icount.frame++;
if (lsr & UART_LSR_PE)
priv->icount.parity++;
if (lsr & UART_LSR_OE)
priv->icount.overrun++;
}
}
static void ark3116_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int status = urb->status;
const __u8 *data = urb->transfer_buffer;
int result;
switch (status) {
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
__func__, status);
break;
case 0: /* success */
		/* discovered this by trial and error... */
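		/*
		 * The 4-byte packets appear to be 0xe8 IIR LSR MSR; see the
		 * notes at the end of this file.
		 */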
if ((urb->actual_length == 4) && (data[0] == 0xe8)) {
const __u8 id = data[1]&UART_IIR_ID;
dbg("%s: iir=%02x", __func__, data[1]);
if (id == UART_IIR_MSI) {
dbg("%s: msr=%02x", __func__, data[3]);
ark3116_update_msr(port, data[3]);
break;
} else if (id == UART_IIR_RLSI) {
dbg("%s: lsr=%02x", __func__, data[2]);
ark3116_update_lsr(port, data[2]);
break;
}
}
/*
* Not sure what this data meant...
*/
usb_serial_debug_data(debug, &port->dev,
__func__,
urb->actual_length,
urb->transfer_buffer);
break;
}
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, result);
}
/* Data comes in via the bulk (data) URB, errors/interrupts via the int URB.
* This means that we cannot be sure which data byte has an associated error
* condition, so we report an error for all data in the next bulk read.
*
* Actually, there might even be a window between the bulk data leaving the
* ark and reading/resetting the lsr in the read_bulk_callback where an
* interrupt for the next data block could come in.
 * Without some kind of ordering on the ark, we would have to report the
* error for the next block of data as well...
* For now, let's pretend this can't happen.
*/
static void ark3116_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct ark3116_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
char tty_flag = TTY_NORMAL;
unsigned long flags;
__u32 lsr;
/* update line status */
spin_lock_irqsave(&priv->status_lock, flags);
lsr = priv->lsr;
priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
spin_unlock_irqrestore(&priv->status_lock, flags);
if (!urb->actual_length)
return;
tty = tty_port_tty_get(&port->port);
if (!tty)
return;
if (lsr & UART_LSR_BRK_ERROR_BITS) {
if (lsr & UART_LSR_BI)
tty_flag = TTY_BREAK;
else if (lsr & UART_LSR_PE)
tty_flag = TTY_PARITY;
else if (lsr & UART_LSR_FE)
tty_flag = TTY_FRAME;
/* overrun is special, not associated with a char */
if (lsr & UART_LSR_OE)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
urb->actual_length);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
static struct usb_driver ark3116_driver = {
.name = "ark3116",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
};
static struct usb_serial_driver ark3116_device = {
.driver = {
.owner = THIS_MODULE,
.name = "ark3116",
},
.id_table = id_table,
.num_ports = 1,
.attach = ark3116_attach,
.release = ark3116_release,
.set_termios = ark3116_set_termios,
.init_termios = ark3116_init_termios,
.ioctl = ark3116_ioctl,
.tiocmget = ark3116_tiocmget,
.tiocmset = ark3116_tiocmset,
.get_icount = ark3116_get_icount,
.open = ark3116_open,
.close = ark3116_close,
.break_ctl = ark3116_break_ctl,
.read_int_callback = ark3116_read_int_callback,
.process_read_urb = ark3116_process_read_urb,
};
static struct usb_serial_driver * const serial_drivers[] = {
&ark3116_device, NULL
};
module_usb_serial_driver(ark3116_driver, serial_drivers);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debug");
/*
* The following describes what I learned from studying the old
* ark3116.c driver, disassembling the windows driver, and some lucky
* guesses. Since I do not have any datasheet or other
* documentation, inaccuracies are almost guaranteed.
*
* Some specs for the ARK3116 can be found here:
* http://web.archive.org/web/20060318000438/
* www.arkmicro.com/en/products/view.php?id=10
* On that page, 2 GPIO pins are mentioned: I assume these are the
* OUT1 and OUT2 pins of the UART, so I added support for those
* through the MCR. Since the pins are not available on my hardware,
* I could not verify this.
* Also, it states there is "on-chip hardware flow control". I have
* discovered how to enable that. Unfortunately, I do not know how to
* enable XON/XOFF (software) flow control, which would need support
* from the chip as well to work. Because of the wording on the web
* page there is a real possibility the chip simply does not support
* software flow control.
*
* I got my ark3116 as part of a mobile phone adapter cable. On the
* PCB, the following numbered contacts are present:
*
* 1:- +5V
* 2:o DTR
* 3:i RX
* 4:i DCD
* 5:o RTS
* 6:o TX
* 7:i RI
* 8:i DSR
* 10:- 0V
* 11:i CTS
*
* On my chip, all signals seem to be 3.3V, but 5V tolerant. But that
* may be different for the one you have ;-).
*
* The windows driver limits the registers to 0-F, so I assume there
* are actually 16 present on the device.
*
* On an UART interrupt, 4 bytes of data come in on the interrupt
* endpoint. The bytes are 0xe8 IIR LSR MSR.
*
* The baudrate seems to be generated from the 12MHz crystal, using
* 4-times subsampling. So quot=12e6/(4*baud). Also see description
* of register E.
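 * For example, 115200 baud would give quot = 12e6/(4*115200) ~= 26,
 * for an actual rate of about 115385 baud (~0.16% high).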
*
* Registers 0-7:
* These seem to be the same as for a regular 16450. The FCR is set
* to UART_FCR_DMA_SELECT (0x8), I guess to enable transfers between
* the UART and the USB bridge/DMA engine.
*
* Register 8:
* By trial and error, I found out that bit 0 enables hardware CTS,
* stopping TX when CTS is +5V. Bit 1 does the same for RTS, making
* RTS +5V when the 3116 cannot transfer the data to the USB bus
* (verified by disabling the reading URB). Note that as far as I can
* tell, the windows driver does NOT use this, so there might be some
* hardware bug or something.
*
* According to a patch provided here
* (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used
* as an IrDA dongle. Since I do not have such a thing, I could not
* investigate that aspect. However, I can speculate ;-).
*
* - IrDA encodes data differently than RS232. Most likely, one of
* the bits in registers 9..E enables the IR ENDEC (encoder/decoder).
* - Depending on the IR transceiver, the input and output need to be
* inverted, so there are probably bits for that as well.
* - IrDA is half-duplex, so there should be a bit for selecting that.
*
* This still leaves at least two registers unaccounted for. Perhaps
* The chip can do XON/XOFF or CRC in HW?
*
* Register 9:
* Set to 0x00 for IrDA, when the baudrate is initialised.
*
* Register A:
* Set to 0x01 for IrDA, at init.
*
* Register B:
* Set to 0x01 for IrDA, 0x00 for RS232, at init.
*
* Register C:
* Set to 00 for IrDA, at init.
*
* Register D:
* Set to 0x41 for IrDA, at init.
*
* Register E:
 * Some kind of baudrate override. The windows driver seems to set
* this to 0x00 for normal baudrates, 0x01 for 460800, 0x02 for 921600.
* Since 460800 and 921600 cannot be obtained by dividing 3MHz by an integer,
 * it could be some kind of subdivisor thingy.
 * However, it does not seem to do anything: selecting 921600 (divisor 3,
* reg E=2), still gets 1 MHz. I also checked if registers 9, C or F would
* work, but they don't.
*
* Register F: unknown
*/
| spock1104/android_kernel_lge_voltdos | drivers/usb/serial/ark3116.c | C | gpl-2.0 | 23,494 |
/*
* pps_parport.c -- kernel parallel port PPS client
*
*
* Copyright (C) 2009 Alexander Gordeev <lasaine@lvk.cs.msu.su>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* TODO:
* implement echo over SEL pin
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irqnr.h>
#include <linux/time.h>
#include <linux/parport.h>
#include <linux/pps_kernel.h>
#define DRVDESC "parallel port PPS client"
/* module parameters */
#define CLEAR_WAIT_MAX 100
#define CLEAR_WAIT_MAX_ERRORS 5
static unsigned int clear_wait = 100;
MODULE_PARM_DESC(clear_wait,
"Maximum number of port reads when polling for signal clear,"
" zero turns clear edge capture off entirely");
module_param(clear_wait, uint, 0);
/* internal per port structure */
struct pps_client_pp {
struct pardevice *pardev; /* parport device */
struct pps_device *pps; /* PPS device */
unsigned int cw; /* port clear timeout */
unsigned int cw_err; /* number of timeouts */
};
static inline int signal_is_set(struct parport *port)
{
return (port->ops->read_status(port) & PARPORT_STATUS_ACK) != 0;
}
/* parport interrupt handler */
static void parport_irq(void *handle)
{
struct pps_event_time ts_assert, ts_clear;
struct pps_client_pp *dev = handle;
struct parport *port = dev->pardev->port;
unsigned int i;
unsigned long flags;
/* first of all we get the time stamp... */
pps_get_ts(&ts_assert);
if (dev->cw == 0)
/* clear edge capture disabled */
goto out_assert;
/* try capture the clear edge */
/* We have to disable interrupts here. The idea is to prevent
* other interrupts on the same processor to introduce random
* lags while polling the port. Reading from IO port is known
* to take approximately 1us while other interrupt handlers can
* take much more potentially.
*
* Interrupts won't be disabled for a long time because the
* number of polls is limited by clear_wait parameter which is
* kept rather low. So it should never be an issue.
*/
local_irq_save(flags);
/* check the signal (no signal means the pulse is lost this time) */
if (!signal_is_set(port)) {
local_irq_restore(flags);
dev_err(dev->pps->dev, "lost the signal\n");
goto out_assert;
}
/* poll the port until the signal is unset */
for (i = dev->cw; i; i--)
if (!signal_is_set(port)) {
pps_get_ts(&ts_clear);
local_irq_restore(flags);
dev->cw_err = 0;
goto out_both;
}
local_irq_restore(flags);
/* timeout */
dev->cw_err++;
if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
dev_err(dev->pps->dev, "disabled clear edge capture after %d"
" timeouts\n", dev->cw_err);
dev->cw = 0;
dev->cw_err = 0;
}
out_assert:
/* fire assert event */
pps_event(dev->pps, &ts_assert,
PPS_CAPTUREASSERT, NULL);
return;
out_both:
/* fire assert event */
pps_event(dev->pps, &ts_assert,
PPS_CAPTUREASSERT, NULL);
/* fire clear event */
pps_event(dev->pps, &ts_clear,
PPS_CAPTURECLEAR, NULL);
return;
}
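/* parport attach callback: register a pardevice, claim the port and register a PPS source for it */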
static void parport_attach(struct parport *port)
{
struct pps_client_pp *device;
struct pps_source_info info = {
.name = KBUILD_MODNAME,
.path = "",
.mode = PPS_CAPTUREBOTH | \
PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
PPS_ECHOASSERT | PPS_ECHOCLEAR | \
PPS_CANWAIT | PPS_TSFMT_TSPEC,
.owner = THIS_MODULE,
.dev = NULL
};
device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL);
if (!device) {
pr_err("memory allocation failed, not attaching\n");
return;
}
device->pardev = parport_register_device(port, KBUILD_MODNAME,
NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device);
if (!device->pardev) {
pr_err("couldn't register with %s\n", port->name);
goto err_free;
}
if (parport_claim_or_block(device->pardev) < 0) {
pr_err("couldn't claim %s\n", port->name);
goto err_unregister_dev;
}
device->pps = pps_register_source(&info,
PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
if (device->pps == NULL) {
pr_err("couldn't register PPS source\n");
goto err_release_dev;
}
device->cw = clear_wait;
port->ops->enable_irq(port);
pr_info("attached to %s\n", port->name);
return;
err_release_dev:
parport_release(device->pardev);
err_unregister_dev:
parport_unregister_device(device->pardev);
err_free:
kfree(device);
}
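/* parport detach callback: unregister the PPS source and release the port */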
static void parport_detach(struct parport *port)
{
struct pardevice *pardev = port->cad;
struct pps_client_pp *device;
/* FIXME: oooh, this is ugly! */
if (strcmp(pardev->name, KBUILD_MODNAME))
/* not our port */
return;
device = pardev->private;
port->ops->disable_irq(port);
pps_unregister_source(device->pps);
parport_release(pardev);
parport_unregister_device(pardev);
kfree(device);
}
static struct parport_driver pps_parport_driver = {
.name = KBUILD_MODNAME,
.attach = parport_attach,
.detach = parport_detach,
};
/* module staff */
static int __init pps_parport_init(void)
{
int ret;
pr_info(DRVDESC "\n");
if (clear_wait > CLEAR_WAIT_MAX) {
pr_err("clear_wait value should be not greater"
" then %d\n", CLEAR_WAIT_MAX);
return -EINVAL;
}
ret = parport_register_driver(&pps_parport_driver);
if (ret) {
pr_err("unable to register with parport\n");
return ret;
}
return 0;
}
static void __exit pps_parport_exit(void)
{
parport_unregister_driver(&pps_parport_driver);
}
module_init(pps_parport_init);
module_exit(pps_parport_exit);
MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
MODULE_DESCRIPTION(DRVDESC);
MODULE_LICENSE("GPL");
| AOKP/kernel_oppo_find5 | drivers/pps/clients/pps_parport.c | C | gpl-2.0 | 6,180 |
/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/spitfire.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
struct poll {
int fd;
short events;
short revents;
};
/* from helpers.S */
EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
/* from head_64.S */
EXPORT_SYMBOL(__ret_efault);
EXPORT_SYMBOL(tlb_type);
EXPORT_SYMBOL(sun4v_chip_type);
EXPORT_SYMBOL(prom_root_node);
/* from hvcalls.S */
EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
/* from hweight.S */
EXPORT_SYMBOL(__arch_hweight8);
EXPORT_SYMBOL(__arch_hweight16);
EXPORT_SYMBOL(__arch_hweight32);
EXPORT_SYMBOL(__arch_hweight64);
/* from ffs_ffz.S */
EXPORT_SYMBOL(ffs);
EXPORT_SYMBOL(__ffs);
/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
| glfernando/kernel_m7 | arch/sparc/kernel/sparc_ksyms_64.c | C | gpl-2.0 | 1,277 |
#include <linux/pci.h>
#include <linux/acpi.h>
#include <acpi/reboot.h>
void acpi_reboot(void)
{
struct acpi_generic_address *rr;
struct pci_bus *bus0;
u8 reset_value;
unsigned int devfn;
if (acpi_disabled)
return;
rr = &acpi_gbl_FADT.reset_register;
/* ACPI reset register was only introduced with v2 of the FADT */
if (acpi_gbl_FADT.header.revision < 2)
return;
/* Is the reset register supported? The spec says we should be
* checking the bit width and bit offset, but Windows ignores
* these fields */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER))
return;
reset_value = acpi_gbl_FADT.reset_value;
/* The reset register can only exist in I/O, Memory or PCI config space
* on a device on bus 0. */
switch (rr->space_id) {
case ACPI_ADR_SPACE_PCI_CONFIG:
/* The reset register can only live on bus 0. */
bus0 = pci_find_bus(0, 0);
if (!bus0)
return;
/* Form PCI device/function pair. */
devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
(rr->address >> 16) & 0xffff);
printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG.");
/* Write the value that resets us. */
pci_bus_write_config_byte(bus0, devfn,
(rr->address & 0xffff), reset_value);
break;
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
case ACPI_ADR_SPACE_SYSTEM_IO:
printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n");
acpi_reset();
break;
}
}
| Red--Code/android_kernel_sony_msm8974ac | drivers/acpi/reboot.c | C | gpl-2.0 | 1,376 |
-- Old Town Thug
DELETE FROM `creature_text` WHERE `CreatureID` IN (4969,38867);
INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
(4969,0,0,"Yeah, okay, boss. No problem.",12,0,100,0,0,0,1715,0,"Old Town Thug"),
(38867,0,0,"All right, boss. You sure though? Just seems like a waste of good practice.",12,0,100,0,0,0,1716,0,"Old Town Thug"),
(38867,1,0,"%s cracks his knuckles.",16,0,100,0,0,0,1717,0,"Old Town Thug");
UPDATE `smart_scripts` SET `event_param1`=18000, `event_param2`=18000 WHERE `entryorguid`=496900 AND `source_type`=9 AND `id`=2;
UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=38867;
DELETE FROM `smart_scripts` WHERE `entryorguid`=38867 AND `source_type`=0;
INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `event_param5`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
(38867,0,0,1,54,0,100,0,0,0,0,0,0,42,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Old Town Thug - Just Summoned - Set Invinciblity"),
(38867,0,1,0,61,0,100,0,0,0,0,0,0,49,0,0,0,0,0,0,21,30,0,0,0,0,0,0,"Old Town Thug - Just Summoned - Start Attacking"),
(38867,0,2,0,38,0,100,1,1,1,0,0,0,80,496900,2,0,0,0,0,1,0,0,0,0,0,0,0,"Old Town Thug - On Data Set - Run Script");
-- Dashel Stonefist
UPDATE `smart_scripts` SET `action_param1`=38867 WHERE `entryorguid`=496100 AND `source_type`=9 AND `id`=4;
DELETE FROM `smart_scripts` WHERE `entryorguid`=496101 AND `source_type`=9;
INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `event_param5`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
(496101,9,0,0,0,0,100,0,0,0,0,0,0,15,1447,0,0,0,0,0,7,0,0,0,0,0,0,0,"Dashel Stonefist - On Script - Quest Credit 'The Missing Diplomat'"),
(496101,9,1,0,0,0,100,0,0,0,0,0,0,45,1,1,0,0,0,0,11,4969,30,0,0,0,0,0,"Dashel Stonefist - On Script - Set Data to Old Town Thug"),
(496101,9,2,0,0,0,100,0,0,0,0,0,0,45,1,1,0,0,0,0,11,38867,30,0,0,0,0,0,"Dashel Stonefist - On Script - Set Data to Old Town Thug"),
(496101,9,3,0,0,0,100,0,0,0,0,0,0,24,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Dashel Stonefist - On Script - Evade"),
(496101,9,4,0,0,0,100,0,0,0,0,0,0,2,84,0,0,0,0,0,1,0,0,0,0,0,0,0,"Dashel Stonefist - On Script - Set Faction 84"),
(496101,9,5,0,0,0,100,0,0,0,0,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Dashel Stonefist - On Script - Say Line 1"),
(496101,9,6,0,0,0,100,0,6000,6000,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Dashel Stonefist - On Script - Say Line 2"),
(496101,9,7,0,0,0,100,0,0,0,0,0,0,42,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Dashel Stonefist - On Script - Set Invincibility Off"),
(496101,9,8,0,0,0,100,0,4000,4000,0,0,0,1,0,0,0,0,0,0,19,4969,0,0,0,0,0,0,"Dashel Stonefist - On Script - Say Line 0 (Old Town Thug)"),
(496101,9,9,0,0,0,100,0,3000,3000,0,0,0,1,0,0,0,0,0,0,19,38867,0,0,0,0,0,0,"Dashel Stonefist - On Script - Say Line 0 (Old Town Thug)"),
(496101,9,10,0,0,0,100,0,0,0,0,0,0,1,1,0,0,0,0,0,19,38867,0,0,0,0,0,0,"Dashel Stonefist - On Script - Say Line 1 (Old Town Thug)");
| pete318/TrinityCore | sql/old/3.3.5a/world/64_2019_03_19/2018_05_26_01_world_335.sql | SQL | gpl-2.0 | 3,696 |
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/*
* this needs to be before <linux/kernel.h> is loaded,
* and <linux/sched.h> loads <linux/kernel.h>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/lge/lge_proc_comm.h>
#include CONFIG_LGE_BOARD_HEADER_FILE
#include <mach/lge/lge_pm.h>
#include "../../kernel/power/power.h"
#include <mach/msm_smsm.h>
#if defined(CONFIG_MACH_MSM7X25A_V3) || defined(CONFIG_MACH_MSM7X25A_V1)
#include <mach/msm_hsusb.h>
#endif
#ifdef CONFIG_LGE_LOW_VOLTAGE_BATTERY_CHECK
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include <linux/cpumask.h>
#endif
/* LGE_CHANGE_S [jongjin7.park@lge.com] 20130122 Added direct read capacity sysfs */
#if defined(CONFIG_MACH_MSM7X27A_U0) || defined(CONFIG_MACH_MSM8X25_V7) || defined (CONFIG_MACH_MSM7X25A_V1)
#include <mach/msm_battery.h>
#endif
/* LGE_CHANGE_E [jongjin7.park@lge.com] 20130122 Added direct read capacity sysfs */
#if defined(CONFIG_MACH_MSM7X25A_V3) || defined(CONFIG_MACH_MSM7X25A_V1)
extern int get_charger_type(void); /* defined in msm72k_udc.c */
#endif
extern u32 msm_batt_get_vbatt_level(void);
static ssize_t batt_volt_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_batt_volt());
}
static ssize_t chg_therm_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_chg_therm());
}
static ssize_t pcb_version_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_pcb_version());
}
static ssize_t chg_curr_volt_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_chg_curr_volt());
}
static ssize_t batt_therm_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_batt_therm());
}
// 2012-11-10 Jinhong Kim(miracle.kim@lge.com) [V7][Power] read batt therm 8bit raw [START]
static ssize_t batt_therm_8bit_raw_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_batt_therm_8bit_raw());
}
// 2012-11-10 Jinhong Kim(miracle.kim@lge.com) [V7][Power] read batt therm 8bit raw [END]
static ssize_t batt_volt_raw_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_batt_volt_raw());
}
static ssize_t chg_stat_reg_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_chg_stat_reg());
}
static ssize_t chg_en_reg_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_chg_en_reg());
}
static ssize_t batt_id_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", lge_get_batt_id());
}
static ssize_t pm_suspend_state_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", check_suspend_state());
}
#ifdef CONFIG_LGE_PM_BATT_ID_DETECTION
static ssize_t lge_battery_id_info_show(struct device* dev,struct device_attribute* attr,char* buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", lge_get_battery_id());
}
#endif
/* LGE_CHANGE_S: [murali.ramaiah@lge.com] 2013-01-07 */
#if defined(CONFIG_MACH_MSM7X25A_V3) || defined(CONFIG_MACH_MSM7X25A_V1)
static ssize_t msm_batt_chgr_status_show(struct device* dev, struct device_attribute* attr, char* buf)
{
int chg_type;
chg_type = get_charger_type();
return sprintf(buf,"%d\n", (chg_type == USB_CHG_TYPE__WALLCHARGER) ? 1 : (chg_type == USB_CHG_TYPE__SDP) ? 1 : 0);
}
#endif /* CONFIG_MACH_MSM7X25A_V3 */
/* LGE_CHANGE_S: [murali.ramaiah@lge.com] 2013-01-07 */
/* LGE_CHANGE_S [jongjin7.park@lge.com] 20130122 Added direct read capacity sysfs */
#if defined(CONFIG_MACH_MSM7X27A_U0) || defined(CONFIG_MACH_MSM8X25_V7) || defined (CONFIG_MACH_MSM7X25A_V1)
static ssize_t msm_batt_capacity_show(struct device* dev, struct device_attribute* attr, char* buf)
{
return snprintf(buf, PAGE_SIZE,"%d\n", msm_batt_get_vbatt_capacity());
}
#endif
/* LGE_CHANGE_E [jongjin7.park@lge.com] 20130122 Added direct read capacity sysfs */
static ssize_t msm_batt_level_show(struct device* dev, struct device_attribute* attr, char* buf)
{
return snprintf(buf, PAGE_SIZE,"%d\n", msm_batt_get_vbatt_level());
}
static DEVICE_ATTR(batt_volt, S_IRUGO, batt_volt_show, NULL);
static DEVICE_ATTR(chg_therm, S_IRUGO, chg_therm_show, NULL);
static DEVICE_ATTR(pcb_version, S_IRUGO, pcb_version_show, NULL);
static DEVICE_ATTR(chg_curr_volt, S_IRUGO, chg_curr_volt_show, NULL);
static DEVICE_ATTR(batt_therm, S_IRUGO, batt_therm_show, NULL);
// 2012-11-10 Jinhong Kim(miracle.kim@lge.com) [V7][Power] read batt therm 8bit raw [START]
static DEVICE_ATTR(batt_therm_8bit_raw, S_IRUGO, batt_therm_8bit_raw_show, NULL);
// 2012-11-10 Jinhong Kim(miracle.kim@lge.com) [V7][Power] read batt therm 8bit raw [END]
static DEVICE_ATTR(batt_volt_raw, S_IRUGO, batt_volt_raw_show, NULL);
static DEVICE_ATTR(chg_stat_reg, S_IRUGO, chg_stat_reg_show, NULL);
static DEVICE_ATTR(chg_en_reg, S_IRUGO, chg_en_reg_show, NULL);
static DEVICE_ATTR(batt_id, S_IRUGO, batt_id_show, NULL);
static DEVICE_ATTR(pm_suspend_state, S_IRUGO, pm_suspend_state_show, NULL);
#ifdef CONFIG_LGE_PM_BATT_ID_DETECTION
static DEVICE_ATTR(lge_battery_id_info, S_IRUGO, lge_battery_id_info_show, NULL);
#endif
#if defined(CONFIG_MACH_MSM7X25A_V3) || defined(CONFIG_MACH_MSM7X25A_V1)
static DEVICE_ATTR(chgr_status, S_IRUGO, msm_batt_chgr_status_show, NULL);
#endif
/* LGE_CHANGE_S [jongjin7.park@lge.com] 20130122 Added direct read capacity sysfs */
#if defined(CONFIG_MACH_MSM7X27A_U0) || defined(CONFIG_MACH_MSM8X25_V7) || defined (CONFIG_MACH_MSM7X25A_V1)
static DEVICE_ATTR(batt_capacity, S_IRUGO, msm_batt_capacity_show, NULL);
#endif
/* LGE_CHANGE_E [jongjin7.park@lge.com] 20130122 Added direct read capacity sysfs */
static DEVICE_ATTR(msm_batt_level, S_IRUGO, msm_batt_level_show, NULL);
static struct attribute* dev_attrs_lge_pm_info[] = {
&dev_attr_batt_volt.attr,
&dev_attr_chg_therm.attr,
&dev_attr_pcb_version.attr,
&dev_attr_chg_curr_volt.attr,
&dev_attr_batt_therm.attr,
// 2012-11-10 Jinhong Kim(miracle.kim@lge.com) [V7][Power] read batt therm 8bit raw [START]
&dev_attr_batt_therm_8bit_raw.attr,
// 2012-11-10 Jinhong Kim(miracle.kim@lge.com) [V7][Power] read batt therm 8bit raw [END]
&dev_attr_batt_volt_raw.attr,
&dev_attr_chg_stat_reg.attr,
&dev_attr_chg_en_reg.attr,
&dev_attr_batt_id.attr,
&dev_attr_pm_suspend_state.attr,
#ifdef CONFIG_LGE_PM_BATT_ID_DETECTION
&dev_attr_lge_battery_id_info.attr,
#endif
#if defined(CONFIG_MACH_MSM7X25A_V3) || defined(CONFIG_MACH_MSM7X25A_V1)
&dev_attr_chgr_status.attr,
#endif
/* LGE_CHANGE_S [jongjin7.park@lge.com] 20130122 Added direct read capacity sysfs */
#if defined(CONFIG_MACH_MSM7X27A_U0) || defined(CONFIG_MACH_MSM8X25_V7) || defined (CONFIG_MACH_MSM7X25A_V1)
&dev_attr_batt_capacity.attr,
#endif
/* LGE_CHANGE_E [jongjin7.park@lge.com] 20130122 Added direct read capacity sysfs */
&dev_attr_msm_batt_level.attr,
NULL,
};
static struct attribute_group dev_attr_grp_lge_pm_info = {
.attrs = dev_attrs_lge_pm_info,
};
static int __devinit lge_pm_probe(struct platform_device *pdev)
{
int rc;
rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp_lge_pm_info);
if(rc < 0)
{
dev_err(&pdev->dev,"%s: fail to create sysfs for lge_pm rc=%d\n", __func__, rc);
}
return rc;
}
static int __devexit lge_pm_remove(struct platform_device *pdev)
{
sysfs_remove_group(&pdev->dev.kobj,&dev_attr_grp_lge_pm_info);
return 0;
}
static struct platform_driver lge_pm_driver = {
.probe = lge_pm_probe,
.remove = __devexit_p(lge_pm_remove),
.driver = {
.name = LGE_PM_DEVICE,
.owner = THIS_MODULE,
},
};
// LGE_CHANGE_S,narasimha.chikka@lge.com,Add BATT_ID Check
#if defined(CONFIG_LGE_PM_BATT_ID_DETECTION)
static void __init lge_pm_boot_batt_id_check(void)
{
u32 *smem_batt_id = NULL;
u32 batt_id = BATT_UNKNOWN;
smem_batt_id = (u32*)smem_alloc(SMEM_BATT_INFO,sizeof(u32));
if(smem_batt_id != NULL){
batt_id = *smem_batt_id;
if((batt_id == BATT_ISL6296_L) || (batt_id == BATT_DS2704_L)
|| (batt_id == BATT_DS2704_C) || (batt_id == BATT_ISL6296_C)){
printk(KERN_INFO "%s, High Voltage Battery Detected \n",__func__);
}
else if(batt_id == BATT_NORMAL){
printk(KERN_INFO "%s, Normal Battery Detected \n",__func__);
}
else{
printk(KERN_INFO "%s, Unknow Battery [Strange!!] \n",__func__);
}
}
else{
printk(KERN_INFO "%s, BATT ID DETECTION FAILS \n",__func__);
}
}
#endif
// LGE_CHANGE_E,narasimha.chikka@lge.com,Add BATT_ID Check
#ifdef CONFIG_LGE_LOW_VOLTAGE_BATTERY_CHECK
static void pm_do_poweroff(struct work_struct *dummy)
{
kernel_power_off();
}
static DECLARE_WORK(poweroff_work, pm_do_poweroff);
void lge_pm_handle_poweroff(void)
{
#if 1
lge_pm_low_vbatt_notify();
#else
schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
#endif
}
EXPORT_SYMBOL(lge_pm_handle_poweroff);
#endif
static int __init lge_pm_init(void)
{
#if defined(CONFIG_LGE_PM_BATT_ID_DETECTION)
lge_pm_boot_batt_id_check();
#endif
return platform_driver_register(&lge_pm_driver);
}
static void __exit lge_pm_exit(void)
{
platform_driver_unregister(&lge_pm_driver);
}
module_init(lge_pm_init);
module_exit(lge_pm_exit);
| TheTypoMaster/android_kernel_samsung_smdk4412 | drivers/power/lge_pm_sysfs.c | C | gpl-2.0 | 9,934 |
/* $Id: capi.c,v 1.1.4.1 2001/11/20 14:19:34 kai Exp $
*
* ISDN lowlevel-module for the IBM ISDN-S0 Active 2000.
* CAPI encoder/decoder
*
* Author Fritz Elfert
* Copyright by Fritz Elfert <fritz@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Thanks to Friedemann Baitinger and IBM Germany
*
*/
#define __NO_VERSION__
#include "act2000.h"
#include "capi.h"
static actcapi_msgdsc valid_msg[] = {
{{ 0x86, 0x02}, "DATA_B3_IND"}, /* DATA_B3_IND/CONF must be first because of speed!!! */
{{ 0x86, 0x01}, "DATA_B3_CONF"},
{{ 0x02, 0x01}, "CONNECT_CONF"},
{{ 0x02, 0x02}, "CONNECT_IND"},
{{ 0x09, 0x01}, "CONNECT_INFO_CONF"},
{{ 0x03, 0x02}, "CONNECT_ACTIVE_IND"},
{{ 0x04, 0x01}, "DISCONNECT_CONF"},
{{ 0x04, 0x02}, "DISCONNECT_IND"},
{{ 0x05, 0x01}, "LISTEN_CONF"},
{{ 0x06, 0x01}, "GET_PARAMS_CONF"},
{{ 0x07, 0x01}, "INFO_CONF"},
{{ 0x07, 0x02}, "INFO_IND"},
{{ 0x08, 0x01}, "DATA_CONF"},
{{ 0x08, 0x02}, "DATA_IND"},
{{ 0x40, 0x01}, "SELECT_B2_PROTOCOL_CONF"},
{{ 0x80, 0x01}, "SELECT_B3_PROTOCOL_CONF"},
{{ 0x81, 0x01}, "LISTEN_B3_CONF"},
{{ 0x82, 0x01}, "CONNECT_B3_CONF"},
{{ 0x82, 0x02}, "CONNECT_B3_IND"},
{{ 0x83, 0x02}, "CONNECT_B3_ACTIVE_IND"},
{{ 0x84, 0x01}, "DISCONNECT_B3_CONF"},
{{ 0x84, 0x02}, "DISCONNECT_B3_IND"},
{{ 0x85, 0x01}, "GET_B3_PARAMS_CONF"},
{{ 0x01, 0x01}, "RESET_B3_CONF"},
{{ 0x01, 0x02}, "RESET_B3_IND"},
	/* {{ 0x87, 0x02}, "HANDSET_IND"}, not implemented */
{{ 0xff, 0x01}, "MANUFACTURER_CONF"},
{{ 0xff, 0x02}, "MANUFACTURER_IND"},
#ifdef DEBUG_MSG
/* Requests */
{{ 0x01, 0x00}, "RESET_B3_REQ"},
{{ 0x02, 0x00}, "CONNECT_REQ"},
{{ 0x04, 0x00}, "DISCONNECT_REQ"},
{{ 0x05, 0x00}, "LISTEN_REQ"},
{{ 0x06, 0x00}, "GET_PARAMS_REQ"},
{{ 0x07, 0x00}, "INFO_REQ"},
{{ 0x08, 0x00}, "DATA_REQ"},
{{ 0x09, 0x00}, "CONNECT_INFO_REQ"},
{{ 0x40, 0x00}, "SELECT_B2_PROTOCOL_REQ"},
{{ 0x80, 0x00}, "SELECT_B3_PROTOCOL_REQ"},
{{ 0x81, 0x00}, "LISTEN_B3_REQ"},
{{ 0x82, 0x00}, "CONNECT_B3_REQ"},
{{ 0x84, 0x00}, "DISCONNECT_B3_REQ"},
{{ 0x85, 0x00}, "GET_B3_PARAMS_REQ"},
{{ 0x86, 0x00}, "DATA_B3_REQ"},
{{ 0xff, 0x00}, "MANUFACTURER_REQ"},
/* Responses */
{{ 0x01, 0x03}, "RESET_B3_RESP"},
{{ 0x02, 0x03}, "CONNECT_RESP"},
{{ 0x03, 0x03}, "CONNECT_ACTIVE_RESP"},
{{ 0x04, 0x03}, "DISCONNECT_RESP"},
{{ 0x07, 0x03}, "INFO_RESP"},
{{ 0x08, 0x03}, "DATA_RESP"},
{{ 0x82, 0x03}, "CONNECT_B3_RESP"},
{{ 0x83, 0x03}, "CONNECT_B3_ACTIVE_RESP"},
{{ 0x84, 0x03}, "DISCONNECT_B3_RESP"},
{{ 0x86, 0x03}, "DATA_B3_RESP"},
{{ 0xff, 0x03}, "MANUFACTURER_RESP"},
#endif
{{ 0x00, 0x00}, NULL},
};
#define num_valid_msg (sizeof(valid_msg)/sizeof(actcapi_msgdsc))
#define num_valid_imsg 27 /* MANUFACTURER_IND */
/*
* Check for a valid incoming CAPI message.
* Return:
* 0 = Invalid message
* 1 = Valid message, no B-Channel-data
* 2 = Valid message, B-Channel-data
*/
int
actcapi_chkhdr(act2000_card * card, actcapi_msghdr *hdr)
{
int i;
if (hdr->applicationID != 1)
return 0;
if (hdr->len < 9)
return 0;
for (i = 0; i < num_valid_imsg; i++)
if ((hdr->cmd.cmd == valid_msg[i].cmd.cmd) &&
(hdr->cmd.subcmd == valid_msg[i].cmd.subcmd)) {
return (i?1:2);
}
return 0;
}
#define ACTCAPI_MKHDR(l, c, s) { \
skb = alloc_skb(l + 8, GFP_ATOMIC); \
if (skb) { \
m = (actcapi_msg *)skb_put(skb, l + 8); \
m->hdr.len = l + 8; \
m->hdr.applicationID = 1; \
m->hdr.cmd.cmd = c; \
m->hdr.cmd.subcmd = s; \
m->hdr.msgnum = actcapi_nextsmsg(card); \
} else m = NULL;\
}
#define ACTCAPI_CHKSKB if (!skb) { \
printk(KERN_WARNING "actcapi: alloc_skb failed\n"); \
return; \
}
#define ACTCAPI_QUEUE_TX { \
actcapi_debug_msg(skb, 1); \
skb_queue_tail(&card->sndq, skb); \
act2000_schedule_tx(card); \
}
int
actcapi_listen_req(act2000_card *card)
{
__u16 eazmask = 0;
int i;
actcapi_msg *m;
struct sk_buff *skb;
for (i = 0; i < ACT2000_BCH; i++)
eazmask |= card->bch[i].eazmask;
ACTCAPI_MKHDR(9, 0x05, 0x00);
if (!skb) {
printk(KERN_WARNING "actcapi: alloc_skb failed\n");
return -ENOMEM;
}
m->msg.listen_req.controller = 0;
m->msg.listen_req.infomask = 0x3f; /* All information */
m->msg.listen_req.eazmask = eazmask;
m->msg.listen_req.simask = (eazmask)?0x86:0; /* All SI's */
ACTCAPI_QUEUE_TX;
return 0;
}
int
actcapi_connect_req(act2000_card *card, act2000_chan *chan, char *phone,
char eaz, int si1, int si2)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR((11 + strlen(phone)), 0x02, 0x00);
if (!skb) {
printk(KERN_WARNING "actcapi: alloc_skb failed\n");
chan->fsm_state = ACT2000_STATE_NULL;
return -ENOMEM;
}
m->msg.connect_req.controller = 0;
m->msg.connect_req.bchan = 0x83;
m->msg.connect_req.infomask = 0x3f;
m->msg.connect_req.si1 = si1;
m->msg.connect_req.si2 = si2;
m->msg.connect_req.eaz = eaz?eaz:'0';
m->msg.connect_req.addr.len = strlen(phone) + 1;
m->msg.connect_req.addr.tnp = 0x81;
memcpy(m->msg.connect_req.addr.num, phone, strlen(phone));
chan->callref = m->hdr.msgnum;
ACTCAPI_QUEUE_TX;
return 0;
}
static void
actcapi_connect_b3_req(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(17, 0x82, 0x00);
ACTCAPI_CHKSKB;
m->msg.connect_b3_req.plci = chan->plci;
memset(&m->msg.connect_b3_req.ncpi, 0,
sizeof(m->msg.connect_b3_req.ncpi));
m->msg.connect_b3_req.ncpi.len = 13;
m->msg.connect_b3_req.ncpi.modulo = 8;
ACTCAPI_QUEUE_TX;
}
/*
* Set net type (1TR6) or (EDSS1)
*/
int
actcapi_manufacturer_req_net(act2000_card *card)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(5, 0xff, 0x00);
if (!skb) {
printk(KERN_WARNING "actcapi: alloc_skb failed\n");
return -ENOMEM;
}
m->msg.manufacturer_req_net.manuf_msg = 0x11;
m->msg.manufacturer_req_net.controller = 1;
m->msg.manufacturer_req_net.nettype = (card->ptype == ISDN_PTYPE_EURO)?1:0;
ACTCAPI_QUEUE_TX;
printk(KERN_INFO "act2000 %s: D-channel protocol now %s\n",
card->interface.id, (card->ptype == ISDN_PTYPE_EURO)?"euro":"1tr6");
card->interface.features &=
~(ISDN_FEATURE_P_UNKNOWN | ISDN_FEATURE_P_EURO | ISDN_FEATURE_P_1TR6);
card->interface.features |=
((card->ptype == ISDN_PTYPE_EURO)?ISDN_FEATURE_P_EURO:ISDN_FEATURE_P_1TR6);
return 0;
}
/*
* Switch V.42 on or off
*/
int
actcapi_manufacturer_req_v42(act2000_card *card, ulong arg)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(8, 0xff, 0x00);
if (!skb) {
printk(KERN_WARNING "actcapi: alloc_skb failed\n");
return -ENOMEM;
}
m->msg.manufacturer_req_v42.manuf_msg = 0x10;
m->msg.manufacturer_req_v42.controller = 0;
m->msg.manufacturer_req_v42.v42control = (arg?1:0);
ACTCAPI_QUEUE_TX;
return 0;
}
/*
* Set error-handler
*/
int
actcapi_manufacturer_req_errh(act2000_card *card)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(4, 0xff, 0x00);
if (!skb) {
printk(KERN_WARNING "actcapi: alloc_skb failed\n");
return -ENOMEM;
}
m->msg.manufacturer_req_err.manuf_msg = 0x03;
m->msg.manufacturer_req_err.controller = 0;
ACTCAPI_QUEUE_TX;
return 0;
}
/*
* Set MSN-Mapping.
*/
int
actcapi_manufacturer_req_msn(act2000_card *card)
{
msn_entry *p = card->msn_list;
actcapi_msg *m;
struct sk_buff *skb;
int len;
while (p) {
int i;
len = strlen(p->msn);
for (i = 0; i < 2; i++) {
ACTCAPI_MKHDR(6 + len, 0xff, 0x00);
if (!skb) {
printk(KERN_WARNING "actcapi: alloc_skb failed\n");
return -ENOMEM;
}
m->msg.manufacturer_req_msn.manuf_msg = 0x13 + i;
m->msg.manufacturer_req_msn.controller = 0;
m->msg.manufacturer_req_msn.msnmap.eaz = p->eaz;
m->msg.manufacturer_req_msn.msnmap.len = len;
memcpy(m->msg.manufacturer_req_msn.msnmap.msn, p->msn, len);
ACTCAPI_QUEUE_TX;
}
p = p->next;
}
return 0;
}
void
actcapi_select_b2_protocol_req(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(10, 0x40, 0x00);
ACTCAPI_CHKSKB;
m->msg.select_b2_protocol_req.plci = chan->plci;
memset(&m->msg.select_b2_protocol_req.dlpd, 0,
sizeof(m->msg.select_b2_protocol_req.dlpd));
m->msg.select_b2_protocol_req.dlpd.len = 6;
switch (chan->l2prot) {
case ISDN_PROTO_L2_TRANS:
m->msg.select_b2_protocol_req.protocol = 0x03;
m->msg.select_b2_protocol_req.dlpd.dlen = 4000;
break;
case ISDN_PROTO_L2_HDLC:
m->msg.select_b2_protocol_req.protocol = 0x02;
m->msg.select_b2_protocol_req.dlpd.dlen = 4000;
break;
case ISDN_PROTO_L2_X75I:
case ISDN_PROTO_L2_X75UI:
case ISDN_PROTO_L2_X75BUI:
m->msg.select_b2_protocol_req.protocol = 0x01;
m->msg.select_b2_protocol_req.dlpd.dlen = 4000;
m->msg.select_b2_protocol_req.dlpd.laa = 3;
m->msg.select_b2_protocol_req.dlpd.lab = 1;
m->msg.select_b2_protocol_req.dlpd.win = 7;
m->msg.select_b2_protocol_req.dlpd.modulo = 8;
break;
}
ACTCAPI_QUEUE_TX;
}
static void
actcapi_select_b3_protocol_req(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(17, 0x80, 0x00);
ACTCAPI_CHKSKB;
m->msg.select_b3_protocol_req.plci = chan->plci;
memset(&m->msg.select_b3_protocol_req.ncpd, 0,
sizeof(m->msg.select_b3_protocol_req.ncpd));
switch (chan->l3prot) {
case ISDN_PROTO_L3_TRANS:
m->msg.select_b3_protocol_req.protocol = 0x04;
m->msg.select_b3_protocol_req.ncpd.len = 13;
m->msg.select_b3_protocol_req.ncpd.modulo = 8;
break;
}
ACTCAPI_QUEUE_TX;
}
static void
actcapi_listen_b3_req(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(2, 0x81, 0x00);
ACTCAPI_CHKSKB;
m->msg.listen_b3_req.plci = chan->plci;
ACTCAPI_QUEUE_TX;
}
static void
actcapi_disconnect_req(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(3, 0x04, 0x00);
ACTCAPI_CHKSKB;
m->msg.disconnect_req.plci = chan->plci;
m->msg.disconnect_req.cause = 0;
ACTCAPI_QUEUE_TX;
}
void
actcapi_disconnect_b3_req(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(17, 0x84, 0x00);
ACTCAPI_CHKSKB;
m->msg.disconnect_b3_req.ncci = chan->ncci;
memset(&m->msg.disconnect_b3_req.ncpi, 0,
sizeof(m->msg.disconnect_b3_req.ncpi));
m->msg.disconnect_b3_req.ncpi.len = 13;
m->msg.disconnect_b3_req.ncpi.modulo = 8;
chan->fsm_state = ACT2000_STATE_BHWAIT;
ACTCAPI_QUEUE_TX;
}
void
actcapi_connect_resp(act2000_card *card, act2000_chan *chan, __u8 cause)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(3, 0x02, 0x03);
ACTCAPI_CHKSKB;
m->msg.connect_resp.plci = chan->plci;
m->msg.connect_resp.rejectcause = cause;
if (cause) {
chan->fsm_state = ACT2000_STATE_NULL;
chan->plci = 0x8000;
} else
chan->fsm_state = ACT2000_STATE_IWAIT;
ACTCAPI_QUEUE_TX;
}
static void
actcapi_connect_active_resp(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(2, 0x03, 0x03);
ACTCAPI_CHKSKB;
m->msg.connect_resp.plci = chan->plci;
if (chan->fsm_state == ACT2000_STATE_IWAIT)
chan->fsm_state = ACT2000_STATE_IBWAIT;
ACTCAPI_QUEUE_TX;
}
static void
actcapi_connect_b3_resp(act2000_card *card, act2000_chan *chan, __u8 rejectcause)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR((rejectcause?3:17), 0x82, 0x03);
ACTCAPI_CHKSKB;
m->msg.connect_b3_resp.ncci = chan->ncci;
m->msg.connect_b3_resp.rejectcause = rejectcause;
if (!rejectcause) {
memset(&m->msg.connect_b3_resp.ncpi, 0,
sizeof(m->msg.connect_b3_resp.ncpi));
m->msg.connect_b3_resp.ncpi.len = 13;
m->msg.connect_b3_resp.ncpi.modulo = 8;
chan->fsm_state = ACT2000_STATE_BWAIT;
}
ACTCAPI_QUEUE_TX;
}
static void
actcapi_connect_b3_active_resp(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(2, 0x83, 0x03);
ACTCAPI_CHKSKB;
m->msg.connect_b3_active_resp.ncci = chan->ncci;
chan->fsm_state = ACT2000_STATE_ACTIVE;
ACTCAPI_QUEUE_TX;
}
static void
actcapi_info_resp(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(2, 0x07, 0x03);
ACTCAPI_CHKSKB;
m->msg.info_resp.plci = chan->plci;
ACTCAPI_QUEUE_TX;
}
static void
actcapi_disconnect_b3_resp(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(2, 0x84, 0x03);
ACTCAPI_CHKSKB;
m->msg.disconnect_b3_resp.ncci = chan->ncci;
chan->ncci = 0x8000;
chan->queued = 0;
ACTCAPI_QUEUE_TX;
}
static void
actcapi_disconnect_resp(act2000_card *card, act2000_chan *chan)
{
actcapi_msg *m;
struct sk_buff *skb;
ACTCAPI_MKHDR(2, 0x04, 0x03);
ACTCAPI_CHKSKB;
m->msg.disconnect_resp.plci = chan->plci;
chan->plci = 0x8000;
ACTCAPI_QUEUE_TX;
}
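/* Claim a free B-channel slot (plci == 0x8000 means unused) for a new PLCI; returns its index or -1 if all channels are busy. */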
static int
new_plci(act2000_card *card, __u16 plci)
{
int i;
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].plci == 0x8000) {
card->bch[i].plci = plci;
return i;
}
return -1;
}
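/* Find the B-channel index that owns the given PLCI, or -1 if none. */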
static int
find_plci(act2000_card *card, __u16 plci)
{
int i;
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].plci == plci)
return i;
return -1;
}
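/* Find the B-channel index that owns the given NCCI, or -1 if none. */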
static int
find_ncci(act2000_card *card, __u16 ncci)
{
int i;
for (i = 0; i < ACT2000_BCH; i++)
if (card->bch[i].ncci == ncci)
return i;
return -1;
}
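/* Find the channel currently dialing out (state ACT2000_STATE_OCALL) with the given call reference, or -1 if none. */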
static int
find_dialing(act2000_card *card, __u16 callref)
{
int i;
for (i = 0; i < ACT2000_BCH; i++)
if ((card->bch[i].callref == callref) &&
(card->bch[i].fsm_state == ACT2000_STATE_OCALL))
return i;
return -1;
}
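/* Handle an incoming DATA_B3_IND: pass the payload up to the ISDN layer and queue a DATA_B3_RESP back to the card. */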
static int
actcapi_data_b3_ind(act2000_card *card, struct sk_buff *skb) {
__u16 plci;
__u16 ncci;
__u16 controller;
__u8 blocknr;
int chan;
actcapi_msg *msg = (actcapi_msg *)skb->data;
EVAL_NCCI(msg->msg.data_b3_ind.fakencci, plci, controller, ncci);
chan = find_ncci(card, ncci);
if (chan < 0)
return 0;
if (card->bch[chan].fsm_state != ACT2000_STATE_ACTIVE)
return 0;
if (card->bch[chan].plci != plci)
return 0;
blocknr = msg->msg.data_b3_ind.blocknr;
skb_pull(skb, 19);
card->interface.rcvcallb_skb(card->myid, chan, skb);
if (!(skb = alloc_skb(11, GFP_ATOMIC))) {
printk(KERN_WARNING "actcapi: alloc_skb failed\n");
return 1;
}
msg = (actcapi_msg *)skb_put(skb, 11);
msg->hdr.len = 11;
msg->hdr.applicationID = 1;
msg->hdr.cmd.cmd = 0x86;
msg->hdr.cmd.subcmd = 0x03;
msg->hdr.msgnum = actcapi_nextsmsg(card);
msg->msg.data_b3_resp.ncci = ncci;
msg->msg.data_b3_resp.blocknr = blocknr;
ACTCAPI_QUEUE_TX;
return 1;
}
/*
* Walk over ackq, unlink DATA_B3_REQ from it, if
* ncci and blocknr are matching.
* Decrement queued-bytes counter.
*/
static int
handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
unsigned long flags;
struct sk_buff *skb;
struct sk_buff *tmp;
struct actcapi_msg *m;
int ret = 0;
save_flags(flags);
cli();
skb = skb_peek(&card->ackq);
restore_flags(flags);
if (!skb) {
printk(KERN_WARNING "act2000: handle_ack nothing found!\n");
return 0;
}
tmp = skb;
while (1) {
m = (actcapi_msg *)tmp->data;
if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) &&
(m->msg.data_b3_req.blocknr == blocknr)) {
/* found corresponding DATA_B3_REQ */
skb_unlink(tmp);
chan->queued -= m->msg.data_b3_req.datalen;
if (m->msg.data_b3_req.flags)
ret = m->msg.data_b3_req.datalen;
dev_kfree_skb(tmp);
if (chan->queued < 0)
chan->queued = 0;
return ret;
}
save_flags(flags);
cli();
tmp = skb_peek((struct sk_buff_head *)tmp);
restore_flags(flags);
if ((tmp == skb) || (tmp == NULL)) {
/* reached end of queue */
printk(KERN_WARNING "act2000: handle_ack nothing found!\n");
return 0;
}
}
}
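/* Dispatch all queued incoming CAPI messages and drive the per-channel state machines. */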
void
actcapi_dispatch(act2000_card *card)
{
struct sk_buff *skb;
actcapi_msg *msg;
__u16 ccmd;
int chan;
int len;
act2000_chan *ctmp;
isdn_ctrl cmd;
char tmp[170];
while ((skb = skb_dequeue(&card->rcvq))) {
actcapi_debug_msg(skb, 0);
msg = (actcapi_msg *)skb->data;
ccmd = ((msg->hdr.cmd.cmd << 8) | msg->hdr.cmd.subcmd);
switch (ccmd) {
case 0x8602:
/* DATA_B3_IND */
if (actcapi_data_b3_ind(card, skb))
return;
break;
case 0x8601:
/* DATA_B3_CONF */
chan = find_ncci(card, msg->msg.data_b3_conf.ncci);
if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_ACTIVE)) {
if (msg->msg.data_b3_conf.info != 0)
printk(KERN_WARNING "act2000: DATA_B3_CONF: %04x\n",
msg->msg.data_b3_conf.info);
len = handle_ack(card, &card->bch[chan],
msg->msg.data_b3_conf.blocknr);
if (len) {
cmd.driver = card->myid;
cmd.command = ISDN_STAT_BSENT;
cmd.arg = chan;
cmd.parm.length = len;
card->interface.statcallb(&cmd);
}
}
break;
case 0x0201:
/* CONNECT_CONF */
chan = find_dialing(card, msg->hdr.msgnum);
if (chan >= 0) {
if (msg->msg.connect_conf.info) {
card->bch[chan].fsm_state = ACT2000_STATE_NULL;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
} else {
card->bch[chan].fsm_state = ACT2000_STATE_OWAIT;
card->bch[chan].plci = msg->msg.connect_conf.plci;
}
}
break;
case 0x0202:
/* CONNECT_IND */
chan = new_plci(card, msg->msg.connect_ind.plci);
if (chan < 0) {
ctmp = (act2000_chan *)tmp;
ctmp->plci = msg->msg.connect_ind.plci;
				actcapi_connect_resp(card, ctmp, 0x11); /* All card channels busy */
} else {
card->bch[chan].fsm_state = ACT2000_STATE_ICALL;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_ICALL;
cmd.arg = chan;
cmd.parm.setup.si1 = msg->msg.connect_ind.si1;
cmd.parm.setup.si2 = msg->msg.connect_ind.si2;
if (card->ptype == ISDN_PTYPE_EURO)
strcpy(cmd.parm.setup.eazmsn,
act2000_find_eaz(card, msg->msg.connect_ind.eaz));
else {
cmd.parm.setup.eazmsn[0] = msg->msg.connect_ind.eaz;
cmd.parm.setup.eazmsn[1] = 0;
}
memset(cmd.parm.setup.phone, 0, sizeof(cmd.parm.setup.phone));
memcpy(cmd.parm.setup.phone, msg->msg.connect_ind.addr.num,
msg->msg.connect_ind.addr.len - 1);
cmd.parm.setup.plan = msg->msg.connect_ind.addr.tnp;
cmd.parm.setup.screen = 0;
if (card->interface.statcallb(&cmd) == 2)
actcapi_connect_resp(card, &card->bch[chan], 0x15); /* Reject Call */
}
break;
case 0x0302:
/* CONNECT_ACTIVE_IND */
chan = find_plci(card, msg->msg.connect_active_ind.plci);
if (chan >= 0)
switch (card->bch[chan].fsm_state) {
case ACT2000_STATE_IWAIT:
actcapi_connect_active_resp(card, &card->bch[chan]);
break;
case ACT2000_STATE_OWAIT:
actcapi_connect_active_resp(card, &card->bch[chan]);
actcapi_select_b2_protocol_req(card, &card->bch[chan]);
break;
}
break;
case 0x8202:
/* CONNECT_B3_IND */
chan = find_plci(card, msg->msg.connect_b3_ind.plci);
if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_IBWAIT)) {
card->bch[chan].ncci = msg->msg.connect_b3_ind.ncci;
actcapi_connect_b3_resp(card, &card->bch[chan], 0);
} else {
ctmp = (act2000_chan *)tmp;
ctmp->ncci = msg->msg.connect_b3_ind.ncci;
				actcapi_connect_b3_resp(card, ctmp, 0x11); /* All card channels busy */
}
break;
case 0x8302:
/* CONNECT_B3_ACTIVE_IND */
chan = find_ncci(card, msg->msg.connect_b3_active_ind.ncci);
if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_BWAIT)) {
actcapi_connect_b3_active_resp(card, &card->bch[chan]);
cmd.driver = card->myid;
cmd.command = ISDN_STAT_BCONN;
cmd.arg = chan;
card->interface.statcallb(&cmd);
}
break;
case 0x8402:
/* DISCONNECT_B3_IND */
chan = find_ncci(card, msg->msg.disconnect_b3_ind.ncci);
if (chan >= 0) {
ctmp = &card->bch[chan];
actcapi_disconnect_b3_resp(card, ctmp);
switch (ctmp->fsm_state) {
case ACT2000_STATE_ACTIVE:
ctmp->fsm_state = ACT2000_STATE_DHWAIT2;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_BHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
break;
case ACT2000_STATE_BHWAIT2:
actcapi_disconnect_req(card, ctmp);
ctmp->fsm_state = ACT2000_STATE_DHWAIT;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_BHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
break;
}
}
break;
case 0x0402:
/* DISCONNECT_IND */
chan = find_plci(card, msg->msg.disconnect_ind.plci);
if (chan >= 0) {
ctmp = &card->bch[chan];
actcapi_disconnect_resp(card, ctmp);
ctmp->fsm_state = ACT2000_STATE_NULL;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
} else {
ctmp = (act2000_chan *)tmp;
ctmp->plci = msg->msg.disconnect_ind.plci;
actcapi_disconnect_resp(card, ctmp);
}
break;
case 0x4001:
/* SELECT_B2_PROTOCOL_CONF */
chan = find_plci(card, msg->msg.select_b2_protocol_conf.plci);
if (chan >= 0)
switch (card->bch[chan].fsm_state) {
case ACT2000_STATE_ICALL:
case ACT2000_STATE_OWAIT:
ctmp = &card->bch[chan];
if (msg->msg.select_b2_protocol_conf.info == 0)
actcapi_select_b3_protocol_req(card, ctmp);
else {
ctmp->fsm_state = ACT2000_STATE_NULL;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
}
break;
}
break;
case 0x8001:
/* SELECT_B3_PROTOCOL_CONF */
chan = find_plci(card, msg->msg.select_b3_protocol_conf.plci);
if (chan >= 0)
switch (card->bch[chan].fsm_state) {
case ACT2000_STATE_ICALL:
case ACT2000_STATE_OWAIT:
ctmp = &card->bch[chan];
if (msg->msg.select_b3_protocol_conf.info == 0)
actcapi_listen_b3_req(card, ctmp);
else {
ctmp->fsm_state = ACT2000_STATE_NULL;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
}
}
break;
case 0x8101:
/* LISTEN_B3_CONF */
chan = find_plci(card, msg->msg.listen_b3_conf.plci);
if (chan >= 0)
switch (card->bch[chan].fsm_state) {
case ACT2000_STATE_ICALL:
ctmp = &card->bch[chan];
if (msg->msg.listen_b3_conf.info == 0)
actcapi_connect_resp(card, ctmp, 0);
else {
ctmp->fsm_state = ACT2000_STATE_NULL;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
}
break;
case ACT2000_STATE_OWAIT:
ctmp = &card->bch[chan];
if (msg->msg.listen_b3_conf.info == 0) {
actcapi_connect_b3_req(card, ctmp);
ctmp->fsm_state = ACT2000_STATE_OBWAIT;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DCONN;
cmd.arg = chan;
card->interface.statcallb(&cmd);
} else {
ctmp->fsm_state = ACT2000_STATE_NULL;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
}
break;
}
break;
case 0x8201:
/* CONNECT_B3_CONF */
chan = find_plci(card, msg->msg.connect_b3_conf.plci);
if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_OBWAIT)) {
ctmp = &card->bch[chan];
if (msg->msg.connect_b3_conf.info) {
ctmp->fsm_state = ACT2000_STATE_NULL;
cmd.driver = card->myid;
cmd.command = ISDN_STAT_DHUP;
cmd.arg = chan;
card->interface.statcallb(&cmd);
} else {
ctmp->ncci = msg->msg.connect_b3_conf.ncci;
ctmp->fsm_state = ACT2000_STATE_BWAIT;
}
}
break;
case 0x8401:
/* DISCONNECT_B3_CONF */
chan = find_ncci(card, msg->msg.disconnect_b3_conf.ncci);
if ((chan >= 0) && (card->bch[chan].fsm_state == ACT2000_STATE_BHWAIT))
card->bch[chan].fsm_state = ACT2000_STATE_BHWAIT2;
break;
case 0x0702:
/* INFO_IND */
chan = find_plci(card, msg->msg.info_ind.plci);
if (chan >= 0)
/* TODO: Eval Charging info / cause */
actcapi_info_resp(card, &card->bch[chan]);
break;
case 0x0401:
			/* DISCONNECT_CONF */
case 0x0501:
/* LISTEN_CONF */
case 0xff01:
/* MANUFACTURER_CONF */
break;
case 0xff02:
/* MANUFACTURER_IND */
if (msg->msg.manuf_msg == 3) {
memset(tmp, 0, sizeof(tmp));
strncpy(tmp,
&msg->msg.manufacturer_ind_err.errstring,
msg->hdr.len - 16);
if (msg->msg.manufacturer_ind_err.errcode)
printk(KERN_WARNING "act2000: %s\n", tmp);
else {
printk(KERN_DEBUG "act2000: %s\n", tmp);
if ((!strncmp(tmp, "INFO: Trace buffer con", 22)) ||
(!strncmp(tmp, "INFO: Compile Date/Tim", 22))) {
card->flags |= ACT2000_FLAGS_RUNNING;
cmd.command = ISDN_STAT_RUN;
cmd.driver = card->myid;
cmd.arg = 0;
actcapi_manufacturer_req_net(card);
actcapi_manufacturer_req_msn(card);
actcapi_listen_req(card);
card->interface.statcallb(&cmd);
}
}
}
break;
default:
printk(KERN_WARNING "act2000: UNHANDLED Message %04x\n", ccmd);
break;
}
dev_kfree_skb(skb);
}
}
#ifdef DEBUG_MSG
static void
actcapi_debug_caddr(actcapi_addr *addr)
{
char tmp[30];
printk(KERN_DEBUG " Alen = %d\n", addr->len);
if (addr->len > 0)
printk(KERN_DEBUG " Atnp = 0x%02x\n", addr->tnp);
if (addr->len > 1) {
memset(tmp, 0, 30);
memcpy(tmp, addr->num, addr->len - 1);
printk(KERN_DEBUG " Anum = '%s'\n", tmp);
}
}
static void
actcapi_debug_ncpi(actcapi_ncpi *ncpi)
{
printk(KERN_DEBUG " ncpi.len = %d\n", ncpi->len);
if (ncpi->len >= 2)
printk(KERN_DEBUG " ncpi.lic = 0x%04x\n", ncpi->lic);
if (ncpi->len >= 4)
printk(KERN_DEBUG " ncpi.hic = 0x%04x\n", ncpi->hic);
if (ncpi->len >= 6)
printk(KERN_DEBUG " ncpi.ltc = 0x%04x\n", ncpi->ltc);
if (ncpi->len >= 8)
printk(KERN_DEBUG " ncpi.htc = 0x%04x\n", ncpi->htc);
if (ncpi->len >= 10)
printk(KERN_DEBUG " ncpi.loc = 0x%04x\n", ncpi->loc);
if (ncpi->len >= 12)
printk(KERN_DEBUG " ncpi.hoc = 0x%04x\n", ncpi->hoc);
if (ncpi->len >= 13)
printk(KERN_DEBUG " ncpi.mod = %d\n", ncpi->modulo);
}
static void
actcapi_debug_dlpd(actcapi_dlpd *dlpd)
{
printk(KERN_DEBUG " dlpd.len = %d\n", dlpd->len);
if (dlpd->len >= 2)
printk(KERN_DEBUG " dlpd.dlen = 0x%04x\n", dlpd->dlen);
if (dlpd->len >= 3)
printk(KERN_DEBUG " dlpd.laa = 0x%02x\n", dlpd->laa);
if (dlpd->len >= 4)
printk(KERN_DEBUG " dlpd.lab = 0x%02x\n", dlpd->lab);
if (dlpd->len >= 5)
printk(KERN_DEBUG " dlpd.modulo = %d\n", dlpd->modulo);
if (dlpd->len >= 6)
printk(KERN_DEBUG " dlpd.win = %d\n", dlpd->win);
}
#ifdef DEBUG_DUMP_SKB
static void dump_skb(struct sk_buff *skb) {
char tmp[80];
char *p = skb->data;
char *t = tmp;
int i;
for (i = 0; i < skb->len; i++) {
t += sprintf(t, "%02x ", *p++ & 0xff);
if ((i & 0x0f) == 8) {
printk(KERN_DEBUG "dump: %s\n", tmp);
t = tmp;
}
}
if (i & 0x07)
printk(KERN_DEBUG "dump: %s\n", tmp);
}
#endif
void
actcapi_debug_msg(struct sk_buff *skb, int direction)
{
actcapi_msg *msg = (actcapi_msg *)skb->data;
char *descr;
int i;
char tmp[170];
#ifndef DEBUG_DATA_MSG
if (msg->hdr.cmd.cmd == 0x86)
return;
#endif
descr = "INVALID";
#ifdef DEBUG_DUMP_SKB
dump_skb(skb);
#endif
for (i = 0; i < num_valid_msg; i++)
if ((msg->hdr.cmd.cmd == valid_msg[i].cmd.cmd) &&
(msg->hdr.cmd.subcmd == valid_msg[i].cmd.subcmd)) {
descr = valid_msg[i].description;
break;
}
printk(KERN_DEBUG "%s %s msg\n", direction?"Outgoing":"Incoming", descr);
printk(KERN_DEBUG " ApplID = %d\n", msg->hdr.applicationID);
printk(KERN_DEBUG " Len = %d\n", msg->hdr.len);
printk(KERN_DEBUG " MsgNum = 0x%04x\n", msg->hdr.msgnum);
printk(KERN_DEBUG " Cmd = 0x%02x\n", msg->hdr.cmd.cmd);
printk(KERN_DEBUG " SubCmd = 0x%02x\n", msg->hdr.cmd.subcmd);
switch (i) {
case 0:
/* DATA B3 IND */
printk(KERN_DEBUG " BLOCK = 0x%02x\n",
msg->msg.data_b3_ind.blocknr);
break;
case 2:
/* CONNECT CONF */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.connect_conf.plci);
printk(KERN_DEBUG " Info = 0x%04x\n",
msg->msg.connect_conf.info);
break;
case 3:
/* CONNECT IND */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.connect_ind.plci);
printk(KERN_DEBUG " Contr = %d\n",
msg->msg.connect_ind.controller);
printk(KERN_DEBUG " SI1 = %d\n",
msg->msg.connect_ind.si1);
printk(KERN_DEBUG " SI2 = %d\n",
msg->msg.connect_ind.si2);
printk(KERN_DEBUG " EAZ = '%c'\n",
msg->msg.connect_ind.eaz);
actcapi_debug_caddr(&msg->msg.connect_ind.addr);
break;
case 5:
/* CONNECT ACTIVE IND */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.connect_active_ind.plci);
actcapi_debug_caddr(&msg->msg.connect_active_ind.addr);
break;
case 8:
/* LISTEN CONF */
printk(KERN_DEBUG " Contr = %d\n",
msg->msg.listen_conf.controller);
printk(KERN_DEBUG " Info = 0x%04x\n",
msg->msg.listen_conf.info);
break;
case 11:
/* INFO IND */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.info_ind.plci);
printk(KERN_DEBUG " Imsk = 0x%04x\n",
msg->msg.info_ind.nr.mask);
if (msg->hdr.len > 12) {
int l = msg->hdr.len - 12;
int j;
char *p = tmp;
for (j = 0; j < l ; j++)
p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]);
printk(KERN_DEBUG " D = '%s'\n", tmp);
}
break;
case 14:
/* SELECT B2 PROTOCOL CONF */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.select_b2_protocol_conf.plci);
printk(KERN_DEBUG " Info = 0x%04x\n",
msg->msg.select_b2_protocol_conf.info);
break;
case 15:
/* SELECT B3 PROTOCOL CONF */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.select_b3_protocol_conf.plci);
printk(KERN_DEBUG " Info = 0x%04x\n",
msg->msg.select_b3_protocol_conf.info);
break;
case 16:
/* LISTEN B3 CONF */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.listen_b3_conf.plci);
printk(KERN_DEBUG " Info = 0x%04x\n",
msg->msg.listen_b3_conf.info);
break;
case 18:
/* CONNECT B3 IND */
printk(KERN_DEBUG " NCCI = 0x%04x\n",
msg->msg.connect_b3_ind.ncci);
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.connect_b3_ind.plci);
actcapi_debug_ncpi(&msg->msg.connect_b3_ind.ncpi);
break;
case 19:
/* CONNECT B3 ACTIVE IND */
printk(KERN_DEBUG " NCCI = 0x%04x\n",
msg->msg.connect_b3_active_ind.ncci);
actcapi_debug_ncpi(&msg->msg.connect_b3_active_ind.ncpi);
break;
case 26:
/* MANUFACTURER IND */
printk(KERN_DEBUG " Mmsg = 0x%02x\n",
msg->msg.manufacturer_ind_err.manuf_msg);
switch (msg->msg.manufacturer_ind_err.manuf_msg) {
case 3:
printk(KERN_DEBUG " Contr = %d\n",
msg->msg.manufacturer_ind_err.controller);
printk(KERN_DEBUG " Code = 0x%08x\n",
msg->msg.manufacturer_ind_err.errcode);
memset(tmp, 0, sizeof(tmp));
strncpy(tmp, &msg->msg.manufacturer_ind_err.errstring,
msg->hdr.len - 16);
printk(KERN_DEBUG " Emsg = '%s'\n", tmp);
break;
}
break;
case 30:
/* LISTEN REQ */
printk(KERN_DEBUG " Imsk = 0x%08x\n",
msg->msg.listen_req.infomask);
printk(KERN_DEBUG " Emsk = 0x%04x\n",
msg->msg.listen_req.eazmask);
printk(KERN_DEBUG " Smsk = 0x%04x\n",
msg->msg.listen_req.simask);
break;
case 35:
/* SELECT_B2_PROTOCOL_REQ */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.select_b2_protocol_req.plci);
printk(KERN_DEBUG " prot = 0x%02x\n",
msg->msg.select_b2_protocol_req.protocol);
if (msg->hdr.len >= 11)
printk(KERN_DEBUG "No dlpd\n");
else
actcapi_debug_dlpd(&msg->msg.select_b2_protocol_req.dlpd);
break;
case 44:
/* CONNECT RESP */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.connect_resp.plci);
printk(KERN_DEBUG " CAUSE = 0x%02x\n",
msg->msg.connect_resp.rejectcause);
break;
case 45:
/* CONNECT ACTIVE RESP */
printk(KERN_DEBUG " PLCI = 0x%04x\n",
msg->msg.connect_active_resp.plci);
break;
}
}
#endif
| dduval/kernel-rhel3 | drivers/isdn/act2000/capi.c | C | gpl-2.0 | 32,843 |
<?php
$currency_sql="INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (1, '" . __( 'Mauritania', 'wpsc' ) ."', 'MR', '" . __('Mauritanian Ouguiya', 'wpsc') ."', '', '', '" . __('MRO', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (2, '" . __( 'Martinique (French)', 'wpsc' ) ."', 'MQ', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (3, '" . __( 'Malta', 'wpsc' ) ."', 'MT', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0' ,'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (4, '" . __( 'Marshall Islands', 'wpsc' ) ."', 'MH', '" . __('US Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0' ,'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (5, '" . __( 'Mali', 'wpsc' ) ."', 'ML', '" . __('CFA Franc BCEAO', 'wpsc') ."', '', '', '" . __('XOF', 'wpsc') . "', '0', '0' ,'africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (6, '" . __( 'Maldives', 'wpsc' ) ."', 'MV', '" . __('Maldive Rufiyaa', 'wpsc') ."', '', '', '" . __('MVR', 'wpsc') . "', '0', '0' , 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (7, '" . __( 'Malaysia', 'wpsc' ) ."', 'MY', '" . __('Malaysian Ringgit', 'wpsc') ."', '', '', '" . __('MYR', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (8, '" . __( 'Malawi', 'wpsc' ) ."', 'MW', '" . __('Malawi Kwacha', 'wpsc') ."', '', '', '" . __('MWK', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (9, '" . __( 'Madagascar', 'wpsc' ) ."', 'MG', '" . __('Malagasy Ariary', 'wpsc') ."', '', '', '" . __('MGA', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (10, '" . __( 'Macau', 'wpsc' ) ."', 'MO', '" . __('Macau Pataca', 'wpsc') ."', '', '', '" . __('MOP', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (11, '" . __( 'Macedonia', 'wpsc' ) ."', 'MK', '" . __('Denar', 'wpsc') ."', '', '', '" . __('MKD', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (12, '" . __( 'Luxembourg', 'wpsc' ) ."', 'LU', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (13, '" . __( 'Lithuania', 'wpsc' ) ."', 'LT', '" . __('Lithuanian Litas', 'wpsc') ."', '', '', '" . __('LTL', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (14, '" . __( 'Liechtenstein', 'wpsc' ) ."', 'LI', '" . __('Swiss Franc', 'wpsc') ."', '', '', '" . __('CHF', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (15, '" . __( 'Libya', 'wpsc' ) ."', 'LY', '" . __('Libyan Dinar', 'wpsc') ."', '', '', '" . __('LYD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (16, '" . __( 'Liberia', 'wpsc' ) ."', 'LR', '" . __('Liberian Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('LRD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (17, '" . __( 'Lesotho', 'wpsc' ) ."', 'LS', '" . __('Lesotho Loti', 'wpsc') ."', '', '', '" . __('LSL', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (18, '" . __( 'Lebanon', 'wpsc' ) ."', 'LB', '" . __('Lebanese Pound', 'wpsc') ."', '', '', '" . __('LBP', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (19, '" . __( 'Latvia', 'wpsc' ) ."', 'LV', '" . __('Latvian Lats', 'wpsc') ."', '', '', '" . __('LVL', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (20, '" . __( 'Laos', 'wpsc' ) ."', 'LA', '" . __('Lao Kip', 'wpsc') ."', '', '', '" . __('LAK', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (21, '" . __( 'Kyrgyzstan', 'wpsc' ) ."', 'KG', '" . __('Som', 'wpsc') ."', '', '', '" . __('KGS', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (22, '" . __( 'Kuwait', 'wpsc' ) ."', 'KW', '" . __('Kuwaiti Dinar', 'wpsc') ."', '', '', '" . __('KWD', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (23, '" . __( 'Korea, South', 'wpsc' ) ."', 'KR', '" . __('Korean Won', 'wpsc') ."', '', '', '" . __('KRW', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (24, '" . __( 'Korea, North', 'wpsc' ) ."', 'KP', '" . __('North Korean Won', 'wpsc') ."', '', '', '" . __('KPW', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (25, '" . __( 'Kiribati', 'wpsc' ) ."', 'KI', '" . __('Australian Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('AUD', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (26, '" . __( 'Kenya', 'wpsc' ) ."', 'KE', '" . __('Kenyan Shilling', 'wpsc') ."', '', '', '" . __('KES', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (27, '" . __( 'Kazakhstan', 'wpsc' ) ."', 'KZ', '" . __('Kazakhstan Tenge', 'wpsc') ."', '', '', '" . __('KZT', 'wpsc') . "', '0', '0' ,'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (28, '" . __( 'Jordan', 'wpsc' ) ."', 'JO', '" . __('Jordanian Dinar', 'wpsc') ."', '', '', '" . __('JOD', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (29, '" . __( 'Jersey', 'wpsc' ) ."', 'JE', '" . __('Pound Sterling', 'wpsc') ."', '" . __('£', 'wpsc') . "', '" . __('£', 'wpsc') . "', '" . __('GBP', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (30, '" . __( 'Japan', 'wpsc' ) ."', 'JP', '" . __('Japanese Yen', 'wpsc') ."', '" . __('¥', 'wpsc') . "', '" . __('¥', 'wpsc') . "', '" . __('JPY', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (31, '" . __( 'Jamaica', 'wpsc' ) ."', 'JM', '" . __('Jamaican Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('JMD', 'wpsc') . "', '0', '0' ,'southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (32, '" . __( 'Ivory Coast', 'wpsc' ) ."', 'CI', '" . __('CFA Franc BCEAO', 'wpsc') ."', '', '', '" . __('XOF', 'wpsc') . "', '0', '0' ,'africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (33, '" . __( 'Italy', 'wpsc' ) ."', 'IT', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (34, '" . __( 'Isle of Man', 'wpsc' ) ."', 'IM', '" . __('Pound Sterling', 'wpsc') ."', '" . __('£', 'wpsc') . "', '" . __('£', 'wpsc') . "', '" . __('GBP', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (35, '" . __( 'Israel', 'wpsc' ) ."', 'IL', '" . __('Israeli New Shekel', 'wpsc') ."', '" . __( '₪', 'wpsc' ) . "', '" . __( '₪', 'wpsc' ) . "', '" . __('ILS', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (36, '" . __( 'Ireland', 'wpsc' ) ."', 'IE', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (37, '" . __( 'Iraq', 'wpsc' ) ."', 'IQ', '" . __('Iraqi Dinar', 'wpsc') ."', '', '', '" . __('IQD', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (38, '" . __( 'Indonesia', 'wpsc' ) ."', 'ID', '" . __('Indonesian Rupiah', 'wpsc') ."', '', '', '" . __('IDR', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (39, '" . __( 'Iran', 'wpsc' ) ."', 'IR', '" . __('Iranian Rial', 'wpsc') ."', '', '', '" . __('IRR', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (40, '" . __( 'India', 'wpsc' ) ."', 'IN', '" . __('Indian Rupee', 'wpsc') ."', '', '', '" . __('INR', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (41, '" . __( 'Iceland', 'wpsc' ) ."', 'IS', '" . __('Iceland Krona', 'wpsc') ."', '', '', '" . __('ISK', 'wpsc') . "', '0', '0', 'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (42, '" . __( 'Hungary', 'wpsc' ) ."', 'HU', '" . __('Hungarian Forint', 'wpsc') ."', '', '', '" . __('HUF', 'wpsc') . "', '0', '0', 'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (43, '" . __( 'Hong Kong', 'wpsc' ) ."', 'HK', '" . __('Hong Kong Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('HKD', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (44, '" . __( 'Honduras', 'wpsc' ) ."', 'HN', '" . __('Honduran Lempira', 'wpsc') ."', '', '', '" . __('HNL', 'wpsc') . "', '0', '0' , 'southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (45, '" . __( 'Heard Island and McDonald Islands', 'wpsc' ) ."', 'HM', '" . __('Australian Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('AUD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (46, '" . __( 'Haiti', 'wpsc' ) ."', 'HT', '" . __('Haitian Gourde', 'wpsc') ."', '', '', '" . __('HTG', 'wpsc') . "', '0', '0', 'southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (47, '" . __( 'Guyana', 'wpsc' ) ."', 'GY', '" . __('Guyana Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('GYD', 'wpsc') . "', '0', '0', 'southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (48, '" . __( 'Guinea Bissau', 'wpsc' ) ."', 'GW', '" . __('CFA Franc BEAC', 'wpsc') ."', '', '', '" . __('XAF', 'wpsc') . "', '0', '0' ,'africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (49, '" . __( 'Guinea', 'wpsc' ) ."', 'GN', '" . __('Guinea Franc', 'wpsc') ."', '', '', '" . __('GNF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (50, '" . __( 'Guernsey', 'wpsc' ) ."', 'GG', '" . __('Pound Sterling', 'wpsc') ."', '" . __('£', 'wpsc') . "', '" . __('£', 'wpsc') . "', '" . __('GBP', 'wpsc') . "', '0', '0' ,'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (51, '" . __( 'Guatemala', 'wpsc' ) ."', 'GT', '" . __('Guatemalan Quetzal', 'wpsc') ."', '', '', '" . __('GTQ', 'wpsc') . "', '0', '0', 'southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (52, '" . __( 'Guam (USA)', 'wpsc' ) ."', 'GU', '" . __('US Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (53, '" . __( 'Grenada', 'wpsc' ) ."', 'GD', '" . __('East Carribean Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('XCD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (54, '" . __( 'Guadeloupe (French)', 'wpsc' ) ."', 'GP', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0', 'southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (55, '" . __( 'Greenland', 'wpsc' ) ."', 'GL', '" . __('Danish Krone', 'wpsc') ."', '', '', '" . __('DKK', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (56, '" . __( 'Greece', 'wpsc' ) ."', 'GR', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '19','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (57, '" . __( 'Gibraltar', 'wpsc' ) ."', 'GI', '" . __('Gibraltar Pound', 'wpsc') ."', '', '', '" . __('GIP', 'wpsc') . "', '0', '0', 'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (58, '" . __( 'Ghana', 'wpsc' ) ."', 'GH', '" . __('Ghanaian Cedi', 'wpsc') ."', '', '', '" . __('GHS', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (59, '" . __( 'Germany', 'wpsc' ) ."', 'DE', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (60, '" . __( 'Georgia', 'wpsc' ) ."', 'GE', '" . __('Georgian Lari', 'wpsc') ."', '', '', '" . __('GEL', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (61, '" . __( 'Gambia', 'wpsc' ) ."', 'GM', '" . __('Gambian Dalasi', 'wpsc') ."', '', '', '" . __('GMD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (62, '" . __( 'Gabon', 'wpsc' ) ."', 'GA', '" . __('CFA Franc BEAC', 'wpsc') ."', '', '', '" . __('XAF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (63, '" . __( 'French Southern Territories', 'wpsc' ) ."', 'TF', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (64, '" . __( 'France', 'wpsc' ) ."', 'FR', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0', 'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (65, '" . __( 'Finland', 'wpsc' ) ."', 'FI', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0', 'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (66, '" . __( 'Fiji', 'wpsc' ) ."', 'FJ', '" . __('Fiji Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('FJD', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (67, '" . __( 'Faroe Islands', 'wpsc' ) ."', 'FO', '" . __('Danish Krone', 'wpsc') ."', '', '', '" . __('DKK', 'wpsc') . "', '0', '0' ,'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (68, '" . __( 'Falkland Islands', 'wpsc' ) ."', 'FK', '" . __('Falkland Islands Pound', 'wpsc') ."', '', '', '" . __('FKP', 'wpsc') . "', '0', '0' ,'southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (69, '" . __( 'Ethiopia', 'wpsc' ) ."', 'ET', '" . __('Ethiopian Birr', 'wpsc') ."', '', '', '" . __('ETB', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (70, '" . __( 'Estonia', 'wpsc' ) ."', 'EE', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0', 'europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (71, '" . __( 'Eritrea', 'wpsc' ) ."', 'ER', '" . __('Eritrean Nakfa', 'wpsc') ."', '', '', '" . __('ERN', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (72, '" . __( 'Equatorial Guinea', 'wpsc' ) ."', 'GQ', '" . __('CFA Franc BEAC', 'wpsc') ."', '', '', '" . __('XAF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (73, '" . __( 'El Salvador', 'wpsc' ) ."', 'SV', '" . __('US Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (74, '" . __( 'Egypt', 'wpsc' ) ."', 'EG', '" . __('Egyptian Pound', 'wpsc') ."', '', '', '" . __('EGP', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (75, '" . __( 'Ecuador', 'wpsc' ) ."', 'EC', '" . __('Ecuador Sucre', 'wpsc') ."', '', '', '" . __('ECS', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (76, '" . __( 'Timor-Leste', 'wpsc' ) ."', 'TL', '" . __('US Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (77, '" . __( 'Dominican Republic', 'wpsc' ) ."', 'DO', '" . __('Dominican Peso', 'wpsc') ."', '', '', '" . __('DOP', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (78, '" . __( 'Dominica', 'wpsc' ) ."', 'DM', '" . __('East Caribbean Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('XCD', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (79, '" . __( 'Djibouti', 'wpsc' ) ."', 'DJ', '" . __('Djibouti Franc', 'wpsc') ."', '', '', '" . __('DJF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (80, '" . __( 'Denmark', 'wpsc' ) ."', 'DK', '" . __('Danish Krone', 'wpsc') ."', '', '', '" . __('DKK', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (81, '" . __( 'Democratic Republic of Congo', 'wpsc' ) ."', 'CD', '" . __('Francs', 'wpsc') ."', '', '', '" . __('CDF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (82, '" . __( 'Czech Rep.', 'wpsc' ) ."', 'CZ', '" . __('Czech Koruna', 'wpsc') ."', '', '', '" . __('CZK', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (83, '" . __( 'Cyprus', 'wpsc' ) ."', 'CY', '" . __('Euro', 'wpsc') ."', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (84, '" . __( 'Cuba', 'wpsc' ) ."', 'CU', '" . __('Cuban Peso', 'wpsc') ."', '', '', '" . __('CUP', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (85, '" . __( 'Croatia', 'wpsc' ) ."', 'HR', '" . __('Croatian Kuna', 'wpsc') ."', '', '', '" . __('HRK', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (86, '" . __( 'Costa Rica', 'wpsc' ) ."', 'CR', '" . __('Costa Rican Colon', 'wpsc') ."', '', '', '" . __('CRC', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (87, '" . __( 'Cook Islands', 'wpsc' ) ."', 'CK', '" . __('New Zealand Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('NZD', 'wpsc') . "', '0', '0' ,'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (88, '" . __( 'Republic of the Congo', 'wpsc' ) ."', 'CG', '" . __('CFA Franc BEAC', 'wpsc') ."', '', '', '" . __('XAF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (89, '" . __( 'Comoros', 'wpsc' ) ."', 'KM', '" . __('Comoros Franc', 'wpsc') ."', '', '', '" . __('KMF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (90, '" . __( 'Colombia', 'wpsc' ) ."', 'CO', '" . __('Colombian Peso', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('COP', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (91, '" . __( 'Cocos (Keeling) Islands', 'wpsc' ) ."', 'CC', '" . __('Australian Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', 'AUD', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (92, '" . __( 'Christmas Island', 'wpsc' ) ."', 'CX', '" . __('Australian Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('AUD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (93, '" . __( 'Chile', 'wpsc' ) ."', 'CL', '" . __('Chilean Peso', 'wpsc') ."', '', '', '" . __('CLP', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (94, '" . __( 'China', 'wpsc' ) ."', 'CN', '" . __('Yuan Renminbi', 'wpsc') ."', '', '', '" . __('CNY', 'wpsc') . "', '0', '0' ,'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (95, '" . __( 'Chad', 'wpsc' ) ."', 'TD', '" . __('CFA Franc BEAC', 'wpsc') ."', '', '', '" . __('XAF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (96, '" . __( 'Central African Republic', 'wpsc' ) ."', 'CF', '" . __('CFA Franc BEAC', 'wpsc') ."', '', '', '" . __('XAF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (97, '" . __( 'Cayman Islands', 'wpsc' ) ."', 'KY', '" . __('Cayman Islands Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('KYD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (98, '" . __( 'Cape Verde', 'wpsc' ) ."', 'CV', '" . __('Cape Verde Escudo', 'wpsc') ."', '', '', '" . __('CVE', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (99, '" . __( 'Cameroon', 'wpsc' ) ."', 'CM', '" . __('CFA Franc BEAC', 'wpsc') ."', '', '', '" . __('XAF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (100, '" . __( 'Canada', 'wpsc' ) ."', 'CA', '" . __('Canadian Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('CAD', 'wpsc') . "', '1', '','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (101, '" . __( 'Cambodia', 'wpsc' ) ."', 'KH', '" . __('Kampuchean Riel', 'wpsc') ."', '', '', '" . __('KHR', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (102, '" . __( 'Burundi', 'wpsc' ) ."', 'BI', '" . __('Burundi Franc', 'wpsc') ."', '', '', '" . __('BIF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (103, '" . __( 'Burkina Faso', 'wpsc' ) ."', 'BF', '" . __('CFA Franc BCEAO', 'wpsc') ."', '', '', '" . __('XOF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (104, '" . __( 'Bulgaria', 'wpsc' ) ."', 'BG', '" . __('Bulgarian Lev', 'wpsc') ."', '', '', '" . __('BGN', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (105, '" . __( 'Brunei Darussalam', 'wpsc' ) ."', 'BN', '" . __('Brunei Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('BND', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (106, '" . __( 'British Indian Ocean Territory', 'wpsc' ) ."', 'IO', '" . __('US Dollar', 'wpsc') ."', '$', '$', 'USD', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (107, '" . __( 'Brazil', 'wpsc' ) ."', 'BR', '" . __('Brazilian Real', 'wpsc') ."', '', '', '" . __('BRL', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (108, '" . __( 'Bouvet Island', 'wpsc' ) ."', 'BV', '" . __('Norwegian Krone', 'wpsc') ."', '', '', '" . __('NOK', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (109, '" . __( 'Botswana', 'wpsc' ) ."', 'BW', '" . __('Botswana Pula', 'wpsc') ."', '', '', '" . __('BWP', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (110, '" . __( 'Bosnia-Herzegovina', 'wpsc' ) ."', 'BA', '" . __('Marka', 'wpsc') ."', '', '', '" . __('BAM', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (111, '" . __( 'Bolivia', 'wpsc' ) ."', 'BO', '" . __('Boliviano', 'wpsc') . "', '', '', '" . __('BOB', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (112, '" . __( 'Bhutan', 'wpsc' ) ."', 'BT', '" . __('Bhutan Ngultrum', 'wpsc') . "', '', '', '" . __('BTN', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (113, '" . __( 'Bermuda', 'wpsc' ) ."', 'BM', '" . __('Bermudian Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('BMD', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (114, '" . __( 'Benin', 'wpsc' ) ."', 'BJ', '" . __('CFA Franc BCEAO', 'wpsc') . "', '', '', '" . __('XOF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (115, '" . __( 'Belize', 'wpsc' ) ."', 'BZ', '" . __('Belize Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('BZD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (116, '" . __( 'Belgium', 'wpsc' ) ."', 'BE', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (117, '" . __( 'Belarus', 'wpsc' ) ."', 'BY', '" . __('Belarussian Ruble', 'wpsc') . "', '', '', '" . __('BYR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (118, '" . __( 'Barbados', 'wpsc' ) ."', 'BB', '" . __('Barbados Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('BBD', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (119, '" . __( 'Bangladesh', 'wpsc' ) ."', 'BD', '" . __('Bangladeshi Taka', 'wpsc') . "', '', '', '" . __('BDT', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (120, '" . __( 'Bahrain', 'wpsc' ) ."', 'BH', '" . __('Bahraini Dinar', 'wpsc') . "', '', '', '" . __('BHD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (121, '" . __( 'Bahamas', 'wpsc' ) ."', 'BS', '" . __('Bahamian Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('BSD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (122, '" . __( 'Azerbaijan', 'wpsc' ) ."', 'AZ', '" . __('Azerbaijani Manat', 'wpsc') . "', '" . _x( 'm', 'azerbaijani manat symbol', 'wpsc' ) . "', '" . _x( 'm', 'azerbaijani manat symbol html', 'wpsc' ) . "', '" . __('AZN', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (123, '" . __( 'Austria', 'wpsc' ) ."', 'AT', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (124, '" . __( 'Aruba', 'wpsc' ) ."', 'AW', '" . __('Aruban Florin', 'wpsc') . "', '" . __( 'Afl.', 'wpsc' ) . "', '" . __( 'Afl.', 'wpsc' ) . "', '" . __('AWG', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (125, '" . __( 'Armenia', 'wpsc' ) ."', 'AM', '" . __('Armenian Dram', 'wpsc') . "', '', '', '" . __('AMD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (126, '" . __( 'Argentina', 'wpsc' ) ."', 'AR', '" . __('Argentine Peso', 'wpsc') . "', '', '', '" . __('ARS', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (127, '" . __( 'Antigua and Barbuda', 'wpsc' ) ."', 'AG', '" . __('East Caribbean Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('XCD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (128, '" . __( 'Antarctica', 'wpsc' ) ."', 'AQ', '" . __('Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('ATA', 'wpsc') . "', '0', '0','antarctica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (129, '" . __( 'Anguilla', 'wpsc' ) ."', 'AI', '" . __('East Caribbean Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('XCD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (130, '" . __( 'Angola', 'wpsc' ) ."', 'AO', '" . __('Angolan Kwanza', 'wpsc') . "', '" . __( 'Kz', 'wpsc' ) . "', '" . __( 'Kz', 'wpsc' ) . "', '" . __('AOA', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (131, '" . __( 'Andorra', 'wpsc' ) ."', 'AD', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (132, '" . __( 'American Samoa', 'wpsc' ) ."', 'AS', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (133, '" . __( 'Algeria', 'wpsc' ) ."', 'DZ', '" . __('Algerian Dinar', 'wpsc') . "', '', '', '" . __('DZD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (134, '" . __( 'Albania', 'wpsc' ) ."', 'AL', '" . __('Albanian Lek', 'wpsc') . "', '', '', '" . __('ALL', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (135, '" . __( 'Afghanistan', 'wpsc' ) ."', 'AF', '" . __('Afghanistan Afghani', 'wpsc') . "', '', '', '" . __('AFA', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (136, '" . __( 'USA', 'wpsc' ) ."', 'US', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '1', '','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (137, '" . __( 'Australia', 'wpsc' ) ."', 'AU', '" . __('Australian Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('AUD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (139, '" . __( 'Mauritius', 'wpsc' ) ."', 'MU', '" . __('Mauritius Rupee', 'wpsc') . "', '', '', '" . __('MUR', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (140, '" . __( 'Mayotte', 'wpsc' ) ."', 'YT', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (141, '" . __( 'Mexico', 'wpsc' ) ."', 'MX', '" . __('Mexican Peso', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('MXN', 'wpsc') . "', '1', '','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (142, '" . __( 'Micronesia', 'wpsc' ) ."', 'FM', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (143, '" . __( 'Moldova', 'wpsc' ) ."', 'MD', '" . __('Moldovan Leu', 'wpsc') . "', '', '', '" . __('MDL', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (144, '" . __( 'Monaco', 'wpsc' ) ."', 'MC', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (145, '" . __( 'Mongolia', 'wpsc' ) ."', 'MN', '" . __('Mongolian Tugrik', 'wpsc') . "', '', '', '" . __('MNT', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (146, '" . __( 'Montserrat', 'wpsc' ) ."', 'MS', '" . __('East Caribbean Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('XCD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (147, '" . __( 'Morocco', 'wpsc' ) ."', 'MA', '" . __('Moroccan Dirham', 'wpsc') . "', '', '', '" . __('MAD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (148, '" . __( 'Mozambique', 'wpsc' ) ."', 'MZ', '" . __('Mozambique Metical', 'wpsc') . "', '', '', '" . __('MZN', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (149, '" . __( 'Myanmar', 'wpsc' ) ."', 'MM', '" . __('Myanmar Kyat', 'wpsc') . "', '', '', '" . __('MMK', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (150, '" . __( 'Namibia', 'wpsc' ) ."', 'NA', '" . __('Namibian Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('NAD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (151, '" . __( 'Nauru', 'wpsc' ) ."', 'NR', '" . __('Australian Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('AUD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (152, '" . __( 'Nepal', 'wpsc' ) ."', 'NP', '" . __('Nepalese Rupee', 'wpsc') . "', '', '', '" . __('NPR', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (153, '" . __( 'Netherlands', 'wpsc' ) ."', 'NL', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (154, '" . __( 'Netherlands Antilles', 'wpsc' ) ."', 'AN', '" . __('Netherlands Antillean Guilder', 'wpsc') . "', '" . __( 'ƒ', 'wpsc' ) . "', '" . __( 'ƒ', 'wpsc' ) . "', '" . __('ANG', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (155, '" . __( 'New Caledonia (French)', 'wpsc' ) ."', 'NC', '" . __('CFP Franc', 'wpsc') . "', '', '', '" . __('XPF', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (156, '" . __( 'New Zealand', 'wpsc' ) ."', 'NZ', '" . __('New Zealand Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('NZD', 'wpsc') . "', '0', '12.5','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (157, '" . __( 'Nicaragua', 'wpsc' ) ."', 'NI', '" . __('Nicaraguan Cordoba Oro', 'wpsc') . "', '', '', '" . __('NIO', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (158, '" . __( 'Niger', 'wpsc' ) ."', 'NE', '" . __('CFA Franc BCEAO', 'wpsc') . "', '', '', '" . __('XOF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (159, '" . __( 'Nigeria', 'wpsc' ) ."', 'NG', '" . __('Nigerian Naira', 'wpsc') . "', '', '', '" . __('NGN', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (160, '" . __( 'Niue', 'wpsc' ) ."', 'NU', '" . __('New Zealand Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('NZD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (161, '" . __( 'Norfolk Island', 'wpsc' ) ."', 'NF', '" . __('Australian Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('AUD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (162, '" . __( 'Northern Mariana Islands', 'wpsc' ) ."', 'MP', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (163, '" . __( 'Norway', 'wpsc' ) ."', 'NO', '" . __('Norwegian Krone', 'wpsc') . "', '', '', '" . __('NOK', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (164, '" . __( 'Oman', 'wpsc' ) ."', 'OM', '" . __('Omani Rial', 'wpsc') . "', '', '', '" . __('OMR', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (165, '" . __( 'Pakistan', 'wpsc' ) ."', 'PK', '" . __('Pakistan Rupee', 'wpsc') . "', '', '', '" . __('PKR', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (166, '" . __( 'Palau', 'wpsc' ) ."', 'PW', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (167, '" . __( 'Panama', 'wpsc' ) ."', 'PA', '" . __('Panamanian Balboa', 'wpsc') . "', '', '', '" . __('PAB', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (168, '" . __( 'Papua New Guinea', 'wpsc' ) ."', 'PG', '" . __('Papua New Guinea Kina', 'wpsc') . "', '', '', '" . __('PGK', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (169, '" . __( 'Paraguay', 'wpsc' ) ."', 'PY', '" . __('Paraguay Guarani', 'wpsc') . "', '', '', '" . __('PYG', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (170, '" . __( 'Peru', 'wpsc' ) ."', 'PE', '" . __('Peruvian Nuevo Sol', 'wpsc') . "', '', '', '" . __('PEN', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (171, '" . __( 'Philippines', 'wpsc' ) ."', 'PH', '" . __('Philippine Peso', 'wpsc') . "', '', '', '" . __('PHP', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (172, '" . __( 'Pitcairn Island', 'wpsc' ) ."', 'PN', '" . __('New Zealand Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('NZD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (173, '" . __( 'Poland', 'wpsc' ) ."', 'PL', '" . __('Polish Zloty', 'wpsc') . "', '', '', '" . __('PLN', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (174, '" . __( 'Polynesia (French)', 'wpsc' ) ."', 'PF', '" . __('CFP Franc', 'wpsc') . "', '', '', '" . __('XPF', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (175, '" . __( 'Portugal', 'wpsc' ) ."', 'PT', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (176, '" . __( 'Puerto Rico', 'wpsc' ) ."', 'PR', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (177, '" . __( 'Qatar', 'wpsc' ) ."', 'QA', '" . __('Qatari Rial', 'wpsc') . "', '', '', '" . __('QAR', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (178, '" . __( 'Reunion (French)', 'wpsc' ) ."', 'RE', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (179, '" . __( 'Romania', 'wpsc' ) ."', 'RO', '" . __('Romanian New Leu', 'wpsc') . "', '', '', '" . __('RON', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (180, '" . __( 'Russia', 'wpsc' ) ."', 'RU', '" . __('Russian Ruble', 'wpsc') . "', '', '', '" . __('RUB', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (181, '" . __( 'Rwanda', 'wpsc' ) ."', 'RW', '" . __('Rwanda Franc', 'wpsc') . "', '', '', '" . __('RWF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (182, '" . __( 'Saint Helena', 'wpsc' ) ."', 'SH', '" . __('St. Helena Pound', 'wpsc') . "', '', '', '" . __('SHP', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (183, '" . __( 'Saint Kitts & Nevis Anguilla', 'wpsc' ) ."', 'KN', '" . __('East Caribbean Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('XCD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (184, '" . __( 'Saint Lucia', 'wpsc' ) ."', 'LC', '" . __('East Caribbean Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('XCD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (185, '" . __( 'Saint Pierre and Miquelon', 'wpsc' ) ."', 'PM', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (186, '" . __( 'Saint Vincent & Grenadines', 'wpsc' ) ."', 'VC', '" . __('East Caribbean Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('XCD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (187, '" . __( 'Samoa', 'wpsc' ) ."', 'WS', '" . __('Samoan Tala', 'wpsc') . "', '', '', '" . __('WST', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (188, '" . __( 'San Marino', 'wpsc' ) ."', 'SM', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (189, '" . __( 'Sao Tome and Principe', 'wpsc' ) ."', 'ST', '" . __('Dobra', 'wpsc') . "', '', '', '" . __('STD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (190, '" . __( 'Saudi Arabia', 'wpsc' ) ."', 'SA', '" . __('Saudi Riyal', 'wpsc') . "', '', '', '" . __('SAR', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (191, '" . __( 'Senegal', 'wpsc' ) ."', 'SN', '" . __('CFA Franc BCEAO', 'wpsc') . "', '', '', '" . __('XOF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (192, '" . __( 'Seychelles', 'wpsc' ) ."', 'SC', '" . __('Seychelles Rupee', 'wpsc') . "', '', '', '" . __('SCR', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (193, '" . __( 'Sierra Leone', 'wpsc' ) ."', 'SL', '" . __('Sierra Leone Leone', 'wpsc') . "', '', '', '" . __('SLL', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (194, '" . __( 'Singapore', 'wpsc' ) ."', 'SG', '" . __('Singapore Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('SGD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (195, '" . __( 'Slovakia', 'wpsc' ) ."', 'SK', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (196, '" . __( 'Slovenia', 'wpsc' ) ."', 'SI', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (197, '" . __( 'Solomon Islands', 'wpsc' ) ."', 'SB', '" . __('Solomon Islands Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', 'SBD', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (198, '" . __( 'Somalia', 'wpsc' ) ."', 'SO', '" . __('Somali Shilling', 'wpsc') . "', '', '', '" . __('SOS', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (199, '" . __( 'South Africa', 'wpsc' ) ."', 'ZA', '" . __('South African Rand', 'wpsc') . "', '', '', '" . __('ZAR', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (200, '" . __( 'South Georgia & South Sandwich Islands', 'wpsc' ) ."', 'GS', '" . __('Pound Sterling', 'wpsc') . "', '" . __('£', 'wpsc') . "', '" . __('£', 'wpsc') . "', '" . __('GBP', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (201, '" . __( 'Spain', 'wpsc' ) ."', 'ES', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (202, '" . __( 'Sri Lanka', 'wpsc' ) ."', 'LK', '" . __('Sri Lanka Rupee', 'wpsc') . "', '', '', '" . __('LKR', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (203, '" . __( 'Sudan', 'wpsc' ) ."', 'SD', '" . __('Sudanese Pound', 'wpsc') . "', '', '', '" . __('SDG', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (204, '" . __( 'Suriname', 'wpsc' ) ."', 'SR', '" . __('Surinamese Dollar', 'wpsc') . "', '', '', '" . __('SRD', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (205, '" . __( 'Svalbard and Jan Mayen Islands', 'wpsc' ) ."', 'SJ', '" . __('Norwegian Krone', 'wpsc') . "', '', '', '" . __('NOK', 'wpsc') . "', '0', '0','','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (206, '" . __( 'Swaziland', 'wpsc' ) ."', 'SZ', '" . __('Swaziland Lilangeni', 'wpsc') . "', '', '', '" . __('SZL', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (207, '" . __( 'Sweden', 'wpsc' ) ."', 'SE', '" . __('Swedish Krona', 'wpsc') . "', '', '', '" . __('SEK', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (208, '" . __( 'Switzerland', 'wpsc' ) ."', 'CH', '" . __('Swiss Franc', 'wpsc') . "', '', '', '" . __('CHF', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (209, '" . __( 'Syria', 'wpsc' ) ."', 'SY', '" . __('Syrian Pound', 'wpsc') . "', '', '', '" . __('SYP', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (210, '" . __( 'Taiwan', 'wpsc' ) ."', 'TW', '" . __('New Taiwan Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('TWD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (211, '" . __( 'Tajikistan', 'wpsc' ) ."', 'TJ', '" . __('Tajik Somoni', 'wpsc') . "', '', '', '" . __('TJS', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (212, '" . __( 'Tanzania', 'wpsc' ) ."', 'TZ', '" . __('Tanzanian Shilling', 'wpsc') . "', '', '', '" . __('TZS', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (213, '" . __( 'Thailand', 'wpsc' ) ."', 'TH', '" . __('Thai Baht', 'wpsc') . "', '', '', '" . __('THB', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (214, '" . __( 'Togo', 'wpsc' ) ."', 'TG', '" . __('CFA Franc BCEAO', 'wpsc') . "', '', '', '" . __('XOF', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (215, '" . __( 'Tokelau', 'wpsc' ) ."', 'TK', '" . __('New Zealand Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('NZD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (216, '" . __( 'Tonga', 'wpsc' ) ."', 'TO', '" . __('Tongan Paʻanga', 'wpsc') . "', '', '', '" . __('TOP', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (217, '" . __( 'Trinidad and Tobago', 'wpsc' ) ."', 'TT', '" . __('Trinidad and Tobago Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('TTD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (218, '" . __( 'Tunisia', 'wpsc' ) ."', 'TN', '" . __('Tunisian Dinar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('TND', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (219, '" . __( 'Turkey', 'wpsc' ) ."', 'TR', '" . __('Turkish Lira', 'wpsc') . "', '', '', '" . __('TRY', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (220, '" . __( 'Turkmenistan', 'wpsc' ) ."', 'TM', '" . __('Manat', 'wpsc') . "', '', '', '" . __('TMM', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (221, '" . __( 'Turks and Caicos Islands', 'wpsc' ) ."', 'TC', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0', 'northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (222, '" . __( 'Tuvalu', 'wpsc' ) ."', 'TV', '" . __('Australian Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('AUD', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (223, '" . __( 'United Kingdom', 'wpsc' ) ."', 'GB', '" . __('Pound Sterling', 'wpsc') . "', '" . __('£', 'wpsc') . "', '" . __('£', 'wpsc') . "', '" . __('GBP', 'wpsc') . "', '0', '17.5','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (224, '" . __( 'Uganda', 'wpsc' ) ."', 'UG', '" . __('Uganda Shilling', 'wpsc') . "', '', '', '" . __('UGX', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (225, '" . __( 'Ukraine', 'wpsc' ) ."', 'UA', '" . __('Ukraine Hryvnia', 'wpsc') . "', '". __( '₴', 'wpsc' )."', '". __( '₴', 'wpsc' ) ."', '" . __('UAH', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (226, '" . __( 'United Arab Emirates', 'wpsc' ) ."', 'AE', '" . __('Arab Emirates Dirham', 'wpsc') . "', '', '', '" . __('AED', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (227, '" . __( 'Uruguay', 'wpsc' ) ."', 'UY', '" . __( 'Uruguayan Peso', 'wpsc' ) . "', '', '', '" . __('UYU', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (228, '" . __( 'USA Minor Outlying Islands', 'wpsc' ) ."', 'UM', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (229, '" . __( 'Uzbekistan', 'wpsc' ) ."', 'UZ', '" . __('Uzbekistan Sum', 'wpsc') . "', '', '', '" . __('UZS', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (230, '" . __( 'Vanuatu', 'wpsc' ) ."', 'VU', '" . __('Vanuatu Vatu', 'wpsc') . "', '', '', '" . __('VUV', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (231, '" . __( 'Vatican', 'wpsc' ) ."', 'VA', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (232, '" . __( 'Venezuela', 'wpsc' ) ."', 'VE', '" . __('Venezuelan Bolivar Fuerte', 'wpsc') . "', '', '', '" . __('VEF', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (233, '" . __( 'Vietnam', 'wpsc' ) ."', 'VN', '" . __('Vietnamese Dong', 'wpsc') . "', '', '', '" . __('VND', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (234, '" . __( 'Virgin Islands (British)', 'wpsc' ) ."', 'VG', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (235, '" . __( 'Virgin Islands (USA)', 'wpsc' ) ."', 'VI', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','northamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (236, '" . __( 'Wallis and Futuna Islands', 'wpsc' ) ."', 'WF', '" . __('CFP Franc', 'wpsc') . "', '', '', '" . __('XPF', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (237, '" . __( 'Western Sahara', 'wpsc' ) ."', 'EH', '" . __('Moroccan Dirham', 'wpsc') . "', '', '', '" . __('MAD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (238, '" . __( 'Yemen', 'wpsc' ) ."', 'YE', '" . __('Yemeni Rial', 'wpsc') . "', '', '', '" . __('YER', 'wpsc') . "', '0', '0','asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (240, '" . __( 'Zambia', 'wpsc' ) ."', 'ZM', '" . __('Zambian Kwacha', 'wpsc') . "', '', '', '" . __('ZMK', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (241, '" . __( 'Zimbabwe', 'wpsc' ) ."', 'ZW', '" . __('US Dollar', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (242, '" . __( 'South Sudan', 'wpsc' ) . "', 'SS', '" . __( 'South Sudanese Pound', 'wpsc' ) . "', '', '', '" . __( 'SSP', 'wpsc' ) . "', '0', '0', 'africa', '1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (243, '" . __( 'Serbia', 'wpsc' ) ."', 'RS', '" . __('Serbian Dinar', 'wpsc') . "', '', '', '" . __('RSD', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (244, '" . __( 'Montenegro', 'wpsc' ) ."', 'ME', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (246, '" . __( 'Aland Islands', 'wpsc' ) ."', 'AX', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (247, '" . __( 'Saint Barthelemy', 'wpsc' ) ."', 'BL', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','europe','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (248, '" . __( 'Bonaire, Sint Eustatius and Saba', 'wpsc' ) ."', 'BQ', '" . __('US Dollar', 'wpsc') ."', '" . __('$', 'wpsc') . "', '" . __('$', 'wpsc') . "', '" . __('USD', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (249, '" . __( 'Curacao', 'wpsc' ) . "', 'CW', '" . __( 'Netherlands Antillean Guilder', 'wpsc' ) . "', '" . __( 'ƒ', 'wpsc' ) . "', '" . __( 'ƒ', 'wpsc' ) . "', '" . __( 'ANG', 'wpsc' ) . "', '0', '0', 'southamerica', '1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (250, '" . __( 'Saint Martin (French Part)', 'wpsc' ) ."', 'MF', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','southamerica','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (251, '" . __( 'Palestinian Territories', 'wpsc' ) ."', 'PS', '" . __('Israeli New Shekel', 'wpsc') ."', '" . __( '₪', 'wpsc' ) . "', '" . __( '₪', 'wpsc' ) . "', '" . __('ILS', 'wpsc') . "', '0', '0', 'asiapacific','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (252, '" . __( 'Sint Maarten (Dutch Part)', 'wpsc' ) ."', 'SX', '" . __('Netherlands Antillean Guilder', 'wpsc') . "', '" . __( 'ƒ', 'wpsc' ) . "', '" . __( 'ƒ', 'wpsc' ) . "', '" . __('ANG', 'wpsc') . "', '0', '0','africa','1');
INSERT INTO `".WPSC_TABLE_CURRENCY_LIST."` VALUES (253, '" . __( 'French Guiana', 'wpsc' ) ."', 'GF', '" . __('Euro', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('€', 'wpsc') . "', '" . __('EUR', 'wpsc') . "', '0', '0','southamerica','1');";
| visser/WP-e-Commerce | wpsc-updates/currency_list.php | PHP | gpl-2.0 | 55,852 |
<?php
/**
* @package Joomla.UnitTest
* @subpackage Plugin
*
* @copyright Copyright (C) 2005 - 2014 Open Source Matters, Inc. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE
*/
require_once __DIR__ . '/stubs/PlgSystemBase.php';
require_once __DIR__ . '/stubs/PlgSystemJoomla.php';
require_once __DIR__ . '/stubs/PlgSystemPrivate.php';
/**
* Test class for JPlugin.
*
* @package Joomla.UnitTest
* @subpackage Plugin
* @since 3.2
*/
class JPluginTest extends TestCase
{
/**
* Sets up the fixture, for example, opens a network connection.
* This method is called before a test is executed.
*
* @return void
*
* @since 3.2
*/
protected function setUp()
{
$this->saveFactoryState();
JFactory::$application = $this->getMockApplication();
JFactory::$database = $this->getMockDatabase();
}
/**
* Tears down the fixture, for example, closes a network connection.
* This method is called after a test is executed.
*
* @return void
*
* @since 3.2
*/
protected function tearDown()
{
$this->restoreFactoryState();
}
/**
* Test constructor with app and database variables
*
* @return void
*
* @since 3.2
*/
public function test__constructWithAppAndDb()
{
// Load our test plugin
$plugin = new PlgSystemJoomla;
$this->assertInstanceOf(
'JApplicationBase',
TestReflection::getValue($plugin, 'app'),
'Assert the $app property is an instance of JApplicationBase'
);
$this->assertInstanceOf(
'JDatabaseDriver',
TestReflection::getValue($plugin, 'db'),
'Assert the $db property is an instance of JDatabaseDriver'
);
$this->assertThat(
TestReflection::getValue($plugin, '_name'),
$this->equalTo('Joomla')
);
}
/**
* Test constructor without app and database variables
*
* @return void
*
* @since 3.2
*/
public function test__constructWithoutAppAndDb()
{
// Load our test plugin
$plugin = new PlgSystemBase;
$this->assertClassNotHasAttribute(
'app',
'PlgSystemBase',
'Assert the $app property does not exist'
);
$this->assertClassNotHasAttribute(
'db',
'PlgSystemBase',
'Assert the $db property does not exist'
);
$this->assertThat(
TestReflection::getValue($plugin, '_name'),
$this->equalTo('Base')
);
}
/**
 * Test constructor when the $app and $db properties are declared private
*
* @return void
*
* @since 3.2
*/
public function test__constructPrivateAppAndDb()
{
// Load our test plugin
$plugin = new PlgSystemPrivate;
$this->assertNull(
TestReflection::getValue($plugin, 'app'),
'Assert the $app property is not set if private and a fatal error does not occur'
);
$this->assertNull(
TestReflection::getValue($plugin, 'db'),
'Assert the $db property is not set if private and a fatal error does not occur'
);
$this->assertThat(
TestReflection::getValue($plugin, '_name'),
$this->equalTo('Private')
);
}
}
| mootombo/webos | tests/unit/suites/libraries/cms/plugin/JPluginTest.php | PHP | gpl-2.0 | 2,985 |
--
SET @guid:=67973;
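-- Clear guids @guid+0 .. @guid+16 before inserting so this update can be re-applied without duplicate spawns.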
DELETE FROM `creature` WHERE `guid` BETWEEN @guid+0 AND @guid+16;
INSERT INTO `creature` (`guid`, `id`, `map`, `spawnMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`) VALUES
(@guid+0, 13196, 429, 2, 0, 0, 51.1273, -748.835, -25.1003, 4.72, 86400, 0, 0, 15198, 0, 0, 0, 0, 0),
(@guid+1, 13196, 429, 2, 0, 0, 61.9258, -648.15, -25.1335, 4.72, 86400, 0, 0, 15198, 0, 0, 0, 0, 0),
(@guid+2, 13285, 429, 2, 0, 0, 293.8714, -479.7898, -119.1195, 3.162191, 86400, 0, 0, 16704, 0, 0, 0, 0, 0),
(@guid+3, 13022, 429, 2, 0, 0, 292.697, -476.578, -119.036, 5.58505, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+4, 13022, 429, 2, 0, 0, 287.544, -479.895, -119.036, 5.74213, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+5, 13022, 429, 2, 0, 0, 299.18, -474.913, -119.036, 2.80998, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+6, 13022, 429, 2, 0, 0, 292.824, -482.119, -119.036, 3.89208, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+7, 13197, 429, 2, 0, 0, 285.642, -501.949, -119.036, 2.14675, 86400, 0, 0, 12954, 4326, 0, 0, 0, 0),
(@guid+8, 13022, 429, 2, 0, 0, 281.187, -499.258, -119.036, 0, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+9, 13022, 429, 2, 0, 0, 289.159, -504.723, -119.036, 0, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+10, 13022, 429, 2, 0, 0, 282.743, -504.313, -119.036, 0, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+11, 13022, 429, 2, 0, 0, 288.645, -499.382, -119.036, 0, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+12, 13197, 429, 2, 0, 0, 240.481, -489.337, -119.119, 0.093636, 86400, 0, 0, 12954, 4326, 0, 0, 0, 0),
(@guid+13, 13022, 429, 2, 0, 0, 245.336, -492.562, -119.036, 0, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+14, 13022, 429, 2, 0, 0, 243.656, -487.932, -119.036, 5.20108, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+15, 13022, 429, 2, 0, 0, 239.995, -491.467, -119.036, 0, 86400, 2, 0, 1900, 0, 1, 0, 0, 0),
(@guid+16, 13022, 429, 2, 0, 0, 241.339, -484.667, -119.036, 0, 86400, 2, 0, 1900, 0, 1, 0, 0, 0);
| Shauren/TrinityCore | sql/old/7/world/19061_2019_07_15/2018_03_11_01_world_2016_12_26_07_world.sql | SQL | gpl-2.0 | 2,108 |
/* Applied Micro X-Gene SoC Ethernet Driver
*
* Copyright (c) 2014, Applied Micro Circuits Corporation
* Authors: Iyappan Subramanian <isubramanian@apm.com>
* Ravi Patel <rapatel@apm.com>
* Keyur Chudgar <kchudgar@apm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
#define RES_ENET_CSR 0
#define RES_RING_CSR 1
#define RES_RING_CMD 2
static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];
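/* Pre-fill every 16-byte descriptor of a free buffer pool: the slot index
 * is stored as USERINFO so the matching rx_skb can be looked up later,
 * FPQNUM carries the pool's destination ring number and STASH is set to 3.
 */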
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
int i;
for (i = 0; i < buf_pool->slots; i++) {
raw_desc = &buf_pool->raw_desc16[i];
/* Hardware expects descriptor in little endian format */
raw_desc->m0 = cpu_to_le64(i |
SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
SET_VAL(STASH, 3));
}
}
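/* Allocate @nbuf receive skbs, DMA-map them and publish one free-pool
 * descriptor per buffer; the final wr_cmd() hands the new buffers to
 * the hardware.
 */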
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
u32 nbuf)
{
struct sk_buff *skb;
struct xgene_enet_raw_desc16 *raw_desc;
struct xgene_enet_pdata *pdata;
struct net_device *ndev;
struct device *dev;
dma_addr_t dma_addr;
u32 tail = buf_pool->tail;
u32 slots = buf_pool->slots - 1;
u16 bufdatalen, len;
int i;
ndev = buf_pool->ndev;
dev = ndev_to_dev(buf_pool->ndev);
pdata = netdev_priv(ndev);
bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
len = XGENE_ENET_MAX_MTU;
for (i = 0; i < nbuf; i++) {
raw_desc = &buf_pool->raw_desc16[tail];
skb = netdev_alloc_skb_ip_align(ndev, len);
if (unlikely(!skb))
return -ENOMEM;
buf_pool->rx_skb[tail] = skb;
dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
netdev_err(ndev, "DMA mapping error\n");
dev_kfree_skb_any(skb);
return -EINVAL;
}
raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
SET_VAL(BUFDATALEN, bufdatalen) |
SET_BIT(COHERENT));
tail = (tail + 1) & slots;
}
pdata->ring_ops->wr_cmd(buf_pool, nbuf);
buf_pool->tail = tail;
return 0;
}
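/* Destination ring id as used in work messages: ring manager id in the
 * upper bits, ring number in the lower 10 bits.
 */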
static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
return ((u16)pdata->rm << 10) | ring->num;
}
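/* Ethernet header length, allowing for a single 802.1Q VLAN tag. */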
static u8 xgene_enet_hdr_len(const void *data)
{
const struct ethhdr *eth = data;
return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
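/* Free every skb still sitting in the buffer pool, walking back from the
 * tail, and return the slots to software with a negative wr_cmd().
 */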
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
struct xgene_enet_raw_desc16 *raw_desc;
u32 slots = buf_pool->slots - 1;
u32 tail = buf_pool->tail;
u32 userinfo;
int i, len;
len = pdata->ring_ops->len(buf_pool);
for (i = 0; i < len; i++) {
tail = (tail - 1) & slots;
raw_desc = &buf_pool->raw_desc16[tail];
/* Hardware stores descriptor in little endian format */
userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
}
pdata->ring_ops->wr_cmd(buf_pool, -len);
buf_pool->tail = tail;
}
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
struct xgene_enet_desc_ring *rx_ring = data;
if (napi_schedule_prep(&rx_ring->napi)) {
disable_irq_nosync(irq);
__napi_schedule(&rx_ring->napi);
}
return IRQ_HANDLED;
}
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
struct xgene_enet_raw_desc *raw_desc)
{
struct sk_buff *skb;
struct device *dev;
skb_frag_t *frag;
dma_addr_t *frag_dma_addr;
u16 skb_index;
u8 status;
int i, ret = 0;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
dev = ndev_to_dev(cp_ring->ndev);
dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
skb_headlen(skb),
DMA_TO_DEVICE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
DMA_TO_DEVICE);
}
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
status);
ret = -EIO;
}
if (likely(skb)) {
dev_kfree_skb_any(skb);
} else {
netdev_err(cp_ring->ndev, "completion skb is NULL\n");
ret = -EIO;
}
return ret;
}
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
struct net_device *ndev = skb->dev;
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct iphdr *iph;
u8 l3hlen = 0, l4hlen = 0;
u8 ethhdr, proto = 0, csum_enable = 0;
u64 hopinfo = 0;
u32 hdr_len, mss = 0;
u32 i, len, nr_frags;
ethhdr = xgene_enet_hdr_len(skb->data);
if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
unlikely(skb->protocol != htons(ETH_P_8021Q)))
goto out;
if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
goto out;
iph = ip_hdr(skb);
if (unlikely(ip_is_fragment(iph)))
goto out;
if (likely(iph->protocol == IPPROTO_TCP)) {
l4hlen = tcp_hdrlen(skb) >> 2;
csum_enable = 1;
proto = TSO_IPPROTO_TCP;
if (ndev->features & NETIF_F_TSO) {
hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
if (skb_is_nonlinear(skb)) {
len = skb_headlen(skb);
nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < 2 && i < nr_frags; i++)
len += skb_shinfo(skb)->frags[i].size;
/* HW requires the header to reside within the first 3 buffers */
if (unlikely(hdr_len > len)) {
if (skb_linearize(skb))
return 0;
}
}
if (!mss || ((skb->len - hdr_len) <= mss))
goto out;
if (mss != pdata->mss) {
pdata->mss = mss;
pdata->mac_ops->set_mss(pdata);
}
hopinfo |= SET_BIT(ET);
}
} else if (iph->protocol == IPPROTO_UDP) {
l4hlen = UDP_HDR_SIZE;
csum_enable = 1;
}
out:
l3hlen = ip_hdrlen(skb) >> 2;
hopinfo |= SET_VAL(TCPHDR, l4hlen) |
SET_VAL(IPHDR, l3hlen) |
SET_VAL(ETHHDR, ethhdr) |
SET_VAL(EC, csum_enable) |
SET_VAL(IS, proto) |
SET_BIT(IC) |
SET_BIT(TYPE_ETH_WORK_MESSAGE);
return hopinfo;
}
static u16 xgene_enet_encode_len(u16 len)
{
return (len == BUFLEN_16K) ? 0 : len;
}
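/*
 * Illustrative note: the hardware length field cannot represent BUFLEN_16K
 * directly, so a full 16 KB buffer is encoded as 0 and any shorter length
 * passes through unchanged, e.g.
 *
 *     xgene_enet_encode_len(BUFLEN_16K);  // -> 0
 *     xgene_enet_encode_len(1500);        // -> 1500
 */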
static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
SET_VAL(BUFDATALEN, len));
}
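/*
 * Illustrative note: the "idx ^ 1" indexing swaps adjacent 64-bit words, so
 * logical buffer slots 0,1,2,3 land in descriptor words 1,0,3,2. This is
 * presumably how the little-endian 128-bit descriptor pairs expect their
 * halves to be ordered; treat that rationale as an assumption - the code
 * itself only guarantees the swap.
 */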
static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
__le64 *exp_bufs;
exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
return exp_bufs;
}
static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
struct sk_buff *skb)
{
struct device *dev = ndev_to_dev(tx_ring->ndev);
struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
struct xgene_enet_raw_desc *raw_desc;
__le64 *exp_desc = NULL, *exp_bufs = NULL;
dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
skb_frag_t *frag;
u16 tail = tx_ring->tail;
u64 hopinfo;
u32 len, hw_len;
u8 ll = 0, nv = 0, idx = 0;
bool split = false;
u32 size, offset, ell_bytes = 0;
u32 i, fidx, nr_frags, count = 1;
raw_desc = &tx_ring->raw_desc[tail];
tail = (tail + 1) & (tx_ring->slots - 1);
memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
hopinfo = xgene_enet_work_msg(skb);
if (!hopinfo)
return -EINVAL;
raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
hopinfo);
len = skb_headlen(skb);
hw_len = xgene_enet_encode_len(len);
dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
netdev_err(tx_ring->ndev, "DMA mapping error\n");
return -EINVAL;
}
/* Hardware expects descriptor in little endian format */
raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
SET_VAL(BUFDATALEN, hw_len) |
SET_BIT(COHERENT));
if (!skb_is_nonlinear(skb))
goto out;
/* scatter gather */
nv = 1;
exp_desc = (void *)&tx_ring->raw_desc[tail];
tail = (tail + 1) & (tx_ring->slots - 1);
memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
nr_frags = skb_shinfo(skb)->nr_frags;
for (i = nr_frags; i < 4 ; i++)
exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
if (!split) {
frag = &skb_shinfo(skb)->frags[fidx];
size = skb_frag_size(frag);
offset = 0;
pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, pbuf_addr))
return -EINVAL;
frag_dma_addr[fidx] = pbuf_addr;
fidx++;
if (size > BUFLEN_16K)
split = true;
}
if (size > BUFLEN_16K) {
len = BUFLEN_16K;
size -= BUFLEN_16K;
} else {
len = size;
split = false;
}
dma_addr = pbuf_addr + offset;
hw_len = xgene_enet_encode_len(len);
switch (i) {
case 0:
case 1:
case 2:
xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
break;
case 3:
if (split || (fidx != nr_frags)) {
exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
xgene_set_addr_len(exp_bufs, idx, dma_addr,
hw_len);
idx++;
ell_bytes += len;
} else {
xgene_set_addr_len(exp_desc, i, dma_addr,
hw_len);
}
break;
default:
xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
idx++;
ell_bytes += len;
break;
}
if (split)
offset += BUFLEN_16K;
}
count++;
if (idx) {
ll = 1;
dma_addr = dma_map_single(dev, exp_bufs,
sizeof(u64) * MAX_EXP_BUFFS,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
dev_kfree_skb_any(skb);
return -EINVAL;
}
i = ell_bytes >> LL_BYTES_LSB_LEN;
exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
SET_VAL(LL_BYTES_MSB, i) |
SET_VAL(LL_LEN, idx));
raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
}
out:
raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
SET_VAL(USERINFO, tx_ring->tail));
tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
pdata->tx_level += count;
tx_ring->tail = tail;
return count;
}
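/*
 * Illustrative summary of the descriptor layout built above (a reading of
 * the code, not an authoritative hardware description): a linear skb uses
 * one raw descriptor; a fragmented skb additionally consumes the next slot
 * as an "exp_desc" holding up to four buffer pointers, and any further
 * fragments spill into a separately DMA-mapped exp_bufs list referenced
 * via the LL (linked list) fields. Fragments larger than BUFLEN_16K are
 * split into 16 KB chunks before being placed.
 */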
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
u32 tx_level = pdata->tx_level;
int count;
if (tx_level < pdata->txc_level)
tx_level += ((typeof(pdata->tx_level))~0U);
if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
return NETDEV_TX_OK;
count = xgene_enet_setup_tx_desc(tx_ring, skb);
if (count <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
skb_tx_timestamp(skb);
pdata->stats.tx_packets++;
pdata->stats.tx_bytes += skb->len;
pdata->ring_ops->wr_cmd(tx_ring, count);
return NETDEV_TX_OK;
}
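/*
 * Illustrative note: tx_level and txc_level are free-running counters of
 * descriptors queued and completed. The adjustment above appears to
 * compensate for counter wraparound so that (tx_level - txc_level) still
 * approximates the number of in-flight descriptors, which is then compared
 * against tx_qcnt_hi to decide when to stop the queue.
 */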
static void xgene_enet_skip_csum(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
if (!ip_is_fragment(iph) ||
(iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
}
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
struct xgene_enet_raw_desc *raw_desc)
{
struct net_device *ndev;
struct xgene_enet_pdata *pdata;
struct device *dev;
struct xgene_enet_desc_ring *buf_pool;
u32 datalen, skb_index;
struct sk_buff *skb;
u8 status;
int ret = 0;
ndev = rx_ring->ndev;
pdata = netdev_priv(ndev);
dev = ndev_to_dev(rx_ring->ndev);
buf_pool = rx_ring->buf_pool;
dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = buf_pool->rx_skb[skb_index];
/* checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
dev_kfree_skb_any(skb);
xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
status);
pdata->stats.rx_dropped++;
ret = -EIO;
goto out;
}
/* strip off CRC as HW isn't doing this */
datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
datalen = (datalen & DATALEN_MASK) - 4;
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, datalen);
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, ndev);
if (likely((ndev->features & NETIF_F_IP_CSUM) &&
skb->protocol == htons(ETH_P_IP))) {
xgene_enet_skip_csum(skb);
}
pdata->stats.rx_packets++;
pdata->stats.rx_bytes += datalen;
napi_gro_receive(&rx_ring->napi, skb);
out:
if (--rx_ring->nbufpool == 0) {
ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
rx_ring->nbufpool = NUM_BUFPOOL;
}
return ret;
}
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
int budget)
{
struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
struct xgene_enet_raw_desc *raw_desc, *exp_desc;
u16 head = ring->head;
u16 slots = ring->slots - 1;
int ret, desc_count, count = 0, processed = 0;
bool is_completion;
do {
raw_desc = &ring->raw_desc[head];
desc_count = 0;
is_completion = false;
exp_desc = NULL;
if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
break;
/* read fpqnum field after dataaddr field */
dma_rmb();
if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
head = (head + 1) & slots;
exp_desc = &ring->raw_desc[head];
if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
head = (head - 1) & slots;
break;
}
dma_rmb();
count++;
desc_count++;
}
if (is_rx_desc(raw_desc)) {
ret = xgene_enet_rx_frame(ring, raw_desc);
} else {
ret = xgene_enet_tx_completion(ring, raw_desc);
is_completion = true;
}
xgene_enet_mark_desc_slot_empty(raw_desc);
if (exp_desc)
xgene_enet_mark_desc_slot_empty(exp_desc);
head = (head + 1) & slots;
count++;
desc_count++;
processed++;
if (is_completion)
pdata->txc_level += desc_count;
if (ret)
break;
} while (--budget);
if (likely(count)) {
pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
if (netif_queue_stopped(ring->ndev))
netif_start_queue(ring->ndev);
}
return processed;
}
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
struct xgene_enet_desc_ring *ring;
int processed;
ring = container_of(napi, struct xgene_enet_desc_ring, napi);
processed = xgene_enet_process_ring(ring, budget);
if (processed != budget) {
napi_complete(napi);
enable_irq(ring->irq);
}
return processed;
}
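/*
 * Illustrative note: this follows the usual NAPI contract - if fewer
 * descriptors than the budget were processed, the ring is assumed drained,
 * polling completes and the ring IRQ (disabled in the hard IRQ handler) is
 * re-enabled; otherwise the core keeps calling the poll routine.
 */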
static void xgene_enet_timeout(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
pdata->mac_ops->reset(pdata);
}
static int xgene_enet_register_irq(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *ring;
int ret;
ring = pdata->rx_ring;
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring);
if (ret)
netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);
if (pdata->cq_cnt) {
ring = pdata->tx_ring->cp_ring;
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring);
if (ret) {
netdev_err(ndev, "Failed to request irq %s\n",
ring->irq_name);
}
}
return ret;
}
static void xgene_enet_free_irq(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata;
struct device *dev;
pdata = netdev_priv(ndev);
dev = ndev_to_dev(ndev);
devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
if (pdata->cq_cnt) {
devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
pdata->tx_ring->cp_ring);
}
}
static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
struct napi_struct *napi;
napi = &pdata->rx_ring->napi;
napi_enable(napi);
if (pdata->cq_cnt) {
napi = &pdata->tx_ring->cp_ring->napi;
napi_enable(napi);
}
}
static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
struct napi_struct *napi;
napi = &pdata->rx_ring->napi;
napi_disable(napi);
if (pdata->cq_cnt) {
napi = &pdata->tx_ring->cp_ring->napi;
napi_disable(napi);
}
}
static int xgene_enet_open(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
int ret;
mac_ops->tx_enable(pdata);
mac_ops->rx_enable(pdata);
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
if (ret)
return ret;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
phy_start(pdata->phy_dev);
else
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
netif_start_queue(ndev);
return ret;
}
static int xgene_enet_close(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
netif_stop_queue(ndev);
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
phy_stop(pdata->phy_dev);
else
cancel_delayed_work_sync(&pdata->link_work);
mac_ops->tx_disable(pdata);
mac_ops->rx_disable(pdata);
xgene_enet_free_irq(ndev);
xgene_enet_napi_disable(pdata);
xgene_enet_process_ring(pdata->rx_ring, -1);
return 0;
}
static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
struct xgene_enet_pdata *pdata;
struct device *dev;
pdata = netdev_priv(ring->ndev);
dev = ndev_to_dev(ring->ndev);
pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_desc_ring *buf_pool;
if (pdata->tx_ring) {
xgene_enet_delete_ring(pdata->tx_ring);
pdata->tx_ring = NULL;
}
if (pdata->rx_ring) {
buf_pool = pdata->rx_ring->buf_pool;
xgene_enet_delete_bufpool(buf_pool);
xgene_enet_delete_ring(buf_pool);
xgene_enet_delete_ring(pdata->rx_ring);
pdata->rx_ring = NULL;
}
}
static int xgene_enet_get_ring_size(struct device *dev,
enum xgene_enet_ring_cfgsize cfgsize)
{
int size = -EINVAL;
switch (cfgsize) {
case RING_CFGSIZE_512B:
size = 0x200;
break;
case RING_CFGSIZE_2KB:
size = 0x800;
break;
case RING_CFGSIZE_16KB:
size = 0x4000;
break;
case RING_CFGSIZE_64KB:
size = 0x10000;
break;
case RING_CFGSIZE_512KB:
size = 0x80000;
break;
default:
dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
break;
}
return size;
}
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
struct xgene_enet_pdata *pdata;
struct device *dev;
if (!ring)
return;
dev = ndev_to_dev(ring->ndev);
pdata = netdev_priv(ring->ndev);
if (ring->desc_addr) {
pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
devm_kfree(dev, ring);
}
static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
struct xgene_enet_desc_ring *ring;
ring = pdata->tx_ring;
if (ring) {
if (ring->cp_ring && ring->cp_ring->cp_skb)
devm_kfree(dev, ring->cp_ring->cp_skb);
if (ring->cp_ring && pdata->cq_cnt)
xgene_enet_free_desc_ring(ring->cp_ring);
xgene_enet_free_desc_ring(ring);
}
ring = pdata->rx_ring;
if (ring) {
if (ring->buf_pool) {
if (ring->buf_pool->rx_skb)
devm_kfree(dev, ring->buf_pool->rx_skb);
xgene_enet_free_desc_ring(ring->buf_pool);
}
xgene_enet_free_desc_ring(ring);
}
}
static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
if ((pdata->enet_id == XGENE_ENET2) &&
(xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
return true;
}
return false;
}
static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
struct xgene_enet_desc_ring *ring)
{
u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
struct net_device *ndev, u32 ring_num,
enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
struct xgene_enet_desc_ring *ring;
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
int size;
size = xgene_enet_get_ring_size(dev, cfgsize);
if (size < 0)
return NULL;
ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
GFP_KERNEL);
if (!ring)
return NULL;
ring->ndev = ndev;
ring->num = ring_num;
ring->cfgsize = cfgsize;
ring->id = ring_id;
ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
GFP_KERNEL);
if (!ring->desc_addr) {
devm_kfree(dev, ring);
return NULL;
}
ring->size = size;
if (is_irq_mbox_required(pdata, ring)) {
ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
&ring->irq_mbox_dma, GFP_KERNEL);
if (!ring->irq_mbox_addr) {
dma_free_coherent(dev, size, ring->desc_addr,
ring->dma);
devm_kfree(dev, ring);
return NULL;
}
}
ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
ring = pdata->ring_ops->setup(ring);
netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
ring->num, ring->size, ring->id, ring->slots);
return ring;
}
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
return (owner << 6) | (bufnum & GENMASK(5, 0));
}
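/*
 * Illustrative note: the ring id packs the owner above a 6-bit buffer
 * number. Assuming RING_OWNER_CPU == 0, a CPU-owned ring with bufnum 2 has
 * id 2, while an ETH0-owned ring with the same bufnum would be
 * (RING_OWNER_ETH0 << 6) | 2.
 */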
static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
enum xgene_ring_owner owner;
if (p->enet_id == XGENE_ENET1) {
switch (p->phy_mode) {
case PHY_INTERFACE_MODE_SGMII:
owner = RING_OWNER_ETH0;
break;
default:
owner = (!p->port_id) ? RING_OWNER_ETH0 :
RING_OWNER_ETH1;
break;
}
} else {
owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
}
return owner;
}
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
enum xgene_ring_owner owner;
dma_addr_t dma_exp_bufs;
u8 cpu_bufnum = pdata->cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
u16 ring_id;
int ret, size;
/* allocate rx descriptor ring */
owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
if (!rx_ring) {
ret = -ENOMEM;
goto err;
}
/* allocate buffer pool for receiving packets */
owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_2KB, ring_id);
if (!buf_pool) {
ret = -ENOMEM;
goto err;
}
rx_ring->nbufpool = NUM_BUFPOOL;
rx_ring->buf_pool = buf_pool;
rx_ring->irq = pdata->rx_irq;
if (!pdata->cq_cnt) {
snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
ndev->name);
} else {
snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
}
buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
sizeof(struct sk_buff *), GFP_KERNEL);
if (!buf_pool->rx_skb) {
ret = -ENOMEM;
goto err;
}
buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
rx_ring->buf_pool = buf_pool;
pdata->rx_ring = rx_ring;
/* allocate tx descriptor ring */
owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
if (!tx_ring) {
ret = -ENOMEM;
goto err;
}
size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
GFP_KERNEL);
if (!tx_ring->exp_bufs) {
ret = -ENOMEM;
goto err;
}
pdata->tx_ring = tx_ring;
if (!pdata->cq_cnt) {
cp_ring = pdata->rx_ring;
} else {
/* allocate tx completion descriptor ring */
ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB,
ring_id);
if (!cp_ring) {
ret = -ENOMEM;
goto err;
}
cp_ring->irq = pdata->txc_irq;
snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
}
cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
sizeof(struct sk_buff *), GFP_KERNEL);
if (!cp_ring->cp_skb) {
ret = -ENOMEM;
goto err;
}
size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
size, GFP_KERNEL);
if (!cp_ring->frag_dma_addr) {
devm_kfree(dev, cp_ring->cp_skb);
ret = -ENOMEM;
goto err;
}
pdata->tx_ring->cp_ring = cp_ring;
pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;
return 0;
err:
xgene_enet_free_desc_rings(pdata);
return ret;
}
static struct rtnl_link_stats64 *xgene_enet_get_stats64(
struct net_device *ndev,
struct rtnl_link_stats64 *storage)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct rtnl_link_stats64 *stats = &pdata->stats;
stats->rx_errors += stats->rx_length_errors +
stats->rx_crc_errors +
stats->rx_frame_errors +
stats->rx_fifo_errors;
memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));
return storage;
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
int ret;
ret = eth_mac_addr(ndev, addr);
if (ret)
return ret;
pdata->mac_ops->set_mac_addr(pdata);
return ret;
}
static const struct net_device_ops xgene_ndev_ops = {
.ndo_open = xgene_enet_open,
.ndo_stop = xgene_enet_close,
.ndo_start_xmit = xgene_enet_start_xmit,
.ndo_tx_timeout = xgene_enet_timeout,
.ndo_get_stats64 = xgene_enet_get_stats64,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = xgene_enet_set_mac_address,
};
#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
struct xgene_enet_pdata *pdata)
{
acpi_status status;
u64 temp;
status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
if (ACPI_FAILURE(status)) {
pdata->port_id = 0;
} else {
pdata->port_id = temp;
}
return;
}
#endif
static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
u32 id = 0;
of_property_read_u32(dev->of_node, "port-id", &id);
pdata->port_id = id & BIT(0);
return;
}
static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
int delay, ret;
ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
if (ret) {
pdata->tx_delay = 4;
return 0;
}
if (delay < 0 || delay > 7) {
dev_err(dev, "Invalid tx-delay specified\n");
return -EINVAL;
}
pdata->tx_delay = delay;
return 0;
}
static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
int delay, ret;
ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
if (ret) {
pdata->rx_delay = 2;
return 0;
}
if (delay < 0 || delay > 7) {
dev_err(dev, "Invalid rx-delay specified\n");
return -EINVAL;
}
pdata->rx_delay = delay;
return 0;
}
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
struct net_device *ndev;
struct device *dev;
struct resource *res;
void __iomem *base_addr;
u32 offset;
int ret = 0;
pdev = pdata->pdev;
dev = &pdev->dev;
ndev = pdata->ndev;
res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
if (!res) {
dev_err(dev, "Resource enet_csr not defined\n");
return -ENODEV;
}
pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
if (!pdata->base_addr) {
dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
if (!res) {
dev_err(dev, "Resource ring_csr not defined\n");
return -ENODEV;
}
pdata->ring_csr_addr = devm_ioremap(dev, res->start,
resource_size(res));
if (!pdata->ring_csr_addr) {
dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
if (!res) {
dev_err(dev, "Resource ring_cmd not defined\n");
return -ENODEV;
}
pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
resource_size(res));
if (!pdata->ring_cmd_addr) {
dev_err(dev, "Unable to retrieve ENET Ring command region\n");
return -ENOMEM;
}
if (dev->of_node)
xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
else
xgene_get_port_id_acpi(dev, pdata);
#endif
if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
pdata->phy_mode = device_get_phy_mode(dev);
if (pdata->phy_mode < 0) {
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
}
if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
dev_err(dev, "Incorrect phy-connection-type specified\n");
return -ENODEV;
}
ret = xgene_get_tx_delay(pdata);
if (ret)
return ret;
ret = xgene_get_rx_delay(pdata);
if (ret)
return ret;
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(dev, "Unable to get ENET Rx IRQ\n");
ret = ret ? : -ENXIO;
return ret;
}
pdata->rx_irq = ret;
if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
ret = platform_get_irq(pdev, 1);
if (ret <= 0) {
pdata->cq_cnt = 0;
dev_info(dev, "Unable to get Tx completion IRQ,"
"using Rx IRQ instead\n");
} else {
pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
pdata->txc_irq = ret;
}
}
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Firmware may have set up the clock already. */
dev_info(dev, "clocks have been setup already\n");
}
if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
else
base_addr = pdata->base_addr;
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
offset = (pdata->enet_id == XGENE_ENET1) ?
BLOCK_ETH_MAC_CSR_OFFSET :
X2_BLOCK_ETH_MAC_CSR_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
}
pdata->rx_buff_cnt = NUM_PKT_BUF;
return 0;
}
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
struct net_device *ndev = pdata->ndev;
struct xgene_enet_desc_ring *buf_pool;
u16 dst_ring_num;
int ret;
ret = pdata->port_ops->reset(pdata);
if (ret)
return ret;
ret = xgene_enet_create_desc_rings(ndev);
if (ret) {
netdev_err(ndev, "Error in ring configuration\n");
return ret;
}
/* setup buffer pool */
buf_pool = pdata->rx_ring->buf_pool;
xgene_enet_init_bufpool(buf_pool);
ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
if (ret) {
xgene_enet_delete_desc_rings(pdata);
return ret;
}
dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
pdata->mac_ops->init(pdata);
return ret;
}
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
switch (pdata->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
pdata->mac_ops = &xgene_gmac_ops;
pdata->port_ops = &xgene_gport_ops;
pdata->rm = RM3;
break;
case PHY_INTERFACE_MODE_SGMII:
pdata->mac_ops = &xgene_sgmac_ops;
pdata->port_ops = &xgene_sgport_ops;
pdata->rm = RM1;
break;
default:
pdata->mac_ops = &xgene_xgmac_ops;
pdata->port_ops = &xgene_xgport_ops;
pdata->rm = RM0;
break;
}
if (pdata->enet_id == XGENE_ENET1) {
switch (pdata->port_id) {
case 0:
pdata->cpu_bufnum = START_CPU_BUFNUM_0;
pdata->eth_bufnum = START_ETH_BUFNUM_0;
pdata->bp_bufnum = START_BP_BUFNUM_0;
pdata->ring_num = START_RING_NUM_0;
break;
case 1:
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
pdata->ring_num = XG_START_RING_NUM_1;
} else {
pdata->cpu_bufnum = START_CPU_BUFNUM_1;
pdata->eth_bufnum = START_ETH_BUFNUM_1;
pdata->bp_bufnum = START_BP_BUFNUM_1;
pdata->ring_num = START_RING_NUM_1;
}
break;
default:
break;
}
pdata->ring_ops = &xgene_ring1_ops;
} else {
switch (pdata->port_id) {
case 0:
pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
pdata->ring_num = X2_START_RING_NUM_0;
break;
case 1:
pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
pdata->ring_num = X2_START_RING_NUM_1;
break;
default:
break;
}
pdata->rm = RM0;
pdata->ring_ops = &xgene_ring2_ops;
}
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
struct napi_struct *napi;
napi = &pdata->rx_ring->napi;
netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
if (pdata->cq_cnt) {
napi = &pdata->tx_ring->cp_ring->napi;
netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
NAPI_POLL_WEIGHT);
}
}
static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
struct napi_struct *napi;
napi = &pdata->rx_ring->napi;
netif_napi_del(napi);
if (pdata->cq_cnt) {
napi = &pdata->tx_ring->cp_ring->napi;
netif_napi_del(napi);
}
}
static int xgene_enet_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
const struct xgene_mac_ops *mac_ops;
const struct of_device_id *of_id;
int ret;
ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
if (!ndev)
return -ENOMEM;
pdata = netdev_priv(ndev);
pdata->pdev = pdev;
pdata->ndev = ndev;
SET_NETDEV_DEV(ndev, dev);
platform_set_drvdata(pdev, pdata);
ndev->netdev_ops = &xgene_ndev_ops;
xgene_enet_set_ethtool_ops(ndev);
ndev->features |= NETIF_F_IP_CSUM |
NETIF_F_GSO |
NETIF_F_GRO |
NETIF_F_SG;
of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
if (of_id) {
pdata->enet_id = (enum xgene_enet_id)of_id->data;
}
#ifdef CONFIG_ACPI
else {
const struct acpi_device_id *acpi_id;
acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
if (acpi_id)
pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
}
#endif
if (!pdata->enet_id) {
free_netdev(ndev);
return -ENODEV;
}
ret = xgene_enet_get_resources(pdata);
if (ret)
goto err;
xgene_enet_setup_ops(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ndev->features |= NETIF_F_TSO;
pdata->mss = XGENE_ENET_MSS;
}
ndev->hw_features = ndev->features;
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
netdev_err(ndev, "No usable DMA configuration\n");
goto err;
}
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
goto err;
}
ret = xgene_enet_init_hw(pdata);
if (ret)
goto err;
mac_ops = pdata->mac_ops;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
ret = xgene_enet_mdio_config(pdata);
if (ret)
goto err;
} else {
INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
}
xgene_enet_napi_add(pdata);
return 0;
err:
unregister_netdev(ndev);
free_netdev(ndev);
return ret;
}
static int xgene_enet_remove(struct platform_device *pdev)
{
struct xgene_enet_pdata *pdata;
const struct xgene_mac_ops *mac_ops;
struct net_device *ndev;
pdata = platform_get_drvdata(pdev);
mac_ops = pdata->mac_ops;
ndev = pdata->ndev;
mac_ops->rx_disable(pdata);
mac_ops->tx_disable(pdata);
xgene_enet_napi_del(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
return 0;
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
{ "APMC0D05", XGENE_ENET1},
{ "APMC0D30", XGENE_ENET1},
{ "APMC0D31", XGENE_ENET1},
{ "APMC0D3F", XGENE_ENET1},
{ "APMC0D26", XGENE_ENET2},
{ "APMC0D25", XGENE_ENET2},
{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
{.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
{},
};
MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
.of_match_table = of_match_ptr(xgene_enet_of_match),
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
.remove = xgene_enet_remove,
};
module_platform_driver(xgene_enet_driver);
MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");
| sria91/linux | drivers/net/ethernet/apm/xgene/xgene_enet_main.c | C | gpl-2.0 | 38,895 |
/*
YUI 3.5.1 (build 22)
Copyright 2012 Yahoo! Inc. All rights reserved.
Licensed under the BSD License.
http://yuilibrary.com/license/
*/
.yui3-slider,.yui3-slider-rail{display:-moz-inline-stack;display:inline-block;*display:inline;zoom:1;vertical-align:middle}.yui3-slider-content{position:relative;display:block}.yui3-slider-rail{position:relative}.yui3-slider-rail-cap-top,.yui3-slider-rail-cap-left,.yui3-slider-rail-cap-bottom,.yui3-slider-rail-cap-right,.yui3-slider-thumb,.yui3-slider-thumb-image,.yui3-slider-thumb-shadow{position:absolute}.yui3-slider-thumb{overflow:hidden}.yui3-skin-round .yui3-slider-x .yui3-slider-rail,.yui3-skin-round .yui3-slider-x .yui3-slider-rail-cap-left,.yui3-skin-round .yui3-slider-x .yui3-slider-rail-cap-right{background-image:url(rail-x.png);background-repeat:repeat-x}.yui3-skin-round .yui3-slider-x .yui3-slider-rail{height:25px;background-position:0 3px}.yui3-skin-round .yui3-slider-x .yui3-slider-thumb{height:26px;width:24px}.yui3-skin-round .yui3-slider-x .yui3-slider-rail-cap-left{background-position:0 -17px;height:20px;left:-2px;width:5px}.yui3-skin-round .yui3-slider-x .yui3-slider-rail-cap-right{background-position:0 -37px;height:20px;right:-2px;width:5px}.yui3-skin-round .yui3-slider-x .yui3-slider-thumb-image{left:0;top:-7px}.yui3-skin-round .yui3-slider-x .yui3-slider-thumb-shadow{left:0;opacity:.15;filter:alpha(opacity=15);top:-47px}.yui3-skin-round .yui3-slider-y .yui3-slider-rail,.yui3-skin-round .yui3-slider-y .yui3-slider-rail-cap-top,.yui3-skin-round .yui3-slider-y .yui3-slider-rail-cap-bottom{background-image:url(rail-y.png);background-repeat:repeat-y}.yui3-skin-round .yui3-slider-y .yui3-slider-rail{width:25px;background-position:3px 0}.yui3-skin-round .yui3-slider-y .yui3-slider-thumb{width:26px;height:24px}.yui3-skin-round .yui3-slider-y .yui3-slider-rail-cap-top{background-position:-17px 0;width:20px;top:-2px;height:5px}.yui3-skin-round .yui3-slider-y .yui3-slider-rail-cap-bottom{background-position:-37px 0;width:20px;bottom:-2px;height:5px}.yui3-skin-round .yui3-slider-y .yui3-slider-thumb-image{top:0;left:-8px}.yui3-skin-round .yui3-slider-y .yui3-slider-thumb-shadow{top:0;left:-48px;opacity:.15;filter:alpha(opacity=15)}#yui3-css-stamp.skin-round-slider-base{display:none}
| usabilidoido/corais | sites/all/libraries/yui/build/slider-base/assets/skins/round/slider-base.css | CSS | gpl-2.0 | 2,267 |
/* $NoKeywords:$ */
/**
* @file
*
* mnotdr.c
*
* Northbridge Non-SPD timings for DR
*
* @xrefitem bom "File Content Label" "Release Content"
* @e project: AGESA
* @e sub-project: (Mem/NB/DR)
* @e \$Revision: 36462 $ @e \$Date: 2010-08-20 00:49:49 +0800 (Fri, 20 Aug 2010) $
*
**/
/*
*****************************************************************************
*
* Copyright (c) 2011, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***************************************************************************
*
*/
/*
*----------------------------------------------------------------------------
* MODULES USED
*
*----------------------------------------------------------------------------
*/
#include "AGESA.h"
#include "Ids.h"
#include "mm.h"
#include "mn.h"
#include "OptionMemory.h" // need def for MEM_FEAT_BLOCK_NB
#include "mndr.h"
#include "Filecode.h"
CODE_GROUP (G1_PEICC)
RDATA_GROUP (G1_PEICC)
#define FILECODE PROC_MEM_NB_DR_MNOTDR_FILECODE
/*----------------------------------------------------------------------------
* DEFINITIONS AND MACROS
*
*----------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------
* TYPEDEFS AND STRUCTURES
*
*----------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------
* PROTOTYPES OF LOCAL FUNCTIONS
*
*----------------------------------------------------------------------------
*/
VOID
STATIC
MemNSetOtherTimingDR (
IN OUT MEM_NB_BLOCK *NBPtr
);
VOID
STATIC
MemNPowerDownCtlDR (
IN OUT MEM_NB_BLOCK *NBPtr
);
/*----------------------------------------------------------------------------
* EXPORTED FUNCTIONS
*
*----------------------------------------------------------------------------
*/
extern BUILD_OPT_CFG UserOptions;
/* -----------------------------------------------------------------------------*/
/**
*
*
* This function sets the non-SPD timings
*
* @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK
*
* @return TRUE - No fatal error occurs.
* @return FALSE - Fatal error occurs.
*/
BOOLEAN
MemNOtherTimingDr (
IN OUT MEM_NB_BLOCK *NBPtr
)
{
MemNSwitchDCTNb (NBPtr, 0);
if (NBPtr->DCTPtr->Timings.DctDimmValid > 0) {
MemNSetOtherTimingDR (NBPtr); // Set DR Timings
MemNPowerDownCtlNb (NBPtr);
}
MemNSwitchDCTNb (NBPtr, 1);
if ((NBPtr->DCTPtr->Timings.DctDimmValid > 0) && (NBPtr->MCTPtr->GangedMode == FALSE)) {
MemNSetOtherTimingDR (NBPtr); // Set DR Timings
MemNPowerDownCtlNb (NBPtr);
}
return (BOOLEAN) (NBPtr->MCTPtr->ErrCode < AGESA_FATAL);
}
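/*
 * Illustrative note: the routine above walks both DRAM controllers via
 * MemNSwitchDCTNb, programming the non-SPD timings and power-down control
 * only for a DCT that actually has valid DIMMs, and skipping DCT 1 when
 * the controllers run in ganged mode. Whether a fatal error occurred is
 * judged afterwards from MCTPtr->ErrCode.
 */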
/*----------------------------------------------------------------------------
* LOCAL FUNCTIONS
*
*----------------------------------------------------------------------------
*/
/* -----------------------------------------------------------------------------*/
/**
*
*
* This function sets the non-SPD timings into the PCI registers
*
* @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK
*
*/
VOID
STATIC
MemNSetOtherTimingDR (
IN OUT MEM_NB_BLOCK *NBPtr
)
{
MemNSetBitFieldNb (NBPtr, BFTrdrd, MemNGetTrdrdNb (NBPtr));
MemNSetBitFieldNb (NBPtr, BFTwrwr, MemNGetTwrwrNb (NBPtr));
MemNSetBitFieldNb (NBPtr, BFTwrrd, MemNGetTwrrdNb (NBPtr));
MemNSetBitFieldNb (NBPtr, BFTrwtTO, MemNGetTrwtTONb (NBPtr));
MemNSetBitFieldNb (NBPtr, BFTrwtWB, MemNGetTrwtWBNb (NBPtr));
}
/* -----------------------------------------------------------------------------*/
/**
*
*
* This function enables power down mode
*
* @param[in,out] *NBPtr - Pointer to the MEM_NB_BLOCK
*
*/
VOID
STATIC
MemNPowerDownCtlDR (
IN OUT MEM_NB_BLOCK *NBPtr
)
{
MEM_PARAMETER_STRUCT *RefPtr;
UINT8 PowerDownMode;
RefPtr = NBPtr->RefPtr;
// we can't enable powerdown mode when doing WL
if (RefPtr->EnablePowerDown) {
MemNSetBitFieldNb (NBPtr, BFPowerDownEn, 1);
PowerDownMode = (UINT8) UserOptions.CfgPowerDownMode;
IDS_OPTION_HOOK (IDS_POWERDOWN_MODE, &PowerDownMode, &(NBPtr->MemPtr->StdHeader));
if (PowerDownMode) {
MemNSetBitFieldNb (NBPtr, BFPowerDownMode, 1);
}
}
}
| lkundrak/coreboot | src/vendorcode/amd/agesa/f14/Proc/Mem/NB/DR/mnotdr.c | C | gpl-2.0 | 6,024 |
/*
* av7110_v4l.c: av7110 video4linux interface for DVB and Siemens DVB-C analog module
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* originally based on code by:
* Copyright (C) 1998,1999 Christian Theiss <mistert@rz.fh-augsburg.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*
* the project's page is at http://www.linuxtv.org/dvb/
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include "av7110.h"
#include "av7110_hw.h"
#include "av7110_av.h"
int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val)
{
u8 msg[5] = { dev, reg >> 8, reg & 0xff, val >> 8 , val & 0xff };
struct i2c_msg msgs = { .flags = 0, .len = 5, .buf = msg };
switch (av7110->adac_type) {
case DVB_ADAC_MSP34x0:
msgs.addr = 0x40;
break;
case DVB_ADAC_MSP34x5:
msgs.addr = 0x42;
break;
default:
return 0;
}
if (i2c_transfer(&av7110->i2c_adap, &msgs, 1) != 1) {
dprintk(1, "dvb-ttpci: failed @ card %d, %u = %u\n",
av7110->dvb_adapter.num, reg, val);
return -EIO;
}
return 0;
}
static int msp_readreg(struct av7110 *av7110, u8 dev, u16 reg, u16 *val)
{
u8 msg1[3] = { dev, reg >> 8, reg & 0xff };
u8 msg2[2];
struct i2c_msg msgs[2] = {
{ .flags = 0 , .len = 3, .buf = msg1 },
{ .flags = I2C_M_RD, .len = 2, .buf = msg2 }
};
switch (av7110->adac_type) {
case DVB_ADAC_MSP34x0:
msgs[0].addr = 0x40;
msgs[1].addr = 0x40;
break;
case DVB_ADAC_MSP34x5:
msgs[0].addr = 0x42;
msgs[1].addr = 0x42;
break;
default:
return 0;
}
if (i2c_transfer(&av7110->i2c_adap, &msgs[0], 2) != 2) {
dprintk(1, "dvb-ttpci: failed @ card %d, %u\n",
av7110->dvb_adapter.num, reg);
return -EIO;
}
*val = (msg2[0] << 8) | msg2[1];
return 0;
}
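/*
 * Illustrative note: both helpers above address the MSP34x0/MSP34x5 audio
 * processor through 16-bit register indices. A write is a single 5-byte
 * message { dev, reg_hi, reg_lo, val_hi, val_lo }; a read is a combined
 * transfer of a 3-byte { dev, reg_hi, reg_lo } write followed by a 2-byte
 * read returning the value MSB first. The chip's I2C address (0x40 or
 * 0x42) is chosen from the detected ADAC type.
 */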
static struct v4l2_input inputs[4] = {
{
.index = 0,
.name = "DVB",
.type = V4L2_INPUT_TYPE_CAMERA,
.audioset = 1,
.tuner = 0, /* ignored */
.std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
.status = 0,
}, {
.index = 1,
.name = "Television",
.type = V4L2_INPUT_TYPE_TUNER,
.audioset = 2,
.tuner = 0,
.std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
.status = 0,
}, {
.index = 2,
.name = "Video",
.type = V4L2_INPUT_TYPE_CAMERA,
.audioset = 0,
.tuner = 0,
.std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
.status = 0,
}, {
.index = 3,
.name = "Y/C",
.type = V4L2_INPUT_TYPE_CAMERA,
.audioset = 0,
.tuner = 0,
.std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
.status = 0,
}
};
static int ves1820_writereg(struct saa7146_dev *dev, u8 addr, u8 reg, u8 data)
{
struct av7110 *av7110 = dev->ext_priv;
u8 buf[] = { 0x00, reg, data };
struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 };
dprintk(4, "dev: %p\n", dev);
if (1 != i2c_transfer(&av7110->i2c_adap, &msg, 1))
return -1;
return 0;
}
static int tuner_write(struct saa7146_dev *dev, u8 addr, u8 data [4])
{
struct av7110 *av7110 = dev->ext_priv;
struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = data, .len = 4 };
dprintk(4, "dev: %p\n", dev);
if (1 != i2c_transfer(&av7110->i2c_adap, &msg, 1))
return -1;
return 0;
}
static int ves1820_set_tv_freq(struct saa7146_dev *dev, u32 freq)
{
u32 div;
u8 config;
u8 buf[4];
dprintk(4, "freq: 0x%08x\n", freq);
/* Magic number: 614. Tuning with the frequency given by v4l2
is always off by 614 * 62.5 = 38375 kHz... */
div = freq + 614;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x8e;
if (freq < (u32) (16 * 168.25))
config = 0xa0;
else if (freq < (u32) (16 * 447.25))
config = 0x90;
else
config = 0x30;
config &= ~0x02;
buf[3] = config;
return tuner_write(dev, 0x61, buf);
}
static int stv0297_set_tv_freq(struct saa7146_dev *dev, u32 freq)
{
struct av7110 *av7110 = (struct av7110*)dev->ext_priv;
u32 div;
u8 data[4];
div = (freq + 38900000 + 31250) / 62500;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0xce;
if (freq < 45000000)
return -EINVAL;
else if (freq < 137000000)
data[3] = 0x01;
else if (freq < 403000000)
data[3] = 0x02;
else if (freq < 860000000)
data[3] = 0x04;
else
return -EINVAL;
if (av7110->fe->ops.i2c_gate_ctrl)
av7110->fe->ops.i2c_gate_ctrl(av7110->fe, 1);
return tuner_write(dev, 0x63, data);
}
static struct saa7146_standard analog_standard[];
static struct saa7146_standard dvb_standard[];
static struct saa7146_standard standard[];
static struct v4l2_audio msp3400_v4l2_audio = {
.index = 0,
.name = "Television",
.capability = V4L2_AUDCAP_STEREO
};
static int av7110_dvb_c_switch(struct saa7146_fh *fh)
{
struct saa7146_dev *dev = fh->dev;
struct saa7146_vv *vv = dev->vv_data;
struct av7110 *av7110 = (struct av7110*)dev->ext_priv;
u16 adswitch;
int source, sync, err;
dprintk(4, "%p\n", av7110);
if ((vv->video_status & STATUS_OVERLAY) != 0) {
vv->ov_suspend = vv->video_fh;
err = saa7146_stop_preview(vv->video_fh); /* side effect: video_status is now 0, video_fh is NULL */
if (err != 0) {
dprintk(2, "suspending video failed\n");
vv->ov_suspend = NULL;
}
}
if (0 != av7110->current_input) {
dprintk(1, "switching to analog TV:\n");
adswitch = 1;
source = SAA7146_HPS_SOURCE_PORT_B;
sync = SAA7146_HPS_SYNC_PORT_B;
memcpy(standard, analog_standard, sizeof(struct saa7146_standard) * 2);
switch (av7110->current_input) {
case 1:
dprintk(1, "switching SAA7113 to Analog Tuner Input.\n");
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0000); // loudspeaker source
msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0000); // headphone source
msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0000); // SCART 1 source
msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono
msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); // loudspeaker + headphone
msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); // SCART 1 volume
if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
if (ves1820_writereg(dev, 0x09, 0x0f, 0x60))
dprintk(1, "setting band in demodulator failed.\n");
} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // TDA9819 pin9(STD)
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); // TDA9819 pin30(VIF)
}
if (i2c_writereg(av7110, 0x48, 0x02, 0xd0) != 1)
dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
break;
case 2:
dprintk(1, "switching SAA7113 to Video AV CVBS Input.\n");
if (i2c_writereg(av7110, 0x48, 0x02, 0xd2) != 1)
dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
break;
case 3:
dprintk(1, "switching SAA7113 to Video AV Y/C Input.\n");
if (i2c_writereg(av7110, 0x48, 0x02, 0xd9) != 1)
dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
break;
default:
dprintk(1, "switching SAA7113 to Input: AV7110: SAA7113: invalid input.\n");
}
} else {
adswitch = 0;
source = SAA7146_HPS_SOURCE_PORT_A;
sync = SAA7146_HPS_SYNC_PORT_A;
memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2);
dprintk(1, "switching DVB mode\n");
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source
msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source
msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source
msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono
msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone
msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume
if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
if (ves1820_writereg(dev, 0x09, 0x0f, 0x20))
dprintk(1, "setting band in demodulator failed.\n");
} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
}
}
/* hmm, this does not do anything!? */
if (av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, adswitch))
dprintk(1, "ADSwitch error\n");
saa7146_set_hps_source_and_sync(dev, source, sync);
if (vv->ov_suspend != NULL) {
saa7146_start_preview(vv->ov_suspend);
vv->ov_suspend = NULL;
}
return 0;
}
static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
u16 stereo_det;
s8 stereo;
dprintk(2, "VIDIOC_G_TUNER: %d\n", t->index);
if (!av7110->analog_tuner_flags || t->index != 0)
return -EINVAL;
memset(t, 0, sizeof(*t));
strcpy((char *)t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
t->rangelow = 772; /* 48.25 MHz / 62.5 kHz = 772, see fi1216mk2-specs, page 2 */
t->rangehigh = 13684; /* 855.25 MHz / 62.5 kHz = 13684 */
/* FIXME: add the real signal strength here */
t->signal = 0xffff;
t->afc = 0;
/* FIXME: standard / stereo detection is still broken */
msp_readreg(av7110, MSP_RD_DEM, 0x007e, &stereo_det);
dprintk(1, "VIDIOC_G_TUNER: msp3400 TV standard detection: 0x%04x\n", stereo_det);
msp_readreg(av7110, MSP_RD_DSP, 0x0018, &stereo_det);
dprintk(1, "VIDIOC_G_TUNER: msp3400 stereo detection: 0x%04x\n", stereo_det);
stereo = (s8)(stereo_det >> 8);
if (stereo > 0x10) {
/* stereo */
t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
t->audmode = V4L2_TUNER_MODE_STEREO;
} else if (stereo < -0x10) {
/* bilingual */
t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
t->audmode = V4L2_TUNER_MODE_LANG1;
} else /* mono */
t->rxsubchans = V4L2_TUNER_SUB_MONO;
return 0;
}
static int vidioc_s_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
u16 fm_matrix, src;
dprintk(2, "VIDIOC_S_TUNER: %d\n", t->index);
if (!av7110->analog_tuner_flags || av7110->current_input != 1)
return -EINVAL;
switch (t->audmode) {
case V4L2_TUNER_MODE_STEREO:
dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_STEREO\n");
fm_matrix = 0x3001; /* stereo */
src = 0x0020;
break;
case V4L2_TUNER_MODE_LANG1_LANG2:
dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1_LANG2\n");
fm_matrix = 0x3000; /* bilingual */
src = 0x0020;
break;
case V4L2_TUNER_MODE_LANG1:
dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1\n");
fm_matrix = 0x3000; /* mono */
src = 0x0000;
break;
case V4L2_TUNER_MODE_LANG2:
dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG2\n");
fm_matrix = 0x3000; /* mono */
src = 0x0010;
break;
default: /* case V4L2_TUNER_MODE_MONO: */
dprintk(2, "VIDIOC_S_TUNER: TDA9840_SET_MONO\n");
fm_matrix = 0x3000; /* mono */
src = 0x0030;
break;
}
msp_writereg(av7110, MSP_WR_DSP, 0x000e, fm_matrix);
msp_writereg(av7110, MSP_WR_DSP, 0x0008, src);
msp_writereg(av7110, MSP_WR_DSP, 0x0009, src);
msp_writereg(av7110, MSP_WR_DSP, 0x000a, src);
return 0;
}
static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *f)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
dprintk(2, "VIDIOC_G_FREQ: freq:0x%08x.\n", f->frequency);
if (!av7110->analog_tuner_flags || av7110->current_input != 1)
return -EINVAL;
memset(f, 0, sizeof(*f));
f->type = V4L2_TUNER_ANALOG_TV;
f->frequency = av7110->current_freq;
return 0;
}
static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency *f)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
dprintk(2, "VIDIOC_S_FREQUENCY: freq:0x%08x.\n", f->frequency);
if (!av7110->analog_tuner_flags || av7110->current_input != 1)
return -EINVAL;
if (V4L2_TUNER_ANALOG_TV != f->type)
return -EINVAL;
msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0xffe0); /* fast mute */
msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0xffe0);
/* tune in desired frequency */
if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820)
ves1820_set_tv_freq(dev, f->frequency);
else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297)
stv0297_set_tv_freq(dev, f->frequency);
av7110->current_freq = f->frequency;
msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x003f); /* start stereo detection */
msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x0000);
msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); /* loudspeaker + headphone */
msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); /* SCART 1 volume */
return 0;
}
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
dprintk(2, "VIDIOC_ENUMINPUT: %d\n", i->index);
if (av7110->analog_tuner_flags) {
if (i->index >= 4)
return -EINVAL;
} else {
if (i->index != 0)
return -EINVAL;
}
memcpy(i, &inputs[i->index], sizeof(struct v4l2_input));
return 0;
}
static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
*input = av7110->current_input;
dprintk(2, "VIDIOC_G_INPUT: %d\n", *input);
return 0;
}
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
dprintk(2, "VIDIOC_S_INPUT: %d\n", input);
if (!av7110->analog_tuner_flags)
return 0;
if (input < 0 || input >= 4)
return -EINVAL;
av7110->current_input = input;
return av7110_dvb_c_switch(fh);
}
static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
if (a->index != 0)
return -EINVAL;
memcpy(a, &msp3400_v4l2_audio, sizeof(struct v4l2_audio));
return 0;
}
static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
dprintk(2, "VIDIOC_S_AUDIO: %d\n", a->index);
return 0;
}
static int vidioc_g_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_sliced_vbi_cap *cap)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
dprintk(2, "VIDIOC_G_SLICED_VBI_CAP\n");
if (cap->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT)
return -EINVAL;
if (FW_VERSION(av7110->arm_app) >= 0x2623) {
cap->service_set = V4L2_SLICED_WSS_625;
cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
}
return 0;
}
static int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh,
struct v4l2_format *f)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
dprintk(2, "VIDIOC_G_FMT:\n");
if (FW_VERSION(av7110->arm_app) < 0x2623)
return -EINVAL;
memset(&f->fmt.sliced, 0, sizeof f->fmt.sliced);
if (av7110->wssMode) {
f->fmt.sliced.service_set = V4L2_SLICED_WSS_625;
f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data);
}
return 0;
}
static int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
struct v4l2_format *f)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
dprintk(2, "VIDIOC_S_FMT\n");
if (FW_VERSION(av7110->arm_app) < 0x2623)
return -EINVAL;
if (f->fmt.sliced.service_set != V4L2_SLICED_WSS_625 &&
f->fmt.sliced.service_lines[0][23] != V4L2_SLICED_WSS_625) {
memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced));
/* WSS controlled by firmware */
av7110->wssMode = 0;
av7110->wssData = 0;
return av7110_fw_cmd(av7110, COMTYPE_ENCODER,
SetWSSConfig, 1, 0);
} else {
memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced));
f->fmt.sliced.service_set = V4L2_SLICED_WSS_625;
f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data);
/* WSS controlled by userspace */
av7110->wssMode = 1;
av7110->wssData = 0;
}
return 0;
}
static int av7110_vbi_reset(struct file *file)
{
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
struct av7110 *av7110 = (struct av7110*) dev->ext_priv;
dprintk(2, "%s\n", __func__);
av7110->wssMode = 0;
av7110->wssData = 0;
if (FW_VERSION(av7110->arm_app) < 0x2623)
return 0;
else
return av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 1, 0);
}
static ssize_t av7110_vbi_write(struct file *file, const char __user *data, size_t count, loff_t *ppos)
{
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
struct av7110 *av7110 = (struct av7110*) dev->ext_priv;
struct v4l2_sliced_vbi_data d;
int rc;
dprintk(2, "%s\n", __func__);
if (FW_VERSION(av7110->arm_app) < 0x2623 || !av7110->wssMode || count != sizeof d)
return -EINVAL;
if (copy_from_user(&d, data, count))
return -EFAULT;
if ((d.id != 0 && d.id != V4L2_SLICED_WSS_625) || d.field != 0 || d.line != 23)
return -EINVAL;
if (d.id)
av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0];
else
av7110->wssData = 0x8000;
rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 1, av7110->wssData);
return (rc < 0) ? rc : count;
}
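/*
 * Illustration only (not compiled): a rough sketch of how userspace might
 * feed a WSS word to the sliced VBI output device handled above, after the
 * sliced format has been selected via VIDIOC_S_FMT (see
 * vidioc_s_fmt_sliced_vbi_out() above). The device node name and the example
 * payload value are assumptions; the write handler itself only requires
 * id == V4L2_SLICED_WSS_625 or 0, field == 0, line == 23, and a write of
 * exactly sizeof(struct v4l2_sliced_vbi_data).
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/videodev2.h>

static int send_wss(const char *dev, unsigned short wss14)
{
	struct v4l2_sliced_vbi_data d;
	int fd = open(dev, O_WRONLY);

	if (fd < 0)
		return -1;
	memset(&d, 0, sizeof(d));
	d.id = V4L2_SLICED_WSS_625;		/* id == 0 is also accepted by the driver */
	d.field = 0;
	d.line = 23;
	d.data[0] = wss14 & 0xff;		/* low 8 bits of the 14-bit WSS word */
	d.data[1] = (wss14 >> 8) & 0x3f;	/* high 6 bits */
	if (write(fd, &d, sizeof(d)) != (ssize_t)sizeof(d)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
/* e.g. send_wss("/dev/vbi0", 0x0008); actual aspect codes per EN 300 294 */
#endif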
/****************************************************************************
* INITIALIZATION
****************************************************************************/
static u8 saa7113_init_regs[] = {
0x02, 0xd0,
0x03, 0x23,
0x04, 0x00,
0x05, 0x00,
0x06, 0xe9,
0x07, 0x0d,
0x08, 0x98,
0x09, 0x02,
0x0a, 0x80,
0x0b, 0x40,
0x0c, 0x40,
0x0d, 0x00,
0x0e, 0x01,
0x0f, 0x7c,
0x10, 0x48,
0x11, 0x0c,
0x12, 0x8b,
0x13, 0x1a,
0x14, 0x00,
0x15, 0x00,
0x16, 0x00,
0x17, 0x00,
0x18, 0x00,
0x19, 0x00,
0x1a, 0x00,
0x1b, 0x00,
0x1c, 0x00,
0x1d, 0x00,
0x1e, 0x00,
0x41, 0x77,
0x42, 0x77,
0x43, 0x77,
0x44, 0x77,
0x45, 0x77,
0x46, 0x77,
0x47, 0x77,
0x48, 0x77,
0x49, 0x77,
0x4a, 0x77,
0x4b, 0x77,
0x4c, 0x77,
0x4d, 0x77,
0x4e, 0x77,
0x4f, 0x77,
0x50, 0x77,
0x51, 0x77,
0x52, 0x77,
0x53, 0x77,
0x54, 0x77,
0x55, 0x77,
0x56, 0x77,
0x57, 0xff,
0xff
};
static struct saa7146_ext_vv av7110_vv_data_st;
static struct saa7146_ext_vv av7110_vv_data_c;
int av7110_init_analog_module(struct av7110 *av7110)
{
u16 version1, version2;
if (i2c_writereg(av7110, 0x80, 0x0, 0x80) == 1 &&
i2c_writereg(av7110, 0x80, 0x0, 0) == 1) {
printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3400\n",
av7110->dvb_adapter.num);
av7110->adac_type = DVB_ADAC_MSP34x0;
} else if (i2c_writereg(av7110, 0x84, 0x0, 0x80) == 1 &&
i2c_writereg(av7110, 0x84, 0x0, 0) == 1) {
printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3415\n",
av7110->dvb_adapter.num);
av7110->adac_type = DVB_ADAC_MSP34x5;
} else
return -ENODEV;
msleep(100); // the probing above resets the msp...
msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1);
msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2);
dprintk(1, "dvb-ttpci: @ card %d MSP34xx version 0x%04x 0x%04x\n",
av7110->dvb_adapter.num, version1, version2);
msp_writereg(av7110, MSP_WR_DSP, 0x0013, 0x0c00);
msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source
msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source
msp_writereg(av7110, MSP_WR_DSP, 0x0004, 0x7f00); // loudspeaker volume
msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source
msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume
msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x1900); // prescale SCART
if (i2c_writereg(av7110, 0x48, 0x01, 0x00)!=1) {
INFO(("saa7113 not accessible.\n"));
} else {
u8 *i = saa7113_init_regs;
if ((av7110->dev->pci->subsystem_vendor == 0x110a) && (av7110->dev->pci->subsystem_device == 0x0000)) {
/* Fujitsu/Siemens DVB-Cable */
av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820;
} else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) && (av7110->dev->pci->subsystem_device == 0x0002)) {
/* Hauppauge/TT DVB-C premium */
av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820;
} else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) && (av7110->dev->pci->subsystem_device == 0x000A)) {
/* Hauppauge/TT DVB-C premium */
av7110->analog_tuner_flags |= ANALOG_TUNER_STV0297;
}
/* setup for DVB by default */
if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
if (ves1820_writereg(av7110->dev, 0x09, 0x0f, 0x20))
dprintk(1, "setting band in demodulator failed.\n");
} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
}
/* init the saa7113 */
while (*i != 0xff) {
if (i2c_writereg(av7110, 0x48, i[0], i[1]) != 1) {
dprintk(1, "saa7113 initialization failed @ card %d", av7110->dvb_adapter.num);
break;
}
i += 2;
}
/* setup msp for analog sound: B/G Dual-FM */
msp_writereg(av7110, MSP_WR_DEM, 0x00bb, 0x02d0); // AD_CV
msp_writereg(av7110, MSP_WR_DEM, 0x0001, 3); // FIR1
msp_writereg(av7110, MSP_WR_DEM, 0x0001, 18); // FIR1
msp_writereg(av7110, MSP_WR_DEM, 0x0001, 27); // FIR1
msp_writereg(av7110, MSP_WR_DEM, 0x0001, 48); // FIR1
msp_writereg(av7110, MSP_WR_DEM, 0x0001, 66); // FIR1
msp_writereg(av7110, MSP_WR_DEM, 0x0001, 72); // FIR1
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 4); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 64); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 0); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 3); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 18); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 27); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 48); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 66); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0005, 72); // FIR2
msp_writereg(av7110, MSP_WR_DEM, 0x0083, 0xa000); // MODE_REG
msp_writereg(av7110, MSP_WR_DEM, 0x0093, 0x00aa); // DCO1_LO 5.74MHz
msp_writereg(av7110, MSP_WR_DEM, 0x009b, 0x04fc); // DCO1_HI
msp_writereg(av7110, MSP_WR_DEM, 0x00a3, 0x038e); // DCO2_LO 5.5MHz
msp_writereg(av7110, MSP_WR_DEM, 0x00ab, 0x04c6); // DCO2_HI
msp_writereg(av7110, MSP_WR_DEM, 0x0056, 0); // LOAD_REG 1/2
}
memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2);
/* set dd1 stream a & b */
saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000);
saa7146_write(av7110->dev, DD1_INIT, 0x03000700);
saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
return 0;
}
int av7110_init_v4l(struct av7110 *av7110)
{
struct saa7146_dev* dev = av7110->dev;
struct saa7146_ext_vv *vv_data;
int ret;
/* special case DVB-C: these cards have an analog tuner
plus need some special handling, so we have separate
saa7146_ext_vv data for these... */
if (av7110->analog_tuner_flags)
vv_data = &av7110_vv_data_c;
else
vv_data = &av7110_vv_data_st;
ret = saa7146_vv_init(dev, vv_data);
if (ret) {
ERR(("cannot init capture device. skipping.\n"));
return -ENODEV;
}
vv_data->ops.vidioc_enum_input = vidioc_enum_input;
vv_data->ops.vidioc_g_input = vidioc_g_input;
vv_data->ops.vidioc_s_input = vidioc_s_input;
vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
vv_data->ops.vidioc_g_audio = vidioc_g_audio;
vv_data->ops.vidioc_s_audio = vidioc_s_audio;
vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
ERR(("cannot register capture device. skipping.\n"));
saa7146_vv_release(dev);
return -ENODEV;
}
if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI))
ERR(("cannot register vbi v4l2 device. skipping.\n"));
return 0;
}
int av7110_exit_v4l(struct av7110 *av7110)
{
struct saa7146_dev* dev = av7110->dev;
saa7146_unregister_device(&av7110->v4l_dev, av7110->dev);
saa7146_unregister_device(&av7110->vbi_dev, av7110->dev);
saa7146_vv_release(dev);
return 0;
}
/* FIXME: these values are experimental values that look better than the
values from the latest "official" driver -- at least for me... (MiHu) */
static struct saa7146_standard standard[] = {
{
.name = "PAL", .id = V4L2_STD_PAL_BG,
.v_offset = 0x15, .v_field = 288,
.h_offset = 0x48, .h_pixels = 708,
.v_max_out = 576, .h_max_out = 768,
}, {
.name = "NTSC", .id = V4L2_STD_NTSC,
.v_offset = 0x10, .v_field = 244,
.h_offset = 0x40, .h_pixels = 708,
.v_max_out = 480, .h_max_out = 640,
}
};
static struct saa7146_standard analog_standard[] = {
{
.name = "PAL", .id = V4L2_STD_PAL_BG,
.v_offset = 0x1b, .v_field = 288,
.h_offset = 0x08, .h_pixels = 708,
.v_max_out = 576, .h_max_out = 768,
}, {
.name = "NTSC", .id = V4L2_STD_NTSC,
.v_offset = 0x10, .v_field = 244,
.h_offset = 0x40, .h_pixels = 708,
.v_max_out = 480, .h_max_out = 640,
}
};
static struct saa7146_standard dvb_standard[] = {
{
.name = "PAL", .id = V4L2_STD_PAL_BG,
.v_offset = 0x14, .v_field = 288,
.h_offset = 0x48, .h_pixels = 708,
.v_max_out = 576, .h_max_out = 768,
}, {
.name = "NTSC", .id = V4L2_STD_NTSC,
.v_offset = 0x10, .v_field = 244,
.h_offset = 0x40, .h_pixels = 708,
.v_max_out = 480, .h_max_out = 640,
}
};
static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std)
{
struct av7110 *av7110 = (struct av7110*) dev->ext_priv;
if (std->id & V4L2_STD_PAL) {
av7110->vidmode = AV7110_VIDEO_MODE_PAL;
av7110_set_vidmode(av7110, av7110->vidmode);
}
else if (std->id & V4L2_STD_NTSC) {
av7110->vidmode = AV7110_VIDEO_MODE_NTSC;
av7110_set_vidmode(av7110, av7110->vidmode);
}
else
return -1;
return 0;
}
static struct saa7146_ext_vv av7110_vv_data_st = {
.inputs = 1,
.audios = 1,
.capabilities = V4L2_CAP_SLICED_VBI_OUTPUT,
.flags = 0,
.stds = &standard[0],
.num_stds = ARRAY_SIZE(standard),
.std_callback = &std_callback,
.vbi_fops.open = av7110_vbi_reset,
.vbi_fops.release = av7110_vbi_reset,
.vbi_fops.write = av7110_vbi_write,
};
static struct saa7146_ext_vv av7110_vv_data_c = {
.inputs = 1,
.audios = 1,
.capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT,
.flags = SAA7146_USE_PORT_B_FOR_VBI,
.stds = &standard[0],
.num_stds = ARRAY_SIZE(standard),
.std_callback = &std_callback,
.vbi_fops.open = av7110_vbi_reset,
.vbi_fops.release = av7110_vbi_reset,
.vbi_fops.write = av7110_vbi_write,
};
| houzhenggang/linux-2.6 | drivers/media/dvb/ttpci/av7110_v4l.c | C | gpl-2.0 | 27,884 |
/* $Id$ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <pjmedia-codec/ilbc.h>
#include <pjmedia-codec/types.h>
#include <pjmedia/codec.h>
#include <pjmedia/errno.h>
#include <pjmedia/endpoint.h>
#include <pjmedia/plc.h>
#include <pjmedia/port.h>
#include <pjmedia/silencedet.h>
#include <pj/assert.h>
#include <pj/log.h>
#include <pj/pool.h>
#include <pj/string.h>
#include <pj/os.h>
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
#include <AudioToolbox/AudioToolbox.h>
#define iLBC_Enc_Inst_t AudioConverterRef
#define iLBC_Dec_Inst_t AudioConverterRef
#define BLOCKL_MAX 1
#else
#include "../../third_party/ilbc/iLBC_encode.h"
#include "../../third_party/ilbc/iLBC_decode.h"
#endif
/*
* Only build this file if PJMEDIA_HAS_ILBC_CODEC != 0
*/
#if defined(PJMEDIA_HAS_ILBC_CODEC) && PJMEDIA_HAS_ILBC_CODEC != 0
#define THIS_FILE "ilbc.c"
#define CLOCK_RATE 8000
#define DEFAULT_MODE 30
/* Prototypes for iLBC factory */
static pj_status_t ilbc_test_alloc(pjmedia_codec_factory *factory,
const pjmedia_codec_info *id );
static pj_status_t ilbc_default_attr(pjmedia_codec_factory *factory,
const pjmedia_codec_info *id,
pjmedia_codec_param *attr );
static pj_status_t ilbc_enum_codecs(pjmedia_codec_factory *factory,
unsigned *count,
pjmedia_codec_info codecs[]);
static pj_status_t ilbc_alloc_codec(pjmedia_codec_factory *factory,
const pjmedia_codec_info *id,
pjmedia_codec **p_codec);
static pj_status_t ilbc_dealloc_codec(pjmedia_codec_factory *factory,
pjmedia_codec *codec );
/* Prototypes for iLBC implementation. */
static pj_status_t ilbc_codec_init(pjmedia_codec *codec,
pj_pool_t *pool );
static pj_status_t ilbc_codec_open(pjmedia_codec *codec,
pjmedia_codec_param *attr );
static pj_status_t ilbc_codec_close(pjmedia_codec *codec );
static pj_status_t ilbc_codec_modify(pjmedia_codec *codec,
const pjmedia_codec_param *attr );
static pj_status_t ilbc_codec_parse(pjmedia_codec *codec,
void *pkt,
pj_size_t pkt_size,
const pj_timestamp *ts,
unsigned *frame_cnt,
pjmedia_frame frames[]);
static pj_status_t ilbc_codec_encode(pjmedia_codec *codec,
const struct pjmedia_frame *input,
unsigned output_buf_len,
struct pjmedia_frame *output);
static pj_status_t ilbc_codec_decode(pjmedia_codec *codec,
const struct pjmedia_frame *input,
unsigned output_buf_len,
struct pjmedia_frame *output);
static pj_status_t ilbc_codec_recover(pjmedia_codec *codec,
unsigned output_buf_len,
struct pjmedia_frame *output);
/* Definition for iLBC codec operations. */
static pjmedia_codec_op ilbc_op =
{
&ilbc_codec_init,
&ilbc_codec_open,
&ilbc_codec_close,
&ilbc_codec_modify,
&ilbc_codec_parse,
&ilbc_codec_encode,
&ilbc_codec_decode,
&ilbc_codec_recover
};
/* Definition for iLBC codec factory operations. */
static pjmedia_codec_factory_op ilbc_factory_op =
{
&ilbc_test_alloc,
&ilbc_default_attr,
&ilbc_enum_codecs,
&ilbc_alloc_codec,
&ilbc_dealloc_codec,
&pjmedia_codec_ilbc_deinit
};
/* iLBC factory */
static struct ilbc_factory
{
pjmedia_codec_factory base;
pjmedia_endpt *endpt;
int mode;
int bps;
} ilbc_factory;
/* iLBC codec private data. */
struct ilbc_codec
{
pjmedia_codec base;
pj_pool_t *pool;
char obj_name[PJ_MAX_OBJ_NAME];
pjmedia_silence_det *vad;
pj_bool_t vad_enabled;
pj_bool_t plc_enabled;
pj_timestamp last_tx;
pj_bool_t enc_ready;
iLBC_Enc_Inst_t enc;
unsigned enc_frame_size;
unsigned enc_samples_per_frame;
float enc_block[BLOCKL_MAX];
pj_bool_t dec_ready;
iLBC_Dec_Inst_t dec;
unsigned dec_frame_size;
unsigned dec_samples_per_frame;
float dec_block[BLOCKL_MAX];
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
unsigned enc_total_packets;
char *enc_buffer;
unsigned enc_buffer_offset;
unsigned dec_total_packets;
char *dec_buffer;
unsigned dec_buffer_offset;
#endif
};
static pj_str_t STR_MODE = {"mode", 4};
/*
* Initialize and register iLBC codec factory to pjmedia endpoint.
*/
PJ_DEF(pj_status_t) pjmedia_codec_ilbc_init( pjmedia_endpt *endpt,
int mode )
{
pjmedia_codec_mgr *codec_mgr;
pj_status_t status;
PJ_ASSERT_RETURN(endpt != NULL, PJ_EINVAL);
PJ_ASSERT_RETURN(mode==0 || mode==20 || mode==30, PJ_EINVAL);
/* Create iLBC codec factory. */
ilbc_factory.base.op = &ilbc_factory_op;
ilbc_factory.base.factory_data = NULL;
ilbc_factory.endpt = endpt;
if (mode == 0)
mode = DEFAULT_MODE;
ilbc_factory.mode = mode;
if (mode == 20) {
ilbc_factory.bps = 15200;
} else {
ilbc_factory.bps = 13333;
}
/* Get the codec manager. */
codec_mgr = pjmedia_endpt_get_codec_mgr(endpt);
if (!codec_mgr)
return PJ_EINVALIDOP;
/* Register codec factory to endpoint. */
status = pjmedia_codec_mgr_register_factory(codec_mgr,
&ilbc_factory.base);
if (status != PJ_SUCCESS)
return status;
/* Done. */
return PJ_SUCCESS;
}
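/*
 * Minimal usage sketch (illustration only, not compiled here). The endpoint
 * is assumed to have been created with pjmedia_endpt_create() elsewhere in
 * the application.
 */
#if 0
    pjmedia_endpt *med_endpt;   /* assumed: created with pjmedia_endpt_create() */
    pj_status_t status;

    /* 0 selects the default 30 ms mode; pass 20 or 30 to force a mode. */
    status = pjmedia_codec_ilbc_init(med_endpt, 0);
    if (status != PJ_SUCCESS) {
	PJ_LOG(1, ("app", "Unable to register iLBC codec factory"));
    }
#endif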
/*
* Unregister iLBC codec factory from pjmedia endpoint and deinitialize
* the iLBC codec library.
*/
PJ_DEF(pj_status_t) pjmedia_codec_ilbc_deinit(void)
{
pjmedia_codec_mgr *codec_mgr;
pj_status_t status;
/* Get the codec manager. */
codec_mgr = pjmedia_endpt_get_codec_mgr(ilbc_factory.endpt);
if (!codec_mgr)
return PJ_EINVALIDOP;
/* Unregister iLBC codec factory. */
status = pjmedia_codec_mgr_unregister_factory(codec_mgr,
&ilbc_factory.base);
return status;
}
/*
* Check if factory can allocate the specified codec.
*/
static pj_status_t ilbc_test_alloc( pjmedia_codec_factory *factory,
const pjmedia_codec_info *info )
{
const pj_str_t ilbc_tag = { "iLBC", 4};
PJ_UNUSED_ARG(factory);
PJ_ASSERT_RETURN(factory==&ilbc_factory.base, PJ_EINVAL);
/* Type MUST be audio. */
if (info->type != PJMEDIA_TYPE_AUDIO)
return PJMEDIA_CODEC_EUNSUP;
/* Check encoding name. */
if (pj_stricmp(&info->encoding_name, &ilbc_tag) != 0)
return PJMEDIA_CODEC_EUNSUP;
/* Check clock-rate */
if (info->clock_rate != CLOCK_RATE)
return PJMEDIA_CODEC_EUNSUP;
/* Channel count must be one */
if (info->channel_cnt != 1)
return PJMEDIA_CODEC_EUNSUP;
/* Yes, this should be iLBC! */
return PJ_SUCCESS;
}
/*
* Generate default attribute.
*/
static pj_status_t ilbc_default_attr (pjmedia_codec_factory *factory,
const pjmedia_codec_info *id,
pjmedia_codec_param *attr )
{
PJ_UNUSED_ARG(factory);
PJ_ASSERT_RETURN(factory==&ilbc_factory.base, PJ_EINVAL);
PJ_UNUSED_ARG(id);
PJ_ASSERT_RETURN(pj_stricmp2(&id->encoding_name, "iLBC")==0, PJ_EINVAL);
pj_bzero(attr, sizeof(pjmedia_codec_param));
attr->info.clock_rate = CLOCK_RATE;
attr->info.channel_cnt = 1;
attr->info.avg_bps = ilbc_factory.bps;
attr->info.max_bps = 15200;
attr->info.pcm_bits_per_sample = 16;
attr->info.frm_ptime = (short)ilbc_factory.mode;
attr->info.pt = PJMEDIA_RTP_PT_ILBC;
attr->setting.frm_per_pkt = 1;
attr->setting.vad = 1;
attr->setting.plc = 1;
attr->setting.penh = 1;
attr->setting.dec_fmtp.cnt = 1;
attr->setting.dec_fmtp.param[0].name = STR_MODE;
if (ilbc_factory.mode == 30)
attr->setting.dec_fmtp.param[0].val = pj_str("30");
else
attr->setting.dec_fmtp.param[0].val = pj_str("20");
return PJ_SUCCESS;
}
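/*
 * For reference: the "mode" fmtp parameter prepared above is what ends up in
 * the SDP offer/answer, typically as something like (the payload type number
 * is illustrative only):
 *
 *   a=rtpmap:97 iLBC/8000
 *   a=fmtp:97 mode=30
 */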
/*
* Enum codecs supported by this factory (i.e. only iLBC!).
*/
static pj_status_t ilbc_enum_codecs(pjmedia_codec_factory *factory,
unsigned *count,
pjmedia_codec_info codecs[])
{
PJ_UNUSED_ARG(factory);
PJ_ASSERT_RETURN(factory==&ilbc_factory.base, PJ_EINVAL);
PJ_ASSERT_RETURN(codecs && *count > 0, PJ_EINVAL);
pj_bzero(&codecs[0], sizeof(pjmedia_codec_info));
codecs[0].encoding_name = pj_str("iLBC");
codecs[0].pt = PJMEDIA_RTP_PT_ILBC;
codecs[0].type = PJMEDIA_TYPE_AUDIO;
codecs[0].clock_rate = 8000;
codecs[0].channel_cnt = 1;
*count = 1;
return PJ_SUCCESS;
}
/*
* Allocate a new iLBC codec instance.
*/
static pj_status_t ilbc_alloc_codec(pjmedia_codec_factory *factory,
const pjmedia_codec_info *id,
pjmedia_codec **p_codec)
{
pj_pool_t *pool;
struct ilbc_codec *codec;
PJ_ASSERT_RETURN(factory && id && p_codec, PJ_EINVAL);
PJ_ASSERT_RETURN(factory == &ilbc_factory.base, PJ_EINVAL);
pool = pjmedia_endpt_create_pool(ilbc_factory.endpt, "iLBC%p",
2000, 2000);
PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM);
codec = PJ_POOL_ZALLOC_T(pool, struct ilbc_codec);
codec->base.op = &ilbc_op;
codec->base.factory = factory;
codec->pool = pool;
pj_ansi_snprintf(codec->obj_name, sizeof(codec->obj_name),
"ilbc%p", codec);
*p_codec = &codec->base;
return PJ_SUCCESS;
}
/*
* Free codec.
*/
static pj_status_t ilbc_dealloc_codec( pjmedia_codec_factory *factory,
pjmedia_codec *codec )
{
struct ilbc_codec *ilbc_codec;
PJ_ASSERT_RETURN(factory && codec, PJ_EINVAL);
PJ_UNUSED_ARG(factory);
PJ_ASSERT_RETURN(factory == &ilbc_factory.base, PJ_EINVAL);
ilbc_codec = (struct ilbc_codec*) codec;
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
if (ilbc_codec->enc) {
AudioConverterDispose(ilbc_codec->enc);
ilbc_codec->enc = NULL;
}
if (ilbc_codec->dec) {
AudioConverterDispose(ilbc_codec->dec);
ilbc_codec->dec = NULL;
}
#endif
pj_pool_release(ilbc_codec->pool);
return PJ_SUCCESS;
}
/*
* Init codec.
*/
static pj_status_t ilbc_codec_init(pjmedia_codec *codec,
pj_pool_t *pool )
{
PJ_UNUSED_ARG(codec);
PJ_UNUSED_ARG(pool);
return PJ_SUCCESS;
}
/*
* Open codec.
*/
static pj_status_t ilbc_codec_open(pjmedia_codec *codec,
pjmedia_codec_param *attr )
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
pj_status_t status;
unsigned i;
pj_uint16_t dec_fmtp_mode = DEFAULT_MODE,
enc_fmtp_mode = DEFAULT_MODE;
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
AudioStreamBasicDescription srcFormat, dstFormat;
UInt32 size;
srcFormat.mSampleRate = attr->info.clock_rate;
srcFormat.mFormatID = kAudioFormatLinearPCM;
srcFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
| kLinearPCMFormatFlagIsPacked;
srcFormat.mBitsPerChannel = attr->info.pcm_bits_per_sample;
srcFormat.mChannelsPerFrame = attr->info.channel_cnt;
srcFormat.mBytesPerFrame = srcFormat.mChannelsPerFrame
* srcFormat.mBitsPerChannel >> 3;
srcFormat.mFramesPerPacket = 1;
srcFormat.mBytesPerPacket = srcFormat.mBytesPerFrame *
srcFormat.mFramesPerPacket;
memset(&dstFormat, 0, sizeof(dstFormat));
dstFormat.mSampleRate = attr->info.clock_rate;
dstFormat.mFormatID = kAudioFormatiLBC;
dstFormat.mChannelsPerFrame = attr->info.channel_cnt;
#endif
pj_assert(ilbc_codec != NULL);
pj_assert(ilbc_codec->enc_ready == PJ_FALSE &&
ilbc_codec->dec_ready == PJ_FALSE);
/* Get decoder mode */
for (i = 0; i < attr->setting.dec_fmtp.cnt; ++i) {
if (pj_stricmp(&attr->setting.dec_fmtp.param[i].name, &STR_MODE) == 0)
{
dec_fmtp_mode = (pj_uint16_t)
pj_strtoul(&attr->setting.dec_fmtp.param[i].val);
break;
}
}
/* Decoder mode must be set */
PJ_ASSERT_RETURN(dec_fmtp_mode == 20 || dec_fmtp_mode == 30,
PJMEDIA_CODEC_EINMODE);
/* Get encoder mode */
for (i = 0; i < attr->setting.enc_fmtp.cnt; ++i) {
if (pj_stricmp(&attr->setting.enc_fmtp.param[i].name, &STR_MODE) == 0)
{
enc_fmtp_mode = (pj_uint16_t)
pj_strtoul(&attr->setting.enc_fmtp.param[i].val);
break;
}
}
PJ_ASSERT_RETURN(enc_fmtp_mode==20 || enc_fmtp_mode==30,
PJMEDIA_CODEC_EINMODE);
/* Both sides of a bi-directional session MUST use the same "mode" value.
* At this point the only possible values are 20 or 30, so if the encoder
* and decoder modes differ, just fall back to the default mode, which is 30.
*/
if (enc_fmtp_mode != dec_fmtp_mode) {
enc_fmtp_mode = dec_fmtp_mode = DEFAULT_MODE;
PJ_LOG(4,(ilbc_codec->obj_name,
"Normalized iLBC encoder and decoder modes to %d",
DEFAULT_MODE));
}
/* Update some attributes based on negotiated mode. */
attr->info.avg_bps = (dec_fmtp_mode == 30? 13333 : 15200);
attr->info.frm_ptime = dec_fmtp_mode;
/* Create encoder */
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
dstFormat.mFramesPerPacket = CLOCK_RATE * enc_fmtp_mode / 1000;
dstFormat.mBytesPerPacket = (enc_fmtp_mode == 20? 38 : 50);
/* Use AudioFormat API to fill out the rest of the description */
size = sizeof(dstFormat);
AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
0, NULL, &size, &dstFormat);
if (AudioConverterNew(&srcFormat, &dstFormat, &ilbc_codec->enc) != noErr)
return PJMEDIA_CODEC_EFAILED;
ilbc_codec->enc_frame_size = (enc_fmtp_mode == 20? 38 : 50);
#else
ilbc_codec->enc_frame_size = initEncode(&ilbc_codec->enc, enc_fmtp_mode);
#endif
ilbc_codec->enc_samples_per_frame = CLOCK_RATE * enc_fmtp_mode / 1000;
ilbc_codec->enc_ready = PJ_TRUE;
/* Create decoder */
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
if (AudioConverterNew(&dstFormat, &srcFormat, &ilbc_codec->dec) != noErr)
return PJMEDIA_CODEC_EFAILED;
ilbc_codec->dec_samples_per_frame = CLOCK_RATE * dec_fmtp_mode / 1000;
#else
ilbc_codec->dec_samples_per_frame = initDecode(&ilbc_codec->dec,
dec_fmtp_mode,
attr->setting.penh);
#endif
ilbc_codec->dec_frame_size = (dec_fmtp_mode == 20? 38 : 50);
ilbc_codec->dec_ready = PJ_TRUE;
/* Save plc flags */
ilbc_codec->plc_enabled = (attr->setting.plc != 0);
/* Create silence detector. */
ilbc_codec->vad_enabled = (attr->setting.vad != 0);
status = pjmedia_silence_det_create(ilbc_codec->pool, CLOCK_RATE,
ilbc_codec->enc_samples_per_frame,
&ilbc_codec->vad);
if (status != PJ_SUCCESS)
return status;
/* Init last_tx (not necessary because of zalloc, but better
* be safe in case someone removes the zalloc later).
*/
pj_set_timestamp32(&ilbc_codec->last_tx, 0, 0);
PJ_LOG(5,(ilbc_codec->obj_name,
"iLBC codec opened, mode=%d", dec_fmtp_mode));
return PJ_SUCCESS;
}
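/*
 * Summary of the two configurations negotiated above:
 *   mode 20: 20 ms ptime, 160 samples/frame, 38-byte frames, 15200 bps
 *   mode 30: 30 ms ptime, 240 samples/frame, 50-byte frames, 13333 bps
 */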
/*
* Close codec.
*/
static pj_status_t ilbc_codec_close( pjmedia_codec *codec )
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
PJ_UNUSED_ARG(codec);
PJ_LOG(5,(ilbc_codec->obj_name, "iLBC codec closed"));
return PJ_SUCCESS;
}
/*
* Modify codec settings.
*/
static pj_status_t ilbc_codec_modify(pjmedia_codec *codec,
const pjmedia_codec_param *attr )
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
ilbc_codec->plc_enabled = (attr->setting.plc != 0);
ilbc_codec->vad_enabled = (attr->setting.vad != 0);
return PJ_SUCCESS;
}
/*
* Get frames in the packet.
*/
static pj_status_t ilbc_codec_parse( pjmedia_codec *codec,
void *pkt,
pj_size_t pkt_size,
const pj_timestamp *ts,
unsigned *frame_cnt,
pjmedia_frame frames[])
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
unsigned count;
PJ_ASSERT_RETURN(frame_cnt, PJ_EINVAL);
count = 0;
while (pkt_size >= ilbc_codec->dec_frame_size && count < *frame_cnt) {
frames[count].type = PJMEDIA_FRAME_TYPE_AUDIO;
frames[count].buf = pkt;
frames[count].size = ilbc_codec->dec_frame_size;
frames[count].timestamp.u64 = ts->u64 + count *
ilbc_codec->dec_samples_per_frame;
pkt = ((char*)pkt) + ilbc_codec->dec_frame_size;
pkt_size -= ilbc_codec->dec_frame_size;
++count;
}
*frame_cnt = count;
return PJ_SUCCESS;
}
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
static OSStatus encodeDataProc (
AudioConverterRef inAudioConverter,
UInt32 *ioNumberDataPackets,
AudioBufferList *ioData,
AudioStreamPacketDescription **outDataPacketDescription,
void *inUserData
)
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)inUserData;
/* Initialize in case of failure */
ioData->mBuffers[0].mData = NULL;
ioData->mBuffers[0].mDataByteSize = 0;
if (ilbc_codec->enc_total_packets < *ioNumberDataPackets) {
*ioNumberDataPackets = ilbc_codec->enc_total_packets;
}
if (*ioNumberDataPackets) {
ioData->mBuffers[0].mData = ilbc_codec->enc_buffer +
ilbc_codec->enc_buffer_offset;
ioData->mBuffers[0].mDataByteSize = *ioNumberDataPackets *
ilbc_codec->enc_samples_per_frame
<< 1;
ilbc_codec->enc_buffer_offset += ioData->mBuffers[0].mDataByteSize;
}
ilbc_codec->enc_total_packets -= *ioNumberDataPackets;
return noErr;
}
static OSStatus decodeDataProc (
AudioConverterRef inAudioConverter,
UInt32 *ioNumberDataPackets,
AudioBufferList *ioData,
AudioStreamPacketDescription **outDataPacketDescription,
void *inUserData
)
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)inUserData;
/* Initialize in case of failure */
ioData->mBuffers[0].mData = NULL;
ioData->mBuffers[0].mDataByteSize = 0;
if (ilbc_codec->dec_total_packets < *ioNumberDataPackets) {
*ioNumberDataPackets = ilbc_codec->dec_total_packets;
}
if (*ioNumberDataPackets) {
ioData->mBuffers[0].mData = ilbc_codec->dec_buffer +
ilbc_codec->dec_buffer_offset;
ioData->mBuffers[0].mDataByteSize = *ioNumberDataPackets *
ilbc_codec->dec_frame_size;
ilbc_codec->dec_buffer_offset += ioData->mBuffers[0].mDataByteSize;
}
ilbc_codec->dec_total_packets -= *ioNumberDataPackets;
return noErr;
}
#endif
/*
* Encode frame.
*/
static pj_status_t ilbc_codec_encode(pjmedia_codec *codec,
const struct pjmedia_frame *input,
unsigned output_buf_len,
struct pjmedia_frame *output)
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
pj_int16_t *pcm_in;
pj_size_t nsamples;
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
UInt32 npackets;
OSStatus err;
AudioBufferList theABL;
#endif
pj_assert(ilbc_codec && input && output);
pcm_in = (pj_int16_t*)input->buf;
nsamples = input->size >> 1;
PJ_ASSERT_RETURN(nsamples % ilbc_codec->enc_samples_per_frame == 0,
PJMEDIA_CODEC_EPCMFRMINLEN);
PJ_ASSERT_RETURN(output_buf_len >= ilbc_codec->enc_frame_size * nsamples /
ilbc_codec->enc_samples_per_frame,
PJMEDIA_CODEC_EFRMTOOSHORT);
/* Detect silence */
if (ilbc_codec->vad_enabled) {
pj_bool_t is_silence;
pj_int32_t silence_period;
silence_period = pj_timestamp_diff32(&ilbc_codec->last_tx,
&input->timestamp);
is_silence = pjmedia_silence_det_detect(ilbc_codec->vad,
(const pj_int16_t*)input->buf,
(input->size >> 1),
NULL);
if (is_silence &&
(PJMEDIA_CODEC_MAX_SILENCE_PERIOD == -1 ||
silence_period < PJMEDIA_CODEC_MAX_SILENCE_PERIOD*8000/1000))
{
output->type = PJMEDIA_FRAME_TYPE_NONE;
output->buf = NULL;
output->size = 0;
output->timestamp = input->timestamp;
return PJ_SUCCESS;
} else {
ilbc_codec->last_tx = input->timestamp;
}
}
/* Encode */
output->size = 0;
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
npackets = nsamples / ilbc_codec->enc_samples_per_frame;
theABL.mNumberBuffers = 1;
theABL.mBuffers[0].mNumberChannels = 1;
theABL.mBuffers[0].mDataByteSize = output_buf_len;
theABL.mBuffers[0].mData = output->buf;
ilbc_codec->enc_total_packets = npackets;
ilbc_codec->enc_buffer = (char *)input->buf;
ilbc_codec->enc_buffer_offset = 0;
err = AudioConverterFillComplexBuffer(ilbc_codec->enc, encodeDataProc,
ilbc_codec, &npackets,
&theABL, NULL);
if (err == noErr) {
output->size = npackets * ilbc_codec->enc_frame_size;
}
#else
while (nsamples >= ilbc_codec->enc_samples_per_frame) {
unsigned i;
/* Convert to float */
for (i=0; i<ilbc_codec->enc_samples_per_frame; ++i) {
ilbc_codec->enc_block[i] = (float) (*pcm_in++);
}
iLBC_encode((unsigned char *)output->buf + output->size,
ilbc_codec->enc_block,
&ilbc_codec->enc);
output->size += ilbc_codec->enc.no_of_bytes;
nsamples -= ilbc_codec->enc_samples_per_frame;
}
#endif
output->type = PJMEDIA_FRAME_TYPE_AUDIO;
output->timestamp = input->timestamp;
return PJ_SUCCESS;
}
/*
* Decode frame.
*/
static pj_status_t ilbc_codec_decode(pjmedia_codec *codec,
const struct pjmedia_frame *input,
unsigned output_buf_len,
struct pjmedia_frame *output)
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
UInt32 npackets;
OSStatus err;
AudioBufferList theABL;
#else
unsigned i;
#endif
pj_assert(ilbc_codec != NULL);
PJ_ASSERT_RETURN(input && output, PJ_EINVAL);
if (output_buf_len < (ilbc_codec->dec_samples_per_frame << 1))
return PJMEDIA_CODEC_EPCMTOOSHORT;
if (input->size != ilbc_codec->dec_frame_size)
return PJMEDIA_CODEC_EFRMINLEN;
/* Decode to temporary buffer */
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
npackets = input->size / ilbc_codec->dec_frame_size *
ilbc_codec->dec_samples_per_frame;
theABL.mNumberBuffers = 1;
theABL.mBuffers[0].mNumberChannels = 1;
theABL.mBuffers[0].mDataByteSize = output_buf_len;
theABL.mBuffers[0].mData = output->buf;
ilbc_codec->dec_total_packets = npackets;
ilbc_codec->dec_buffer = (char *)input->buf;
ilbc_codec->dec_buffer_offset = 0;
err = AudioConverterFillComplexBuffer(ilbc_codec->dec, decodeDataProc,
ilbc_codec, &npackets,
&theABL, NULL);
if (err == noErr) {
output->size = npackets * (ilbc_codec->dec_samples_per_frame << 1);
}
#else
iLBC_decode(ilbc_codec->dec_block, (unsigned char*) input->buf,
&ilbc_codec->dec, 1);
/* Convert decoded samples from float to short */
for (i=0; i<ilbc_codec->dec_samples_per_frame; ++i) {
((short*)output->buf)[i] = (short)ilbc_codec->dec_block[i];
}
output->size = (ilbc_codec->dec_samples_per_frame << 1);
#endif
output->type = PJMEDIA_FRAME_TYPE_AUDIO;
output->timestamp = input->timestamp;
return PJ_SUCCESS;
}
/*
* Recover lost frame.
*/
static pj_status_t ilbc_codec_recover(pjmedia_codec *codec,
unsigned output_buf_len,
struct pjmedia_frame *output)
{
struct ilbc_codec *ilbc_codec = (struct ilbc_codec*)codec;
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
UInt32 npackets;
OSStatus err;
AudioBufferList theABL;
#else
unsigned i;
#endif
pj_assert(ilbc_codec != NULL);
PJ_ASSERT_RETURN(output, PJ_EINVAL);
if (output_buf_len < (ilbc_codec->dec_samples_per_frame << 1))
return PJMEDIA_CODEC_EPCMTOOSHORT;
/* Decode to temporary buffer */
#if defined(PJMEDIA_ILBC_CODEC_USE_COREAUDIO)&& PJMEDIA_ILBC_CODEC_USE_COREAUDIO
npackets = 1;
theABL.mNumberBuffers = 1;
theABL.mBuffers[0].mNumberChannels = 1;
theABL.mBuffers[0].mDataByteSize = output_buf_len;
theABL.mBuffers[0].mData = output->buf;
ilbc_codec->dec_total_packets = npackets;
ilbc_codec->dec_buffer_offset = 0;
if (ilbc_codec->dec_buffer) {
err = AudioConverterFillComplexBuffer(ilbc_codec->dec, decodeDataProc,
ilbc_codec, &npackets,
&theABL, NULL);
if (err == noErr) {
output->size = npackets *
(ilbc_codec->dec_samples_per_frame << 1);
}
} else {
output->size = npackets * (ilbc_codec->dec_samples_per_frame << 1);
pj_bzero(output->buf, output->size);
}
#else
iLBC_decode(ilbc_codec->dec_block, NULL, &ilbc_codec->dec, 0);
/* Convert decoded samples from float to short */
for (i=0; i<ilbc_codec->dec_samples_per_frame; ++i) {
((short*)output->buf)[i] = (short)ilbc_codec->dec_block[i];
}
output->size = (ilbc_codec->dec_samples_per_frame << 1);
#endif
output->type = PJMEDIA_FRAME_TYPE_AUDIO;
return PJ_SUCCESS;
}
#endif /* PJMEDIA_HAS_ILBC_CODEC */
| pol51/pjsip-winphone | pjmedia/src/pjmedia-codec/ilbc.c | C | gpl-2.0 | 25,784 |
/* hscdtd008a_i2c.c
*
* GeoMagneticField device driver for I2C (HSCDTD008A)
*
* Copyright (C) 2012 ALPS ELECTRIC CO., LTD. All Rights Reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
#define I2C_RETRY_DELAY 5
#define I2C_RETRIES 5
#define I2C_HSCD_ADDR (0x0c) /* 000 1100 */
#define HSCD_DRIVER_NAME "hscd_i2c"
#undef ALPS_DEBUG
#define HSCD_STB 0x0C
#define HSCD_XOUT 0x10
#define HSCD_YOUT 0x12
#define HSCD_ZOUT 0x14
#define HSCD_XOUT_H 0x11
#define HSCD_XOUT_L 0x10
#define HSCD_YOUT_H 0x13
#define HSCD_YOUT_L 0x12
#define HSCD_ZOUT_H 0x15
#define HSCD_ZOUT_L 0x14
#define HSCD_STATUS 0x18
#define HSCD_CTRL1 0x1b
#define HSCD_CTRL2 0x1c
#define HSCD_CTRL3 0x1d
#define HSCD_CTRL4 0x1e
/* hscdtd008a chip id */
#define DEVICE_ID 0x49
/* hscd magnetic sensor chip identification register */
#define WHO_AM_I 0x0F
static struct i2c_driver hscd_driver;
static struct i2c_client *this_client = NULL;
#ifdef CONFIG_HAS_EARLYSUSPEND
static struct early_suspend hscd_early_suspend_handler;
#endif
struct hscd_power_data {
struct regulator *regulator_vdd;
struct regulator *regulator_vio;
};
static struct hscd_power_data hscd_power;
static atomic_t flgEna;
static atomic_t delay;
static atomic_t flgSuspend;
extern int sensors_register(struct device *dev, void * drvdata,
struct device_attribute *attributes[], char *name);
static int hscd_i2c_readm(char *rxData, int length)
{
int err;
int tries = 0;
struct i2c_msg msgs[] = {
{
.addr = this_client->addr,
.flags = 0,
.len = 1,
.buf = rxData,
},
{
.addr = this_client->addr,
.flags = I2C_M_RD,
.len = length,
.buf = rxData,
},
};
do {
err = i2c_transfer(this_client->adapter, msgs, 2);
} while ((err != 2) && (++tries < I2C_RETRIES));
if (err != 2) {
dev_err(&this_client->adapter->dev, "read transfer error\n");
err = -EIO;
} else {
err = 0;
}
return err;
}
static int hscd_i2c_writem(char *txData, int length)
{
int err;
int tries = 0;
#ifdef ALPS_DEBUG
int i;
#endif
struct i2c_msg msg[] = {
{
.addr = this_client->addr,
.flags = 0,
.len = length,
.buf = txData,
},
};
#ifdef ALPS_DEBUG
printk("[HSCD] i2c_writem : ");
for (i = 0; i < length; i++)
printk("0X%02X, ", txData[i]);
printk("\n");
#endif
do {
err = i2c_transfer(this_client->adapter, msg, 1);
} while ((err != 1) && (++tries < I2C_RETRIES));
if (err != 1) {
dev_err(&this_client->adapter->dev, "write transfer error\n");
err = -EIO;
} else {
err = 0;
}
return err;
}
static int hscd_power_on(void)
{
int err = 0;
printk(KERN_INFO "%s\n", __func__);
if (hscd_power.regulator_vdd) {
err = regulator_enable(hscd_power.regulator_vdd);
if (err) {
pr_err("%s: Couldn't enable vdd_hscdtd %d\n", __func__, err);
return err;
}
}
if (hscd_power.regulator_vio) {
err = regulator_enable(hscd_power.regulator_vio);
if (err) {
pr_err("%s: Couldn't enable vio_hscdtd %d\n", __func__, err);
return err;
}
}
msleep(60);
return err;
}
static int hscd_power_off(void)
{
int err = 0;
printk(KERN_INFO "%s\n", __func__);
if (hscd_power.regulator_vdd) {
err = regulator_disable(hscd_power.regulator_vdd);
if (err) {
pr_err("%s: Couldn't disable vdd_hscdtd %d\n", __func__, err);
return err;
}
}
printk(" %s, %d\n", __func__, __LINE__);
if (hscd_power.regulator_vio) {
err = regulator_disable(hscd_power.regulator_vio);
if (err) {
pr_err("%s: Couldn't disable vio_hscdtd %d\n", __func__, err);
return err;
}
}
return err;
}
int hscd_self_test_A(void)
{
u8 sx[2], cr1[1];
if (atomic_read(&flgSuspend) == 1)
return -1;
/* Control register1 backup */
cr1[0] = HSCD_CTRL1;
if (hscd_i2c_readm(cr1, 1))
return 1;
#ifdef ALPS_DEBUG
else
printk("[HSCD] Control register1 value, %02X\n", cr1[0]);
#endif
mdelay(1);
/* Move to active mode (force state) */
sx[0] = HSCD_CTRL1;
sx[1] = 0x8A;
if (hscd_i2c_writem(sx, 2))
return 1;
/* Get initial value of self-test-A register */
sx[0] = HSCD_STB;
hscd_i2c_readm(sx, 1);
mdelay(1);
sx[0] = HSCD_STB;
if (hscd_i2c_readm(sx, 1))
return 1;
#ifdef ALPS_DEBUG
else
printk("[HSCD] self test A register value, %02X\n", sx[0]);
#endif
if (sx[0] != 0x55) {
printk("error: self-test-A, initial value is %02X\n", sx[0]);
return 2;
}
/* do self-test*/
sx[0] = HSCD_CTRL3;
sx[1] = 0x10;
if (hscd_i2c_writem(sx, 2))
return 1;
mdelay(3);
/* Get 1st value of self-test-A register */
sx[0] = HSCD_STB;
if (hscd_i2c_readm(sx, 1))
return 1;
#ifdef ALPS_DEBUG
else
printk("[HSCD] self test register value, %02X\n", sx[0]);
#endif
if (sx[0] != 0xAA) {
printk("error: self-test, 1st value is %02X\n", sx[0]);
return 3;
}
mdelay(3);
/* Get 2nd value of self-test register */
sx[0] = HSCD_STB;
if (hscd_i2c_readm(sx, 1))
return 1;
#ifdef ALPS_DEBUG
else
printk("[HSCD] self test register value, %02X\n", sx[0]);
#endif
if (sx[0] != 0x55) {
printk("error: self-test, 2nd value is %02X\n", sx[0]);
return 4;
}
/* Resume */
sx[0] = HSCD_CTRL1;
sx[1] = cr1[0];
if (hscd_i2c_writem(sx, 2))
return 1;
return 0;
}
int hscd_self_test_B(void)
{
if (atomic_read(&flgSuspend) == 1)
return -1;
return 0;
}
int hscd_get_magnetic_field_data(int *xyz)
{
int err = -1;
int i;
u8 sx[6];
if (atomic_read(&flgSuspend) == 1)
return err;
sx[0] = HSCD_XOUT;
err = hscd_i2c_readm(sx, 6);
if (err < 0)
return err;
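/* Each axis is returned LSB first as a signed 16-bit value; the part is
 * configured for 15-bit signed output via HSCD_CTRL4 in hscd_activate(). */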
for (i = 0; i < 3; i++) {
xyz[i] = (int) ((short)((sx[2*i + 1] << 8) | (sx[2*i])));
}
#ifdef ALPS_DEBUG
printk("Mag_I2C, x:%d, y:%d, z:%d\n",xyz[0], xyz[1], xyz[2]);
#endif
return err;
}
void hscd_activate(int flgatm, int flg, int dtime)
{
u8 buf[2];
if (this_client == NULL)
return;
if (flg != 0)
flg = 1;
if (flg) {
buf[0] = HSCD_CTRL4; // 15 bit signed value
buf[1] = 0x90;
hscd_i2c_writem(buf, 2);
}
mdelay(1);
if (dtime <= 20)
buf[1] = (3 << 3); // 100Hz- 10msec
else if (dtime <= 70)
buf[1] = (2 << 3); // 20Hz- 50msec
else
buf[1] = (1 << 3); // 10Hz-100msec
buf[0] = HSCD_CTRL1;
buf[1] |= (flg << 7);
hscd_i2c_writem(buf, 2);
mdelay(3);
if (flgatm) {
atomic_set(&flgEna, flg);
atomic_set(&delay, dtime);
}
}
static void hscd_register_init(void)
{
int v[3];
u8 buf[2];
#ifdef ALPS_DEBUG
printk("[HSCD] register_init\n");
#endif
buf[0] = HSCD_CTRL3;
buf[1] = 0x80;
hscd_i2c_writem(buf, 2);
mdelay(5);
atomic_set(&delay, 100);
hscd_activate(0, 1, atomic_read(&delay));
hscd_get_magnetic_field_data(v);
printk("[HSCD] x:%d y:%d z:%d\n", v[0], v[1], v[2]);
hscd_activate(0, 0, atomic_read(&delay));
}
static ssize_t selftest_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int result1, result2;
if (!atomic_read(&flgEna))
hscd_power_on();
result1 = hscd_self_test_A();
result2 = hscd_self_test_B();
printk(" %s, %d\n", __func__, __LINE__);
/*if (!atomic_read(&flgEna))
hscd_power_off();*/
if (result1 == 0)
result1 = 1;
else
result1 = 0;
if (result2 == 0)
result2 = 1;
else
result2 = 0;
pr_info("Selftest Result is %d, %d\n", result1, result2);
return snprintf(buf, PAGE_SIZE, "%d, %d\n", result1, result2);
}
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int result;
if (!atomic_read(&flgEna))
hscd_power_on();
result = hscd_self_test_B();
printk(" %s, %d\n", __func__, __LINE__);
/*if (!atomic_read(&flgEna))
hscd_power_off();*/
if (result == 0)
result = 1;
else
result = 0;
return snprintf(buf, PAGE_SIZE, "%d,%d\n", result, 0);
}
static ssize_t adc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int data[3];
if (!atomic_read(&flgEna))
hscd_activate(0, 1, 100);
msleep(20);
hscd_get_magnetic_field_data(data);
pr_info("[HSCD] x: %d y: %d z: %d\n", data[0], data[1], data[2]);
if (!atomic_read(&flgEna))
hscd_activate(0, 0, 100);
return snprintf(buf, PAGE_SIZE, "%d,%d,%d\n",
data[0], data[1], data[2]);
}
static ssize_t mag_raw_data_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
int xyz[3] = {0};
printk("%s\n", __func__);
if (!atomic_read(&flgEna))
hscd_power_on();
hscd_get_magnetic_field_data(xyz);
/*if (!atomic_read(&flgEna))
hscd_power_off();*/
return snprintf(buf, PAGE_SIZE, "%d,%d,%d\n",
xyz[0], xyz[1], xyz[2]);
}
static DEVICE_ATTR(selftest, S_IRUGO | S_IWUSR | S_IWGRP,
selftest_show, NULL);
static DEVICE_ATTR(status, S_IRUGO | S_IWUSR | S_IWGRP,
status_show, NULL);
static DEVICE_ATTR(adc, S_IRUGO | S_IWUSR | S_IWGRP,
adc_show, NULL);
static DEVICE_ATTR(raw_data, S_IRUGO | S_IWUSR | S_IWGRP,
mag_raw_data_read, NULL);
static struct device_attribute *magnetic_attrs[] = {
&dev_attr_selftest,
&dev_attr_status,
&dev_attr_adc,
&dev_attr_raw_data,
NULL,
};
static int hscd_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
int ret = 0;
struct device *magnetic_device = NULL;
this_client = client;
printk("[HSCD] probe\n");
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->adapter->dev, "client not i2c capable\n");
return -ENOMEM;
}
hscd_power.regulator_vdd = NULL;
hscd_power.regulator_vio = NULL;
hscd_power.regulator_vdd = regulator_get(&client->dev, "vdd_hscdtd");
if (IS_ERR(hscd_power.regulator_vdd)) {
ret = PTR_ERR(hscd_power.regulator_vdd);
hscd_power.regulator_vdd = NULL;
pr_err("%s: failed to get vdd_hscdtd %d\n", __func__, ret);
goto err_setup_regulator;
}
hscd_power.regulator_vio = regulator_get(&client->dev, "vio_hscdtd");
if (IS_ERR(hscd_power.regulator_vio)) {
ret = PTR_ERR(hscd_power.regulator_vio);
hscd_power.regulator_vio = NULL;
pr_err("%s: failed to get vio_hscdtd %d\n", __func__, ret);
goto err_setup_regulator;
}
hscd_power_on();
/* read chip id */
ret = i2c_smbus_read_byte_data(this_client, WHO_AM_I);
pr_info("%s : device ID = 0x%x, reading ID = 0x%x\n", __func__,
DEVICE_ID, ret);
if (ret == DEVICE_ID) /* Normal Operation */
ret = 0;
else {
if (ret < 0)
pr_err("%s: i2c for reading chip id failed\n",
__func__);
else {
pr_err("%s : Device identification failed\n",
__func__);
ret = -ENODEV;
}
goto err_setup_regulator;
}
sensors_register(magnetic_device, NULL, magnetic_attrs,
"magnetic_sensor");
atomic_set(&flgEna, 0);
atomic_set(&flgSuspend, 0);
atomic_set(&delay, 100);
#ifdef CONFIG_HAS_EARLYSUSPEND
register_early_suspend(&hscd_early_suspend_handler);
#endif
pr_info("%s: success.\n", __func__);
return 0;
err_setup_regulator:
if (hscd_power.regulator_vdd) {
regulator_disable(hscd_power.regulator_vdd);
regulator_put(hscd_power.regulator_vdd);
}
if (hscd_power.regulator_vio) {
regulator_disable(hscd_power.regulator_vio);
regulator_put(hscd_power.regulator_vio);
}
this_client = NULL;
pr_err("%s: failed!\n", __func__);
return ret;
}
static int __devexit hscd_remove(struct i2c_client *client)
{
printk("[HSCD] remove\n");
hscd_activate(0, 0, atomic_read(&delay));
#ifdef CONFIG_HAS_EARLYSUSPEND
unregister_early_suspend(&hscd_early_suspend_handler);
#endif
if (hscd_power.regulator_vdd) {
regulator_disable(hscd_power.regulator_vdd);
regulator_put(hscd_power.regulator_vdd);
}
if (hscd_power.regulator_vio) {
regulator_disable(hscd_power.regulator_vio);
regulator_put(hscd_power.regulator_vio);
}
this_client = NULL;
return 0;
}
static int hscd_suspend(struct i2c_client *client, pm_message_t mesg)
{
#ifdef ALPS_DEBUG
printk("[HSCD] suspend\n");
#endif
atomic_set(&flgSuspend, 1);
hscd_activate(0, 0, atomic_read(&delay));
return 0;
}
static int hscd_resume(struct i2c_client *client)
{
#ifdef ALPS_DEBUG
printk("[HSCD] resume\n");
#endif
atomic_set(&flgSuspend, 0);
hscd_activate(0, atomic_read(&flgEna), atomic_read(&delay));
return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
static void hscd_early_suspend(struct early_suspend *handler)
{
#ifdef ALPS_DEBUG
printk("[HSCD] early_suspend\n");
#endif
hscd_suspend(this_client, PMSG_SUSPEND);
}
static void hscd_early_resume(struct early_suspend *handler)
{
#ifdef ALPS_DEBUG
printk("[HSCD] early_resume\n");
#endif
hscd_resume(this_client);
}
#endif
static const struct i2c_device_id ALPS_id[] = {
{ HSCD_DRIVER_NAME, 0 },
{ }
};
static struct i2c_driver hscd_driver = {
.probe = hscd_probe,
.remove = hscd_remove,
.id_table = ALPS_id,
.driver = {
.name = HSCD_DRIVER_NAME,
},
.suspend = hscd_suspend,
.resume = hscd_resume,
};
#ifdef CONFIG_HAS_EARLYSUSPEND
static struct early_suspend hscd_early_suspend_handler = {
.suspend = hscd_early_suspend,
.resume = hscd_early_resume,
};
#endif
static int __init hscd_init(void)
{
return i2c_add_driver(&hscd_driver);
}
static void __exit hscd_exit(void)
{
#ifdef ALPS_DEBUG
printk("[HSCD] exit\n");
#endif
i2c_del_driver(&hscd_driver);
}
module_init(hscd_init);
module_exit(hscd_exit);
EXPORT_SYMBOL(hscd_self_test_A);
EXPORT_SYMBOL(hscd_self_test_B);
EXPORT_SYMBOL(hscd_get_magnetic_field_data);
EXPORT_SYMBOL(hscd_activate);
MODULE_DESCRIPTION("Alps HSCDTD008A Compass Device");
MODULE_AUTHOR("ALPS ELECTRIC CO., LTD.");
MODULE_LICENSE("GPL v2");
| PeterKraus/linux-golden-nfc | drivers/sensor/alps/hscdtd008a_i2c.c | C | gpl-2.0 | 14,804 |
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "prims/jni.h"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
// This file contains copies of the fdlibm routines used by
// StrictMath. It turns out that it is almost always required to use
// these runtime routines; the Intel CPU doesn't meet the Java
// specification for sin/cos outside a certain limited argument range,
// and the SPARC CPU doesn't appear to have sin/cos instructions. It
// also turns out that avoiding the indirect call through function
// pointer out to libjava.so in SharedRuntime speeds these routines up
// by roughly 15% on both Win32/x86 and Solaris/SPARC.
// Enabling optimizations in this file causes incorrect code to be
// generated; can not figure out how to turn down optimization for one
// file in the IDE on Windows
#ifdef WIN32
# pragma optimize ( "", off )
#endif
/* The above workaround now causes more problems with the latest MS compiler.
* Visual Studio 2010's /GS option tries to guard against buffer overruns.
* /GS is on by default if you specify optimizations, which we do globally
* via /W3 /O2. However the above selective turning off of optimizations means
* that /GS issues a warning "4748". And since we treat warnings as errors (/WX)
* then the compilation fails. There are several possible solutions
* (1) Remove that pragma above as obsolete with VS2010 - requires testing.
* (2) Stop treating warnings as errors - would be a backward step
* (3) Disable /GS - may help performance but you lose the security checks
* (4) Disable the warning with "#pragma warning( disable : 4748 )"
* (5) Disable planting the code with __declspec(safebuffers)
* I've opted for (5) although we should investigate the local performance
* benefits of (1) and global performance benefit of (3).
*/
#if defined(WIN32) && (defined(_MSC_VER) && (_MSC_VER >= 1600))
#define SAFEBUF __declspec(safebuffers)
#else
#define SAFEBUF
#endif
#include "runtime/sharedRuntimeMath.hpp"
/*
* __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
* double x[],y[]; int e0,nx,prec; int ipio2[];
*
* __kernel_rem_pio2 return the last three digits of N with
* y = x - N*pi/2
* so that |y| < pi/2.
*
* The method is to compute the integer (mod 8) and fraction parts of
* (2/pi)*x without doing the full multiplication. In general we
* skip the part of the product that is known to be a huge integer (
* more accurately, = 0 mod 8 ). Thus the number of operations is
* independent of the exponent of the input.
*
* (2/pi) is represented by an array of 24-bit integers in ipio2[].
*
* Input parameters:
* x[] The input value (must be positive) is broken into nx
* pieces of 24-bit integers in double precision format.
* x[i] will be the i-th 24 bit of x. The scaled exponent
* of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
* matches x up to 24 bits).
*
* Example of breaking a double positive z into x[0]+x[1]+x[2]:
* e0 = ilogb(z)-23
* z = scalbn(z,-e0)
* for i = 0,1,2
* x[i] = floor(z)
* z = (z-x[i])*2**24
*
*
* y[] output result in an array of double precision numbers.
* The dimension of y[] is:
* 24-bit precision 1
* 53-bit precision 2
* 64-bit precision 2
* 113-bit precision 3
* The actual value is the sum of them. Thus for 113-bit
* precision, one may have to do something like:
*
* long double t,w,r_head, r_tail;
* t = (long double)y[2] + (long double)y[1];
* w = (long double)y[0];
* r_head = t+w;
* r_tail = w - (r_head - t);
*
* e0 The exponent of x[0]
*
* nx dimension of x[]
*
* prec an integer indicating the precision:
* 0 24 bits (single)
* 1 53 bits (double)
* 2 64 bits (extended)
* 3 113 bits (quad)
*
* ipio2[]
* integer array, contains the (24*i)-th to (24*i+23)-th
* bit of 2/pi after binary point. The corresponding
* floating value is
*
* ipio2[i] * 2^(-24(i+1)).
*
* External function:
* double scalbn(), floor();
*
*
* Here is the description of some local variables:
*
* jk jk+1 is the initial number of terms of ipio2[] needed
* in the computation. The recommended value is 2,3,4,
* 6 for single, double, extended, and quad.
*
* jz local integer variable indicating the number of
* terms of ipio2[] used.
*
* jx nx - 1
*
* jv index for pointing to the suitable ipio2[] for the
* computation. In general, we want
* ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
* is an integer. Thus
* e0-3-24*jv >= 0 or (e0-3)/24 >= jv
* Hence jv = max(0,(e0-3)/24).
*
* jp jp+1 is the number of terms in PIo2[] needed, jp = jk.
*
* q[] double array with integral value, representing the
* 24-bits chunk of the product of x and 2/pi.
*
* q0 the corresponding exponent of q[0]. Note that the
* exponent for q[i] would be q0-24*i.
*
* PIo2[] double precision array, obtained by cutting pi/2
* into 24 bits chunks.
*
* f[] ipio2[] in floating point
*
* iq[] integer array by breaking up q[] in 24-bits chunk.
*
* fq[] final product of x*(2/pi) in fq[0],..,fq[jk]
*
* ih integer. If >0 it indicates q[] is >= 0.5, hence
* it also indicates the *sign* of the result.
*
*/
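/*
 * Illustration only (not compiled): the input-splitting recipe from the
 * comment above, written out with the standard <math.h> helpers. The function
 * name is local to this sketch; the actual reduction code performs an
 * equivalent split inline.
 */
#if 0
#include <math.h>

/* Split a positive finite z into 24-bit pieces x[0..2] and return e0, so
   that z == (x[0] + x[1]*2^-24 + x[2]*2^-48) * 2^e0. */
static int split_for_rem_pio2(double z, double x[3]) {
  int e0 = ilogb(z) - 23;
  z = scalbn(z, -e0);
  for (int i = 0; i < 3; i++) {
    x[i] = floor(z);
    z = (z - x[i]) * 16777216.0;   /* 2^24 */
  }
  return e0;
}
#endif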
/*
* Constants:
* The hexadecimal values are the intended ones for the following
* constants. The decimal values may be used, provided that the
* compiler will convert from decimal to binary accurately enough
* to produce the hexadecimal values shown.
*/
static const int init_jk[] = {2,3,4,6}; /* initial value for jk */
static const double PIo2[] = {
1.57079625129699707031e+00, /* 0x3FF921FB, 0x40000000 */
7.54978941586159635335e-08, /* 0x3E74442D, 0x00000000 */
5.39030252995776476554e-15, /* 0x3CF84698, 0x80000000 */
3.28200341580791294123e-22, /* 0x3B78CC51, 0x60000000 */
1.27065575308067607349e-29, /* 0x39F01B83, 0x80000000 */
1.22933308981111328932e-36, /* 0x387A2520, 0x40000000 */
2.73370053816464559624e-44, /* 0x36E38222, 0x80000000 */
2.16741683877804819444e-51, /* 0x3569F31D, 0x00000000 */
};
static const double
zeroB = 0.0,
one = 1.0,
two24B = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
twon24 = 5.96046447753906250000e-08; /* 0x3E700000, 0x00000000 */
static SAFEBUF int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, const int *ipio2) {
int jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih;
double z,fw,f[20],fq[20],q[20];
/* initialize jk*/
jk = init_jk[prec];
jp = jk;
/* determine jx,jv,q0, note that 3>q0 */
jx = nx-1;
jv = (e0-3)/24; if(jv<0) jv=0;
q0 = e0-24*(jv+1);
/* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
j = jv-jx; m = jx+jk;
for(i=0;i<=m;i++,j++) f[i] = (j<0)? zeroB : (double) ipio2[j];
/* compute q[0],q[1],...q[jk] */
for (i=0;i<=jk;i++) {
for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j]; q[i] = fw;
}
jz = jk;
recompute:
/* distill q[] into iq[] reversingly */
for(i=0,j=jz,z=q[jz];j>0;i++,j--) {
fw = (double)((int)(twon24* z));
iq[i] = (int)(z-two24B*fw);
z = q[j-1]+fw;
}
/* compute n */
z = scalbnA(z,q0); /* actual value of z */
z -= 8.0*floor(z*0.125); /* trim off integer >= 8 */
n = (int) z;
z -= (double)n;
ih = 0;
if(q0>0) { /* need iq[jz-1] to determine n */
i = (iq[jz-1]>>(24-q0)); n += i;
iq[jz-1] -= i<<(24-q0);
ih = iq[jz-1]>>(23-q0);
}
else if(q0==0) ih = iq[jz-1]>>23;
else if(z>=0.5) ih=2;
if(ih>0) { /* q > 0.5 */
n += 1; carry = 0;
for(i=0;i<jz ;i++) { /* compute 1-q */
j = iq[i];
if(carry==0) {
if(j!=0) {
carry = 1; iq[i] = 0x1000000- j;
}
} else iq[i] = 0xffffff - j;
}
if(q0>0) { /* rare case: chance is 1 in 12 */
switch(q0) {
case 1:
iq[jz-1] &= 0x7fffff; break;
case 2:
iq[jz-1] &= 0x3fffff; break;
}
}
if(ih==2) {
z = one - z;
if(carry!=0) z -= scalbnA(one,q0);
}
}
/* check if recomputation is needed */
if(z==zeroB) {
j = 0;
for (i=jz-1;i>=jk;i--) j |= iq[i];
if(j==0) { /* need recomputation */
for(k=1;iq[jk-k]==0;k++); /* k = no. of terms needed */
for(i=jz+1;i<=jz+k;i++) { /* add q[jz+1] to q[jz+k] */
f[jx+i] = (double) ipio2[jv+i];
for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
q[i] = fw;
}
jz += k;
goto recompute;
}
}
/* chop off zero terms */
if(z==0.0) {
jz -= 1; q0 -= 24;
while(iq[jz]==0) { jz--; q0-=24;}
} else { /* break z into 24-bit if necessary */
z = scalbnA(z,-q0);
if(z>=two24B) {
fw = (double)((int)(twon24*z));
iq[jz] = (int)(z-two24B*fw);
jz += 1; q0 += 24;
iq[jz] = (int) fw;
} else iq[jz] = (int) z ;
}
/* convert integer "bit" chunk to floating-point value */
fw = scalbnA(one,q0);
for(i=jz;i>=0;i--) {
q[i] = fw*(double)iq[i]; fw*=twon24;
}
/* compute PIo2[0,...,jp]*q[jz,...,0] */
for(i=jz;i>=0;i--) {
for(fw=0.0,k=0;k<=jp&&k<=jz-i;k++) fw += PIo2[k]*q[i+k];
fq[jz-i] = fw;
}
/* compress fq[] into y[] */
switch(prec) {
case 0:
fw = 0.0;
for (i=jz;i>=0;i--) fw += fq[i];
y[0] = (ih==0)? fw: -fw;
break;
case 1:
case 2:
fw = 0.0;
for (i=jz;i>=0;i--) fw += fq[i];
y[0] = (ih==0)? fw: -fw;
fw = fq[0]-fw;
for (i=1;i<=jz;i++) fw += fq[i];
y[1] = (ih==0)? fw: -fw;
break;
case 3: /* painful */
for (i=jz;i>0;i--) {
fw = fq[i-1]+fq[i];
fq[i] += fq[i-1]-fw;
fq[i-1] = fw;
}
for (i=jz;i>1;i--) {
fw = fq[i-1]+fq[i];
fq[i] += fq[i-1]-fw;
fq[i-1] = fw;
}
for (fw=0.0,i=jz;i>=2;i--) fw += fq[i];
if(ih==0) {
y[0] = fq[0]; y[1] = fq[1]; y[2] = fw;
} else {
y[0] = -fq[0]; y[1] = -fq[1]; y[2] = -fw;
}
}
return n&7;
}
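/*
 * Rough usage sketch (it mirrors what __ieee754_rem_pio2() further down
 * actually does): the caller hands in |x| split into nx <= 3 doubles that
 * each hold a 24-bit integer chunk, plus the leading chunk's exponent e0,
 * e.g.
 *
 *	double tx[3], y[2];
 *	// ... fill tx[] with the 24-bit chunks of |x| and compute e0 ...
 *	int n = __kernel_rem_pio2(tx, y, e0, 3, 2, two_over_pi);
 *
 * The return value is the low three bits of the multiple of pi/2 that was
 * removed, and y[0] (plus y[1] for prec >= 1) is the reduced argument in
 * [-pi/4, pi/4].
 */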
/*
* ====================================================
 * Copyright (c) 1993 Oracle and/or its affiliates. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*
*/
/* __ieee754_rem_pio2(x,y)
*
* return the remainder of x rem pi/2 in y[0]+y[1]
* use __kernel_rem_pio2()
*/
/*
* Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
*/
static const int two_over_pi[] = {
0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62,
0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A,
0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41,
0x3991D6, 0x398353, 0x39F49C, 0x845F8B, 0xBDF928, 0x3B1FF8,
0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08,
0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880,
0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B,
};
static const int npio2_hw[] = {
0x3FF921FB, 0x400921FB, 0x4012D97C, 0x401921FB, 0x401F6A7A, 0x4022D97C,
0x4025FDBB, 0x402921FB, 0x402C463A, 0x402F6A7A, 0x4031475C, 0x4032D97C,
0x40346B9C, 0x4035FDBB, 0x40378FDB, 0x403921FB, 0x403AB41B, 0x403C463A,
0x403DD85A, 0x403F6A7A, 0x40407E4C, 0x4041475C, 0x4042106C, 0x4042D97C,
0x4043A28C, 0x40446B9C, 0x404534AC, 0x4045FDBB, 0x4046C6CB, 0x40478FDB,
0x404858EB, 0x404921FB,
};
/*
* invpio2: 53 bits of 2/pi
* pio2_1: first 33 bit of pi/2
* pio2_1t: pi/2 - pio2_1
* pio2_2: second 33 bit of pi/2
* pio2_2t: pi/2 - (pio2_1+pio2_2)
* pio2_3: third 33 bit of pi/2
* pio2_3t: pi/2 - (pio2_1+pio2_2+pio2_3)
*/
static const double
zeroA = 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
two24A = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
invpio2 = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
pio2_1 = 1.57079632673412561417e+00, /* 0x3FF921FB, 0x54400000 */
pio2_1t = 6.07710050650619224932e-11, /* 0x3DD0B461, 0x1A626331 */
pio2_2 = 6.07710050630396597660e-11, /* 0x3DD0B461, 0x1A600000 */
pio2_2t = 2.02226624879595063154e-21, /* 0x3BA3198A, 0x2E037073 */
pio2_3 = 2.02226624871116645580e-21, /* 0x3BA3198A, 0x2E000000 */
pio2_3t = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
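/*
 * The point of the 33-bit splits: pio2_1 keeps only the top ~33 bits of
 * pi/2 (its low mantissa bits are zero), so in the medium-size path of
 * __ieee754_rem_pio2() below, where n < 2^20, the product fn*pio2_1 needs
 * at most 33+20 = 53 bits and is therefore exact; that is what makes the
 * "33+53 bit pi" first round good enough in the common case.
 */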
static SAFEBUF int __ieee754_rem_pio2(double x, double *y) {
double z,w,t,r,fn;
double tx[3];
int e0,i,j,nx,n,ix,hx,i0;
i0 = ((*(int*)&two24A)>>30)^1; /* high word index */
hx = *(i0+(int*)&x); /* high word of x */
ix = hx&0x7fffffff;
if(ix<=0x3fe921fb) /* |x| ~<= pi/4 , no need for reduction */
{y[0] = x; y[1] = 0; return 0;}
if(ix<0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
if(hx>0) {
z = x - pio2_1;
if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
y[0] = z - pio2_1t;
y[1] = (z-y[0])-pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
z -= pio2_2;
y[0] = z - pio2_2t;
y[1] = (z-y[0])-pio2_2t;
}
return 1;
} else { /* negative x */
z = x + pio2_1;
if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
y[0] = z + pio2_1t;
y[1] = (z-y[0])+pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
z += pio2_2;
y[0] = z + pio2_2t;
y[1] = (z-y[0])+pio2_2t;
}
return -1;
}
}
if(ix<=0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
t = fabsd(x);
n = (int) (t*invpio2+half);
fn = (double)n;
r = t-fn*pio2_1;
w = fn*pio2_1t; /* 1st round good to 85 bit */
if(n<32&&ix!=npio2_hw[n-1]) {
y[0] = r-w; /* quick check no cancellation */
} else {
j = ix>>20;
y[0] = r-w;
i = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
if(i>16) { /* 2nd iteration needed, good to 118 */
t = r;
w = fn*pio2_2;
r = t-w;
w = fn*pio2_2t-((t-r)-w);
y[0] = r-w;
i = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
if(i>49) { /* 3rd iteration needed, 151 bits acc */
t = r; /* will cover all possible cases */
w = fn*pio2_3;
r = t-w;
w = fn*pio2_3t-((t-r)-w);
y[0] = r-w;
}
}
}
y[1] = (r-y[0])-w;
if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
else return n;
}
/*
* all other (large) arguments
*/
if(ix>=0x7ff00000) { /* x is inf or NaN */
y[0]=y[1]=x-x; return 0;
}
/* set z = scalbn(|x|,ilogb(x)-23) */
*(1-i0+(int*)&z) = *(1-i0+(int*)&x);
e0 = (ix>>20)-1046; /* e0 = ilogb(z)-23; */
*(i0+(int*)&z) = ix - (e0<<20);
for(i=0;i<2;i++) {
tx[i] = (double)((int)(z));
z = (z-tx[i])*two24A;
}
tx[2] = z;
nx = 3;
while(tx[nx-1]==zeroA) nx--; /* skip zero term */
n = __kernel_rem_pio2(tx,y,e0,nx,2,two_over_pi);
if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
return n;
}
/* __kernel_sin( x, y, iy)
* kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
* Input x is assumed to be bounded by ~pi/4 in magnitude.
* Input y is the tail of x.
 * Input iy indicates whether y is 0. (if iy=0, y is assumed to be 0).
*
* Algorithm
* 1. Since sin(-x) = -sin(x), we need only to consider positive x.
* 2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
* 3. sin(x) is approximated by a polynomial of degree 13 on
* [0,pi/4]
 *	      sin(x) ~ x + S1*x^3 + ... + S6*x^13
 *	   where
 *
 *	      |sin(x)/x - (1 + S1*x^2 + S2*x^4 + S3*x^6 + S4*x^8 + S5*x^10 + S6*x^12)| <= 2^-58
*
* 4. sin(x+y) = sin(x) + sin'(x')*y
* ~ sin(x) + (1-x*x/2)*y
* For better accuracy, let
 *	      r = x^3*(S2 + x^2*(S3 + x^2*(S4 + x^2*(S5 + x^2*S6))))
 *	   then
 *	      sin(x) = x + (S1*x^3 + (x^2*(r-y/2) + y))
*/
static const double
S1 = -1.66666666666666324348e-01, /* 0xBFC55555, 0x55555549 */
S2 = 8.33333333332248946124e-03, /* 0x3F811111, 0x1110F8A6 */
S3 = -1.98412698298579493134e-04, /* 0xBF2A01A0, 0x19C161D5 */
S4 = 2.75573137070700676789e-06, /* 0x3EC71DE3, 0x57B1FE7D */
S5 = -2.50507602534068634195e-08, /* 0xBE5AE5E6, 0x8A2B9CEB */
S6 = 1.58969099521155010221e-10; /* 0x3DE5D93A, 0x5ACFD57C */
static double __kernel_sin(double x, double y, int iy)
{
double z,r,v;
int ix;
ix = high(x)&0x7fffffff; /* high word of x */
if(ix<0x3e400000) /* |x| < 2**-27 */
{if((int)x==0) return x;} /* generate inexact */
z = x*x;
v = z*x;
r = S2+z*(S3+z*(S4+z*(S5+z*S6)));
if(iy==0) return x+v*(S1+z*r);
else return x-((z*(half*y-v*r)-y)-v*S1);
}
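/*
 * Tying the code above to the formula in the comment block: z = x*x,
 * v = x^3 and r = S2 + S3*z + ... + S6*z^4, so the iy==0 return value
 * x + v*(S1 + z*r) is exactly x + S1*x^3 + ... + S6*x^13; the iy!=0 branch
 * evaluates the same polynomial but also folds in the tail correction
 * y - (x*x/2)*y from step 4 above.
 */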
/*
* __kernel_cos( x, y )
* kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
* Input x is assumed to be bounded by ~pi/4 in magnitude.
* Input y is the tail of x.
*
* Algorithm
* 1. Since cos(-x) = cos(x), we need only to consider positive x.
* 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
* 3. cos(x) is approximated by a polynomial of degree 14 on
* [0,pi/4]
 *	      cos(x) ~ 1 - x*x/2 + C1*x^4 + ... + C6*x^14
 *	   where the Remez error is
 *
 *	      |cos(x) - (1 - .5*x^2 + C1*x^4 + C2*x^6 + C3*x^8 + C4*x^10 + C5*x^12 + C6*x^14)| <= 2^-58
 *
 *	4. let r = C1*x^4 + C2*x^6 + C3*x^8 + C4*x^10 + C5*x^12 + C6*x^14, then
* cos(x) = 1 - x*x/2 + r
* since cos(x+y) ~ cos(x) - sin(x)*y
* ~ cos(x) - x*y,
* a correction term is necessary in cos(x) and hence
* cos(x+y) = 1 - (x*x/2 - (r - x*y))
* For better accuracy when x > 0.3, let qx = |x|/4 with
* the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
* Then
* cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)).
 * Note that 1-qx and (x*x/2-qx) are EXACT here, and the
* magnitude of the latter is at least a quarter of x*x/2,
* thus, reducing the rounding error in the subtraction.
*/
static const double
C1 = 4.16666666666666019037e-02, /* 0x3FA55555, 0x5555554C */
C2 = -1.38888888888741095749e-03, /* 0xBF56C16C, 0x16C15177 */
C3 = 2.48015872894767294178e-05, /* 0x3EFA01A0, 0x19CB1590 */
C4 = -2.75573143513906633035e-07, /* 0xBE927E4F, 0x809C52AD */
C5 = 2.08757232129817482790e-09, /* 0x3E21EE9E, 0xBDB4B1C4 */
C6 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
static double __kernel_cos(double x, double y)
{
double a,h,z,r,qx=0;
int ix;
ix = high(x)&0x7fffffff; /* ix = |x|'s high word*/
if(ix<0x3e400000) { /* if |x| < 2**-27 */
if(((int)x)==0) return one; /* generate inexact */
}
z = x*x;
r = z*(C1+z*(C2+z*(C3+z*(C4+z*(C5+z*C6)))));
if(ix < 0x3FD33333) /* if |x| < 0.3 */
return one - (0.5*z - (z*r - x*y));
else {
if(ix > 0x3fe90000) { /* x > 0.78125 */
qx = 0.28125;
} else {
set_high(&qx, ix-0x00200000); /* x/4 */
set_low(&qx, 0);
}
h = 0.5*z-qx;
a = one-qx;
return a - (h - (z*r-x*y));
}
}
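/*
 * The set_low(&qx, 0) above is what the "EXACT" remark in the comment block
 * relies on: qx keeps only the upper word of |x|/4 (or the constant
 * 0.28125), so both 1-qx and x*x/2-qx can be formed without rounding and
 * the final subtraction loses as little as possible.
 */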
/* __kernel_tan( x, y, k )
* kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
* Input x is assumed to be bounded by ~pi/4 in magnitude.
* Input y is the tail of x.
* Input k indicates whether tan (if k=1) or
* -1/tan (if k= -1) is returned.
*
* Algorithm
* 1. Since tan(-x) = -tan(x), we need only to consider positive x.
* 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
 * 3. tan(x) is approximated by an odd polynomial of degree 27 on
* [0,0.67434]
 *	      tan(x) ~ x + T1*x^3 + ... + T13*x^27
 *	   where
 *
 *	      |tan(x)/x - (1 + T1*x^2 + T2*x^4 + .... + T13*x^26)| <= 2^-59.2
*
* Note: tan(x+y) = tan(x) + tan'(x)*y
* ~ tan(x) + (1+x*x)*y
* Therefore, for better accuracy in computing tan(x+y), let
 *	      r = x^3*(T2 + x^2*(T3 + x^2*(... + x^2*(T12 + x^2*T13))))
 *	   then
 *	      tan(x+y) = x + (T1*x^3 + (x^2*(r+y) + y))
*
* 4. For x in [0.67434,pi/4], let y = pi/4 - x, then
* tan(x) = tan(pi/4-y) = (1-tan(y))/(1+tan(y))
* = 1 - 2*(tan(y) - (tan(y)^2)/(1+tan(y)))
*/
static const double
pio4 = 7.85398163397448278999e-01, /* 0x3FE921FB, 0x54442D18 */
pio4lo= 3.06161699786838301793e-17, /* 0x3C81A626, 0x33145C07 */
T[] = {
3.33333333333334091986e-01, /* 0x3FD55555, 0x55555563 */
1.33333333333201242699e-01, /* 0x3FC11111, 0x1110FE7A */
5.39682539762260521377e-02, /* 0x3FABA1BA, 0x1BB341FE */
2.18694882948595424599e-02, /* 0x3F9664F4, 0x8406D637 */
8.86323982359930005737e-03, /* 0x3F8226E3, 0xE96E8493 */
3.59207910759131235356e-03, /* 0x3F6D6D22, 0xC9560328 */
1.45620945432529025516e-03, /* 0x3F57DBC8, 0xFEE08315 */
5.88041240820264096874e-04, /* 0x3F4344D8, 0xF2F26501 */
2.46463134818469906812e-04, /* 0x3F3026F7, 0x1A8D1068 */
7.81794442939557092300e-05, /* 0x3F147E88, 0xA03792A6 */
7.14072491382608190305e-05, /* 0x3F12B80F, 0x32F0A7E9 */
-1.85586374855275456654e-05, /* 0xBEF375CB, 0xDB605373 */
2.59073051863633712884e-05, /* 0x3EFB2A70, 0x74BF7AD4 */
};
static double __kernel_tan(double x, double y, int iy)
{
double z,r,v,w,s;
int ix,hx;
hx = high(x); /* high word of x */
ix = hx&0x7fffffff; /* high word of |x| */
if(ix<0x3e300000) { /* x < 2**-28 */
if((int)x==0) { /* generate inexact */
if (((ix | low(x)) | (iy + 1)) == 0)
return one / fabsd(x);
else {
if (iy == 1)
return x;
else { /* compute -1 / (x+y) carefully */
double a, t;
z = w = x + y;
set_low(&z, 0);
v = y - (z - x);
t = a = -one / w;
set_low(&t, 0);
s = one + t * z;
return t + a * (s + t * v);
}
}
}
}
if(ix>=0x3FE59428) { /* |x|>=0.6744 */
if(hx<0) {x = -x; y = -y;}
z = pio4-x;
w = pio4lo-y;
x = z+w; y = 0.0;
}
z = x*x;
w = z*z;
/* Break x^5*(T[1]+x^2*T[2]+...) into
* x^5(T[1]+x^4*T[3]+...+x^20*T[11]) +
 * x^5(x^2*(T[2]+x^4*T[4]+...+x^22*T[12]))
*/
r = T[1]+w*(T[3]+w*(T[5]+w*(T[7]+w*(T[9]+w*T[11]))));
v = z*(T[2]+w*(T[4]+w*(T[6]+w*(T[8]+w*(T[10]+w*T[12])))));
s = z*x;
r = y + z*(s*(r+v)+y);
r += T[0]*s;
w = x+r;
if(ix>=0x3FE59428) {
v = (double)iy;
return (double)(1-((hx>>30)&2))*(v-2.0*(x-(w*w/(w+v)-r)));
}
if(iy==1) return w;
else { /* if an error of up to 2 ulp were acceptable, we
could simply return -1.0/(x+r) here */
/* compute -1.0/(x+r) accurately */
double a,t;
z = w;
set_low(&z, 0);
v = r-(z - x); /* z+v = r+x */
t = a = -1.0/w; /* a = -1.0/w */
set_low(&t, 0);
s = 1.0+t*z;
return t+a*(s+t*v);
}
}
//----------------------------------------------------------------------
//
// Routines for new sin/cos implementation
//
//----------------------------------------------------------------------
/* sin(x)
* Return sine function of x.
*
* kernel function:
* __kernel_sin ... sine function on [-pi/4,pi/4]
 * __kernel_cos ... cosine function on [-pi/4,pi/4]
* __ieee754_rem_pio2 ... argument reduction routine
*
* Method.
* Let S,C and T denote the sin, cos and tan respectively on
* [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
* in [-pi/4 , +pi/4], and let n = k mod 4.
* We have
*
* n sin(x) cos(x) tan(x)
* ----------------------------------------------------------
* 0 S C T
* 1 C -S -1/T
* 2 -S -C T
* 3 -C S -1/T
* ----------------------------------------------------------
*
* Special cases:
* Let trig be any of sin, cos, or tan.
* trig(+-INF) is NaN, with signals;
* trig(NaN) is that NaN;
*
* Accuracy:
* TRIG(x) returns trig(x) nearly rounded
*/
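/*
 * Worked example of the table above: for x = 2.0 the reduction returns
 * n = 1 with y[0]+y[1] approximately 2 - pi/2 = 0.4292, so sin(2.0) is
 * computed as __kernel_cos(y[0], y[1]), roughly 0.909297.
 */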
JRT_LEAF(jdouble, SharedRuntime::dsin(jdouble x))
double y[2],z=0.0;
int n, ix;
/* High word of x. */
ix = high(x);
/* |x| ~< pi/4 */
ix &= 0x7fffffff;
if(ix <= 0x3fe921fb) return __kernel_sin(x,z,0);
/* sin(Inf or NaN) is NaN */
else if (ix>=0x7ff00000) return x-x;
/* argument reduction needed */
else {
n = __ieee754_rem_pio2(x,y);
switch(n&3) {
case 0: return __kernel_sin(y[0],y[1],1);
case 1: return __kernel_cos(y[0],y[1]);
case 2: return -__kernel_sin(y[0],y[1],1);
default:
return -__kernel_cos(y[0],y[1]);
}
}
JRT_END
/* cos(x)
* Return cosine function of x.
*
* kernel function:
* __kernel_sin ... sine function on [-pi/4,pi/4]
* __kernel_cos ... cosine function on [-pi/4,pi/4]
* __ieee754_rem_pio2 ... argument reduction routine
*
* Method.
* Let S,C and T denote the sin, cos and tan respectively on
* [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
* in [-pi/4 , +pi/4], and let n = k mod 4.
* We have
*
* n sin(x) cos(x) tan(x)
* ----------------------------------------------------------
* 0 S C T
* 1 C -S -1/T
* 2 -S -C T
* 3 -C S -1/T
* ----------------------------------------------------------
*
* Special cases:
* Let trig be any of sin, cos, or tan.
* trig(+-INF) is NaN, with signals;
* trig(NaN) is that NaN;
*
* Accuracy:
* TRIG(x) returns trig(x) nearly rounded
*/
JRT_LEAF(jdouble, SharedRuntime::dcos(jdouble x))
double y[2],z=0.0;
int n, ix;
/* High word of x. */
ix = high(x);
/* |x| ~< pi/4 */
ix &= 0x7fffffff;
if(ix <= 0x3fe921fb) return __kernel_cos(x,z);
/* cos(Inf or NaN) is NaN */
else if (ix>=0x7ff00000) return x-x;
/* argument reduction needed */
else {
n = __ieee754_rem_pio2(x,y);
switch(n&3) {
case 0: return __kernel_cos(y[0],y[1]);
case 1: return -__kernel_sin(y[0],y[1],1);
case 2: return -__kernel_cos(y[0],y[1]);
default:
return __kernel_sin(y[0],y[1],1);
}
}
JRT_END
/* tan(x)
* Return tangent function of x.
*
* kernel function:
* __kernel_tan ... tangent function on [-pi/4,pi/4]
* __ieee754_rem_pio2 ... argument reduction routine
*
* Method.
* Let S,C and T denote the sin, cos and tan respectively on
* [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
* in [-pi/4 , +pi/4], and let n = k mod 4.
* We have
*
* n sin(x) cos(x) tan(x)
* ----------------------------------------------------------
* 0 S C T
* 1 C -S -1/T
* 2 -S -C T
* 3 -C S -1/T
* ----------------------------------------------------------
*
* Special cases:
* Let trig be any of sin, cos, or tan.
* trig(+-INF) is NaN, with signals;
* trig(NaN) is that NaN;
*
* Accuracy:
* TRIG(x) returns trig(x) nearly rounded
*/
JRT_LEAF(jdouble, SharedRuntime::dtan(jdouble x))
double y[2],z=0.0;
int n, ix;
/* High word of x. */
ix = high(x);
/* |x| ~< pi/4 */
ix &= 0x7fffffff;
if(ix <= 0x3fe921fb) return __kernel_tan(x,z,1);
/* tan(Inf or NaN) is NaN */
else if (ix>=0x7ff00000) return x-x; /* NaN */
/* argument reduction needed */
else {
n = __ieee754_rem_pio2(x,y);
return __kernel_tan(y[0],y[1],1-((n&1)<<1)); /* 1 -- n even
-1 -- n odd */
}
JRT_END
#ifdef WIN32
# pragma optimize ( "", on )
#endif
| MyProgrammingStyle/hotspot | src/share/vm/runtime/sharedRuntimeTrig.cpp | C++ | gpl-2.0 | 30,973 |
/* vi: set sw=4 ts=4: */
/*
* setconsole.c - redirect system console output
*
* Copyright (C) 2004,2005 Enrik Berkhan <Enrik.Berkhan@inka.de>
* Copyright (C) 2008 Bernhard Reutner-Fischer
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//usage:#define setconsole_trivial_usage
//usage: "[-r" IF_FEATURE_SETCONSOLE_LONG_OPTIONS("|--reset") "] [DEVICE]"
//usage:#define setconsole_full_usage "\n\n"
//usage: "Redirect system console output to DEVICE (default: /dev/tty)\n"
//usage: "\n -r Reset output to /dev/console"
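/*
 * Example invocations, following the usage text above:
 *   setconsole /dev/tty1    (send console messages to tty1)
 *   setconsole -r           (send them back to /dev/console)
 */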
#include "libbb.h"
int setconsole_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int setconsole_main(int argc UNUSED_PARAM, char **argv)
{
const char *device = CURRENT_TTY;
bool reset;
#if ENABLE_FEATURE_SETCONSOLE_LONG_OPTIONS
static const char setconsole_longopts[] ALIGN1 =
"reset\0" No_argument "r"
;
applet_long_options = setconsole_longopts;
#endif
/* at most one non-option argument */
opt_complementary = "?1";
reset = getopt32(argv, "r");
argv += 1 + reset;
if (*argv) {
device = *argv;
} else {
if (reset)
device = DEV_CONSOLE;
}
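/* TIOCCONS on the chosen device is what asks the kernel to redirect
 * console output there (or back to /dev/console when -r was given). */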
xioctl(xopen(device, O_WRONLY), TIOCCONS, NULL);
return EXIT_SUCCESS;
}
| KitKatPurity/platform_external_busybox | console-tools/setconsole.c | C | gpl-2.0 | 1,220 |
<?php
namespace Drupal\block_content\Tests;
use Drupal\block_content\Entity\BlockContent;
use Drupal\Component\Utility\Unicode;
use Drupal\Core\Database\Database;
/**
* Create a block and test saving it.
*
* @group block_content
*/
class BlockContentCreationTest extends BlockContentTestBase {
/**
* Modules to enable.
*
* Enable dummy module that implements hook_block_insert() for exceptions and
* field_ui to edit display settings.
*
* @var array
*/
public static $modules = array('block_content_test', 'dblog', 'field_ui');
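  // Note: block_content_test (enabled above) presumably supplies the insert
  // hook that throws for the 'fail_creation' block used by
  // testFailedBlockCreation() below, and dblog is what lets that test
  // inspect the watchdog table.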
/**
* Permissions to grant admin user.
*
* @var array
*/
protected $permissions = array(
'administer blocks',
'administer block_content display'
);
/**
* Sets the test up.
*/
protected function setUp() {
parent::setUp();
$this->drupalLogin($this->adminUser);
}
/**
* Creates a "Basic page" block and verifies its consistency in the database.
*/
public function testBlockContentCreation() {
$this->drupalLogin($this->adminUser);
// Create a block.
$edit = array();
$edit['info[0][value]'] = 'Test Block';
$edit['body[0][value]'] = $this->randomMachineName(16);
$this->drupalPostForm('block/add/basic', $edit, t('Save'));
// Check that the Basic block has been created.
$this->assertRaw(format_string('@block %name has been created.', array(
'@block' => 'basic',
'%name' => $edit['info[0][value]']
)), 'Basic block created.');
// Check that the view mode setting is hidden because only one exists.
$this->assertNoFieldByXPath('//select[@name="settings[view_mode]"]', NULL, 'View mode setting hidden because only one exists');
// Check that the block exists in the database.
$blocks = entity_load_multiple_by_properties('block_content', array('info' => $edit['info[0][value]']));
$block = reset($blocks);
$this->assertTrue($block, 'Custom Block found in database.');
// Check that attempting to create another block with the same value for
// 'info' returns an error.
$this->drupalPostForm('block/add/basic', $edit, t('Save'));
// Check that the Basic block has been created.
$this->assertRaw(format_string('A custom block with block description %value already exists.', array(
'%value' => $edit['info[0][value]']
)));
$this->assertResponse(200);
}
/**
* Creates a "Basic page" block with multiple view modes.
*/
public function testBlockContentCreationMultipleViewModes() {
// Add a new view mode and verify if it is selected as expected.
$this->drupalLogin($this->drupalCreateUser(array('administer display modes')));
$this->drupalGet('admin/structure/display-modes/view/add/block_content');
$edit = array(
'id' => 'test_view_mode',
'label' => 'Test View Mode',
);
$this->drupalPostForm(NULL, $edit, t('Save'));
$this->assertRaw(t('Saved the %label view mode.', array('%label' => $edit['label'])));
$this->drupalLogin($this->adminUser);
// Create a block.
$edit = array();
$edit['info[0][value]'] = 'Test Block';
$edit['body[0][value]'] = $this->randomMachineName(16);
$this->drupalPostForm('block/add/basic', $edit, t('Save'));
// Check that the Basic block has been created.
$this->assertRaw(format_string('@block %name has been created.', array(
'@block' => 'basic',
'%name' => $edit['info[0][value]']
)), 'Basic block created.');
// Save our block permanently
$this->drupalPostForm(NULL, NULL, t('Save block'));
// Set test_view_mode as a custom display to be available on the list.
$this->drupalGet('admin/structure/block/block-content');
$this->drupalGet('admin/structure/block/block-content/types');
$this->clickLink(t('Manage display'));
$this->drupalGet('admin/structure/block/block-content/manage/basic/display');
$custom_view_mode = array(
'display_modes_custom[test_view_mode]' => 1,
);
$this->drupalPostForm(NULL, $custom_view_mode, t('Save'));
// Go to the configure page and change the view mode.
$this->drupalGet('admin/structure/block/manage/testblock');
// Test the available view mode options.
$this->assertOption('edit-settings-view-mode', 'default', 'The default view mode is available.');
$this->assertOption('edit-settings-view-mode', 'test_view_mode', 'The test view mode is available.');
$view_mode['settings[view_mode]'] = 'test_view_mode';
$this->drupalPostForm(NULL, $view_mode, t('Save block'));
// Check that the view mode setting is shown because more than one exists.
$this->drupalGet('admin/structure/block/manage/testblock');
$this->assertFieldByXPath('//select[@name="settings[view_mode]"]', NULL, 'View mode setting shown because multiple exist');
// Change the view mode.
$view_mode['settings[view_mode]'] = 'test_view_mode';
$this->drupalPostForm(NULL, $view_mode, t('Save block'));
// Go to the configure page and verify the view mode has changed.
$this->drupalGet('admin/structure/block/manage/testblock');
$this->assertFieldByXPath('//select[@name="settings[view_mode]"]/option[@selected="selected"]/@value', 'test_view_mode', 'View mode changed to Test View Mode');
// Check that the block exists in the database.
$blocks = entity_load_multiple_by_properties('block_content', array('info' => $edit['info[0][value]']));
$block = reset($blocks);
$this->assertTrue($block, 'Custom Block found in database.');
// Check that attempting to create another block with the same value for
// 'info' returns an error.
$this->drupalPostForm('block/add/basic', $edit, t('Save'));
// Check that the Basic block has been created.
$this->assertRaw(format_string('A custom block with block description %value already exists.', array(
'%value' => $edit['info[0][value]']
)));
$this->assertResponse(200);
}
/**
* Create a default custom block.
*
* Creates a custom block from defaults and ensures that the 'basic block'
* type is being used.
*/
public function testDefaultBlockContentCreation() {
$edit = array();
$edit['info[0][value]'] = $this->randomMachineName(8);
$edit['body[0][value]'] = $this->randomMachineName(16);
// Don't pass the custom block type in the url so the default is forced.
$this->drupalPostForm('block/add', $edit, t('Save'));
// Check that the block has been created and that it is a basic block.
$this->assertRaw(format_string('@block %name has been created.', array(
'@block' => 'basic',
'%name' => $edit['info[0][value]'],
)), 'Basic block created.');
// Check that the block exists in the database.
$blocks = entity_load_multiple_by_properties('block_content', array('info' => $edit['info[0][value]']));
$block = reset($blocks);
$this->assertTrue($block, 'Default Custom Block found in database.');
}
/**
* Verifies that a transaction rolls back the failed creation.
*/
public function testFailedBlockCreation() {
// Create a block.
try {
$this->createBlockContent('fail_creation');
$this->fail('Expected exception has not been thrown.');
}
catch (\Exception $e) {
$this->pass('Expected exception has been thrown.');
}
if (Database::getConnection()->supportsTransactions()) {
// Check that the block does not exist in the database.
$id = db_select('block_content_field_data', 'b')
->fields('b', array('id'))
->condition('info', 'fail_creation')
->execute()
->fetchField();
$this->assertFalse($id, 'Transactions supported, and block not found in database.');
}
else {
// Check that the block exists in the database.
$id = db_select('block_content_field_data', 'b')
->fields('b', array('id'))
->condition('info', 'fail_creation')
->execute()
->fetchField();
$this->assertTrue($id, 'Transactions not supported, and block found in database.');
// Check that the failed rollback was logged.
$records = db_query("SELECT wid FROM {watchdog} WHERE message LIKE 'Explicit rollback failed%'")->fetchAll();
$this->assertTrue(count($records) > 0, 'Transactions not supported, and rollback error logged to watchdog.');
}
}
/**
* Test deleting a block.
*/
public function testBlockDelete() {
// Create a block.
$edit = array();
$edit['info[0][value]'] = $this->randomMachineName(8);
$body = $this->randomMachineName(16);
$edit['body[0][value]'] = $body;
$this->drupalPostForm('block/add/basic', $edit, t('Save'));
// Place the block.
$instance = array(
'id' => Unicode::strtolower($edit['info[0][value]']),
'settings[label]' => $edit['info[0][value]'],
'region' => 'sidebar_first',
);
$block = BlockContent::load(1);
$url = 'admin/structure/block/add/block_content:' . $block->uuid() . '/' . $this->config('system.theme')->get('default');
$this->drupalPostForm($url, $instance, t('Save block'));
$block = BlockContent::load(1);
// Test getInstances method.
$this->assertEqual(1, count($block->getInstances()));
// Navigate to home page.
$this->drupalGet('');
$this->assertText($body);
// Delete the block.
$this->drupalGet('block/1/delete');
$this->assertText(\Drupal::translation()->formatPlural(1, 'This will also remove 1 placed block instance.', 'This will also remove @count placed block instances.'));
$this->drupalPostForm(NULL, array(), 'Delete');
$this->assertRaw(t('The custom block %name has been deleted.', array('%name' => $edit['info[0][value]'])));
// Create another block and force the plugin cache to flush.
$edit2 = array();
$edit2['info[0][value]'] = $this->randomMachineName(8);
$body2 = $this->randomMachineName(16);
$edit2['body[0][value]'] = $body2;
$this->drupalPostForm('block/add/basic', $edit2, t('Save'));
$this->assertNoRaw('Error message');
// Create another block with no instances, and test we don't get a
// confirmation message about deleting instances.
$edit3 = array();
$edit3['info[0][value]'] = $this->randomMachineName(8);
$body = $this->randomMachineName(16);
$edit3['body[0][value]'] = $body;
$this->drupalPostForm('block/add/basic', $edit3, t('Save'));
// Show the delete confirm form.
$this->drupalGet('block/3/delete');
$this->assertNoText('This will also remove');
}
/**
* Test that placed content blocks create a dependency in the block placement.
*/
public function testConfigDependencies() {
$block = $this->createBlockContent();
// Place the block.
$block_placement_id = Unicode::strtolower($block->label());
$instance = array(
'id' => $block_placement_id,
'settings[label]' => $block->label(),
'region' => 'sidebar_first',
);
$block = BlockContent::load(1);
$url = 'admin/structure/block/add/block_content:' . $block->uuid() . '/' . $this->config('system.theme')->get('default');
$this->drupalPostForm($url, $instance, t('Save block'));
$dependencies = \Drupal::service('config.manager')->findConfigEntityDependentsAsEntities('content', array($block->getConfigDependencyName()));
$block_placement = reset($dependencies);
$this->assertEqual($block_placement_id, $block_placement->id(), "The block placement config entity has a dependency on the block content entity.");
}
}
| delion1/D8-Beer | core/modules/block_content/src/Tests/BlockContentCreationTest.php | PHP | gpl-2.0 | 11,518 |
/*
* Driver for the NVIDIA Tegra pinmux
*
* Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
*
* Derived from code:
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2010 NVIDIA Corporation
* Copyright (C) 2009-2011 ST-Ericsson AB
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/slab.h>
#include "core.h"
#include "pinctrl-tegra.h"
#include "pinctrl-utils.h"
struct tegra_pmx {
struct device *dev;
struct pinctrl_dev *pctl;
const struct tegra_pinctrl_soc_data *soc;
const char **group_pins;
int nbanks;
void __iomem **regs;
};
static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
{
return readl(pmx->regs[bank] + reg);
}
static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
{
writel(val, pmx->regs[bank] + reg);
}
static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
return pmx->soc->ngroups;
}
static const char *tegra_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
return pmx->soc->groups[group].name;
}
static int tegra_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group,
const unsigned **pins,
unsigned *num_pins)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
*pins = pmx->soc->groups[group].pins;
*num_pins = pmx->soc->groups[group].npins;
return 0;
}
#ifdef CONFIG_DEBUG_FS
static void tegra_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned offset)
{
seq_printf(s, " %s", dev_name(pctldev->dev));
}
#endif
static const struct cfg_param {
const char *property;
enum tegra_pinconf_param param;
} cfg_params[] = {
{"nvidia,pull", TEGRA_PINCONF_PARAM_PULL},
{"nvidia,tristate", TEGRA_PINCONF_PARAM_TRISTATE},
{"nvidia,enable-input", TEGRA_PINCONF_PARAM_ENABLE_INPUT},
{"nvidia,open-drain", TEGRA_PINCONF_PARAM_OPEN_DRAIN},
{"nvidia,lock", TEGRA_PINCONF_PARAM_LOCK},
{"nvidia,io-reset", TEGRA_PINCONF_PARAM_IORESET},
{"nvidia,rcv-sel", TEGRA_PINCONF_PARAM_RCV_SEL},
{"nvidia,high-speed-mode", TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE},
{"nvidia,schmitt", TEGRA_PINCONF_PARAM_SCHMITT},
{"nvidia,low-power-mode", TEGRA_PINCONF_PARAM_LOW_POWER_MODE},
{"nvidia,pull-down-strength", TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH},
{"nvidia,pull-up-strength", TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH},
{"nvidia,slew-rate-falling", TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING},
{"nvidia,slew-rate-rising", TEGRA_PINCONF_PARAM_SLEW_RATE_RISING},
{"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE},
};
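/*
 * Illustrative device tree fragment consumed by the parsing code below
 * (pin group and function names are placeholders, not taken from any real
 * board; only the property names come from cfg_params[] and the
 * nvidia,pins / nvidia,function handling):
 *
 *	pinmux {
 *		some-state {
 *			nvidia,pins = "some_pin_group";
 *			nvidia,function = "some_function";
 *			nvidia,pull = <1>;
 *			nvidia,tristate = <0>;
 *		};
 *	};
 */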
static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
struct pinctrl_map **map,
unsigned *reserved_maps,
unsigned *num_maps)
{
struct device *dev = pctldev->dev;
int ret, i;
const char *function;
u32 val;
unsigned long config;
unsigned long *configs = NULL;
unsigned num_configs = 0;
unsigned reserve;
struct property *prop;
const char *group;
ret = of_property_read_string(np, "nvidia,function", &function);
if (ret < 0) {
/* EINVAL=missing, which is fine since it's optional */
if (ret != -EINVAL)
dev_err(dev,
"could not parse property nvidia,function\n");
function = NULL;
}
for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
ret = of_property_read_u32(np, cfg_params[i].property, &val);
if (!ret) {
config = TEGRA_PINCONF_PACK(cfg_params[i].param, val);
ret = pinctrl_utils_add_config(pctldev, &configs,
&num_configs, config);
if (ret < 0)
goto exit;
/* EINVAL=missing, which is fine since it's optional */
} else if (ret != -EINVAL) {
dev_err(dev, "could not parse property %s\n",
cfg_params[i].property);
}
}
reserve = 0;
if (function != NULL)
reserve++;
if (num_configs)
reserve++;
ret = of_property_count_strings(np, "nvidia,pins");
if (ret < 0) {
dev_err(dev, "could not parse property nvidia,pins\n");
goto exit;
}
reserve *= ret;
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
num_maps, reserve);
if (ret < 0)
goto exit;
of_property_for_each_string(np, "nvidia,pins", prop, group) {
if (function) {
ret = pinctrl_utils_add_map_mux(pctldev, map,
reserved_maps, num_maps, group,
function);
if (ret < 0)
goto exit;
}
if (num_configs) {
ret = pinctrl_utils_add_map_configs(pctldev, map,
reserved_maps, num_maps, group,
configs, num_configs,
PIN_MAP_TYPE_CONFIGS_GROUP);
if (ret < 0)
goto exit;
}
}
ret = 0;
exit:
kfree(configs);
return ret;
}
static int tegra_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config,
struct pinctrl_map **map,
unsigned *num_maps)
{
unsigned reserved_maps;
struct device_node *np;
int ret;
reserved_maps = 0;
*map = NULL;
*num_maps = 0;
for_each_child_of_node(np_config, np) {
ret = tegra_pinctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
pinctrl_utils_dt_free_map(pctldev, *map,
*num_maps);
return ret;
}
}
return 0;
}
static const struct pinctrl_ops tegra_pinctrl_ops = {
.get_groups_count = tegra_pinctrl_get_groups_count,
.get_group_name = tegra_pinctrl_get_group_name,
.get_group_pins = tegra_pinctrl_get_group_pins,
#ifdef CONFIG_DEBUG_FS
.pin_dbg_show = tegra_pinctrl_pin_dbg_show,
#endif
.dt_node_to_map = tegra_pinctrl_dt_node_to_map,
.dt_free_map = pinctrl_utils_dt_free_map,
};
static int tegra_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
return pmx->soc->nfunctions;
}
static const char *tegra_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
unsigned function)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
return pmx->soc->functions[function].name;
}
static int tegra_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
unsigned function,
const char * const **groups,
unsigned * const num_groups)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
*groups = pmx->soc->functions[function].groups;
*num_groups = pmx->soc->functions[function].ngroups;
return 0;
}
static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned function,
unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *g;
int i;
u32 val;
g = &pmx->soc->groups[group];
if (WARN_ON(g->mux_reg < 0))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(g->funcs); i++) {
if (g->funcs[i] == function)
break;
}
if (WARN_ON(i == ARRAY_SIZE(g->funcs)))
return -EINVAL;
val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
val &= ~(0x3 << g->mux_bit);
val |= i << g->mux_bit;
pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
return 0;
}
static const struct pinmux_ops tegra_pinmux_ops = {
.get_functions_count = tegra_pinctrl_get_funcs_count,
.get_function_name = tegra_pinctrl_get_func_name,
.get_function_groups = tegra_pinctrl_get_func_groups,
.set_mux = tegra_pinctrl_set_mux,
};
static int tegra_pinconf_reg(struct tegra_pmx *pmx,
const struct tegra_pingroup *g,
enum tegra_pinconf_param param,
bool report_err,
s8 *bank, s16 *reg, s8 *bit, s8 *width)
{
switch (param) {
case TEGRA_PINCONF_PARAM_PULL:
*bank = g->pupd_bank;
*reg = g->pupd_reg;
*bit = g->pupd_bit;
*width = 2;
break;
case TEGRA_PINCONF_PARAM_TRISTATE:
*bank = g->tri_bank;
*reg = g->tri_reg;
*bit = g->tri_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_ENABLE_INPUT:
*bank = g->mux_bank;
*reg = g->mux_reg;
*bit = g->einput_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_OPEN_DRAIN:
*bank = g->mux_bank;
*reg = g->mux_reg;
*bit = g->odrain_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_LOCK:
*bank = g->mux_bank;
*reg = g->mux_reg;
*bit = g->lock_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_IORESET:
*bank = g->mux_bank;
*reg = g->mux_reg;
*bit = g->ioreset_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_RCV_SEL:
*bank = g->mux_bank;
*reg = g->mux_reg;
*bit = g->rcv_sel_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->hsm_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_SCHMITT:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->schmitt_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_LOW_POWER_MODE:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->lpmd_bit;
*width = 2;
break;
case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->drvdn_bit;
*width = g->drvdn_width;
break;
case TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->drvup_bit;
*width = g->drvup_width;
break;
case TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->slwf_bit;
*width = g->slwf_width;
break;
case TEGRA_PINCONF_PARAM_SLEW_RATE_RISING:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->slwr_bit;
*width = g->slwr_width;
break;
case TEGRA_PINCONF_PARAM_DRIVE_TYPE:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->drvtype_bit;
*width = 2;
break;
default:
dev_err(pmx->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
}
if (*reg < 0 || *bit > 31) {
if (report_err) {
const char *prop = "unknown";
int i;
for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
if (cfg_params[i].param == param) {
prop = cfg_params[i].property;
break;
}
}
dev_err(pmx->dev,
"Config param %04x (%s) not supported on group %s\n",
param, prop, g->name);
}
return -ENOTSUPP;
}
return 0;
}
static int tegra_pinconf_get(struct pinctrl_dev *pctldev,
unsigned pin, unsigned long *config)
{
dev_err(pctldev->dev, "pin_config_get op not supported\n");
return -ENOTSUPP;
}
static int tegra_pinconf_set(struct pinctrl_dev *pctldev,
unsigned pin, unsigned long *configs,
unsigned num_configs)
{
dev_err(pctldev->dev, "pin_config_set op not supported\n");
return -ENOTSUPP;
}
static int tegra_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned group, unsigned long *config)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(*config);
u16 arg;
const struct tegra_pingroup *g;
int ret;
s8 bank, bit, width;
s16 reg;
u32 val, mask;
g = &pmx->soc->groups[group];
ret = tegra_pinconf_reg(pmx, g, param, true, &bank, ®, &bit,
&width);
if (ret < 0)
return ret;
val = pmx_readl(pmx, bank, reg);
mask = (1 << width) - 1;
arg = (val >> bit) & mask;
*config = TEGRA_PINCONF_PACK(param, arg);
return 0;
}
static int tegra_pinconf_group_set(struct pinctrl_dev *pctldev,
unsigned group, unsigned long *configs,
unsigned num_configs)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
enum tegra_pinconf_param param;
u16 arg;
const struct tegra_pingroup *g;
int ret, i;
s8 bank, bit, width;
s16 reg;
u32 val, mask;
g = &pmx->soc->groups[group];
for (i = 0; i < num_configs; i++) {
param = TEGRA_PINCONF_UNPACK_PARAM(configs[i]);
arg = TEGRA_PINCONF_UNPACK_ARG(configs[i]);
ret = tegra_pinconf_reg(pmx, g, param, true, &bank, ®, &bit,
&width);
if (ret < 0)
return ret;
val = pmx_readl(pmx, bank, reg);
/* LOCK can't be cleared */
if (param == TEGRA_PINCONF_PARAM_LOCK) {
if ((val & BIT(bit)) && !arg) {
dev_err(pctldev->dev, "LOCK bit cannot be cleared\n");
return -EINVAL;
}
}
/* Special-case Boolean values; allow any non-zero as true */
if (width == 1)
arg = !!arg;
/* Range-check user-supplied value */
mask = (1 << width) - 1;
if (arg & ~mask) {
dev_err(pctldev->dev,
"config %lx: %x too big for %d bit register\n",
configs[i], arg, width);
return -EINVAL;
}
/* Update register */
val &= ~(mask << bit);
val |= arg << bit;
pmx_writel(pmx, val, bank, reg);
} /* for each config */
return 0;
}
#ifdef CONFIG_DEBUG_FS
static void tegra_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned offset)
{
}
static const char *strip_prefix(const char *s)
{
const char *comma = strchr(s, ',');
if (!comma)
return s;
return comma + 1;
}
static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *g;
int i, ret;
s8 bank, bit, width;
s16 reg;
u32 val;
g = &pmx->soc->groups[group];
for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
ret = tegra_pinconf_reg(pmx, g, cfg_params[i].param, false,
&bank, ®, &bit, &width);
if (ret < 0)
continue;
val = pmx_readl(pmx, bank, reg);
val >>= bit;
val &= (1 << width) - 1;
seq_printf(s, "\n\t%s=%u",
strip_prefix(cfg_params[i].property), val);
}
}
static void tegra_pinconf_config_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned long config)
{
enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(config);
u16 arg = TEGRA_PINCONF_UNPACK_ARG(config);
const char *pname = "unknown";
int i;
for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
if (cfg_params[i].param == param) {
pname = cfg_params[i].property;
break;
}
}
seq_printf(s, "%s=%d", strip_prefix(pname), arg);
}
#endif
static const struct pinconf_ops tegra_pinconf_ops = {
.pin_config_get = tegra_pinconf_get,
.pin_config_set = tegra_pinconf_set,
.pin_config_group_get = tegra_pinconf_group_get,
.pin_config_group_set = tegra_pinconf_group_set,
#ifdef CONFIG_DEBUG_FS
.pin_config_dbg_show = tegra_pinconf_dbg_show,
.pin_config_group_dbg_show = tegra_pinconf_group_dbg_show,
.pin_config_config_dbg_show = tegra_pinconf_config_dbg_show,
#endif
};
static struct pinctrl_gpio_range tegra_pinctrl_gpio_range = {
.name = "Tegra GPIOs",
.id = 0,
.base = 0,
};
static struct pinctrl_desc tegra_pinctrl_desc = {
.pctlops = &tegra_pinctrl_ops,
.pmxops = &tegra_pinmux_ops,
.confops = &tegra_pinconf_ops,
.owner = THIS_MODULE,
};
int tegra_pinctrl_probe(struct platform_device *pdev,
const struct tegra_pinctrl_soc_data *soc_data)
{
struct tegra_pmx *pmx;
struct resource *res;
int i;
const char **group_pins;
int fn, gn, gfn;
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx) {
dev_err(&pdev->dev, "Can't alloc tegra_pmx\n");
return -ENOMEM;
}
pmx->dev = &pdev->dev;
pmx->soc = soc_data;
/*
* Each mux group will appear in 4 functions' list of groups.
* This over-allocates slightly, since not all groups are mux groups.
*/
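	/*
	 * Arithmetic behind the "* 4": each mux group can select among at
	 * most ARRAY_SIZE(g->funcs) == 4 functions, so a group name can be
	 * listed in at most 4 functions' group arrays, and ngroups * 4
	 * pointers is always enough (the BUG_ON further down checks this).
	 */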
pmx->group_pins = devm_kzalloc(&pdev->dev,
soc_data->ngroups * 4 * sizeof(*pmx->group_pins),
GFP_KERNEL);
if (!pmx->group_pins)
return -ENOMEM;
group_pins = pmx->group_pins;
for (fn = 0; fn < soc_data->nfunctions; fn++) {
struct tegra_function *func = &soc_data->functions[fn];
func->groups = group_pins;
for (gn = 0; gn < soc_data->ngroups; gn++) {
const struct tegra_pingroup *g = &soc_data->groups[gn];
if (g->mux_reg == -1)
continue;
for (gfn = 0; gfn < 4; gfn++)
if (g->funcs[gfn] == fn)
break;
if (gfn == 4)
continue;
BUG_ON(group_pins - pmx->group_pins >=
soc_data->ngroups * 4);
*group_pins++ = g->name;
func->ngroups++;
}
}
tegra_pinctrl_gpio_range.npins = pmx->soc->ngpios;
tegra_pinctrl_desc.name = dev_name(&pdev->dev);
tegra_pinctrl_desc.pins = pmx->soc->pins;
tegra_pinctrl_desc.npins = pmx->soc->npins;
for (i = 0; ; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
break;
}
pmx->nbanks = i;
pmx->regs = devm_kzalloc(&pdev->dev, pmx->nbanks * sizeof(*pmx->regs),
GFP_KERNEL);
if (!pmx->regs) {
dev_err(&pdev->dev, "Can't alloc regs pointer\n");
return -ENOMEM;
}
for (i = 0; i < pmx->nbanks; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
pmx->regs[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pmx->regs[i]))
return PTR_ERR(pmx->regs[i]);
}
pmx->pctl = pinctrl_register(&tegra_pinctrl_desc, &pdev->dev, pmx);
if (!pmx->pctl) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return -ENODEV;
}
pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
platform_set_drvdata(pdev, pmx);
dev_dbg(&pdev->dev, "Probed Tegra pinctrl driver\n");
return 0;
}
EXPORT_SYMBOL_GPL(tegra_pinctrl_probe);
int tegra_pinctrl_remove(struct platform_device *pdev)
{
struct tegra_pmx *pmx = platform_get_drvdata(pdev);
pinctrl_unregister(pmx->pctl);
return 0;
}
EXPORT_SYMBOL_GPL(tegra_pinctrl_remove);
| wikimedia/operations-debs-linux | drivers/pinctrl/pinctrl-tegra.c | C | gpl-2.0 | 17,552 |
/*
* Glue Code for AVX assembler version of Twofish Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*
* Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/twofish.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#define TWOFISH_PARALLEL_BLOCKS 8
/* 8-way parallel cipher functions */
asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src)
{
__twofish_enc_blk_3way(ctx, dst, src, false);
}
static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
GLUE_FUNC_CAST(twofish_enc_blk));
}
static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
GLUE_FUNC_CAST(twofish_dec_blk));
}
static const struct common_glue_ctx twofish_enc = {
.num_funcs = 3,
.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) }
}, {
.num_blocks = 3,
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
}, {
.num_blocks = 1,
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
} }
};
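/*
 * The funcs[] entries above are ordered largest-first; the glue helper is
 * expected to consume as many 8-block AVX chunks as it can and then fall
 * back to the 3-way and single-block C implementations for the tail, with
 * fpu_blocks_limit keeping the FPU untouched for requests smaller than 8
 * blocks.
 */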
static const struct common_glue_ctx twofish_ctr = {
.num_funcs = 3,
.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) }
}, {
.num_blocks = 3,
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
}, {
.num_blocks = 1,
.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) }
} }
};
static const struct common_glue_ctx twofish_enc_xts = {
.num_funcs = 2,
.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) }
}, {
.num_blocks = 1,
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) }
} }
};
static const struct common_glue_ctx twofish_dec = {
.num_funcs = 3,
.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) }
}, {
.num_blocks = 3,
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
}, {
.num_blocks = 1,
.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
} }
};
static const struct common_glue_ctx twofish_dec_cbc = {
.num_funcs = 3,
.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) }
}, {
.num_blocks = 3,
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
}, {
.num_blocks = 1,
.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
} }
};
static const struct common_glue_ctx twofish_dec_xts = {
.num_funcs = 2,
.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,
.funcs = { {
.num_blocks = TWOFISH_PARALLEL_BLOCKS,
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) }
}, {
.num_blocks = 1,
.fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) }
} }
};
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
}
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
}
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
dst, src, nbytes);
}
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
nbytes);
}
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
}
static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
return glue_fpu_begin(TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS, NULL,
fpu_enabled, nbytes);
}
static inline void twofish_fpu_end(bool fpu_enabled)
{
glue_fpu_end(fpu_enabled);
}
struct crypt_priv {
struct twofish_ctx *ctx;
bool fpu_enabled;
};
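/*
 * crypt_priv threads the FPU state through the lrw_crypt() callbacks below
 * so the FPU is enabled at most once per LRW walk; the callers
 * (lrw_encrypt/lrw_decrypt) release it again via twofish_fpu_end().
 */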
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
const unsigned int bsize = TF_BLOCK_SIZE;
struct crypt_priv *ctx = priv;
int i;
ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
return;
}
for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
nbytes %= bsize * 3;
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
twofish_enc_blk(ctx->ctx, srcdst, srcdst);
}
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
const unsigned int bsize = TF_BLOCK_SIZE;
struct crypt_priv *ctx = priv;
int i;
ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
return;
}
for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);
nbytes %= bsize * 3;
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
twofish_dec_blk(ctx->ctx, srcdst, srcdst);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[TWOFISH_PARALLEL_BLOCKS];
struct crypt_priv crypt_ctx = {
.ctx = &ctx->twofish_ctx,
.fpu_enabled = false,
};
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = &crypt_ctx,
.crypt_fn = encrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ret = lrw_crypt(desc, dst, src, nbytes, &req);
twofish_fpu_end(crypt_ctx.fpu_enabled);
return ret;
}
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[TWOFISH_PARALLEL_BLOCKS];
struct crypt_priv crypt_ctx = {
.ctx = &ctx->twofish_ctx,
.fpu_enabled = false,
};
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = &crypt_ctx,
.crypt_fn = decrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ret = lrw_crypt(desc, dst, src, nbytes, &req);
twofish_fpu_end(crypt_ctx.fpu_enabled);
return ret;
}
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
return glue_xts_crypt_128bit(&twofish_enc_xts, desc, dst, src, nbytes,
XTS_TWEAK_CAST(twofish_enc_blk),
&ctx->tweak_ctx, &ctx->crypt_ctx);
}
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
return glue_xts_crypt_128bit(&twofish_dec_xts, desc, dst, src, nbytes,
XTS_TWEAK_CAST(twofish_enc_blk),
&ctx->tweak_ctx, &ctx->crypt_ctx);
}
static struct crypto_alg twofish_algs[10] = { {
.cra_name = "__ecb-twofish-avx",
.cra_driver_name = "__driver-ecb-twofish-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.setkey = twofish_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
},
},
}, {
.cra_name = "__cbc-twofish-avx",
.cra_driver_name = "__driver-cbc-twofish-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.setkey = twofish_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
},
},
}, {
.cra_name = "__ctr-twofish-avx",
.cra_driver_name = "__driver-ctr-twofish-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = twofish_setkey,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
},
},
}, {
.cra_name = "__lrw-twofish-avx",
.cra_driver_name = "__driver-lrw-twofish-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_lrw_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_exit = lrw_twofish_exit_tfm,
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE +
TF_BLOCK_SIZE,
.max_keysize = TF_MAX_KEY_SIZE +
TF_BLOCK_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = lrw_twofish_setkey,
.encrypt = lrw_encrypt,
.decrypt = lrw_decrypt,
},
},
}, {
.cra_name = "__xts-twofish-avx",
.cra_driver_name = "__driver-xts-twofish-avx",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_xts_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = TF_MIN_KEY_SIZE * 2,
.max_keysize = TF_MAX_KEY_SIZE * 2,
.ivsize = TF_BLOCK_SIZE,
.setkey = xts_twofish_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
},
},
}, {
.cra_name = "ecb(twofish)",
.cra_driver_name = "ecb-twofish-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "cbc(twofish)",
.cra_driver_name = "cbc-twofish-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = __ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "ctr(twofish)",
.cra_driver_name = "ctr-twofish-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = TF_MIN_KEY_SIZE,
.max_keysize = TF_MAX_KEY_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_encrypt,
.geniv = "chainiv",
},
},
}, {
.cra_name = "lrw(twofish)",
.cra_driver_name = "lrw-twofish-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = TF_MIN_KEY_SIZE +
TF_BLOCK_SIZE,
.max_keysize = TF_MAX_KEY_SIZE +
TF_BLOCK_SIZE,
.ivsize = TF_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
}, {
.cra_name = "xts(twofish)",
.cra_driver_name = "xts-twofish-avx",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = TF_MIN_KEY_SIZE * 2,
.max_keysize = TF_MAX_KEY_SIZE * 2,
.ivsize = TF_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
},
},
} };
static int __init twofish_init(void)
{
u64 xcr0;
if (!cpu_has_avx || !cpu_has_osxsave) {
printk(KERN_INFO "AVX instructions are not detected.\n");
return -ENODEV;
}
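	/*
	 * XCR0 reports which extended register states the OS saves and
	 * restores across context switches; both SSE (XMM) and YMM state
	 * must be enabled there before the AVX code paths are safe to use.
	 */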
xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
printk(KERN_INFO "AVX detected but unusable.\n");
return -ENODEV;
}
return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
}
static void __exit twofish_exit(void)
{
crypto_unregister_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
}
module_init(twofish_init);
module_exit(twofish_exit);
MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("twofish");
| jcadduono/nethunter_kernel_g5 | arch/x86/crypto/twofish_avx_glue.c | C | gpl-2.0 | 16,131 |
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
/*
 * Test that PMC5 & 6 are frozen (i.e. don't overflow) when they are not being
* used. Tests the MMCR0_FC56 logic in the kernel.
*/
static int pmc56_overflowed;
static void ebb_callee(void)
{
uint64_t val;
val = mfspr(SPRN_BESCR);
if (!(val & BESCR_PMEO)) {
ebb_state.stats.spurious++;
goto out;
}
ebb_state.stats.ebb_count++;
count_pmc(2, sample_period);
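	/*
	 * PMC5/6 are not part of our event group, so the kernel should have
	 * frozen them (MMCR0_FC56); reaching the overflow threshold means
	 * they were left counting.
	 */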
val = mfspr(SPRN_PMC5);
if (val >= COUNTER_OVERFLOW)
pmc56_overflowed++;
count_pmc(5, COUNTER_OVERFLOW);
val = mfspr(SPRN_PMC6);
if (val >= COUNTER_OVERFLOW)
pmc56_overflowed++;
count_pmc(6, COUNTER_OVERFLOW);
out:
reset_ebb();
}
int pmc56_overflow(void)
{
struct event event;
/* Use PMC2 so we set PMCjCE, which enables PMC5/6 */
event_init(&event, 0x2001e);
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
setup_ebb_handler(ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
	mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
mtspr(SPRN_PMC5, 0);
mtspr(SPRN_PMC6, 0);
while (ebb_state.stats.ebb_count < 10)
FAIL_IF(core_busy_loop());
ebb_global_disable();
ebb_freeze_pmcs();
count_pmc(2, sample_period);
dump_ebb_state();
printf("PMC5/6 overflow %d\n", pmc56_overflowed);
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0 || pmc56_overflowed != 0);
return 0;
}
int main(void)
{
return test_harness(pmc56_overflow, "pmc56_overflow");
}
| RomanHargrave/pf-kernel | tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c | C | gpl-2.0 | 1,636 |
#!/usr/bin/perl -w
# (c) 2007, Joe Perches <joe@perches.com>
# created from checkpatch.pl
#
# Print selected MAINTAINERS information for
# the files modified in a patch or for a file
#
# usage: perl scripts/get_maintainer.pl [OPTIONS] <patch>
# perl scripts/get_maintainer.pl [OPTIONS] -f <file>
#
# Licensed under the terms of the GNU GPL License version 2
use strict;
my $P = $0;
my $V = '0.26';
use Getopt::Long qw(:config no_auto_abbrev);
my $lk_path = "./";
my $email = 1;
my $email_usename = 1;
my $email_maintainer = 1;
my $email_list = 1;
my $email_subscriber_list = 0;
my $email_git_penguin_chiefs = 0;
my $email_git = 0;
my $email_git_all_signature_types = 0;
my $email_git_blame = 0;
my $email_git_blame_signatures = 1;
my $email_git_fallback = 1;
my $email_git_min_signatures = 1;
my $email_git_max_maintainers = 5;
my $email_git_min_percent = 5;
my $email_git_since = "1-year-ago";
my $email_hg_since = "-365";
my $interactive = 0;
my $email_remove_duplicates = 1;
my $email_use_mailmap = 1;
my $output_multiline = 1;
my $output_separator = ", ";
my $output_roles = 0;
my $output_rolestats = 1;
my $scm = 0;
my $web = 0;
my $subsystem = 0;
my $status = 0;
my $keywords = 1;
my $sections = 0;
my $file_emails = 0;
my $from_filename = 0;
my $pattern_depth = 0;
my $version = 0;
my $help = 0;
my $vcs_used = 0;
my $exit = 0;
my %commit_author_hash;
my %commit_signer_hash;
my @penguin_chief = ();
push(@penguin_chief, "Linus Torvalds:torvalds\@linux-foundation.org");
#Andrew wants in on most everything - 2009/01/14
#push(@penguin_chief, "Andrew Morton:akpm\@linux-foundation.org");
my @penguin_chief_names = ();
foreach my $chief (@penguin_chief) {
if ($chief =~ m/^(.*):(.*)/) {
my $chief_name = $1;
my $chief_addr = $2;
push(@penguin_chief_names, $chief_name);
}
}
my $penguin_chiefs = "\(" . join("|", @penguin_chief_names) . "\)";
# Signature types of people who are either
# a) responsible for the code in question, or
# b) familiar enough with it to give relevant feedback
my @signature_tags = ();
push(@signature_tags, "Signed-off-by:");
push(@signature_tags, "Reviewed-by:");
push(@signature_tags, "Acked-by:");
my $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
# rfc822 email address - preloaded methods go here.
my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
my $rfc822_char = '[\\000-\\377]';
# VCS command support: class-like functions and strings
my %VCS_cmds;
my %VCS_cmds_git = (
"execute_cmd" => \&git_execute_cmd,
"available" => '(which("git") ne "") && (-d ".git")',
"find_signers_cmd" =>
"git log --no-color --follow --since=\$email_git_since " .
'--format="GitCommit: %H%n' .
'GitAuthor: %an <%ae>%n' .
'GitDate: %aD%n' .
'GitSubject: %s%n' .
'%b%n"' .
" -- \$file",
"find_commit_signers_cmd" =>
"git log --no-color " .
'--format="GitCommit: %H%n' .
'GitAuthor: %an <%ae>%n' .
'GitDate: %aD%n' .
'GitSubject: %s%n' .
'%b%n"' .
" -1 \$commit",
"find_commit_author_cmd" =>
"git log --no-color " .
'--format="GitCommit: %H%n' .
'GitAuthor: %an <%ae>%n' .
'GitDate: %aD%n' .
'GitSubject: %s%n"' .
" -1 \$commit",
"blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file",
"blame_file_cmd" => "git blame -l \$file",
"commit_pattern" => "^GitCommit: ([0-9a-f]{40,40})",
"blame_commit_pattern" => "^([0-9a-f]+) ",
"author_pattern" => "^GitAuthor: (.*)",
"subject_pattern" => "^GitSubject: (.*)",
);
my %VCS_cmds_hg = (
"execute_cmd" => \&hg_execute_cmd,
"available" => '(which("hg") ne "") && (-d ".hg")',
"find_signers_cmd" =>
"hg log --date=\$email_hg_since " .
"--template='HgCommit: {node}\\n" .
"HgAuthor: {author}\\n" .
"HgSubject: {desc}\\n'" .
" -- \$file",
"find_commit_signers_cmd" =>
"hg log " .
"--template='HgSubject: {desc}\\n'" .
" -r \$commit",
"find_commit_author_cmd" =>
"hg log " .
"--template='HgCommit: {node}\\n" .
"HgAuthor: {author}\\n" .
"HgSubject: {desc|firstline}\\n'" .
" -r \$commit",
"blame_range_cmd" => "", # not supported
"blame_file_cmd" => "hg blame -n \$file",
"commit_pattern" => "^HgCommit: ([0-9a-f]{40,40})",
"blame_commit_pattern" => "^([ 0-9a-f]+):",
"author_pattern" => "^HgAuthor: (.*)",
"subject_pattern" => "^HgSubject: (.*)",
);
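# Command line arguments may also be placed in a .get_maintainer.conf file,
# one or more per line; anything found there is prepended to the real
# command line below.  Illustrative example only:
#   --git
#   --git-min-percent=15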
my $conf = which_conf(".get_maintainer.conf");
if (-f $conf) {
my @conf_args;
open(my $conffile, '<', "$conf")
or warn "$P: Can't find a readable .get_maintainer.conf file $!\n";
while (<$conffile>) {
my $line = $_;
$line =~ s/\s*\n?$//g;
$line =~ s/^\s*//g;
$line =~ s/\s+/ /g;
next if ($line =~ m/^\s*#/);
next if ($line =~ m/^\s*$/);
my @words = split(" ", $line);
foreach my $word (@words) {
last if ($word =~ m/^#/);
push (@conf_args, $word);
}
}
close($conffile);
unshift(@ARGV, @conf_args) if @conf_args;
}
if (!GetOptions(
'email!' => \$email,
'git!' => \$email_git,
'git-all-signature-types!' => \$email_git_all_signature_types,
'git-blame!' => \$email_git_blame,
'git-blame-signatures!' => \$email_git_blame_signatures,
'git-fallback!' => \$email_git_fallback,
'git-chief-penguins!' => \$email_git_penguin_chiefs,
'git-min-signatures=i' => \$email_git_min_signatures,
'git-max-maintainers=i' => \$email_git_max_maintainers,
'git-min-percent=i' => \$email_git_min_percent,
'git-since=s' => \$email_git_since,
'hg-since=s' => \$email_hg_since,
'i|interactive!' => \$interactive,
'remove-duplicates!' => \$email_remove_duplicates,
'mailmap!' => \$email_use_mailmap,
'm!' => \$email_maintainer,
'n!' => \$email_usename,
'l!' => \$email_list,
's!' => \$email_subscriber_list,
'multiline!' => \$output_multiline,
'roles!' => \$output_roles,
'rolestats!' => \$output_rolestats,
'separator=s' => \$output_separator,
'subsystem!' => \$subsystem,
'status!' => \$status,
'scm!' => \$scm,
'web!' => \$web,
'pattern-depth=i' => \$pattern_depth,
'k|keywords!' => \$keywords,
'sections!' => \$sections,
'fe|file-emails!' => \$file_emails,
'f|file' => \$from_filename,
'v|version' => \$version,
'h|help|usage' => \$help,
)) {
die "$P: invalid argument - use --help if necessary\n";
}
if ($help != 0) {
usage();
exit 0;
}
if ($version != 0) {
print("${P} ${V}\n");
exit 0;
}
if (-t STDIN && !@ARGV) {
# We're talking to a terminal, but have no command line arguments.
die "$P: missing patchfile or -f file - use --help if necessary\n";
}
$output_multiline = 0 if ($output_separator ne ", ");
$output_rolestats = 1 if ($interactive);
$output_roles = 1 if ($output_rolestats);
if ($sections) {
$email = 0;
$email_list = 0;
$scm = 0;
$status = 0;
$subsystem = 0;
$web = 0;
$keywords = 0;
$interactive = 0;
} else {
my $selections = $email + $scm + $status + $subsystem + $web;
if ($selections == 0) {
die "$P: Missing required option: email, scm, status, subsystem or web\n";
}
}
if ($email &&
($email_maintainer + $email_list + $email_subscriber_list +
$email_git + $email_git_penguin_chiefs + $email_git_blame) == 0) {
die "$P: Please select at least 1 email option\n";
}
if (!top_of_kernel_tree($lk_path)) {
die "$P: The current directory does not appear to be "
. "a linux kernel source tree.\n";
}
## Read MAINTAINERS for type/value pairs
my @typevalue = ();
my %keyword_hash;
open (my $maint, '<', "${lk_path}MAINTAINERS")
or die "$P: Can't open MAINTAINERS: $!\n";
while (<$maint>) {
my $line = $_;
if ($line =~ m/^(\C):\s*(.*)/) {
my $type = $1;
my $value = $2;
##Filename pattern matching
if ($type eq "F" || $type eq "X") {
$value =~ s@\.@\\\.@g; ##Convert . to \.
$value =~ s/\*/\.\*/g; ##Convert * to .*
$value =~ s/\?/\./g; ##Convert ? to .
##if pattern is a directory and it lacks a trailing slash, add one
if ((-d $value)) {
$value =~ s@([^/])$@$1/@;
}
} elsif ($type eq "K") {
$keyword_hash{@typevalue} = $value;
}
push(@typevalue, "$type:$value");
} elsif (!/^(\s)*$/) {
$line =~ s/\n$//g;
push(@typevalue, $line);
}
}
close($maint);
#
# Read mail address map
#
my $mailmap;
read_mailmap();
sub read_mailmap {
$mailmap = {
names => {},
addresses => {}
};
return if (!$email_use_mailmap || !(-f "${lk_path}.mailmap"));
open(my $mailmap_file, '<', "${lk_path}.mailmap")
or warn "$P: Can't open .mailmap: $!\n";
while (<$mailmap_file>) {
s/#.*$//; #strip comments
s/^\s+|\s+$//g; #trim
next if (/^\s*$/); #skip empty lines
#entries have one of the following formats:
# name1 <mail1>
# <mail1> <mail2>
# name1 <mail1> <mail2>
# name1 <mail1> name2 <mail2>
# (see man git-shortlog)
if (/^([^<]+)<([^>]+)>$/) {
my $real_name = $1;
my $address = $2;
$real_name =~ s/\s+$//;
($real_name, $address) = parse_email("$real_name <$address>");
$mailmap->{names}->{$address} = $real_name;
} elsif (/^<([^>]+)>\s*<([^>]+)>$/) {
my $real_address = $1;
my $wrong_address = $2;
$mailmap->{addresses}->{$wrong_address} = $real_address;
} elsif (/^(.+)<([^>]+)>\s*<([^>]+)>$/) {
my $real_name = $1;
my $real_address = $2;
my $wrong_address = $3;
$real_name =~ s/\s+$//;
($real_name, $real_address) =
parse_email("$real_name <$real_address>");
$mailmap->{names}->{$wrong_address} = $real_name;
$mailmap->{addresses}->{$wrong_address} = $real_address;
} elsif (/^(.+)<([^>]+)>\s*(.+)\s*<([^>]+)>$/) {
my $real_name = $1;
my $real_address = $2;
my $wrong_name = $3;
my $wrong_address = $4;
$real_name =~ s/\s+$//;
($real_name, $real_address) =
parse_email("$real_name <$real_address>");
$wrong_name =~ s/\s+$//;
($wrong_name, $wrong_address) =
parse_email("$wrong_name <$wrong_address>");
my $wrong_email = format_email($wrong_name, $wrong_address, 1);
$mailmap->{names}->{$wrong_email} = $real_name;
$mailmap->{addresses}->{$wrong_email} = $real_address;
}
}
close($mailmap_file);
}
## use the filenames on the command line or find the filenames in the patchfiles
my @files = ();
my @range = ();
my @keyword_tvi = ();
my @file_emails = ();
if (!@ARGV) {
push(@ARGV, "&STDIN");
}
foreach my $file (@ARGV) {
if ($file ne "&STDIN") {
##if $file is a directory and it lacks a trailing slash, add one
if ((-d $file)) {
$file =~ s@([^/])$@$1/@;
} elsif (!(-f $file)) {
die "$P: file '${file}' not found\n";
}
}
if ($from_filename) {
push(@files, $file);
if ($file ne "MAINTAINERS" && -f $file && ($keywords || $file_emails)) {
open(my $f, '<', $file)
or die "$P: Can't open $file: $!\n";
my $text = do { local($/) ; <$f> };
close($f);
if ($keywords) {
foreach my $line (keys %keyword_hash) {
if ($text =~ m/$keyword_hash{$line}/x) {
push(@keyword_tvi, $line);
}
}
}
if ($file_emails) {
my @poss_addr = $text =~ m$[A-Za-zÀ-ÿ\"\' \,\.\+-]*\s*[\,]*\s*[\(\<\{]{0,1}[A-Za-z0-9_\.\+-]+\@[A-Za-z0-9\.-]+\.[A-Za-z0-9]+[\)\>\}]{0,1}$g;
push(@file_emails, clean_file_emails(@poss_addr));
}
}
} else {
my $file_cnt = @files;
my $lastfile;
open(my $patch, "< $file")
or die "$P: Can't open $file: $!\n";
# We can check arbitrary information before the patch
# like the commit message, mail headers, etc...
# This allows us to match arbitrary keywords against any part
# of a git format-patch generated file (subject tags, etc...)
my $patch_prefix = ""; #Parsing the intro
while (<$patch>) {
my $patch_line = $_;
if (m/^\+\+\+\s+(\S+)/ or m/^---\s+(\S+)/) {
my $filename = $1;
$filename =~ s@^[^/]*/@@;
$filename =~ s@\n@@;
$lastfile = $filename;
push(@files, $filename);
$patch_prefix = "^[+-].*"; #Now parsing the actual patch
} elsif (m/^\@\@ -(\d+),(\d+)/) {
if ($email_git_blame) {
push(@range, "$lastfile:$1:$2");
}
} elsif ($keywords) {
foreach my $line (keys %keyword_hash) {
if ($patch_line =~ m/${patch_prefix}$keyword_hash{$line}/x) {
push(@keyword_tvi, $line);
}
}
}
}
close($patch);
if ($file_cnt == @files) {
warn "$P: file '${file}' doesn't appear to be a patch. "
. "Add -f to options?\n";
}
@files = sort_and_uniq(@files);
}
}
@file_emails = uniq(@file_emails);
my %email_hash_name;
my %email_hash_address;
my @email_to = ();
my %hash_list_to;
my @list_to = ();
my @scm = ();
my @web = ();
my @subsystem = ();
my @status = ();
my %deduplicate_name_hash = ();
my %deduplicate_address_hash = ();
my @maintainers = get_maintainers();
if (@maintainers) {
@maintainers = merge_email(@maintainers);
output(@maintainers);
}
if ($scm) {
@scm = uniq(@scm);
output(@scm);
}
if ($status) {
@status = uniq(@status);
output(@status);
}
if ($subsystem) {
@subsystem = uniq(@subsystem);
output(@subsystem);
}
if ($web) {
@web = uniq(@web);
output(@web);
}
exit($exit);
sub range_is_maintained {
my ($start, $end) = @_;
for (my $i = $start; $i < $end; $i++) {
my $line = $typevalue[$i];
if ($line =~ m/^(\C):\s*(.*)/) {
my $type = $1;
my $value = $2;
if ($type eq 'S') {
if ($value =~ /(maintain|support)/i) {
return 1;
}
}
}
}
return 0;
}
sub range_has_maintainer {
my ($start, $end) = @_;
for (my $i = $start; $i < $end; $i++) {
my $line = $typevalue[$i];
if ($line =~ m/^(\C):\s*(.*)/) {
my $type = $1;
my $value = $2;
if ($type eq 'M') {
return 1;
}
}
}
return 0;
}
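# Core matching loop: for every file, find the MAINTAINERS sections whose
# F:/N: patterns match (skipping X: excludes), add their maintainers, lists
# and other categories, then optionally augment with git/hg history.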
sub get_maintainers {
%email_hash_name = ();
%email_hash_address = ();
%commit_author_hash = ();
%commit_signer_hash = ();
@email_to = ();
%hash_list_to = ();
@list_to = ();
@scm = ();
@web = ();
@subsystem = ();
@status = ();
%deduplicate_name_hash = ();
%deduplicate_address_hash = ();
if ($email_git_all_signature_types) {
$signature_pattern = "(.+?)[Bb][Yy]:";
} else {
$signature_pattern = "\(" . join("|", @signature_tags) . "\)";
}
# Find responsible parties
my %exact_pattern_match_hash = ();
foreach my $file (@files) {
my %hash;
my $tvi = find_first_section();
while ($tvi < @typevalue) {
my $start = find_starting_index($tvi);
my $end = find_ending_index($tvi);
my $exclude = 0;
my $i;
#Do not match excluded file patterns
for ($i = $start; $i < $end; $i++) {
my $line = $typevalue[$i];
if ($line =~ m/^(\C):\s*(.*)/) {
my $type = $1;
my $value = $2;
if ($type eq 'X') {
if (file_match_pattern($file, $value)) {
$exclude = 1;
last;
}
}
}
}
if (!$exclude) {
for ($i = $start; $i < $end; $i++) {
my $line = $typevalue[$i];
if ($line =~ m/^(\C):\s*(.*)/) {
my $type = $1;
my $value = $2;
if ($type eq 'F') {
if (file_match_pattern($file, $value)) {
my $value_pd = ($value =~ tr@/@@);
my $file_pd = ($file =~ tr@/@@);
$value_pd++ if (substr($value,-1,1) ne "/");
$value_pd = -1 if ($value =~ /^\.\*/);
if ($value_pd >= $file_pd &&
range_is_maintained($start, $end) &&
range_has_maintainer($start, $end)) {
$exact_pattern_match_hash{$file} = 1;
}
if ($pattern_depth == 0 ||
(($file_pd - $value_pd) < $pattern_depth)) {
$hash{$tvi} = $value_pd;
}
}
} elsif ($type eq 'N') {
if ($file =~ m/$value/x) {
$hash{$tvi} = 0;
}
}
}
}
}
$tvi = $end + 1;
}
foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
add_categories($line);
if ($sections) {
my $i;
my $start = find_starting_index($line);
my $end = find_ending_index($line);
for ($i = $start; $i < $end; $i++) {
my $line = $typevalue[$i];
if ($line =~ /^[FX]:/) { ##Restore file patterns
$line =~ s/([^\\])\.([^\*])/$1\?$2/g;
$line =~ s/([^\\])\.$/$1\?/g; ##Convert . back to ?
$line =~ s/\\\./\./g; ##Convert \. to .
$line =~ s/\.\*/\*/g; ##Convert .* to *
}
$line =~ s/^([A-Z]):/$1:\t/g;
print("$line\n");
}
print("\n");
}
}
}
if ($keywords) {
@keyword_tvi = sort_and_uniq(@keyword_tvi);
foreach my $line (@keyword_tvi) {
add_categories($line);
}
}
foreach my $email (@email_to, @list_to) {
$email->[0] = deduplicate_email($email->[0]);
}
foreach my $file (@files) {
if ($email &&
($email_git || ($email_git_fallback &&
!$exact_pattern_match_hash{$file}))) {
vcs_file_signoffs($file);
}
if ($email && $email_git_blame) {
vcs_file_blame($file);
}
}
if ($email) {
foreach my $chief (@penguin_chief) {
if ($chief =~ m/^(.*):(.*)/) {
my $email_address;
$email_address = format_email($1, $2, $email_usename);
if ($email_git_penguin_chiefs) {
push(@email_to, [$email_address, 'chief penguin']);
} else {
@email_to = grep($_->[0] !~ /${email_address}/, @email_to);
}
}
}
foreach my $email (@file_emails) {
my ($name, $address) = parse_email($email);
my $tmp_email = format_email($name, $address, $email_usename);
push_email_address($tmp_email, '');
add_role($tmp_email, 'in file');
}
}
my @to = ();
if ($email || $email_list) {
if ($email) {
@to = (@to, @email_to);
}
if ($email_list) {
@to = (@to, @list_to);
}
}
if ($interactive) {
@to = interactive_get_maintainers(\@to);
}
return @to;
}
sub file_match_pattern {
my ($file, $pattern) = @_;
if (substr($pattern, -1) eq "/") {
if ($file =~ m@^$pattern@) {
return 1;
}
} else {
if ($file =~ m@^$pattern@) {
my $s1 = ($file =~ tr@/@@);
my $s2 = ($pattern =~ tr@/@@);
if ($s1 == $s2) {
return 1;
}
}
}
return 0;
}
sub usage {
print <<EOT;
usage: $P [options] patchfile
$P [options] -f file|directory
version: $V
MAINTAINER field selection options:
--email => print email address(es) if any
--git => include recent git \*-by: signers
--git-all-signature-types => include signers regardless of signature type
or use only ${signature_pattern} signers (default: $email_git_all_signature_types)
--git-fallback => use git when no exact MAINTAINERS pattern (default: $email_git_fallback)
--git-chief-penguins => include ${penguin_chiefs}
--git-min-signatures => number of signatures required (default: $email_git_min_signatures)
--git-max-maintainers => maximum maintainers to add (default: $email_git_max_maintainers)
--git-min-percent => minimum percentage of commits required (default: $email_git_min_percent)
--git-blame => use git blame to find modified commits for patch or file
--git-since => git history to use (default: $email_git_since)
--hg-since => hg history to use (default: $email_hg_since)
--interactive => display a menu (mostly useful if used with the --git option)
--m => include maintainer(s) if any
--n => include name 'Full Name <addr\@domain.tld>'
--l => include list(s) if any
--s => include subscriber only list(s) if any
--remove-duplicates => minimize duplicate email names/addresses
--roles => show roles (status:subsystem, git-signer, list, etc...)
--rolestats => show roles and statistics (commits/total_commits, %)
--file-emails => add email addresses found in -f file (default: 0 (off))
--scm => print SCM tree(s) if any
--status => print status if any
--subsystem => print subsystem name if any
--web => print website(s) if any
Output type options:
--separator [, ] => separator for multiple entries on 1 line
using --separator also sets --nomultiline if --separator is not [, ]
--multiline => print 1 entry per line
Other options:
--pattern-depth => Number of pattern directory traversals (default: 0 (all))
--keywords => scan patch for keywords (default: $keywords)
--sections => print all of the subsystem sections with pattern matches
--mailmap => use .mailmap file (default: $email_use_mailmap)
--version => show version
--help => show this help information
Default options:
  [--email --nogit --git-fallback --m --n --l --multiline --pattern-depth=0
--remove-duplicates --rolestats]
Notes:
Using "-f directory" may give unexpected results:
Used with "--git", git signators for _all_ files in and below
directory are examined as git recurses directories.
Any specified X: (exclude) pattern matches are _not_ ignored.
Used with "--nogit", directory is used as a pattern match,
no individual file within the directory or subdirectory
is matched.
Used with "--git-blame", does not iterate all files in directory
Using "--git-blame" is slow and may add old committers and authors
that are no longer active maintainers to the output.
Using "--roles" or "--rolestats" with git send-email --cc-cmd or any
other automated tools that expect only ["name"] <email address>
may not work because of additional output after <email address>.
Using "--rolestats" and "--git-blame" shows the #/total=% commits,
not the percentage of the entire file authored. # of commits is
not a good measure of amount of code authored. 1 major commit may
contain a thousand lines, 5 trivial commits may modify a single line.
If git is not installed, but mercurial (hg) is installed and an .hg
repository exists, the following options apply to mercurial:
--git,
--git-min-signatures, --git-max-maintainers, --git-min-percent, and
--git-blame
Use --hg-since not --git-since to control date selection
File ".get_maintainer.conf", if it exists in the linux kernel source root
directory, can change whatever get_maintainer defaults are desired.
Entries in this file can be any command line argument.
This file is prepended to any additional command line arguments.
Multiple lines and # comments are allowed.
EOT
}
sub top_of_kernel_tree {
my ($lk_path) = @_;
if ($lk_path ne "" && substr($lk_path,length($lk_path)-1,1) ne "/") {
$lk_path .= "/";
}
if ( (-f "${lk_path}COPYING")
&& (-f "${lk_path}CREDITS")
&& (-f "${lk_path}Kbuild")
&& (-f "${lk_path}MAINTAINERS")
&& (-f "${lk_path}Makefile")
&& (-f "${lk_path}README")
&& (-d "${lk_path}Documentation")
&& (-d "${lk_path}arch")
&& (-d "${lk_path}include")
&& (-d "${lk_path}drivers")
&& (-d "${lk_path}fs")
&& (-d "${lk_path}init")
&& (-d "${lk_path}ipc")
&& (-d "${lk_path}kernel")
&& (-d "${lk_path}lib")
&& (-d "${lk_path}scripts")) {
return 1;
}
return 0;
}
sub parse_email {
my ($formatted_email) = @_;
my $name = "";
my $address = "";
if ($formatted_email =~ /^([^<]+)<(.+\@.*)>.*$/) {
$name = $1;
$address = $2;
} elsif ($formatted_email =~ /^\s*<(.+\@\S*)>.*$/) {
$address = $1;
} elsif ($formatted_email =~ /^(.+\@\S*).*$/) {
$address = $1;
}
$name =~ s/^\s+|\s+$//g;
$name =~ s/^\"|\"$//g;
$address =~ s/^\s+|\s+$//g;
if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
$name =~ s/(?<!\\)"/\\"/g; ##escape quotes
$name = "\"$name\"";
}
return ($name, $address);
}
sub format_email {
my ($name, $address, $usename) = @_;
my $formatted_email;
$name =~ s/^\s+|\s+$//g;
$name =~ s/^\"|\"$//g;
$address =~ s/^\s+|\s+$//g;
if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
$name =~ s/(?<!\\)"/\\"/g; ##escape quotes
$name = "\"$name\"";
}
if ($usename) {
if ("$name" eq "") {
$formatted_email = "$address";
} else {
$formatted_email = "$name <$address>";
}
} else {
$formatted_email = $address;
}
return $formatted_email;
}
sub find_first_section {
my $index = 0;
while ($index < @typevalue) {
my $tv = $typevalue[$index];
if (($tv =~ m/^(\C):\s*(.*)/)) {
last;
}
$index++;
}
return $index;
}
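# Each MAINTAINERS section is stored in @typevalue as its title line followed
# by a contiguous run of "X:" typed lines.  The two helpers below walk
# backwards to the title line and forwards to the first line past the section.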
sub find_starting_index {
my ($index) = @_;
while ($index > 0) {
my $tv = $typevalue[$index];
if (!($tv =~ m/^(\C):\s*(.*)/)) {
last;
}
$index--;
}
return $index;
}
sub find_ending_index {
my ($index) = @_;
while ($index < @typevalue) {
my $tv = $typevalue[$index];
if (!($tv =~ m/^(\C):\s*(.*)/)) {
last;
}
$index++;
}
return $index;
}
sub get_maintainer_role {
my ($index) = @_;
my $i;
my $start = find_starting_index($index);
my $end = find_ending_index($index);
my $role = "unknown";
my $subsystem = $typevalue[$start];
if (length($subsystem) > 20) {
$subsystem = substr($subsystem, 0, 17);
$subsystem =~ s/\s*$//;
$subsystem = $subsystem . "...";
}
for ($i = $start + 1; $i < $end; $i++) {
my $tv = $typevalue[$i];
if ($tv =~ m/^(\C):\s*(.*)/) {
my $ptype = $1;
my $pvalue = $2;
if ($ptype eq "S") {
$role = $pvalue;
}
}
}
$role = lc($role);
if ($role eq "supported") {
$role = "supporter";
} elsif ($role eq "maintained") {
$role = "maintainer";
} elsif ($role eq "odd fixes") {
$role = "odd fixer";
} elsif ($role eq "orphan") {
$role = "orphan minder";
} elsif ($role eq "obsolete") {
$role = "obsolete minder";
} elsif ($role eq "buried alive in reporters") {
$role = "chief penguin";
}
return $role . ":" . $subsystem;
}
sub get_list_role {
my ($index) = @_;
my $i;
my $start = find_starting_index($index);
my $end = find_ending_index($index);
my $subsystem = $typevalue[$start];
if (length($subsystem) > 20) {
$subsystem = substr($subsystem, 0, 17);
$subsystem =~ s/\s*$//;
$subsystem = $subsystem . "...";
}
if ($subsystem eq "THE REST") {
$subsystem = "";
}
return $subsystem;
}
sub add_categories {
my ($index) = @_;
my $i;
my $start = find_starting_index($index);
my $end = find_ending_index($index);
push(@subsystem, $typevalue[$start]);
for ($i = $start + 1; $i < $end; $i++) {
my $tv = $typevalue[$i];
if ($tv =~ m/^(\C):\s*(.*)/) {
my $ptype = $1;
my $pvalue = $2;
if ($ptype eq "L") {
my $list_address = $pvalue;
my $list_additional = "";
my $list_role = get_list_role($i);
if ($list_role ne "") {
$list_role = ":" . $list_role;
}
if ($list_address =~ m/([^\s]+)\s+(.*)$/) {
$list_address = $1;
$list_additional = $2;
}
if ($list_additional =~ m/subscribers-only/) {
if ($email_subscriber_list) {
if (!$hash_list_to{lc($list_address)}) {
$hash_list_to{lc($list_address)} = 1;
push(@list_to, [$list_address,
"subscriber list${list_role}"]);
}
}
} else {
if ($email_list) {
if (!$hash_list_to{lc($list_address)}) {
$hash_list_to{lc($list_address)} = 1;
if ($list_additional =~ m/moderated/) {
push(@list_to, [$list_address,
"moderated list${list_role}"]);
} else {
push(@list_to, [$list_address,
"open list${list_role}"]);
}
}
}
}
} elsif ($ptype eq "M") {
my ($name, $address) = parse_email($pvalue);
if ($name eq "") {
if ($i > 0) {
my $tv = $typevalue[$i - 1];
if ($tv =~ m/^(\C):\s*(.*)/) {
if ($1 eq "P") {
$name = $2;
$pvalue = format_email($name, $address, $email_usename);
}
}
}
}
if ($email_maintainer) {
my $role = get_maintainer_role($i);
push_email_addresses($pvalue, $role);
}
} elsif ($ptype eq "T") {
push(@scm, $pvalue);
} elsif ($ptype eq "W") {
push(@web, $pvalue);
} elsif ($ptype eq "S") {
push(@status, $pvalue);
}
}
}
}
sub email_inuse {
my ($name, $address) = @_;
return 1 if (($name eq "") && ($address eq ""));
return 1 if (($name ne "") && exists($email_hash_name{lc($name)}));
return 1 if (($address ne "") && exists($email_hash_address{lc($address)}));
return 0;
}
sub push_email_address {
my ($line, $role) = @_;
my ($name, $address) = parse_email($line);
if ($address eq "") {
return 0;
}
if (!$email_remove_duplicates) {
push(@email_to, [format_email($name, $address, $email_usename), $role]);
} elsif (!email_inuse($name, $address)) {
push(@email_to, [format_email($name, $address, $email_usename), $role]);
$email_hash_name{lc($name)}++ if ($name ne "");
$email_hash_address{lc($address)}++;
}
return 1;
}
sub push_email_addresses {
my ($address, $role) = @_;
my @address_list = ();
if (rfc822_valid($address)) {
push_email_address($address, $role);
} elsif (@address_list = rfc822_validlist($address)) {
my $array_count = shift(@address_list);
while (my $entry = shift(@address_list)) {
push_email_address($entry, $role);
}
} else {
if (!push_email_address($address, $role)) {
warn("Invalid MAINTAINERS address: '" . $address . "'\n");
}
}
}
sub add_role {
my ($line, $role) = @_;
my ($name, $address) = parse_email($line);
my $email = format_email($name, $address, $email_usename);
foreach my $entry (@email_to) {
if ($email_remove_duplicates) {
my ($entry_name, $entry_address) = parse_email($entry->[0]);
if (($name eq $entry_name || $address eq $entry_address)
&& ($role eq "" || !($entry->[1] =~ m/$role/))
) {
if ($entry->[1] eq "") {
$entry->[1] = "$role";
} else {
$entry->[1] = "$entry->[1],$role";
}
}
} else {
if ($email eq $entry->[0]
&& ($role eq "" || !($entry->[1] =~ m/$role/))
) {
if ($entry->[1] eq "") {
$entry->[1] = "$role";
} else {
$entry->[1] = "$entry->[1],$role";
}
}
}
}
}
sub which {
my ($bin) = @_;
foreach my $path (split(/:/, $ENV{PATH})) {
if (-e "$path/$bin") {
return "$path/$bin";
}
}
return "";
}
sub which_conf {
my ($conf) = @_;
foreach my $path (split(/:/, ".:$ENV{HOME}:.scripts")) {
if (-e "$path/$conf") {
return "$path/$conf";
}
}
return "";
}
sub mailmap_email {
my ($line) = @_;
my ($name, $address) = parse_email($line);
my $email = format_email($name, $address, 1);
my $real_name = $name;
my $real_address = $address;
if (exists $mailmap->{names}->{$email} ||
exists $mailmap->{addresses}->{$email}) {
if (exists $mailmap->{names}->{$email}) {
$real_name = $mailmap->{names}->{$email};
}
if (exists $mailmap->{addresses}->{$email}) {
$real_address = $mailmap->{addresses}->{$email};
}
} else {
if (exists $mailmap->{names}->{$address}) {
$real_name = $mailmap->{names}->{$address};
}
if (exists $mailmap->{addresses}->{$address}) {
$real_address = $mailmap->{addresses}->{$address};
}
}
return format_email($real_name, $real_address, 1);
}
sub mailmap {
my (@addresses) = @_;
my @mapped_emails = ();
foreach my $line (@addresses) {
push(@mapped_emails, mailmap_email($line));
}
merge_by_realname(@mapped_emails) if ($email_use_mailmap);
return @mapped_emails;
}
sub merge_by_realname {
my %address_map;
my (@emails) = @_;
foreach my $email (@emails) {
my ($name, $address) = parse_email($email);
if (exists $address_map{$name}) {
$address = $address_map{$name};
$email = format_email($name, $address, 1);
} else {
$address_map{$name} = $address;
}
}
}
sub git_execute_cmd {
my ($cmd) = @_;
my @lines = ();
my $output = `$cmd`;
$output =~ s/^\s*//gm;
@lines = split("\n", $output);
return @lines;
}
sub hg_execute_cmd {
my ($cmd) = @_;
my @lines = ();
my $output = `$cmd`;
@lines = split("\n", $output);
return @lines;
}
sub extract_formatted_signatures {
my (@signature_lines) = @_;
my @type = @signature_lines;
s/\s*(.*):.*/$1/ for (@type);
# cut -f2- -d":"
s/\s*.*:\s*(.+)\s*/$1/ for (@signature_lines);
## Reformat email addresses (with names) to avoid badly written signatures
foreach my $signer (@signature_lines) {
$signer = deduplicate_email($signer);
}
return (\@type, \@signature_lines);
}
sub vcs_find_signers {
my ($cmd) = @_;
my $commits;
my @lines = ();
my @signatures = ();
@lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
my $pattern = $VCS_cmds{"commit_pattern"};
$commits = grep(/$pattern/, @lines); # of commits
@signatures = grep(/^[ \t]*${signature_pattern}.*\@.*$/, @lines);
return (0, @signatures) if !@signatures;
save_commits_by_author(@lines) if ($interactive);
save_commits_by_signer(@lines) if ($interactive);
if (!$email_git_penguin_chiefs) {
@signatures = grep(!/${penguin_chiefs}/i, @signatures);
}
my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures);
return ($commits, @$signers_ref);
}
sub vcs_find_author {
my ($cmd) = @_;
my @lines = ();
@lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
if (!$email_git_penguin_chiefs) {
@lines = grep(!/${penguin_chiefs}/i, @lines);
}
return @lines if !@lines;
my @authors = ();
foreach my $line (@lines) {
if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
my $author = $1;
my ($name, $address) = parse_email($author);
$author = format_email($name, $address, 1);
push(@authors, $author);
}
}
save_commits_by_author(@lines) if ($interactive);
save_commits_by_signer(@lines) if ($interactive);
return @authors;
}
sub vcs_save_commits {
my ($cmd) = @_;
my @lines = ();
my @commits = ();
@lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
foreach my $line (@lines) {
if ($line =~ m/$VCS_cmds{"blame_commit_pattern"}/) {
push(@commits, $1);
}
}
return @commits;
}
sub vcs_blame {
my ($file) = @_;
my $cmd;
my @commits = ();
return @commits if (!(-f $file));
if (@range && $VCS_cmds{"blame_range_cmd"} eq "") {
my @all_commits = ();
$cmd = $VCS_cmds{"blame_file_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
@all_commits = vcs_save_commits($cmd);
foreach my $file_range_diff (@range) {
next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
my $diff_file = $1;
my $diff_start = $2;
my $diff_length = $3;
next if ("$file" ne "$diff_file");
for (my $i = $diff_start; $i < $diff_start + $diff_length; $i++) {
push(@commits, $all_commits[$i]);
}
}
} elsif (@range) {
foreach my $file_range_diff (@range) {
next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
my $diff_file = $1;
my $diff_start = $2;
my $diff_length = $3;
next if ("$file" ne "$diff_file");
$cmd = $VCS_cmds{"blame_range_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
push(@commits, vcs_save_commits($cmd));
}
} else {
$cmd = $VCS_cmds{"blame_file_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
@commits = vcs_save_commits($cmd);
}
foreach my $commit (@commits) {
$commit =~ s/^\^//g;
}
return @commits;
}
my $printed_novcs = 0;
sub vcs_exists {
%VCS_cmds = %VCS_cmds_git;
return 1 if eval $VCS_cmds{"available"};
%VCS_cmds = %VCS_cmds_hg;
return 2 if eval $VCS_cmds{"available"};
%VCS_cmds = ();
if (!$printed_novcs) {
warn("$P: No supported VCS found. Add --nogit to options?\n");
warn("Using a git repository produces better results.\n");
warn("Try Linus Torvalds' latest git repository using:\n");
warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git\n");
$printed_novcs = 1;
}
return 0;
}
sub vcs_is_git {
vcs_exists();
return $vcs_used == 1;
}
sub vcs_is_hg {
return $vcs_used == 2;
}
sub interactive_get_maintainers {
my ($list_ref) = @_;
my @list = @$list_ref;
vcs_exists();
my %selected;
my %authored;
my %signed;
my $count = 0;
my $maintained = 0;
foreach my $entry (@list) {
$maintained = 1 if ($entry->[1] =~ /^(maintainer|supporter)/i);
$selected{$count} = 1;
$authored{$count} = 0;
$signed{$count} = 0;
$count++;
}
#menu loop
my $done = 0;
my $print_options = 0;
my $redraw = 1;
while (!$done) {
$count = 0;
if ($redraw) {
printf STDERR "\n%1s %2s %-65s",
"*", "#", "email/list and role:stats";
if ($email_git ||
($email_git_fallback && !$maintained) ||
$email_git_blame) {
print STDERR "auth sign";
}
print STDERR "\n";
foreach my $entry (@list) {
my $email = $entry->[0];
my $role = $entry->[1];
my $sel = "";
$sel = "*" if ($selected{$count});
my $commit_author = $commit_author_hash{$email};
my $commit_signer = $commit_signer_hash{$email};
my $authored = 0;
my $signed = 0;
$authored++ for (@{$commit_author});
$signed++ for (@{$commit_signer});
printf STDERR "%1s %2d %-65s", $sel, $count + 1, $email;
printf STDERR "%4d %4d", $authored, $signed
if ($authored > 0 || $signed > 0);
printf STDERR "\n %s\n", $role;
if ($authored{$count}) {
my $commit_author = $commit_author_hash{$email};
foreach my $ref (@{$commit_author}) {
print STDERR " Author: @{$ref}[1]\n";
}
}
if ($signed{$count}) {
my $commit_signer = $commit_signer_hash{$email};
foreach my $ref (@{$commit_signer}) {
print STDERR " @{$ref}[2]: @{$ref}[1]\n";
}
}
$count++;
}
}
my $date_ref = \$email_git_since;
$date_ref = \$email_hg_since if (vcs_is_hg());
if ($print_options) {
$print_options = 0;
if (vcs_exists()) {
print STDERR <<EOT
Version Control options:
g use git history [$email_git]
gf use git-fallback [$email_git_fallback]
b use git blame [$email_git_blame]
bs use blame signatures [$email_git_blame_signatures]
c# minimum commits [$email_git_min_signatures]
%# min percent [$email_git_min_percent]
d# history to use [$$date_ref]
x# max maintainers [$email_git_max_maintainers]
t all signature types [$email_git_all_signature_types]
m use .mailmap [$email_use_mailmap]
EOT
}
print STDERR <<EOT
Additional options:
0 toggle all
tm toggle maintainers
tg toggle git entries
tl toggle open list entries
ts toggle subscriber list entries
f emails in file [$file_emails]
k keywords in file [$keywords]
r remove duplicates [$email_remove_duplicates]
p# pattern match depth [$pattern_depth]
EOT
}
print STDERR
"\n#(toggle), A#(author), S#(signed) *(all), ^(none), O(options), Y(approve): ";
my $input = <STDIN>;
chomp($input);
$redraw = 1;
my $rerun = 0;
my @wish = split(/[, ]+/, $input);
foreach my $nr (@wish) {
$nr = lc($nr);
my $sel = substr($nr, 0, 1);
my $str = substr($nr, 1);
my $val = 0;
$val = $1 if $str =~ /^(\d+)$/;
if ($sel eq "y") {
$interactive = 0;
$done = 1;
$output_rolestats = 0;
$output_roles = 0;
last;
} elsif ($nr =~ /^\d+$/ && $nr > 0 && $nr <= $count) {
$selected{$nr - 1} = !$selected{$nr - 1};
} elsif ($sel eq "*" || $sel eq '^') {
my $toggle = 0;
$toggle = 1 if ($sel eq '*');
for (my $i = 0; $i < $count; $i++) {
$selected{$i} = $toggle;
}
} elsif ($sel eq "0") {
for (my $i = 0; $i < $count; $i++) {
$selected{$i} = !$selected{$i};
}
} elsif ($sel eq "t") {
if (lc($str) eq "m") {
for (my $i = 0; $i < $count; $i++) {
$selected{$i} = !$selected{$i}
if ($list[$i]->[1] =~ /^(maintainer|supporter)/i);
}
} elsif (lc($str) eq "g") {
for (my $i = 0; $i < $count; $i++) {
$selected{$i} = !$selected{$i}
if ($list[$i]->[1] =~ /^(author|commit|signer)/i);
}
} elsif (lc($str) eq "l") {
for (my $i = 0; $i < $count; $i++) {
$selected{$i} = !$selected{$i}
if ($list[$i]->[1] =~ /^(open list)/i);
}
} elsif (lc($str) eq "s") {
for (my $i = 0; $i < $count; $i++) {
$selected{$i} = !$selected{$i}
if ($list[$i]->[1] =~ /^(subscriber list)/i);
}
}
} elsif ($sel eq "a") {
if ($val > 0 && $val <= $count) {
$authored{$val - 1} = !$authored{$val - 1};
} elsif ($str eq '*' || $str eq '^') {
my $toggle = 0;
$toggle = 1 if ($str eq '*');
for (my $i = 0; $i < $count; $i++) {
$authored{$i} = $toggle;
}
}
} elsif ($sel eq "s") {
if ($val > 0 && $val <= $count) {
$signed{$val - 1} = !$signed{$val - 1};
} elsif ($str eq '*' || $str eq '^') {
my $toggle = 0;
$toggle = 1 if ($str eq '*');
for (my $i = 0; $i < $count; $i++) {
$signed{$i} = $toggle;
}
}
} elsif ($sel eq "o") {
$print_options = 1;
$redraw = 1;
} elsif ($sel eq "g") {
if ($str eq "f") {
bool_invert(\$email_git_fallback);
} else {
bool_invert(\$email_git);
}
$rerun = 1;
} elsif ($sel eq "b") {
if ($str eq "s") {
bool_invert(\$email_git_blame_signatures);
} else {
bool_invert(\$email_git_blame);
}
$rerun = 1;
} elsif ($sel eq "c") {
if ($val > 0) {
$email_git_min_signatures = $val;
$rerun = 1;
}
} elsif ($sel eq "x") {
if ($val > 0) {
$email_git_max_maintainers = $val;
$rerun = 1;
}
} elsif ($sel eq "%") {
if ($str ne "" && $val >= 0) {
$email_git_min_percent = $val;
$rerun = 1;
}
} elsif ($sel eq "d") {
if (vcs_is_git()) {
$email_git_since = $str;
} elsif (vcs_is_hg()) {
$email_hg_since = $str;
}
$rerun = 1;
} elsif ($sel eq "t") {
bool_invert(\$email_git_all_signature_types);
$rerun = 1;
} elsif ($sel eq "f") {
bool_invert(\$file_emails);
$rerun = 1;
} elsif ($sel eq "r") {
bool_invert(\$email_remove_duplicates);
$rerun = 1;
} elsif ($sel eq "m") {
bool_invert(\$email_use_mailmap);
read_mailmap();
$rerun = 1;
} elsif ($sel eq "k") {
bool_invert(\$keywords);
$rerun = 1;
} elsif ($sel eq "p") {
if ($str ne "" && $val >= 0) {
$pattern_depth = $val;
$rerun = 1;
}
} elsif ($sel eq "h" || $sel eq "?") {
print STDERR <<EOT
Interactive mode allows you to select the various maintainers, submitters,
commit signers and mailing lists that could be CC'd on a patch.
Any *'d entry is selected.
If you have git or hg installed, you can choose to summarize the commit
history of files in the patch. Also, each line of the current file can
be matched to its commit author and that commit's signers with blame.
Various knobs exist to control the length of time for active commit
tracking, the maximum number of commit authors and signers to add,
and such.
Enter selections at the prompt until you are satisfied that the selected
maintainers are appropriate. You may enter multiple selections separated
by either commas or spaces.
EOT
} else {
print STDERR "invalid option: '$nr'\n";
$redraw = 0;
}
}
if ($rerun) {
print STDERR "git-blame can be very slow, please have patience..."
if ($email_git_blame);
goto &get_maintainers;
}
}
#drop not selected entries
$count = 0;
my @new_emailto = ();
foreach my $entry (@list) {
if ($selected{$count}) {
push(@new_emailto, $list[$count]);
}
$count++;
}
return @new_emailto;
}
sub bool_invert {
my ($bool_ref) = @_;
if ($$bool_ref) {
$$bool_ref = 0;
} else {
$$bool_ref = 1;
}
}
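# Map an address through .mailmap and the seen-name/seen-address hashes so
# the same person is always reported with one canonical "name <address>".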
sub deduplicate_email {
my ($email) = @_;
my $matched = 0;
my ($name, $address) = parse_email($email);
$email = format_email($name, $address, 1);
$email = mailmap_email($email);
return $email if (!$email_remove_duplicates);
($name, $address) = parse_email($email);
if ($name ne "" && $deduplicate_name_hash{lc($name)}) {
$name = $deduplicate_name_hash{lc($name)}->[0];
$address = $deduplicate_name_hash{lc($name)}->[1];
$matched = 1;
} elsif ($deduplicate_address_hash{lc($address)}) {
$name = $deduplicate_address_hash{lc($address)}->[0];
$address = $deduplicate_address_hash{lc($address)}->[1];
$matched = 1;
}
if (!$matched) {
$deduplicate_name_hash{lc($name)} = [ $name, $address ];
$deduplicate_address_hash{lc($address)} = [ $name, $address ];
}
$email = format_email($name, $address, 1);
$email = mailmap_email($email);
return $email;
}
sub save_commits_by_author {
my (@lines) = @_;
my @authors = ();
my @commits = ();
my @subjects = ();
foreach my $line (@lines) {
if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
my $author = $1;
$author = deduplicate_email($author);
push(@authors, $author);
}
push(@commits, $1) if ($line =~ m/$VCS_cmds{"commit_pattern"}/);
push(@subjects, $1) if ($line =~ m/$VCS_cmds{"subject_pattern"}/);
}
for (my $i = 0; $i < @authors; $i++) {
my $exists = 0;
foreach my $ref(@{$commit_author_hash{$authors[$i]}}) {
if (@{$ref}[0] eq $commits[$i] &&
@{$ref}[1] eq $subjects[$i]) {
$exists = 1;
last;
}
}
if (!$exists) {
push(@{$commit_author_hash{$authors[$i]}},
[ ($commits[$i], $subjects[$i]) ]);
}
}
}
sub save_commits_by_signer {
my (@lines) = @_;
my $commit = "";
my $subject = "";
foreach my $line (@lines) {
$commit = $1 if ($line =~ m/$VCS_cmds{"commit_pattern"}/);
$subject = $1 if ($line =~ m/$VCS_cmds{"subject_pattern"}/);
if ($line =~ /^[ \t]*${signature_pattern}.*\@.*$/) {
my @signatures = ($line);
my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures);
my @types = @$types_ref;
my @signers = @$signers_ref;
my $type = $types[0];
my $signer = $signers[0];
$signer = deduplicate_email($signer);
my $exists = 0;
foreach my $ref(@{$commit_signer_hash{$signer}}) {
if (@{$ref}[0] eq $commit &&
@{$ref}[1] eq $subject &&
@{$ref}[2] eq $type) {
$exists = 1;
last;
}
}
if (!$exists) {
push(@{$commit_signer_hash{$signer}},
[ ($commit, $subject, $type) ]);
}
}
}
}
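# Tally how often each signer/author appears and add the most frequent ones
# as recipients with the given role, honouring --git-min-signatures,
# --git-max-maintainers and --git-min-percent.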
sub vcs_assign {
my ($role, $divisor, @lines) = @_;
my %hash;
my $count = 0;
return if (@lines <= 0);
if ($divisor <= 0) {
warn("Bad divisor in " . (caller(0))[3] . ": $divisor\n");
$divisor = 1;
}
@lines = mailmap(@lines);
return if (@lines <= 0);
@lines = sort(@lines);
# uniq -c
$hash{$_}++ for @lines;
# sort -rn
foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
my $sign_offs = $hash{$line};
my $percent = $sign_offs * 100 / $divisor;
$percent = 100 if ($percent > 100);
$count++;
last if ($sign_offs < $email_git_min_signatures ||
$count > $email_git_max_maintainers ||
$percent < $email_git_min_percent);
push_email_address($line, '');
if ($output_rolestats) {
my $fmt_percent = sprintf("%.0f", $percent);
add_role($line, "$role:$sign_offs/$divisor=$fmt_percent%");
} else {
add_role($line, $role);
}
}
}
sub vcs_file_signoffs {
my ($file) = @_;
my @signers = ();
my $commits;
$vcs_used = vcs_exists();
return if (!$vcs_used);
my $cmd = $VCS_cmds{"find_signers_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
($commits, @signers) = vcs_find_signers($cmd);
foreach my $signer (@signers) {
$signer = deduplicate_email($signer);
}
vcs_assign("commit_signer", $commits, @signers);
}
sub vcs_file_blame {
my ($file) = @_;
my @signers = ();
my @all_commits = ();
my @commits = ();
my $total_commits;
my $total_lines;
$vcs_used = vcs_exists();
return if (!$vcs_used);
@all_commits = vcs_blame($file);
@commits = uniq(@all_commits);
$total_commits = @commits;
$total_lines = @all_commits;
if ($email_git_blame_signatures) {
if (vcs_is_hg()) {
my $commit_count;
my @commit_signers = ();
my $commit = join(" -r ", @commits);
my $cmd;
$cmd = $VCS_cmds{"find_commit_signers_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
($commit_count, @commit_signers) = vcs_find_signers($cmd);
push(@signers, @commit_signers);
} else {
foreach my $commit (@commits) {
my $commit_count;
my @commit_signers = ();
my $cmd;
$cmd = $VCS_cmds{"find_commit_signers_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
($commit_count, @commit_signers) = vcs_find_signers($cmd);
push(@signers, @commit_signers);
}
}
}
if ($from_filename) {
if ($output_rolestats) {
my @blame_signers;
if (vcs_is_hg()) {{ # Double brace for last exit
my $commit_count;
my @commit_signers = ();
@commits = uniq(@commits);
@commits = sort(@commits);
my $commit = join(" -r ", @commits);
my $cmd;
$cmd = $VCS_cmds{"find_commit_author_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
my @lines = ();
@lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
if (!$email_git_penguin_chiefs) {
@lines = grep(!/${penguin_chiefs}/i, @lines);
}
last if !@lines;
my @authors = ();
foreach my $line (@lines) {
if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
my $author = $1;
$author = deduplicate_email($author);
push(@authors, $author);
}
}
save_commits_by_author(@lines) if ($interactive);
save_commits_by_signer(@lines) if ($interactive);
push(@signers, @authors);
}}
else {
foreach my $commit (@commits) {
my $i;
my $cmd = $VCS_cmds{"find_commit_author_cmd"};
$cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
my @author = vcs_find_author($cmd);
next if !@author;
my $formatted_author = deduplicate_email($author[0]);
my $count = grep(/$commit/, @all_commits);
for ($i = 0; $i < $count ; $i++) {
push(@blame_signers, $formatted_author);
}
}
}
if (@blame_signers) {
vcs_assign("authored lines", $total_lines, @blame_signers);
}
}
foreach my $signer (@signers) {
$signer = deduplicate_email($signer);
}
vcs_assign("commits", $total_commits, @signers);
} else {
foreach my $signer (@signers) {
$signer = deduplicate_email($signer);
}
vcs_assign("modified commits", $total_commits, @signers);
}
}
sub uniq {
my (@parms) = @_;
my %saw;
@parms = grep(!$saw{$_}++, @parms);
return @parms;
}
sub sort_and_uniq {
my (@parms) = @_;
my %saw;
@parms = sort @parms;
@parms = grep(!$saw{$_}++, @parms);
return @parms;
}
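# Normalize the raw "Name <address>" strings scraped via --file-emails:
# wrap bare addresses in <>, trim stray punctuation, and shorten long names
# to at most a "First M. Last" style form.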
sub clean_file_emails {
my (@file_emails) = @_;
my @fmt_emails = ();
foreach my $email (@file_emails) {
$email =~ s/[\(\<\{]{0,1}([A-Za-z0-9_\.\+-]+\@[A-Za-z0-9\.-]+)[\)\>\}]{0,1}/\<$1\>/g;
my ($name, $address) = parse_email($email);
if ($name eq '"[,\.]"') {
$name = "";
}
my @nw = split(/[^A-Za-zÀ-ÿ\'\,\.\+-]/, $name);
if (@nw > 2) {
my $first = $nw[@nw - 3];
my $middle = $nw[@nw - 2];
my $last = $nw[@nw - 1];
if (((length($first) == 1 && $first =~ m/[A-Za-z]/) ||
(length($first) == 2 && substr($first, -1) eq ".")) ||
(length($middle) == 1 ||
(length($middle) == 2 && substr($middle, -1) eq "."))) {
$name = "$first $middle $last";
} else {
$name = "$middle $last";
}
}
if (substr($name, -1) =~ /[,\.]/) {
$name = substr($name, 0, length($name) - 1);
} elsif (substr($name, -2) =~ /[,\.]"/) {
$name = substr($name, 0, length($name) - 2) . '"';
}
if (substr($name, 0, 1) =~ /[,\.]/) {
$name = substr($name, 1, length($name) - 1);
} elsif (substr($name, 0, 2) =~ /"[,\.]/) {
$name = '"' . substr($name, 2, length($name) - 2);
}
my $fmt_email = format_email($name, $address, $email_usename);
push(@fmt_emails, $fmt_email);
}
return @fmt_emails;
}
sub merge_email {
my @lines;
my %saw;
for (@_) {
my ($address, $role) = @$_;
if (!$saw{$address}) {
if ($output_roles) {
push(@lines, "$address ($role)");
} else {
push(@lines, $address);
}
$saw{$address} = 1;
}
}
return @lines;
}
sub output {
my (@parms) = @_;
if ($output_multiline) {
foreach my $line (@parms) {
print("${line}\n");
}
} else {
print(join($output_separator, @parms));
print("\n");
}
}
my $rfc822re;
sub make_rfc822re {
# Basic lexical tokens are specials, domain_literal, quoted_string, atom, and
# comment. We must allow for rfc822_lwsp (or comments) after each of these.
# This regexp will only work on addresses which have had comments stripped
# and replaced with rfc822_lwsp.
my $specials = '()<>@,;:\\\\".\\[\\]';
my $controls = '\\000-\\037\\177';
my $dtext = "[^\\[\\]\\r\\\\]";
my $domain_literal = "\\[(?:$dtext|\\\\.)*\\]$rfc822_lwsp*";
my $quoted_string = "\"(?:[^\\\"\\r\\\\]|\\\\.|$rfc822_lwsp)*\"$rfc822_lwsp*";
# Use zero-width assertion to spot the limit of an atom. A simple
# $rfc822_lwsp* causes the regexp engine to hang occasionally.
my $atom = "[^$specials $controls]+(?:$rfc822_lwsp+|\\Z|(?=[\\[\"$specials]))";
my $word = "(?:$atom|$quoted_string)";
my $localpart = "$word(?:\\.$rfc822_lwsp*$word)*";
my $sub_domain = "(?:$atom|$domain_literal)";
my $domain = "$sub_domain(?:\\.$rfc822_lwsp*$sub_domain)*";
my $addr_spec = "$localpart\@$rfc822_lwsp*$domain";
my $phrase = "$word*";
my $route = "(?:\@$domain(?:,\@$rfc822_lwsp*$domain)*:$rfc822_lwsp*)";
my $route_addr = "\\<$rfc822_lwsp*$route?$addr_spec\\>$rfc822_lwsp*";
my $mailbox = "(?:$addr_spec|$phrase$route_addr)";
my $group = "$phrase:$rfc822_lwsp*(?:$mailbox(?:,\\s*$mailbox)*)?;\\s*";
my $address = "(?:$mailbox|$group)";
return "$rfc822_lwsp*$address";
}
sub rfc822_strip_comments {
my $s = shift;
# Recursively remove comments, and replace with a single space. The simpler
# regexps in the Email Addressing FAQ are imperfect - they will miss escaped
# chars in atoms, for example.
while ($s =~ s/^((?:[^"\\]|\\.)*
(?:"(?:[^"\\]|\\.)*"(?:[^"\\]|\\.)*)*)
\((?:[^()\\]|\\.)*\)/$1 /osx) {}
return $s;
}
# valid: returns true if the parameter is an RFC822 valid address
#
sub rfc822_valid {
my $s = rfc822_strip_comments(shift);
if (!$rfc822re) {
$rfc822re = make_rfc822re();
}
return $s =~ m/^$rfc822re$/so && $s =~ m/^$rfc822_char*$/;
}
# validlist: In scalar context, returns true if the parameter is an RFC822
# valid list of addresses.
#
# In list context, returns an empty list on failure (an invalid
# address was found); otherwise a list whose first element is the
# number of addresses found and whose remaining elements are the
# addresses. This is needed to disambiguate failure (invalid)
# from success with no addresses found, because an empty string is
# a valid list.
sub rfc822_validlist {
my $s = rfc822_strip_comments(shift);
if (!$rfc822re) {
$rfc822re = make_rfc822re();
}
# * null list items are valid according to the RFC
# * the '1' business is to aid in distinguishing failure from no results
my @r;
if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so &&
$s =~ m/^$rfc822_char*$/) {
while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) {
push(@r, $1);
}
return wantarray ? (scalar(@r), @r) : 1;
}
return wantarray ? () : 0;
}
| maxwen/android_kernel_oppo_msm8916 | scripts/get_maintainer.pl | Perl | gpl-2.0 | 55,413 |
/*
* MUSB OTG driver - support for Mentor's DMA controller
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2007 by Texas Instruments
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "musb_core.h"
#include "musbhsdma.h"
static int dma_controller_start(struct dma_controller *c)
{
/* nothing to do */
return 0;
}
static void dma_channel_release(struct dma_channel *channel);
static int dma_controller_stop(struct dma_controller *c)
{
struct musb_dma_controller *controller = container_of(c,
struct musb_dma_controller, controller);
struct musb *musb = controller->private_data;
struct dma_channel *channel;
u8 bit;
if (controller->used_channels != 0) {
dev_err(musb->controller,
"Stopping DMA controller while channel active\n");
for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
if (controller->used_channels & (1 << bit)) {
channel = &controller->channel[bit].channel;
dma_channel_release(channel);
if (!controller->used_channels)
break;
}
}
}
return 0;
}
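/*
 * Channels are tracked in a simple used_channels bitmask; allocation claims
 * the first free bit and binds it to the given endpoint and direction.
 */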
static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
struct musb_hw_ep *hw_ep, u8 transmit)
{
struct musb_dma_controller *controller = container_of(c,
struct musb_dma_controller, controller);
struct musb_dma_channel *musb_channel = NULL;
struct dma_channel *channel = NULL;
u8 bit;
for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
if (!(controller->used_channels & (1 << bit))) {
controller->used_channels |= (1 << bit);
musb_channel = &(controller->channel[bit]);
musb_channel->controller = controller;
musb_channel->idx = bit;
musb_channel->epnum = hw_ep->epnum;
musb_channel->transmit = transmit;
channel = &(musb_channel->channel);
channel->private_data = musb_channel;
channel->status = MUSB_DMA_STATUS_FREE;
channel->max_len = 0x100000;
/* Tx => mode 1; Rx => mode 0 */
channel->desired_mode = transmit;
channel->actual_len = 0;
break;
}
}
return channel;
}
static void dma_channel_release(struct dma_channel *channel)
{
struct musb_dma_channel *musb_channel = channel->private_data;
channel->actual_len = 0;
musb_channel->start_addr = 0;
musb_channel->len = 0;
musb_channel->controller->used_channels &=
~(1 << musb_channel->idx);
channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
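/*
 * Program one HSDMA channel: load the DMA address and byte count, then
 * write the control word (mode, INCR16 bursts, endpoint number, direction,
 * interrupt enable).  Writing the control register starts the transfer.
 */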
static void configure_channel(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
{
struct musb_dma_channel *musb_channel = channel->private_data;
struct musb_dma_controller *controller = musb_channel->controller;
struct musb *musb = controller->private_data;
void __iomem *mbase = controller->base;
u8 bchannel = musb_channel->idx;
u16 csr = 0;
dev_dbg(musb->controller, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
channel, packet_sz, dma_addr, len, mode);
if (mode) {
csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
BUG_ON(len < packet_sz);
}
csr |= MUSB_HSDMA_BURSTMODE_INCR16
<< MUSB_HSDMA_BURSTMODE_SHIFT;
csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
| (1 << MUSB_HSDMA_ENABLE_SHIFT)
| (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
| (musb_channel->transmit
? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
: 0);
/* address/count */
musb_write_hsdma_addr(mbase, bchannel, dma_addr);
musb_write_hsdma_count(mbase, bchannel, len);
/* control (this should start things) */
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
csr);
}
static int dma_channel_program(struct dma_channel *channel,
u16 packet_sz, u8 mode,
dma_addr_t dma_addr, u32 len)
{
struct musb_dma_channel *musb_channel = channel->private_data;
struct musb_dma_controller *controller = musb_channel->controller;
struct musb *musb = controller->private_data;
dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
musb_channel->epnum,
musb_channel->transmit ? "Tx" : "Rx",
packet_sz, dma_addr, len, mode);
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
/* Let targets check/tweak the arguments */
if (musb->ops->adjust_channel_params) {
int ret = musb->ops->adjust_channel_params(channel,
packet_sz, &mode, &dma_addr, &len);
if (ret)
return ret;
}
/*
* The DMA engine in RTL1.8 and above cannot handle
* DMA addresses that are not aligned to a 4 byte boundary.
* It ends up masking the last two bits of the address
* programmed in DMA_ADDR.
*
* Fail such DMA transfers, so that the backup PIO mode
* can carry out the transfer
*/
if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
return false;
channel->actual_len = 0;
musb_channel->start_addr = dma_addr;
musb_channel->len = len;
musb_channel->max_packet_sz = packet_sz;
channel->status = MUSB_DMA_STATUS_BUSY;
configure_channel(channel, packet_sz, mode, dma_addr, len);
return true;
}
static int dma_channel_abort(struct dma_channel *channel)
{
struct musb_dma_channel *musb_channel = channel->private_data;
void __iomem *mbase = musb_channel->controller->base;
u8 bchannel = musb_channel->idx;
int offset;
u16 csr;
if (channel->status == MUSB_DMA_STATUS_BUSY) {
if (musb_channel->transmit) {
offset = MUSB_EP_OFFSET(musb_channel->epnum,
MUSB_TXCSR);
/*
* The programming guide says that we must clear
* the DMAENAB bit before the DMAMODE bit...
*/
csr = musb_readw(mbase, offset);
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
musb_writew(mbase, offset, csr);
csr &= ~MUSB_TXCSR_DMAMODE;
musb_writew(mbase, offset, csr);
} else {
offset = MUSB_EP_OFFSET(musb_channel->epnum,
MUSB_RXCSR);
csr = musb_readw(mbase, offset);
csr &= ~(MUSB_RXCSR_AUTOCLEAR |
MUSB_RXCSR_DMAENAB |
MUSB_RXCSR_DMAMODE);
musb_writew(mbase, offset, csr);
}
musb_writew(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
0);
musb_write_hsdma_addr(mbase, bchannel, 0);
musb_write_hsdma_count(mbase, bchannel, 0);
channel->status = MUSB_DMA_STATUS_FREE;
}
return 0;
}
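/*
 * DMA completion interrupt handler.  If the interrupt status register reads
 * back as zero (interrupts can be missed on some cores), busy channels whose
 * remaining count has reached zero are treated as completed.  For host-mode
 * TX using mode 0, or when a transfer ends on a short packet, DMAENAB and
 * DMAMODE are cleared and TXPKTRDY is set so the final packet is sent.
 */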
static irqreturn_t dma_controller_irq(int irq, void *private_data)
{
struct musb_dma_controller *controller = private_data;
struct musb *musb = controller->private_data;
struct musb_dma_channel *musb_channel;
struct dma_channel *channel;
void __iomem *mbase = controller->base;
irqreturn_t retval = IRQ_NONE;
unsigned long flags;
u8 bchannel;
u8 int_hsdma;
u32 addr, count;
u16 csr;
spin_lock_irqsave(&musb->lock, flags);
int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
#ifdef CONFIG_BLACKFIN
/* Clear DMA interrupt flags */
musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
#endif
if (!int_hsdma) {
dev_dbg(musb->controller, "spurious DMA irq\n");
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
musb_channel = (struct musb_dma_channel *)
&(controller->channel[bchannel]);
channel = &musb_channel->channel;
if (channel->status == MUSB_DMA_STATUS_BUSY) {
count = musb_read_hsdma_count(mbase, bchannel);
if (count == 0)
int_hsdma |= (1 << bchannel);
}
}
dev_dbg(musb->controller, "int_hsdma = 0x%x\n", int_hsdma);
if (!int_hsdma)
goto done;
}
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
if (int_hsdma & (1 << bchannel)) {
musb_channel = (struct musb_dma_channel *)
&(controller->channel[bchannel]);
channel = &musb_channel->channel;
csr = musb_readw(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel,
MUSB_HSDMA_CONTROL));
if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) {
musb_channel->channel.status =
MUSB_DMA_STATUS_BUS_ABORT;
} else {
u8 devctl;
addr = musb_read_hsdma_addr(mbase,
bchannel);
channel->actual_len = addr
- musb_channel->start_addr;
dev_dbg(musb->controller, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
channel, musb_channel->start_addr,
addr, channel->actual_len,
musb_channel->len,
(channel->actual_len
< musb_channel->len) ?
"=> reconfig 0" : "=> complete");
devctl = musb_readb(mbase, MUSB_DEVCTL);
channel->status = MUSB_DMA_STATUS_FREE;
/* completed */
if ((devctl & MUSB_DEVCTL_HM)
&& (musb_channel->transmit)
&& ((channel->desired_mode == 0)
|| (channel->actual_len &
(musb_channel->max_packet_sz - 1)))
) {
u8 epnum = musb_channel->epnum;
int offset = MUSB_EP_OFFSET(epnum,
MUSB_TXCSR);
u16 txcsr;
/*
* The programming guide says that we
* must clear DMAENAB before DMAMODE.
*/
musb_ep_select(mbase, epnum);
txcsr = musb_readw(mbase, offset);
txcsr &= ~(MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_AUTOSET);
musb_writew(mbase, offset, txcsr);
/* Send out the packet */
txcsr &= ~MUSB_TXCSR_DMAMODE;
txcsr |= MUSB_TXCSR_TXPKTRDY;
musb_writew(mbase, offset, txcsr);
}
musb_dma_completion(musb, musb_channel->epnum,
musb_channel->transmit);
}
}
}
retval = IRQ_HANDLED;
done:
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
}
void dma_controller_destroy(struct dma_controller *c)
{
struct musb_dma_controller *controller = container_of(c,
struct musb_dma_controller, controller);
if (!controller)
return;
if (controller->irq)
free_irq(controller->irq, c);
kfree(controller);
}
struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *base)
{
struct musb_dma_controller *controller;
struct device *dev = musb->controller;
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq_byname(pdev, "dma");
if (irq <= 0) {
dev_err(dev, "No DMA interrupt line!\n");
return NULL;
}
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
return NULL;
controller->channel_count = MUSB_HSDMA_CHANNELS;
controller->private_data = musb;
controller->base = base;
controller->controller.start = dma_controller_start;
controller->controller.stop = dma_controller_stop;
controller->controller.channel_alloc = dma_channel_allocate;
controller->controller.channel_release = dma_channel_release;
controller->controller.channel_program = dma_channel_program;
controller->controller.channel_abort = dma_channel_abort;
if (request_irq(irq, dma_controller_irq, 0,
dev_name(musb->controller), &controller->controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
dma_controller_destroy(&controller->controller);
return NULL;
}
controller->irq = irq;
return &controller->controller;
}
| halcyonaoh/blackbox_sprout | drivers/usb/musb/musbhsdma.c | C | gpl-2.0 | 11,984 |
/*
* Kernel-based Virtual Machine driver for Linux
*
* AMD SVM support
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Yaniv Kamay <yaniv@qumranet.com>
* Avi Kivity <avi@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/kvm_para.h>
#include <asm/virtext.h>
#include "trace.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1
#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3
#define SVM_FEATURE_NPT (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
#define SVM_FEATURE_NRIP (1 << 3)
#define SVM_FEATURE_TSC_RATE (1 << 4)
#define SVM_FEATURE_VMCB_CLEAN (1 << 5)
#define SVM_FEATURE_FLUSH_ASID (1 << 6)
#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
#define TSC_RATIO_RSVD 0xffffff0000000000ULL
#define TSC_RATIO_MIN 0x0000000000000001ULL
#define TSC_RATIO_MAX 0x000000ffffffffffULL
static bool erratum_383_found __read_mostly;
static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
MSR_FS_BASE,
#endif
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
struct kvm_vcpu;
struct nested_state {
struct vmcb *hsave;
u64 hsave_msr;
u64 vm_cr_msr;
u64 vmcb;
/* These are the merged vectors */
u32 *msrpm;
/* gpa pointers to the real vectors */
u64 vmcb_msrpm;
u64 vmcb_iopm;
/* A VMEXIT is required but not yet emulated */
bool exit_required;
/* cache for intercepts of the guest */
u32 intercept_cr;
u32 intercept_dr;
u32 intercept_exceptions;
u64 intercept;
/* Nested Paging related state */
u64 nested_cr3;
};
#define MSRPM_OFFSETS 16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
struct vcpu_svm {
struct kvm_vcpu vcpu;
struct vmcb *vmcb;
unsigned long vmcb_pa;
struct svm_cpu_data *svm_data;
uint64_t asid_generation;
uint64_t sysenter_esp;
uint64_t sysenter_eip;
u64 next_rip;
u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
struct {
u16 fs;
u16 gs;
u16 ldt;
u64 gs_base;
} host;
u32 *msrpm;
ulong nmi_iret_rip;
struct nested_state nested;
bool nmi_singlestep;
unsigned int3_injected;
unsigned long int3_rip;
u32 apf_reason;
u64 tsc_ratio;
};
static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT 0x0100000000ULL
#define MSR_INVALID 0xffffffffU
static struct svm_direct_access_msrs {
u32 index; /* Index of the MSR */
bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
{ .index = MSR_STAR, .always = true },
{ .index = MSR_IA32_SYSENTER_CS, .always = true },
#ifdef CONFIG_X86_64
{ .index = MSR_GS_BASE, .always = true },
{ .index = MSR_FS_BASE, .always = true },
{ .index = MSR_KERNEL_GS_BASE, .always = true },
{ .index = MSR_LSTAR, .always = true },
{ .index = MSR_CSTAR, .always = true },
{ .index = MSR_SYSCALL_MASK, .always = true },
#endif
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
{ .index = MSR_IA32_LASTINTTOIP, .always = false },
{ .index = MSR_INVALID, .always = false },
};
/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif
static int npt = 1;
module_param(npt, int, S_IRUGO);
static int nested = 1;
module_param(nested, int, S_IRUGO);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);
static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
static u64 __scale_tsc(u64 ratio, u64 tsc);
enum {
VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
pause filter count */
VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */
VMCB_ASID, /* ASID */
VMCB_INTR, /* int_ctl, int_vector */
VMCB_NPT, /* npt_en, nCR3, gPAT */
VMCB_CR, /* CR0, CR3, CR4, EFER */
VMCB_DR, /* DR6, DR7 */
VMCB_DT, /* GDT, IDT */
VMCB_SEG, /* CS, DS, SS, ES, CPL */
VMCB_CR2, /* CR2 only */
VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
VMCB_DIRTY_MAX,
};
/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
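/*
 * VMCB clean bits: a set bit tells the CPU that the corresponding group of
 * VMCB fields is unchanged since the last VMRUN and may be used from its
 * internal cache.  Software must call mark_dirty() whenever it writes a
 * field belonging to that group.
 */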
static inline void mark_all_dirty(struct vmcb *vmcb)
{
vmcb->control.clean = 0;
}
static inline void mark_all_clean(struct vmcb *vmcb)
{
vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
& ~VMCB_ALWAYS_DIRTY_MASK;
}
static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
}
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
return container_of(vcpu, struct vcpu_svm, vcpu);
}
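/*
 * While a nested guest runs, the intercept masks in the active VMCB must be
 * the union of what the host (hsave) wants and what the L1 hypervisor
 * requested in its nested VMCB; recompute that union here.
 */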
static void recalc_intercepts(struct vcpu_svm *svm)
{
struct vmcb_control_area *c, *h;
struct nested_state *g;
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
if (!is_guest_mode(&svm->vcpu))
return;
c = &svm->vmcb->control;
h = &svm->nested.hsave->control;
g = &svm->nested;
c->intercept_cr = h->intercept_cr | g->intercept_cr;
c->intercept_dr = h->intercept_dr | g->intercept_dr;
c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
c->intercept = h->intercept | g->intercept;
}
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
if (is_guest_mode(&svm->vcpu))
return svm->nested.hsave;
else
return svm->vmcb;
}
static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_cr |= (1U << bit);
recalc_intercepts(svm);
}
static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_cr &= ~(1U << bit);
recalc_intercepts(svm);
}
static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
return vmcb->control.intercept_cr & (1U << bit);
}
static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_dr |= (1U << bit);
recalc_intercepts(svm);
}
static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_dr &= ~(1U << bit);
recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_exceptions |= (1U << bit);
recalc_intercepts(svm);
}
static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_exceptions &= ~(1U << bit);
recalc_intercepts(svm);
}
static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept |= (1ULL << bit);
recalc_intercepts(svm);
}
static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept &= ~(1ULL << bit);
recalc_intercepts(svm);
}
static inline void enable_gif(struct vcpu_svm *svm)
{
svm->vcpu.arch.hflags |= HF_GIF_MASK;
}
static inline void disable_gif(struct vcpu_svm *svm)
{
svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}
static inline bool gif_set(struct vcpu_svm *svm)
{
return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
static unsigned long iopm_base;
struct kvm_ldttss_desc {
u16 limit0;
u16 base0;
unsigned base1:8, type:5, dpl:2, p:1;
unsigned limit1:4, zero0:3, g:1, base2:8;
u32 base3;
u32 zero1;
} __attribute__((packed));
struct svm_cpu_data {
int cpu;
u64 asid_generation;
u32 max_asid;
u32 next_asid;
struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
struct svm_init_data {
int cpu;
int r;
};
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
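/*
 * The MSR permission map covers three MSR ranges, with two bits per MSR
 * (read intercept, then write intercept).  Convert an MSR number into the
 * offset, in u32 units, of the word holding its permission bits.
 */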
static u32 svm_msrpm_offset(u32 msr)
{
u32 offset;
int i;
for (i = 0; i < NUM_MSR_MAPS; i++) {
if (msr < msrpm_ranges[i] ||
msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
continue;
offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
offset += (i * MSRS_RANGE_SIZE); /* add range offset */
/* Now we have the u8 offset - but need the u32 offset */
return offset / 4;
}
/* MSR not in any range */
return MSR_INVALID;
}
#define MAX_INST_SIZE 15
static inline void clgi(void)
{
asm volatile (__ex(SVM_CLGI));
}
static inline void stgi(void)
{
asm volatile (__ex(SVM_STGI));
}
static inline void invlpga(unsigned long addr, u32 asid)
{
asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
return PT64_ROOT_LEVEL;
#else
return PT32E_ROOT_LEVEL;
#endif
}
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
vcpu->arch.efer = efer;
if (!npt_enabled && !(efer & EFER_LMA))
efer &= ~EFER_LME;
to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}
static int is_external_interrupt(u32 info)
{
info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 ret = 0;
if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
return ret & mask;
}
static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (mask == 0)
svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
else
svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (svm->vmcb->control.next_rip != 0)
svm->next_rip = svm->vmcb->control.next_rip;
if (!svm->next_rip) {
if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
EMULATE_DONE)
printk(KERN_DEBUG "%s: NOP\n", __func__);
return;
}
if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
__func__, kvm_rip_read(vcpu), svm->next_rip);
kvm_rip_write(vcpu, svm->next_rip);
svm_set_interrupt_shadow(vcpu, 0);
}
static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
{
struct vcpu_svm *svm = to_svm(vcpu);
/*
* If we are within a nested VM we'd better #VMEXIT and let the guest
* handle the exception
*/
if (!reinject &&
nested_svm_check_exception(svm, nr, has_error_code, error_code))
return;
if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
/*
* For guest debugging where we have to reinject #BP if some
* INT3 is guest-owned:
* Emulate nRIP by moving RIP forward. Will fail if injection
* raises a fault that is not intercepted. Still better than
* failing in all cases.
*/
skip_emulated_instruction(&svm->vcpu);
rip = kvm_rip_read(&svm->vcpu);
svm->int3_rip = rip + svm->vmcb->save.cs.base;
svm->int3_injected = rip - old_rip;
}
svm->vmcb->control.event_inj = nr
| SVM_EVTINJ_VALID
| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
| SVM_EVTINJ_TYPE_EXEPT;
svm->vmcb->control.event_inj_err = error_code;
}
static void svm_init_erratum_383(void)
{
u32 low, high;
int err;
u64 val;
if (!cpu_has_amd_erratum(amd_erratum_383))
return;
/* Use _safe variants to not break nested virtualization */
val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
if (err)
return;
val |= (1ULL << 47);
low = lower_32_bits(val);
high = upper_32_bits(val);
native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
erratum_383_found = true;
}
static int has_svm(void)
{
const char *msg;
if (!cpu_has_svm(&msg)) {
printk(KERN_INFO "has_svm: %s\n", msg);
return 0;
}
return 1;
}
static void svm_hardware_disable(void *garbage)
{
/* Make sure we clean up behind us */
if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
cpu_svm_disable();
}
static int svm_hardware_enable(void *garbage)
{
struct svm_cpu_data *sd;
uint64_t efer;
struct desc_ptr gdt_descr;
struct desc_struct *gdt;
int me = raw_smp_processor_id();
rdmsrl(MSR_EFER, efer);
if (efer & EFER_SVME)
return -EBUSY;
if (!has_svm()) {
printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
me);
return -EINVAL;
}
sd = per_cpu(svm_data, me);
if (!sd) {
printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
me);
return -EINVAL;
}
sd->asid_generation = 1;
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
sd->next_asid = sd->max_asid + 1;
native_store_gdt(&gdt_descr);
gdt = (struct desc_struct *)gdt_descr.address;
sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
wrmsrl(MSR_EFER, efer | EFER_SVME);
wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
}
svm_init_erratum_383();
return 0;
}
static void svm_cpu_uninit(int cpu)
{
struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
if (!sd)
return;
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
__free_page(sd->save_area);
kfree(sd);
}
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *sd;
int r;
sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
if (!sd)
return -ENOMEM;
sd->cpu = cpu;
sd->save_area = alloc_page(GFP_KERNEL);
r = -ENOMEM;
if (!sd->save_area)
goto err_1;
per_cpu(svm_data, cpu) = sd;
return 0;
err_1:
kfree(sd);
return r;
}
static bool valid_msr_intercept(u32 index)
{
int i;
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
if (direct_access_msrs[i].index == index)
return true;
return false;
}
static void set_msr_interception(u32 *msrpm, unsigned msr,
int read, int write)
{
u8 bit_read, bit_write;
unsigned long tmp;
u32 offset;
/*
* If this warning triggers extend the direct_access_msrs list at the
* beginning of the file
*/
WARN_ON(!valid_msr_intercept(msr));
offset = svm_msrpm_offset(msr);
bit_read = 2 * (msr & 0x0f);
bit_write = 2 * (msr & 0x0f) + 1;
	BUG_ON(offset == MSR_INVALID);
	tmp = msrpm[offset];
read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
msrpm[offset] = tmp;
}
static void svm_vcpu_init_msrpm(u32 *msrpm)
{
int i;
memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
if (!direct_access_msrs[i].always)
continue;
set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
}
}
static void add_msr_offset(u32 offset)
{
int i;
for (i = 0; i < MSRPM_OFFSETS; ++i) {
/* Offset already in list? */
if (msrpm_offsets[i] == offset)
return;
/* Slot used by another offset? */
if (msrpm_offsets[i] != MSR_INVALID)
continue;
/* Add offset to list */
msrpm_offsets[i] = offset;
return;
}
/*
* If this BUG triggers the msrpm_offsets table has an overflow. Just
* increase MSRPM_OFFSETS in this case.
*/
BUG();
}
static void init_msrpm_offsets(void)
{
int i;
memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
u32 offset;
offset = svm_msrpm_offset(direct_access_msrs[i].index);
BUG_ON(offset == MSR_INVALID);
add_msr_offset(offset);
}
}
static void svm_enable_lbrv(struct vcpu_svm *svm)
{
u32 *msrpm = svm->msrpm;
svm->vmcb->control.lbr_ctl = 1;
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}
static void svm_disable_lbrv(struct vcpu_svm *svm)
{
u32 *msrpm = svm->msrpm;
svm->vmcb->control.lbr_ctl = 0;
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
static __init int svm_hardware_setup(void)
{
int cpu;
struct page *iopm_pages;
void *iopm_va;
int r;
iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
if (!iopm_pages)
return -ENOMEM;
iopm_va = page_address(iopm_pages);
memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
init_msrpm_offsets();
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
kvm_enable_efer_bits(EFER_FFXSR);
if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
u64 max;
kvm_has_tsc_control = true;
/*
* Make sure the user can only configure tsc_khz values that
* fit into a signed integer.
		 * A min value is not calculated because it will always
		 * be 1 on all machines, and a value of 0 is used to disable
		 * TSC scaling for the vcpu.
*/
max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
kvm_max_guest_tsc_khz = max;
}
if (nested) {
printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
}
for_each_possible_cpu(cpu) {
r = svm_cpu_init(cpu);
if (r)
goto err;
}
if (!boot_cpu_has(X86_FEATURE_NPT))
npt_enabled = false;
if (npt_enabled && !npt) {
printk(KERN_INFO "kvm: Nested Paging disabled\n");
npt_enabled = false;
}
if (npt_enabled) {
printk(KERN_INFO "kvm: Nested Paging enabled\n");
kvm_enable_tdp();
} else
kvm_disable_tdp();
return 0;
err:
__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
iopm_base = 0;
return r;
}
static __exit void svm_hardware_unsetup(void)
{
int cpu;
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
iopm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
{
seg->selector = 0;
seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
seg->limit = 0xffff;
seg->base = 0;
}
static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
seg->selector = 0;
seg->attrib = SVM_SELECTOR_P_MASK | type;
seg->limit = 0xffff;
seg->base = 0;
}
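/*
 * Scale a TSC value by a 32.32 fixed-point ratio.  The product is built from
 * integer and fractional parts so that only 64-bit arithmetic is needed and
 * no 128-bit intermediate is required.
 */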
static u64 __scale_tsc(u64 ratio, u64 tsc)
{
u64 mult, frac, _tsc;
mult = ratio >> 32;
frac = ratio & ((1ULL << 32) - 1);
_tsc = tsc;
_tsc *= mult;
_tsc += (tsc >> 32) * frac;
_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
return _tsc;
}
static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 _tsc = tsc;
if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
_tsc = __scale_tsc(svm->tsc_ratio, tsc);
return _tsc;
}
static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 ratio;
u64 khz;
/* TSC scaling supported? */
if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR))
return;
/* TSC-Scaling disabled or guest TSC same frequency as host TSC? */
if (user_tsc_khz == 0) {
vcpu->arch.virtual_tsc_khz = 0;
svm->tsc_ratio = TSC_RATIO_DEFAULT;
return;
}
khz = user_tsc_khz;
/* TSC scaling required - calculate ratio */
ratio = khz << 32;
do_div(ratio, tsc_khz);
if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
user_tsc_khz);
return;
}
vcpu->arch.virtual_tsc_khz = user_tsc_khz;
svm->tsc_ratio = ratio;
}
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 g_tsc_offset = 0;
if (is_guest_mode(vcpu)) {
g_tsc_offset = svm->vmcb->control.tsc_offset -
svm->nested.hsave->control.tsc_offset;
svm->nested.hsave->control.tsc_offset = offset;
}
svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}
static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.tsc_offset += adjustment;
if (is_guest_mode(vcpu))
svm->nested.hsave->control.tsc_offset += adjustment;
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}
static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
u64 tsc;
tsc = svm_scale_tsc(vcpu, native_read_tsc());
return target_tsc - tsc;
}
static void init_vmcb(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
svm->vcpu.fpu_active = 1;
svm->vcpu.arch.hflags = 0;
set_cr_intercept(svm, INTERCEPT_CR0_READ);
set_cr_intercept(svm, INTERCEPT_CR3_READ);
set_cr_intercept(svm, INTERCEPT_CR4_READ);
set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
set_dr_intercept(svm, INTERCEPT_DR0_READ);
set_dr_intercept(svm, INTERCEPT_DR1_READ);
set_dr_intercept(svm, INTERCEPT_DR2_READ);
set_dr_intercept(svm, INTERCEPT_DR3_READ);
set_dr_intercept(svm, INTERCEPT_DR4_READ);
set_dr_intercept(svm, INTERCEPT_DR5_READ);
set_dr_intercept(svm, INTERCEPT_DR6_READ);
set_dr_intercept(svm, INTERCEPT_DR7_READ);
set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
set_exception_intercept(svm, PF_VECTOR);
set_exception_intercept(svm, UD_VECTOR);
set_exception_intercept(svm, MC_VECTOR);
set_intercept(svm, INTERCEPT_INTR);
set_intercept(svm, INTERCEPT_NMI);
set_intercept(svm, INTERCEPT_SMI);
set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
set_intercept(svm, INTERCEPT_CPUID);
set_intercept(svm, INTERCEPT_INVD);
set_intercept(svm, INTERCEPT_HLT);
set_intercept(svm, INTERCEPT_INVLPG);
set_intercept(svm, INTERCEPT_INVLPGA);
set_intercept(svm, INTERCEPT_IOIO_PROT);
set_intercept(svm, INTERCEPT_MSR_PROT);
set_intercept(svm, INTERCEPT_TASK_SWITCH);
set_intercept(svm, INTERCEPT_SHUTDOWN);
set_intercept(svm, INTERCEPT_VMRUN);
set_intercept(svm, INTERCEPT_VMMCALL);
set_intercept(svm, INTERCEPT_VMLOAD);
set_intercept(svm, INTERCEPT_VMSAVE);
set_intercept(svm, INTERCEPT_STGI);
set_intercept(svm, INTERCEPT_CLGI);
set_intercept(svm, INTERCEPT_SKINIT);
set_intercept(svm, INTERCEPT_WBINVD);
set_intercept(svm, INTERCEPT_MONITOR);
set_intercept(svm, INTERCEPT_MWAIT);
set_intercept(svm, INTERCEPT_XSETBV);
control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __pa(svm->msrpm);
control->int_ctl = V_INTR_MASKING_MASK;
init_seg(&save->es);
init_seg(&save->ss);
init_seg(&save->ds);
init_seg(&save->fs);
init_seg(&save->gs);
save->cs.selector = 0xf000;
/* Executable/Readable Code Segment */
save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
save->cs.limit = 0xffff;
/*
* cs.base should really be 0xffff0000, but vmx can't handle that, so
* be consistent with it.
*
* Replace when we have real mode working for vmx.
*/
save->cs.base = 0xf0000;
save->gdtr.limit = 0xffff;
save->idtr.limit = 0xffff;
init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
svm_set_efer(&svm->vcpu, 0);
save->dr6 = 0xffff0ff0;
save->dr7 = 0x400;
kvm_set_rflags(&svm->vcpu, 2);
save->rip = 0x0000fff0;
svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
/*
* This is the guest-visible cr0 value.
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
*/
svm->vcpu.arch.cr0 = 0;
(void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
if (npt_enabled) {
/* Setup VMCB for Nested Paging */
control->nested_ctl = 1;
clr_intercept(svm, INTERCEPT_TASK_SWITCH);
clr_intercept(svm, INTERCEPT_INVLPG);
clr_exception_intercept(svm, PF_VECTOR);
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = 0x0007040600070406ULL;
save->cr3 = 0;
save->cr4 = 0;
}
svm->asid_generation = 0;
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;
if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
control->pause_filter_count = 3000;
set_intercept(svm, INTERCEPT_PAUSE);
}
mark_all_dirty(svm->vmcb);
enable_gif(svm);
}
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
init_vmcb(svm);
if (!kvm_vcpu_is_bsp(vcpu)) {
kvm_rip_write(vcpu, 0);
svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
}
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;
return 0;
}
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
struct vcpu_svm *svm;
struct page *page;
struct page *msrpm_pages;
struct page *hsave_page;
struct page *nested_msrpm_pages;
int err;
svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!svm) {
err = -ENOMEM;
goto out;
}
svm->tsc_ratio = TSC_RATIO_DEFAULT;
err = kvm_vcpu_init(&svm->vcpu, kvm, id);
if (err)
goto free_svm;
err = -ENOMEM;
page = alloc_page(GFP_KERNEL);
if (!page)
goto uninit;
msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!msrpm_pages)
goto free_page1;
nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!nested_msrpm_pages)
goto free_page2;
hsave_page = alloc_page(GFP_KERNEL);
if (!hsave_page)
goto free_page3;
svm->nested.hsave = page_address(hsave_page);
svm->msrpm = page_address(msrpm_pages);
svm_vcpu_init_msrpm(svm->msrpm);
svm->nested.msrpm = page_address(nested_msrpm_pages);
svm_vcpu_init_msrpm(svm->nested.msrpm);
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
init_vmcb(svm);
kvm_write_tsc(&svm->vcpu, 0);
err = fx_init(&svm->vcpu);
if (err)
goto free_page4;
svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
return &svm->vcpu;
free_page4:
__free_page(hsave_page);
free_page3:
__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
__free_page(page);
uninit:
kvm_vcpu_uninit(&svm->vcpu);
free_svm:
kmem_cache_free(kvm_vcpu_cache, svm);
out:
return ERR_PTR(err);
}
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
__free_page(virt_to_page(svm->nested.hsave));
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
int i;
if (unlikely(cpu != vcpu->cpu)) {
svm->asid_generation = 0;
mark_all_dirty(svm->vmcb);
}
#ifdef CONFIG_X86_64
rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
savesegment(fs, svm->host.fs);
savesegment(gs, svm->host.gs);
svm->host.ldt = kvm_read_ldt();
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
__get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
}
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
int i;
++vcpu->stat.host_state_reload;
kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
loadsegment(fs, svm->host.fs);
wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
loadsegment(gs, svm->host.gs);
#endif
#endif
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
return to_svm(vcpu)->vmcb->save.rflags;
}
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
switch (reg) {
case VCPU_EXREG_PDPTR:
BUG_ON(!npt_enabled);
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
break;
default:
BUG();
}
}
static void svm_set_vintr(struct vcpu_svm *svm)
{
set_intercept(svm, INTERCEPT_VINTR);
}
static void svm_clear_vintr(struct vcpu_svm *svm)
{
clr_intercept(svm, INTERCEPT_VINTR);
}
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
switch (seg) {
case VCPU_SREG_CS: return &save->cs;
case VCPU_SREG_DS: return &save->ds;
case VCPU_SREG_ES: return &save->es;
case VCPU_SREG_FS: return &save->fs;
case VCPU_SREG_GS: return &save->gs;
case VCPU_SREG_SS: return &save->ss;
case VCPU_SREG_TR: return &save->tr;
case VCPU_SREG_LDTR: return &save->ldtr;
}
BUG();
return NULL;
}
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
struct vmcb_seg *s = svm_seg(vcpu, seg);
return s->base;
}
static void svm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
struct vmcb_seg *s = svm_seg(vcpu, seg);
var->base = s->base;
var->limit = s->limit;
var->selector = s->selector;
var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
/*
* AMD's VMCB does not have an explicit unusable field, so emulate it
* for cross vendor migration purposes by "not present"
*/
var->unusable = !var->present || (var->type == 0);
switch (seg) {
case VCPU_SREG_CS:
/*
* SVM always stores 0 for the 'G' bit in the CS selector in
* the VMCB on a VMEXIT. This hurts cross-vendor migration:
* Intel's VMENTRY has a check on the 'G' bit.
*/
var->g = s->limit > 0xfffff;
break;
case VCPU_SREG_TR:
/*
* Work around a bug where the busy flag in the tr selector
* isn't exposed
*/
var->type |= 0x2;
break;
case VCPU_SREG_DS:
case VCPU_SREG_ES:
case VCPU_SREG_FS:
case VCPU_SREG_GS:
/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache: although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
* Intel has a check on this, set it here to support
* cross-vendor migration.
*/
if (!var->unusable)
var->type |= 0x1;
break;
case VCPU_SREG_SS:
/*
* On AMD CPUs sometimes the DB bit in the segment
* descriptor is left as 1, although the whole segment has
* been made unusable. Clear it here to pass an Intel VMX
* entry check when cross vendor migrating.
*/
if (var->unusable)
var->db = 0;
break;
}
}
static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
return save->cpl;
}
static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
dt->size = svm->vmcb->save.idtr.limit;
dt->address = svm->vmcb->save.idtr.base;
}
static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
mark_dirty(svm->vmcb, VMCB_DT);
}
static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
dt->size = svm->vmcb->save.gdtr.limit;
dt->address = svm->vmcb->save.gdtr.base;
}
static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
mark_dirty(svm->vmcb, VMCB_DT);
}
static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}
static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
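/*
 * Decide whether CR0 accesses must be intercepted.  If the guest-visible CR0
 * matches the value the hardware uses and the FPU is active, CR0 reads and
 * writes can go straight through; otherwise intercept them so the
 * selectively shadowed bits used for lazy FPU switching stay consistent.
 */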
static void update_cr0_intercept(struct vcpu_svm *svm)
{
ulong gcr0 = svm->vcpu.arch.cr0;
u64 *hcr0 = &svm->vmcb->save.cr0;
if (!svm->vcpu.fpu_active)
*hcr0 |= SVM_CR0_SELECTIVE_MASK;
else
*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
| (gcr0 & SVM_CR0_SELECTIVE_MASK);
mark_dirty(svm->vmcb, VMCB_CR);
if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
clr_cr_intercept(svm, INTERCEPT_CR0_READ);
clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
} else {
set_cr_intercept(svm, INTERCEPT_CR0_READ);
set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
}
}
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME) {
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
vcpu->arch.efer |= EFER_LMA;
svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
}
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
vcpu->arch.efer &= ~EFER_LMA;
svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
}
}
#endif
vcpu->arch.cr0 = cr0;
if (!npt_enabled)
cr0 |= X86_CR0_PG | X86_CR0_WP;
if (!vcpu->fpu_active)
cr0 |= X86_CR0_TS;
/*
* re-enable caching here because the QEMU bios
* does not do it - this results in some delay at
* reboot
*/
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
mark_dirty(svm->vmcb, VMCB_CR);
update_cr0_intercept(svm);
}
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
svm_flush_tlb(vcpu);
vcpu->arch.cr4 = cr4;
if (!npt_enabled)
cr4 |= X86_CR4_PAE;
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_seg *s = svm_seg(vcpu, seg);
s->base = var->base;
s->limit = var->limit;
s->selector = var->selector;
if (var->unusable)
s->attrib = 0;
else {
s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
}
if (seg == VCPU_SREG_CS)
svm->vmcb->save.cpl
= (svm->vmcb->save.cs.attrib
>> SVM_SELECTOR_DPL_SHIFT) & 3;
mark_dirty(svm->vmcb, VMCB_SEG);
}
static void update_db_intercept(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
clr_exception_intercept(svm, DB_VECTOR);
clr_exception_intercept(svm, BP_VECTOR);
if (svm->nmi_singlestep)
set_exception_intercept(svm, DB_VECTOR);
if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
if (vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
set_exception_intercept(svm, DB_VECTOR);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
set_exception_intercept(svm, BP_VECTOR);
} else
vcpu->guest_debug = 0;
}
static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
else
svm->vmcb->save.dr7 = vcpu->arch.dr7;
mark_dirty(svm->vmcb, VMCB_DR);
update_db_intercept(vcpu);
}
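/*
 * Hand out the next free ASID on this physical CPU.  When the ASID space is
 * exhausted, bump the generation counter, request a flush of all ASIDs and
 * restart numbering at 1.
 */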
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
if (sd->next_asid > sd->max_asid) {
++sd->asid_generation;
sd->next_asid = 1;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
svm->asid_generation = sd->asid_generation;
svm->vmcb->control.asid = sd->next_asid++;
mark_dirty(svm->vmcb, VMCB_ASID);
}
static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.dr7 = value;
mark_dirty(svm->vmcb, VMCB_DR);
}
static int pf_interception(struct vcpu_svm *svm)
{
u64 fault_address = svm->vmcb->control.exit_info_2;
u32 error_code;
int r = 1;
switch (svm->apf_reason) {
default:
error_code = svm->vmcb->control.exit_info_1;
trace_kvm_page_fault(fault_address, error_code);
if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
svm->vmcb->control.insn_bytes,
svm->vmcb->control.insn_len);
break;
case KVM_PV_REASON_PAGE_NOT_PRESENT:
svm->apf_reason = 0;
local_irq_disable();
kvm_async_pf_task_wait(fault_address);
local_irq_enable();
break;
case KVM_PV_REASON_PAGE_READY:
svm->apf_reason = 0;
local_irq_disable();
kvm_async_pf_task_wake(fault_address);
local_irq_enable();
break;
}
return r;
}
static int db_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
if (!(svm->vcpu.guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
!svm->nmi_singlestep) {
kvm_queue_exception(&svm->vcpu, DB_VECTOR);
return 1;
}
if (svm->nmi_singlestep) {
svm->nmi_singlestep = false;
if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
svm->vmcb->save.rflags &=
~(X86_EFLAGS_TF | X86_EFLAGS_RF);
update_db_intercept(&svm->vcpu);
}
if (svm->vcpu.guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc =
svm->vmcb->save.cs.base + svm->vmcb->save.rip;
kvm_run->debug.arch.exception = DB_VECTOR;
return 0;
}
return 1;
}
static int bp_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
kvm_run->debug.arch.exception = BP_VECTOR;
return 0;
}
static int ud_interception(struct vcpu_svm *svm)
{
int er;
er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
clr_exception_intercept(svm, NM_VECTOR);
svm->vcpu.fpu_active = 1;
update_cr0_intercept(svm);
}
static int nm_interception(struct vcpu_svm *svm)
{
svm_fpu_activate(&svm->vcpu);
return 1;
}
static bool is_erratum_383(void)
{
int err, i;
u64 value;
if (!erratum_383_found)
return false;
value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
if (err)
return false;
/* Bit 62 may or may not be set for this mce */
value &= ~(1ULL << 62);
if (value != 0xb600000000010015ULL)
return false;
/* Clear MCi_STATUS registers */
for (i = 0; i < 6; ++i)
native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
if (!err) {
u32 low, high;
value &= ~(1ULL << 2);
low = lower_32_bits(value);
high = upper_32_bits(value);
native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
}
/* Flush tlb to evict multi-match entries */
__flush_tlb_all();
return true;
}
static void svm_handle_mce(struct vcpu_svm *svm)
{
if (is_erratum_383()) {
/*
* Erratum 383 triggered. Guest state is corrupt so kill the
* guest.
*/
pr_err("KVM: Guest triggered AMD Erratum 383\n");
kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
return;
}
/*
* On an #MC intercept the MCE handler is not called automatically in
* the host. So do it by hand here.
*/
asm volatile (
"int $0x12\n");
/* not sure if we ever come back to this point */
return;
}
static int mc_interception(struct vcpu_svm *svm)
{
return 1;
}
static int shutdown_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
/*
* VMCB is undefined after a SHUTDOWN intercept
* so reinitialize it.
*/
clear_page(svm->vmcb);
init_vmcb(svm);
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
return 0;
}
static int io_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
int size, in, string;
unsigned port;
++svm->vcpu.stat.io_exits;
string = (io_info & SVM_IOIO_STR_MASK) != 0;
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
if (string || in)
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
svm->next_rip = svm->vmcb->control.exit_info_2;
skip_emulated_instruction(&svm->vcpu);
return kvm_fast_pio_out(vcpu, size, port);
}
static int nmi_interception(struct vcpu_svm *svm)
{
return 1;
}
static int intr_interception(struct vcpu_svm *svm)
{
++svm->vcpu.stat.irq_exits;
return 1;
}
static int nop_on_interception(struct vcpu_svm *svm)
{
return 1;
}
static int halt_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
skip_emulated_instruction(&svm->vcpu);
return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
kvm_emulate_hypercall(&svm->vcpu);
return 1;
}
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
return svm->nested.nested_cr3;
}
static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.nested_cr3 = root;
mark_dirty(svm->vmcb, VMCB_NPT);
svm_flush_tlb(vcpu);
}
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.exit_code = SVM_EXIT_NPF;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = fault->error_code;
svm->vmcb->control.exit_info_2 = fault->address;
nested_svm_vmexit(svm);
}
static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
int r;
r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
vcpu->arch.mmu.shadow_root_level = get_npt_level();
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
return r;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
if (!(svm->vcpu.arch.efer & EFER_SVME)
|| !is_paging(&svm->vcpu)) {
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
if (svm->vmcb->save.cpl) {
kvm_inject_gp(&svm->vcpu, 0);
return 1;
}
return 0;
}
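/*
 * An exception raised while the nested guest runs may be intercepted by the
 * L1 hypervisor.  Build the corresponding exit information and ask the
 * intercept logic whether a nested #VMEXIT is due; if so, flag it as
 * required so it is emulated before the next guest entry.
 */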
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code)
{
int vmexit;
if (!is_guest_mode(&svm->vcpu))
return 0;
svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = error_code;
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
vmexit = nested_svm_intercept(svm);
if (vmexit == NESTED_EXIT_DONE)
svm->nested.exit_required = true;
return vmexit;
}
/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
if (!is_guest_mode(&svm->vcpu))
return true;
if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
return true;
if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
return false;
/*
* if vmexit was already requested (by intercepted exception
* for instance) do not overwrite it with "external interrupt"
* vmexit.
*/
if (svm->nested.exit_required)
return false;
svm->vmcb->control.exit_code = SVM_EXIT_INTR;
svm->vmcb->control.exit_info_1 = 0;
svm->vmcb->control.exit_info_2 = 0;
if (svm->nested.intercept & 1ULL) {
/*
* The #vmexit can't be emulated here directly because this
		 * code path runs with irqs and preemption disabled. A
* #vmexit emulation might sleep. Only signal request for
* the #vmexit here.
*/
svm->nested.exit_required = true;
trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
return false;
}
return true;
}
/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
if (!is_guest_mode(&svm->vcpu))
return true;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
return true;
svm->vmcb->control.exit_code = SVM_EXIT_NMI;
svm->nested.exit_required = true;
return false;
}
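/*
 * Map a guest-physical page (such as the L1 guest's VMCB or its permission
 * maps) into the host so it can be accessed directly.  The mapping must be
 * released with nested_svm_unmap().
 */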
static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
struct page *page;
might_sleep();
page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
if (is_error_page(page))
goto error;
*_page = page;
return kmap(page);
error:
kvm_release_page_clean(page);
kvm_inject_gp(&svm->vcpu, 0);
return NULL;
}
static void nested_svm_unmap(struct page *page)
{
kunmap(page);
kvm_release_page_dirty(page);
}
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
unsigned port;
u8 val, bit;
u64 gpa;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
return NESTED_EXIT_HOST;
port = svm->vmcb->control.exit_info_1 >> 16;
gpa = svm->nested.vmcb_iopm + (port / 8);
bit = port % 8;
val = 0;
	/* If the guest's IOPM byte cannot be read, treat the port as intercepted. */
	if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
		return NESTED_EXIT_DONE;
	val &= (1 << bit);
	return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
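/*
 * On an intercepted MSR access, consult the L1 guest's MSR permission map to
 * decide whether the access has to be reflected to L1 as a nested #VMEXIT or
 * can be handled on the host side.
 */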
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
u32 offset, msr, value;
int write, mask;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
return NESTED_EXIT_HOST;
msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
offset = svm_msrpm_offset(msr);
write = svm->vmcb->control.exit_info_1 & 1;
mask = 1 << ((2 * (msr & 0xf)) + write);
if (offset == MSR_INVALID)
return NESTED_EXIT_DONE;
	/* Offset is in 32 bit units but we need it in 8 bit units */
offset *= 4;
if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
return NESTED_EXIT_DONE;
return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
static int nested_svm_exit_special(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
switch (exit_code) {
case SVM_EXIT_INTR:
case SVM_EXIT_NMI:
case SVM_EXIT_EXCP_BASE + MC_VECTOR:
return NESTED_EXIT_HOST;
case SVM_EXIT_NPF:
/* For now we are always handling NPFs when using them */
if (npt_enabled)
return NESTED_EXIT_HOST;
break;
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
/* When we're shadowing, trap PFs, but not async PF */
if (!npt_enabled && svm->apf_reason == 0)
return NESTED_EXIT_HOST;
break;
case SVM_EXIT_EXCP_BASE + NM_VECTOR:
nm_interception(svm);
break;
default:
break;
}
return NESTED_EXIT_CONTINUE;
}
/*
 * If this function returns NESTED_EXIT_DONE, this #vmexit needs to be
 * reflected to the L1 hypervisor as a nested #vmexit.
*/
static int nested_svm_intercept(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
int vmexit = NESTED_EXIT_HOST;
switch (exit_code) {
case SVM_EXIT_MSR:
vmexit = nested_svm_exit_handled_msr(svm);
break;
case SVM_EXIT_IOIO:
vmexit = nested_svm_intercept_ioio(svm);
break;
case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
if (svm->nested.intercept_cr & bit)
vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
if (svm->nested.intercept_dr & bit)
vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
if (svm->nested.intercept_exceptions & excp_bits)
vmexit = NESTED_EXIT_DONE;
		/* an async page fault always causes a vmexit */
else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
svm->apf_reason != 0)
vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_ERR: {
vmexit = NESTED_EXIT_DONE;
break;
}
default: {
u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
if (svm->nested.intercept & exit_bits)
vmexit = NESTED_EXIT_DONE;
}
}
return vmexit;
}
static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
int vmexit;
vmexit = nested_svm_intercept(svm);
if (vmexit == NESTED_EXIT_DONE)
nested_svm_vmexit(svm);
return vmexit;
}
static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
struct vmcb_control_area *dst = &dst_vmcb->control;
struct vmcb_control_area *from = &from_vmcb->control;
dst->intercept_cr = from->intercept_cr;
dst->intercept_dr = from->intercept_dr;
dst->intercept_exceptions = from->intercept_exceptions;
dst->intercept = from->intercept;
dst->iopm_base_pa = from->iopm_base_pa;
dst->msrpm_base_pa = from->msrpm_base_pa;
dst->tsc_offset = from->tsc_offset;
dst->asid = from->asid;
dst->tlb_ctl = from->tlb_ctl;
dst->int_ctl = from->int_ctl;
dst->int_vector = from->int_vector;
dst->int_state = from->int_state;
dst->exit_code = from->exit_code;
dst->exit_code_hi = from->exit_code_hi;
dst->exit_info_1 = from->exit_info_1;
dst->exit_info_2 = from->exit_info_2;
dst->exit_int_info = from->exit_int_info;
dst->exit_int_info_err = from->exit_int_info_err;
dst->nested_ctl = from->nested_ctl;
dst->event_inj = from->event_inj;
dst->event_inj_err = from->event_inj_err;
dst->nested_cr3 = from->nested_cr3;
dst->lbr_ctl = from->lbr_ctl;
}
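/*
* Emulate a #VMEXIT to the L1 guest: copy the current (L2) state into
* L1's VMCB, restore the host state saved in hsave, and leave guest mode.
*/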
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
struct page *page;
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1,
vmcb->control.exit_info_2,
vmcb->control.exit_int_info,
vmcb->control.exit_int_info_err);
nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
if (!nested_vmcb)
return 1;
/* Exit Guest-Mode */
leave_guest_mode(&svm->vcpu);
svm->nested.vmcb = 0;
/* Give the current vmcb to the guest */
disable_gif(svm);
nested_vmcb->save.es = vmcb->save.es;
nested_vmcb->save.cs = vmcb->save.cs;
nested_vmcb->save.ss = vmcb->save.ss;
nested_vmcb->save.ds = vmcb->save.ds;
nested_vmcb->save.gdtr = vmcb->save.gdtr;
nested_vmcb->save.idtr = vmcb->save.idtr;
nested_vmcb->save.efer = svm->vcpu.arch.efer;
nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
nested_vmcb->save.cr2 = vmcb->save.cr2;
nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
nested_vmcb->save.rip = vmcb->save.rip;
nested_vmcb->save.rsp = vmcb->save.rsp;
nested_vmcb->save.rax = vmcb->save.rax;
nested_vmcb->save.dr7 = vmcb->save.dr7;
nested_vmcb->save.dr6 = vmcb->save.dr6;
nested_vmcb->save.cpl = vmcb->save.cpl;
nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
nested_vmcb->control.int_vector = vmcb->control.int_vector;
nested_vmcb->control.int_state = vmcb->control.int_state;
nested_vmcb->control.exit_code = vmcb->control.exit_code;
nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
nested_vmcb->control.next_rip = vmcb->control.next_rip;
/*
* If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
* to make sure that we do not lose injected events. So check event_inj
* here and copy it to exit_int_info if it is valid.
* Exit_int_info and event_inj can't both be valid because the case
* below only happens on a VMRUN instruction intercept which has
* no valid exit_int_info set.
*/
if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
struct vmcb_control_area *nc = &nested_vmcb->control;
nc->exit_int_info = vmcb->control.event_inj;
nc->exit_int_info_err = vmcb->control.event_inj_err;
}
nested_vmcb->control.tlb_ctl = 0;
nested_vmcb->control.event_inj = 0;
nested_vmcb->control.event_inj_err = 0;
/* We always set V_INTR_MASKING and remember the old value in hflags */
if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
/* Restore the original control entries */
copy_vmcb_control_area(vmcb, hsave);
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
svm->nested.nested_cr3 = 0;
/* Restore selected save entries */
svm->vmcb->save.es = hsave->save.es;
svm->vmcb->save.cs = hsave->save.cs;
svm->vmcb->save.ss = hsave->save.ss;
svm->vmcb->save.ds = hsave->save.ds;
svm->vmcb->save.gdtr = hsave->save.gdtr;
svm->vmcb->save.idtr = hsave->save.idtr;
kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
svm_set_efer(&svm->vcpu, hsave->save.efer);
svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
svm_set_cr4(&svm->vcpu, hsave->save.cr4);
if (npt_enabled) {
svm->vmcb->save.cr3 = hsave->save.cr3;
svm->vcpu.arch.cr3 = hsave->save.cr3;
} else {
(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
}
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
svm->vmcb->save.dr7 = 0;
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
mark_all_dirty(svm->vmcb);
nested_svm_unmap(page);
nested_svm_uninit_mmu_context(&svm->vcpu);
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
return 0;
}
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
/*
* This function merges the msr permission bitmaps of kvm and the
* nested vmcb. It is optimized in that it only merges the parts where
* the kvm msr permission bitmap may contain zero bits
*/
int i;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
return true;
for (i = 0; i < MSRPM_OFFSETS; i++) {
u32 value, p;
u64 offset;
if (msrpm_offsets[i] == 0xffffffff)
break;
p = msrpm_offsets[i];
offset = svm->nested.vmcb_msrpm + (p * 4);
if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
return false;
svm->nested.msrpm[p] = svm->msrpm[p] | value;
}
svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
return true;
}
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
return false;
if (vmcb->control.asid == 0)
return false;
if (vmcb->control.nested_ctl && !npt_enabled)
return false;
return true;
}
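/*
* Emulate VMRUN for the L1 guest: map and sanity-check the nested
* VMCB, save the current host state to hsave, load the L2 guest state
* and intercepts, and enter guest mode.
*/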
static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
struct page *page;
u64 vmcb_gpa;
vmcb_gpa = svm->vmcb->save.rax;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return false;
if (!nested_vmcb_checks(nested_vmcb)) {
nested_vmcb->control.exit_code = SVM_EXIT_ERR;
nested_vmcb->control.exit_code_hi = 0;
nested_vmcb->control.exit_info_1 = 0;
nested_vmcb->control.exit_info_2 = 0;
nested_svm_unmap(page);
return false;
}
trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
nested_vmcb->save.rip,
nested_vmcb->control.int_ctl,
nested_vmcb->control.event_inj,
nested_vmcb->control.nested_ctl);
trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
nested_vmcb->control.intercept_cr >> 16,
nested_vmcb->control.intercept_exceptions,
nested_vmcb->control.intercept);
/* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
/*
* Save the old vmcb, so we don't need to pick what we save, but can
* restore everything when a VMEXIT occurs
*/
hsave->save.es = vmcb->save.es;
hsave->save.cs = vmcb->save.cs;
hsave->save.ss = vmcb->save.ss;
hsave->save.ds = vmcb->save.ds;
hsave->save.gdtr = vmcb->save.gdtr;
hsave->save.idtr = vmcb->save.idtr;
hsave->save.efer = svm->vcpu.arch.efer;
hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
hsave->save.cr4 = svm->vcpu.arch.cr4;
hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
hsave->save.rip = kvm_rip_read(&svm->vcpu);
hsave->save.rsp = vmcb->save.rsp;
hsave->save.rax = vmcb->save.rax;
if (npt_enabled)
hsave->save.cr3 = vmcb->save.cr3;
else
hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
copy_vmcb_control_area(hsave, vmcb);
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
else
svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
if (nested_vmcb->control.nested_ctl) {
kvm_mmu_unload(&svm->vcpu);
svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
nested_svm_init_mmu_context(&svm->vcpu);
}
/* Load the nested guest state */
svm->vmcb->save.es = nested_vmcb->save.es;
svm->vmcb->save.cs = nested_vmcb->save.cs;
svm->vmcb->save.ss = nested_vmcb->save.ss;
svm->vmcb->save.ds = nested_vmcb->save.ds;
svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
svm->vmcb->save.idtr = nested_vmcb->save.idtr;
kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
if (npt_enabled) {
svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
} else
(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
/* Guest paging mode is active - reset mmu */
kvm_mmu_reset_context(&svm->vcpu);
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
svm->vmcb->save.rip = nested_vmcb->save.rip;
svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
/* cache intercepts */
svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
svm->nested.intercept = nested_vmcb->control.intercept;
svm_flush_tlb(&svm->vcpu);
svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
svm->vcpu.arch.hflags |= HF_VINTR_MASK;
else
svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
/* We only want the cr8 intercept bits of the guest */
clr_cr_intercept(svm, INTERCEPT_CR8_READ);
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
/* We don't want to see VMMCALLs from a nested guest */
clr_intercept(svm, INTERCEPT_VMMCALL);
svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
nested_svm_unmap(page);
/* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu);
/*
* Merge guest and host intercepts - must be called with vcpu in
* guest-mode to take effect here
*/
recalc_intercepts(svm);
svm->nested.vmcb = vmcb_gpa;
enable_gif(svm);
mark_all_dirty(svm->vmcb);
return true;
}
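/*
* Copy the processor state handled by VMLOAD/VMSAVE (FS/GS/TR/LDTR
* plus the syscall and sysenter MSR state) between two VMCBs.
*/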
static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
to_vmcb->save.fs = from_vmcb->save.fs;
to_vmcb->save.gs = from_vmcb->save.gs;
to_vmcb->save.tr = from_vmcb->save.tr;
to_vmcb->save.ldtr = from_vmcb->save.ldtr;
to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
to_vmcb->save.star = from_vmcb->save.star;
to_vmcb->save.lstar = from_vmcb->save.lstar;
to_vmcb->save.cstar = from_vmcb->save.cstar;
to_vmcb->save.sfmask = from_vmcb->save.sfmask;
to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct page *page;
if (nested_svm_check_permissions(svm))
return 1;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
nested_svm_unmap(page);
return 1;
}
static int vmsave_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct page *page;
if (nested_svm_check_permissions(svm))
return 1;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
nested_svm_unmap(page);
return 1;
}
static int vmrun_interception(struct vcpu_svm *svm)
{
if (nested_svm_check_permissions(svm))
return 1;
/* Save rip after vmrun instruction */
kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
if (!nested_svm_vmrun(svm))
return 1;
if (!nested_svm_vmrun_msrpm(svm))
goto failed;
return 1;
failed:
svm->vmcb->control.exit_code = SVM_EXIT_ERR;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = 0;
svm->vmcb->control.exit_info_2 = 0;
nested_svm_vmexit(svm);
return 1;
}
static int stgi_interception(struct vcpu_svm *svm)
{
if (nested_svm_check_permissions(svm))
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
enable_gif(svm);
return 1;
}
static int clgi_interception(struct vcpu_svm *svm)
{
if (nested_svm_check_permissions(svm))
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
disable_gif(svm);
/* After a CLGI no interrupts should come */
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
mark_dirty(svm->vmcb, VMCB_INTR);
return 1;
}
static int invlpga_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
vcpu->arch.regs[VCPU_REGS_RAX]);
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
return 1;
}
static int skinit_interception(struct vcpu_svm *svm)
{
trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
static int xsetbv_interception(struct vcpu_svm *svm)
{
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
static int invalid_op_interception(struct vcpu_svm *svm)
{
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
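/*
* Decode the task-switch exit information (TSS selector, switch reason
* and any pending event) and forward it to the common emulation in
* kvm_task_switch().
*/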
static int task_switch_interception(struct vcpu_svm *svm)
{
u16 tss_selector;
int reason;
int int_type = svm->vmcb->control.exit_int_info &
SVM_EXITINTINFO_TYPE_MASK;
int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
uint32_t type =
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
uint32_t idt_v =
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
bool has_error_code = false;
u32 error_code = 0;
tss_selector = (u16)svm->vmcb->control.exit_info_1;
if (svm->vmcb->control.exit_info_2 &
(1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
reason = TASK_SWITCH_IRET;
else if (svm->vmcb->control.exit_info_2 &
(1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
reason = TASK_SWITCH_JMP;
else if (idt_v)
reason = TASK_SWITCH_GATE;
else
reason = TASK_SWITCH_CALL;
if (reason == TASK_SWITCH_GATE) {
switch (type) {
case SVM_EXITINTINFO_TYPE_NMI:
svm->vcpu.arch.nmi_injected = false;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
if (svm->vmcb->control.exit_info_2 &
(1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
has_error_code = true;
error_code =
(u32)svm->vmcb->control.exit_info_2;
}
kvm_clear_exception_queue(&svm->vcpu);
break;
case SVM_EXITINTINFO_TYPE_INTR:
kvm_clear_interrupt_queue(&svm->vcpu);
break;
default:
break;
}
}
if (reason != TASK_SWITCH_GATE ||
int_type == SVM_EXITINTINFO_TYPE_SOFT ||
(int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
(int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
skip_emulated_instruction(&svm->vcpu);
if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
has_error_code, error_code) == EMULATE_FAIL) {
svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
svm->vcpu.run->internal.ndata = 0;
return 0;
}
return 1;
}
static int cpuid_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
kvm_emulate_cpuid(&svm->vcpu);
return 1;
}
static int iret_interception(struct vcpu_svm *svm)
{
++svm->vcpu.stat.nmi_window_exits;
clr_intercept(svm, INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK;
svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
return 1;
}
static int invlpg_interception(struct vcpu_svm *svm)
{
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
skip_emulated_instruction(&svm->vcpu);
return 1;
}
static int emulate_on_interception(struct vcpu_svm *svm)
{
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
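/*
* For a CR0 write while running nested: if L1 uses the selective CR0
* intercept and the write changes bits outside SVM_CR0_SELECTIVE_MASK,
* reflect a CR0_SEL_WRITE #VMEXIT to L1 and report that the write was
* intercepted.
*/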
bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
{
unsigned long cr0 = svm->vcpu.arch.cr0;
bool ret = false;
u64 intercept;
intercept = svm->nested.intercept;
if (!is_guest_mode(&svm->vcpu) ||
(!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
return false;
cr0 &= ~SVM_CR0_SELECTIVE_MASK;
val &= ~SVM_CR0_SELECTIVE_MASK;
if (cr0 ^ val) {
svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
}
return ret;
}
#define CR_VALID (1ULL << 63)
static int cr_interception(struct vcpu_svm *svm)
{
int reg, cr;
unsigned long val;
int err;
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
return emulate_on_interception(svm);
if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
return emulate_on_interception(svm);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
err = 0;
if (cr >= 16) { /* mov to cr */
cr -= 16;
val = kvm_register_read(&svm->vcpu, reg);
switch (cr) {
case 0:
if (!check_selective_cr0_intercepted(svm, val))
err = kvm_set_cr0(&svm->vcpu, val);
else
return 1;
break;
case 3:
err = kvm_set_cr3(&svm->vcpu, val);
break;
case 4:
err = kvm_set_cr4(&svm->vcpu, val);
break;
case 8:
err = kvm_set_cr8(&svm->vcpu, val);
break;
default:
WARN(1, "unhandled write to CR%d", cr);
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
} else { /* mov from cr */
switch (cr) {
case 0:
val = kvm_read_cr0(&svm->vcpu);
break;
case 2:
val = svm->vcpu.arch.cr2;
break;
case 3:
val = kvm_read_cr3(&svm->vcpu);
break;
case 4:
val = kvm_read_cr4(&svm->vcpu);
break;
case 8:
val = kvm_get_cr8(&svm->vcpu);
break;
default:
WARN(1, "unhandled read from CR%d", cr);
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
kvm_register_write(&svm->vcpu, reg, val);
}
kvm_complete_insn_gp(&svm->vcpu, err);
return 1;
}
static int dr_interception(struct vcpu_svm *svm)
{
int reg, dr;
unsigned long val;
int err;
if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
return emulate_on_interception(svm);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
if (dr >= 16) { /* mov to DRn */
val = kvm_register_read(&svm->vcpu, reg);
kvm_set_dr(&svm->vcpu, dr - 16, val);
} else {
err = kvm_get_dr(&svm->vcpu, dr, &val);
if (!err)
kvm_register_write(&svm->vcpu, reg, val);
}
skip_emulated_instruction(&svm->vcpu);
return 1;
}
static int cr8_write_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
int r;
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(svm);
if (irqchip_in_kernel(svm->vcpu.kvm)) {
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
return r;
}
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
return r;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
struct vcpu_svm *svm = to_svm(vcpu);
switch (ecx) {
case MSR_IA32_TSC: {
struct vmcb *vmcb = get_host_vmcb(svm);
*data = vmcb->control.tsc_offset +
svm_scale_tsc(vcpu, native_read_tsc());
break;
}
case MSR_STAR:
*data = svm->vmcb->save.star;
break;
#ifdef CONFIG_X86_64
case MSR_LSTAR:
*data = svm->vmcb->save.lstar;
break;
case MSR_CSTAR:
*data = svm->vmcb->save.cstar;
break;
case MSR_KERNEL_GS_BASE:
*data = svm->vmcb->save.kernel_gs_base;
break;
case MSR_SYSCALL_MASK:
*data = svm->vmcb->save.sfmask;
break;
#endif
case MSR_IA32_SYSENTER_CS:
*data = svm->vmcb->save.sysenter_cs;
break;
case MSR_IA32_SYSENTER_EIP:
*data = svm->sysenter_eip;
break;
case MSR_IA32_SYSENTER_ESP:
*data = svm->sysenter_esp;
break;
/*
* Nobody will change the following 5 values in the VMCB so we can
* safely return them on rdmsr. They will always be 0 until LBRV is
* implemented.
*/
case MSR_IA32_DEBUGCTLMSR:
*data = svm->vmcb->save.dbgctl;
break;
case MSR_IA32_LASTBRANCHFROMIP:
*data = svm->vmcb->save.br_from;
break;
case MSR_IA32_LASTBRANCHTOIP:
*data = svm->vmcb->save.br_to;
break;
case MSR_IA32_LASTINTFROMIP:
*data = svm->vmcb->save.last_excp_from;
break;
case MSR_IA32_LASTINTTOIP:
*data = svm->vmcb->save.last_excp_to;
break;
case MSR_VM_HSAVE_PA:
*data = svm->nested.hsave_msr;
break;
case MSR_VM_CR:
*data = svm->nested.vm_cr_msr;
break;
case MSR_IA32_UCODE_REV:
*data = 0x01000065;
break;
default:
return kvm_get_msr_common(vcpu, ecx, data);
}
return 0;
}
static int rdmsr_interception(struct vcpu_svm *svm)
{
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u64 data;
if (svm_get_msr(&svm->vcpu, ecx, &data)) {
trace_kvm_msr_read_ex(ecx);
kvm_inject_gp(&svm->vcpu, 0);
} else {
trace_kvm_msr_read(ecx, data);
svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
struct vcpu_svm *svm = to_svm(vcpu);
int svm_dis, chg_mask;
if (data & ~SVM_VM_CR_VALID_MASK)
return 1;
chg_mask = SVM_VM_CR_VALID_MASK;
if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
svm->nested.vm_cr_msr &= ~chg_mask;
svm->nested.vm_cr_msr |= (data & chg_mask);
svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
/* check for svm_disable while efer.svme is set */
if (svm_dis && (vcpu->arch.efer & EFER_SVME))
return 1;
return 0;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
struct vcpu_svm *svm = to_svm(vcpu);
switch (ecx) {
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, data);
break;
case MSR_STAR:
svm->vmcb->save.star = data;
break;
#ifdef CONFIG_X86_64
case MSR_LSTAR:
svm->vmcb->save.lstar = data;
break;
case MSR_CSTAR:
svm->vmcb->save.cstar = data;
break;
case MSR_KERNEL_GS_BASE:
svm->vmcb->save.kernel_gs_base = data;
break;
case MSR_SYSCALL_MASK:
svm->vmcb->save.sfmask = data;
break;
#endif
case MSR_IA32_SYSENTER_CS:
svm->vmcb->save.sysenter_cs = data;
break;
case MSR_IA32_SYSENTER_EIP:
svm->sysenter_eip = data;
svm->vmcb->save.sysenter_eip = data;
break;
case MSR_IA32_SYSENTER_ESP:
svm->sysenter_esp = data;
svm->vmcb->save.sysenter_esp = data;
break;
case MSR_IA32_DEBUGCTLMSR:
if (!boot_cpu_has(X86_FEATURE_LBRV)) {
pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
__func__, data);
break;
}
if (data & DEBUGCTL_RESERVED_BITS)
return 1;
svm->vmcb->save.dbgctl = data;
mark_dirty(svm->vmcb, VMCB_LBR);
if (data & (1ULL<<0))
svm_enable_lbrv(svm);
else
svm_disable_lbrv(svm);
break;
case MSR_VM_HSAVE_PA:
svm->nested.hsave_msr = data;
break;
case MSR_VM_CR:
return svm_set_vm_cr(vcpu, data);
case MSR_VM_IGNNE:
pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
default:
return kvm_set_msr_common(vcpu, ecx, data);
}
return 0;
}
static int wrmsr_interception(struct vcpu_svm *svm)
{
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
if (svm_set_msr(&svm->vcpu, ecx, data)) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(&svm->vcpu, 0);
} else {
trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
static int msr_interception(struct vcpu_svm *svm)
{
if (svm->vmcb->control.exit_info_1)
return wrmsr_interception(svm);
else
return rdmsr_interception(svm);
}
static int interrupt_window_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
mark_dirty(svm->vmcb, VMCB_INTR);
/*
* If the user space waits to inject interrupts, exit as soon as
* possible
*/
if (!irqchip_in_kernel(svm->vcpu.kvm) &&
kvm_run->request_interrupt_window &&
!kvm_cpu_has_interrupt(&svm->vcpu)) {
++svm->vcpu.stat.irq_window_exits;
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
}
return 1;
}
static int pause_interception(struct vcpu_svm *svm)
{
kvm_vcpu_on_spin(&(svm->vcpu));
return 1;
}
static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR0] = cr_interception,
[SVM_EXIT_READ_CR3] = cr_interception,
[SVM_EXIT_READ_CR4] = cr_interception,
[SVM_EXIT_READ_CR8] = cr_interception,
[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
[SVM_EXIT_WRITE_CR0] = cr_interception,
[SVM_EXIT_WRITE_CR3] = cr_interception,
[SVM_EXIT_WRITE_CR4] = cr_interception,
[SVM_EXIT_WRITE_CR8] = cr8_write_interception,
[SVM_EXIT_READ_DR0] = dr_interception,
[SVM_EXIT_READ_DR1] = dr_interception,
[SVM_EXIT_READ_DR2] = dr_interception,
[SVM_EXIT_READ_DR3] = dr_interception,
[SVM_EXIT_READ_DR4] = dr_interception,
[SVM_EXIT_READ_DR5] = dr_interception,
[SVM_EXIT_READ_DR6] = dr_interception,
[SVM_EXIT_READ_DR7] = dr_interception,
[SVM_EXIT_WRITE_DR0] = dr_interception,
[SVM_EXIT_WRITE_DR1] = dr_interception,
[SVM_EXIT_WRITE_DR2] = dr_interception,
[SVM_EXIT_WRITE_DR3] = dr_interception,
[SVM_EXIT_WRITE_DR4] = dr_interception,
[SVM_EXIT_WRITE_DR5] = dr_interception,
[SVM_EXIT_WRITE_DR6] = dr_interception,
[SVM_EXIT_WRITE_DR7] = dr_interception,
[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
[SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
[SVM_EXIT_PAUSE] = pause_interception,
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
[SVM_EXIT_INVLPGA] = invlpga_interception,
[SVM_EXIT_IOIO] = io_interception,
[SVM_EXIT_MSR] = msr_interception,
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
[SVM_EXIT_VMRUN] = vmrun_interception,
[SVM_EXIT_VMMCALL] = vmmcall_interception,
[SVM_EXIT_VMLOAD] = vmload_interception,
[SVM_EXIT_VMSAVE] = vmsave_interception,
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
[SVM_EXIT_WBINVD] = emulate_on_interception,
[SVM_EXIT_MONITOR] = invalid_op_interception,
[SVM_EXIT_MWAIT] = invalid_op_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception,
[SVM_EXIT_NPF] = pf_interception,
};
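/* Dump the control and save areas of the current VMCB to the kernel log. */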
static void dump_vmcb(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
pr_err("VMCB Control Area:\n");
pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
pr_err("%-20s%d\n", "asid:", control->asid);
pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
pr_err("%-20s%08x\n", "int_state:", control->int_state);
pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
pr_err("VMCB State Save Area:\n");
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"es:",
save->es.selector, save->es.attrib,
save->es.limit, save->es.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"cs:",
save->cs.selector, save->cs.attrib,
save->cs.limit, save->cs.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"ss:",
save->ss.selector, save->ss.attrib,
save->ss.limit, save->ss.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"ds:",
save->ds.selector, save->ds.attrib,
save->ds.limit, save->ds.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"fs:",
save->fs.selector, save->fs.attrib,
save->fs.limit, save->fs.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"gs:",
save->gs.selector, save->gs.attrib,
save->gs.limit, save->gs.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"gdtr:",
save->gdtr.selector, save->gdtr.attrib,
save->gdtr.limit, save->gdtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"ldtr:",
save->ldtr.selector, save->ldtr.attrib,
save->ldtr.limit, save->ldtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"idtr:",
save->idtr.selector, save->idtr.attrib,
save->idtr.limit, save->idtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"tr:",
save->tr.selector, save->tr.attrib,
save->tr.limit, save->tr.base);
pr_err("cpl: %d efer: %016llx\n",
save->cpl, save->efer);
pr_err("%-15s %016llx %-13s %016llx\n",
"cr0:", save->cr0, "cr2:", save->cr2);
pr_err("%-15s %016llx %-13s %016llx\n",
"cr3:", save->cr3, "cr4:", save->cr4);
pr_err("%-15s %016llx %-13s %016llx\n",
"dr6:", save->dr6, "dr7:", save->dr7);
pr_err("%-15s %016llx %-13s %016llx\n",
"rip:", save->rip, "rflags:", save->rflags);
pr_err("%-15s %016llx %-13s %016llx\n",
"rsp:", save->rsp, "rax:", save->rax);
pr_err("%-15s %016llx %-13s %016llx\n",
"star:", save->star, "lstar:", save->lstar);
pr_err("%-15s %016llx %-13s %016llx\n",
"cstar:", save->cstar, "sfmask:", save->sfmask);
pr_err("%-15s %016llx %-13s %016llx\n",
"kernel_gs_base:", save->kernel_gs_base,
"sysenter_cs:", save->sysenter_cs);
pr_err("%-15s %016llx %-13s %016llx\n",
"sysenter_esp:", save->sysenter_esp,
"sysenter_eip:", save->sysenter_eip);
pr_err("%-15s %016llx %-13s %016llx\n",
"gpat:", save->g_pat, "dbgctl:", save->dbgctl);
pr_err("%-15s %016llx %-13s %016llx\n",
"br_from:", save->br_from, "br_to:", save->br_to);
pr_err("%-15s %016llx %-13s %016llx\n",
"excp_from:", save->last_excp_from,
"excp_to:", save->last_excp_to);
}
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
*info1 = control->exit_info_1;
*info2 = control->exit_info_2;
}
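/*
* Top-level #VMEXIT handler: give a nested (L1) guest the chance to
* handle the exit first, then dispatch to the matching entry in
* svm_exit_handlers[].
*/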
static int handle_exit(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 exit_code = svm->vmcb->control.exit_code;
trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
vcpu->arch.cr0 = svm->vmcb->save.cr0;
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
if (unlikely(svm->nested.exit_required)) {
nested_svm_vmexit(svm);
svm->nested.exit_required = false;
return 1;
}
if (is_guest_mode(vcpu)) {
int vmexit;
trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
svm->vmcb->control.exit_info_1,
svm->vmcb->control.exit_info_2,
svm->vmcb->control.exit_int_info,
svm->vmcb->control.exit_int_info_err);
vmexit = nested_svm_exit_special(svm);
if (vmexit == NESTED_EXIT_CONTINUE)
vmexit = nested_svm_exit_handled(svm);
if (vmexit == NESTED_EXIT_DONE)
return 1;
}
svm_complete_interrupts(svm);
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= svm->vmcb->control.exit_code;
pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
dump_vmcb(vcpu);
return 0;
}
if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
"exit_code 0x%x\n",
__func__, svm->vmcb->control.exit_int_info,
exit_code);
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| !svm_exit_handlers[exit_code]) {
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
kvm_run->hw.hardware_exit_reason = exit_code;
return 0;
}
return svm_exit_handlers[exit_code](svm);
}
static void reload_tss(struct kvm_vcpu *vcpu)
{
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
sd->tss_desc->type = 9; /* available 32/64-bit TSS */
load_TR_desc();
}
static void pre_svm_run(struct vcpu_svm *svm)
{
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
/* FIXME: handle wraparound of asid_generation */
if (svm->asid_generation != sd->asid_generation)
new_asid(svm, sd);
}
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
set_intercept(svm, INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
}
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
struct vmcb_control_area *control;
control = &svm->vmcb->control;
control->int_vector = irq;
control->int_ctl &= ~V_INTR_PRIO_MASK;
control->int_ctl |= V_IRQ_MASK |
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
mark_dirty(svm->vmcb, VMCB_INTR);
}
static void svm_set_irq(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
BUG_ON(!(gif_set(svm)));
trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
++vcpu->stat.irq_injections;
svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
if (irr == -1)
return;
if (tpr >= irr)
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
int ret;
ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
!(svm->vcpu.arch.hflags & HF_NMI_MASK);
ret = ret && gif_set(svm) && nested_svm_nmi(svm);
return ret;
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}
static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (masked) {
svm->vcpu.arch.hflags |= HF_NMI_MASK;
set_intercept(svm, INTERCEPT_IRET);
} else {
svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
clr_intercept(svm, INTERCEPT_IRET);
}
}
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
int ret;
if (!gif_set(svm) ||
(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
return 0;
ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
if (is_guest_mode(vcpu))
return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
return ret;
}
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
/*
* In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
* 1, because that's a separate STGI/VMRUN intercept. The next time we
* get that intercept, this function will be called again though and
* we'll get the vintr intercept.
*/
if (gif_set(svm) && nested_svm_intr(svm)) {
svm_set_vintr(svm);
svm_inject_irq(svm, 0x0);
}
}
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
== HF_NMI_MASK)
return; /* IRET will cause a vm exit */
/*
* Something prevents the NMI from being injected. Single-step over
* the possible problem (IRET, exception injection or interrupt shadow).
*/
svm->nmi_singlestep = true;
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
update_db_intercept(vcpu);
}
static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
return 0;
}
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
else
svm->asid_generation--;
}
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
kvm_set_cr8(vcpu, cr8);
}
}
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr8;
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
cr8 = kvm_get_cr8(vcpu);
svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
u8 vector;
int type;
u32 exitintinfo = svm->vmcb->control.exit_int_info;
unsigned int3_injected = svm->int3_injected;
svm->int3_injected = 0;
/*
* If we've made progress since setting HF_IRET_MASK, we've
* executed an IRET and can allow NMI injection.
*/
if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
&& kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
}
svm->vcpu.arch.nmi_injected = false;
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
if (!(exitintinfo & SVM_EXITINTINFO_VALID))
return;
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
switch (type) {
case SVM_EXITINTINFO_TYPE_NMI:
svm->vcpu.arch.nmi_injected = true;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
/*
* In case of software exceptions, do not reinject the vector,
* but re-execute the instruction instead. Rewind RIP first
* if we emulated INT3 before.
*/
if (kvm_exception_is_soft(vector)) {
if (vector == BP_VECTOR && int3_injected &&
kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
kvm_rip_write(&svm->vcpu,
kvm_rip_read(&svm->vcpu) -
int3_injected);
break;
}
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
u32 err = svm->vmcb->control.exit_int_info_err;
kvm_requeue_exception_e(&svm->vcpu, vector, err);
} else
kvm_requeue_exception(&svm->vcpu, vector);
break;
case SVM_EXITINTINFO_TYPE_INTR:
kvm_queue_interrupt(&svm->vcpu, vector, false);
break;
default:
break;
}
}
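/*
* Called when a prepared event injection has to be cancelled (the
* entry was aborted): move event_inj back into exit_int_info so
* svm_complete_interrupts() can re-queue the event.
*/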
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
control->exit_int_info = control->event_inj;
control->exit_int_info_err = control->event_inj_err;
control->event_inj = 0;
svm_complete_interrupts(svm);
}
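/* Register-name prefix for the inline assembly below: "r" on 64-bit, "e" on 32-bit. */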
#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
/*
* A vmexit emulation is required before the vcpu can be executed
* again.
*/
if (unlikely(svm->nested.exit_required))
return;
pre_svm_run(svm);
sync_lapic_to_cr8(vcpu);
svm->vmcb->save.cr2 = vcpu->arch.cr2;
clgi();
local_irq_enable();
asm volatile (
"push %%"R"bp; \n\t"
"mov %c[rbx](%[svm]), %%"R"bx \n\t"
"mov %c[rcx](%[svm]), %%"R"cx \n\t"
"mov %c[rdx](%[svm]), %%"R"dx \n\t"
"mov %c[rsi](%[svm]), %%"R"si \n\t"
"mov %c[rdi](%[svm]), %%"R"di \n\t"
"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
"mov %c[r8](%[svm]), %%r8 \n\t"
"mov %c[r9](%[svm]), %%r9 \n\t"
"mov %c[r10](%[svm]), %%r10 \n\t"
"mov %c[r11](%[svm]), %%r11 \n\t"
"mov %c[r12](%[svm]), %%r12 \n\t"
"mov %c[r13](%[svm]), %%r13 \n\t"
"mov %c[r14](%[svm]), %%r14 \n\t"
"mov %c[r15](%[svm]), %%r15 \n\t"
#endif
/* Enter guest mode */
"push %%"R"ax \n\t"
"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
__ex(SVM_VMLOAD) "\n\t"
__ex(SVM_VMRUN) "\n\t"
__ex(SVM_VMSAVE) "\n\t"
"pop %%"R"ax \n\t"
/* Save guest registers, load host registers */
"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
"mov %%"R"si, %c[rsi](%[svm]) \n\t"
"mov %%"R"di, %c[rdi](%[svm]) \n\t"
"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
"mov %%r8, %c[r8](%[svm]) \n\t"
"mov %%r9, %c[r9](%[svm]) \n\t"
"mov %%r10, %c[r10](%[svm]) \n\t"
"mov %%r11, %c[r11](%[svm]) \n\t"
"mov %%r12, %c[r12](%[svm]) \n\t"
"mov %%r13, %c[r13](%[svm]) \n\t"
"mov %%r14, %c[r14](%[svm]) \n\t"
"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
"pop %%"R"bp"
:
: [svm]"a"(svm),
[vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
[rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
[rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
[rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
[rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
[rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
[rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
[r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
[r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
[r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
[r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
[r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
[r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
[r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
: "cc", "memory"
, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
);
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
loadsegment(gs, svm->host.gs);
#endif
#endif
reload_tss(vcpu);
local_irq_disable();
vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_handle_nmi(&svm->vcpu);
stgi();
/* Any pending NMI will happen here */
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_handle_nmi(&svm->vcpu);
sync_cr8_to_lapic(vcpu);
svm->next_rip = 0;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
svm->apf_reason = kvm_read_and_reset_pf_reason();
if (npt_enabled) {
vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
}
/*
* We need to handle MC intercepts here before the vcpu has a chance to
* change the physical cpu
*/
if (unlikely(svm->vmcb->control.exit_code ==
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
mark_all_clean(svm->vmcb);
}
#undef R
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.cr3 = root;
mark_dirty(svm->vmcb, VMCB_CR);
svm_flush_tlb(vcpu);
}
static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.nested_cr3 = root;
mark_dirty(svm->vmcb, VMCB_NPT);
/* Also sync guest cr3 here in case we live migrate */
svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
mark_dirty(svm->vmcb, VMCB_CR);
svm_flush_tlb(vcpu);
}
static int is_disabled(void)
{
u64 vm_cr;
rdmsrl(MSR_VM_CR, vm_cr);
if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
return 1;
return 0;
}
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
/*
* Patch in the VMMCALL instruction:
*/
hypercall[0] = 0x0f;
hypercall[1] = 0x01;
hypercall[2] = 0xd9;
}
static void svm_check_processor_compat(void *rtn)
{
*(int *)rtn = 0;
}
static bool svm_cpu_has_accelerated_tpr(void)
{
return false;
}
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
return 0;
}
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
switch (func) {
case 0x80000001:
if (nested)
entry->ecx |= (1 << 2); /* Set SVM bit */
break;
case 0x8000000A:
entry->eax = 1; /* SVM revision 1 */
entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
ASID emulation to nested SVM */
entry->ecx = 0; /* Reserved */
entry->edx = 0; /* Per default do not support any
additional features */
/* Support next_rip if host supports it */
if (boot_cpu_has(X86_FEATURE_NRIPS))
entry->edx |= SVM_FEATURE_NRIP;
/* Support NPT for the guest if enabled */
if (npt_enabled)
entry->edx |= SVM_FEATURE_NPT;
break;
}
}
static const struct trace_print_flags svm_exit_reasons_str[] = {
{ SVM_EXIT_READ_CR0, "read_cr0" },
{ SVM_EXIT_READ_CR3, "read_cr3" },
{ SVM_EXIT_READ_CR4, "read_cr4" },
{ SVM_EXIT_READ_CR8, "read_cr8" },
{ SVM_EXIT_WRITE_CR0, "write_cr0" },
{ SVM_EXIT_WRITE_CR3, "write_cr3" },
{ SVM_EXIT_WRITE_CR4, "write_cr4" },
{ SVM_EXIT_WRITE_CR8, "write_cr8" },
{ SVM_EXIT_READ_DR0, "read_dr0" },
{ SVM_EXIT_READ_DR1, "read_dr1" },
{ SVM_EXIT_READ_DR2, "read_dr2" },
{ SVM_EXIT_READ_DR3, "read_dr3" },
{ SVM_EXIT_WRITE_DR0, "write_dr0" },
{ SVM_EXIT_WRITE_DR1, "write_dr1" },
{ SVM_EXIT_WRITE_DR2, "write_dr2" },
{ SVM_EXIT_WRITE_DR3, "write_dr3" },
{ SVM_EXIT_WRITE_DR5, "write_dr5" },
{ SVM_EXIT_WRITE_DR7, "write_dr7" },
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
{ SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
{ SVM_EXIT_INTR, "interrupt" },
{ SVM_EXIT_NMI, "nmi" },
{ SVM_EXIT_SMI, "smi" },
{ SVM_EXIT_INIT, "init" },
{ SVM_EXIT_VINTR, "vintr" },
{ SVM_EXIT_CPUID, "cpuid" },
{ SVM_EXIT_INVD, "invd" },
{ SVM_EXIT_HLT, "hlt" },
{ SVM_EXIT_INVLPG, "invlpg" },
{ SVM_EXIT_INVLPGA, "invlpga" },
{ SVM_EXIT_IOIO, "io" },
{ SVM_EXIT_MSR, "msr" },
{ SVM_EXIT_TASK_SWITCH, "task_switch" },
{ SVM_EXIT_SHUTDOWN, "shutdown" },
{ SVM_EXIT_VMRUN, "vmrun" },
{ SVM_EXIT_VMMCALL, "hypercall" },
{ SVM_EXIT_VMLOAD, "vmload" },
{ SVM_EXIT_VMSAVE, "vmsave" },
{ SVM_EXIT_STGI, "stgi" },
{ SVM_EXIT_CLGI, "clgi" },
{ SVM_EXIT_SKINIT, "skinit" },
{ SVM_EXIT_WBINVD, "wbinvd" },
{ SVM_EXIT_MONITOR, "monitor" },
{ SVM_EXIT_MWAIT, "mwait" },
{ SVM_EXIT_XSETBV, "xsetbv" },
{ SVM_EXIT_NPF, "npf" },
{ -1, NULL }
};
static int svm_get_lpage_level(void)
{
return PT_PDPE_LEVEL;
}
static bool svm_rdtscp_supported(void)
{
return false;
}
static bool svm_has_wbinvd_exit(void)
{
return true;
}
static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
set_exception_intercept(svm, NM_VECTOR);
update_cr0_intercept(svm);
}
#define PRE_EX(exit) { .exit_code = (exit), \
.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
.stage = X86_ICPT_POST_MEMACCESS, }
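/*
* Map the generic x86 emulator intercept codes to the corresponding
* SVM exit code and the emulation stage at which the intercept check
* is performed.
*/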
static struct __x86_intercept {
u32 exit_code;
enum x86_intercept_stage stage;
} x86_intercept_map[] = {
[x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
[x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
[x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
[x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
[x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
[x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
[x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
[x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
[x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
[x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
[x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
[x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
[x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
[x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
[x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
[x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
[x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
[x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
[x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
[x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
[x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
[x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
[x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
[x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
[x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
[x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
[x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
[x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
[x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
[x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
[x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
[x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
[x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
[x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
[x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
[x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
[x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
[x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
[x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
[x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
[x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
[x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
[x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
[x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
[x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
[x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
};
#undef PRE_EX
#undef POST_EX
#undef POST_MEM
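/*
* Called by the instruction emulator to check whether L1 intercepts
* the instruction currently being emulated on behalf of L2. Builds the
* exit information and, if intercepted, reflects a #VMEXIT to L1.
*/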
static int svm_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
struct vcpu_svm *svm = to_svm(vcpu);
int vmexit, ret = X86EMUL_CONTINUE;
struct __x86_intercept icpt_info;
struct vmcb *vmcb = svm->vmcb;
if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
goto out;
icpt_info = x86_intercept_map[info->intercept];
if (stage != icpt_info.stage)
goto out;
switch (icpt_info.exit_code) {
case SVM_EXIT_READ_CR0:
if (info->intercept == x86_intercept_cr_read)
icpt_info.exit_code += info->modrm_reg;
break;
case SVM_EXIT_WRITE_CR0: {
unsigned long cr0, val;
u64 intercept;
if (info->intercept == x86_intercept_cr_write)
icpt_info.exit_code += info->modrm_reg;
if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
break;
intercept = svm->nested.intercept;
if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
break;
cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
if (info->intercept == x86_intercept_lmsw) {
cr0 &= 0xfUL;
val &= 0xfUL;
/* lmsw can't clear PE - catch this here */
if (cr0 & X86_CR0_PE)
val |= X86_CR0_PE;
}
if (cr0 ^ val)
icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
break;
}
case SVM_EXIT_READ_DR0:
case SVM_EXIT_WRITE_DR0:
icpt_info.exit_code += info->modrm_reg;
break;
case SVM_EXIT_MSR:
if (info->intercept == x86_intercept_wrmsr)
vmcb->control.exit_info_1 = 1;
else
vmcb->control.exit_info_1 = 0;
break;
case SVM_EXIT_PAUSE:
/*
* We reach this for the NOP opcode; PAUSE is encoded as REP NOP,
* so only treat it as an intercepted PAUSE when the REP prefix
* is present.
*/
if (info->rep_prefix != REPE_PREFIX)
goto out;
case SVM_EXIT_IOIO: {
u64 exit_info;
u32 bytes;
exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
if (info->intercept == x86_intercept_in ||
info->intercept == x86_intercept_ins) {
exit_info |= SVM_IOIO_TYPE_MASK;
bytes = info->src_bytes;
} else {
bytes = info->dst_bytes;
}
if (info->intercept == x86_intercept_outs ||
info->intercept == x86_intercept_ins)
exit_info |= SVM_IOIO_STR_MASK;
if (info->rep_prefix)
exit_info |= SVM_IOIO_REP_MASK;
bytes = min(bytes, 4u);
exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
vmcb->control.exit_info_1 = exit_info;
vmcb->control.exit_info_2 = info->next_rip;
break;
}
default:
break;
}
vmcb->control.next_rip = info->next_rip;
vmcb->control.exit_code = icpt_info.exit_code;
vmexit = nested_svm_exit_handled(svm);
ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
: X86EMUL_CONTINUE;
out:
return ret;
}
static struct kvm_x86_ops svm_x86_ops = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
.hardware_unsetup = svm_hardware_unsetup,
.check_processor_compatibility = svm_check_processor_compat,
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
.vcpu_create = svm_create_vcpu,
.vcpu_free = svm_free_vcpu,
.vcpu_reset = svm_vcpu_reset,
.prepare_guest_switch = svm_prepare_guest_switch,
.vcpu_load = svm_vcpu_load,
.vcpu_put = svm_vcpu_put,
.set_guest_debug = svm_guest_debug,
.get_msr = svm_get_msr,
.set_msr = svm_set_msr,
.get_segment_base = svm_get_segment_base,
.get_segment = svm_get_segment,
.set_segment = svm_set_segment,
.get_cpl = svm_get_cpl,
.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
.decache_cr3 = svm_decache_cr3,
.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
.set_cr0 = svm_set_cr0,
.set_cr3 = svm_set_cr3,
.set_cr4 = svm_set_cr4,
.set_efer = svm_set_efer,
.get_idt = svm_get_idt,
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
.set_dr7 = svm_set_dr7,
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
.fpu_activate = svm_fpu_activate,
.fpu_deactivate = svm_fpu_deactivate,
.tlb_flush = svm_flush_tlb,
.run = svm_vcpu_run,
.handle_exit = handle_exit,
.skip_emulated_instruction = skip_emulated_instruction,
.set_interrupt_shadow = svm_set_interrupt_shadow,
.get_interrupt_shadow = svm_get_interrupt_shadow,
.patch_hypercall = svm_patch_hypercall,
.set_irq = svm_set_irq,
.set_nmi = svm_inject_nmi,
.queue_exception = svm_queue_exception,
.cancel_injection = svm_cancel_injection,
.interrupt_allowed = svm_interrupt_allowed,
.nmi_allowed = svm_nmi_allowed,
.get_nmi_mask = svm_get_nmi_mask,
.set_nmi_mask = svm_set_nmi_mask,
.enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept,
.set_tss_addr = svm_set_tss_addr,
.get_tdp_level = get_npt_level,
.get_mt_mask = svm_get_mt_mask,
.get_exit_info = svm_get_exit_info,
.exit_reasons_str = svm_exit_reasons_str,
.get_lpage_level = svm_get_lpage_level,
.cpuid_update = svm_cpuid_update,
.rdtscp_supported = svm_rdtscp_supported,
.set_supported_cpuid = svm_set_supported_cpuid,
.has_wbinvd_exit = svm_has_wbinvd_exit,
.set_tsc_khz = svm_set_tsc_khz,
.write_tsc_offset = svm_write_tsc_offset,
.adjust_tsc_offset = svm_adjust_tsc_offset,
.compute_tsc_offset = svm_compute_tsc_offset,
.set_tdp_cr3 = set_tdp_cr3,
.check_intercept = svm_check_intercept,
};
static int __init svm_init(void)
{
return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
__alignof__(struct vcpu_svm), THIS_MODULE);
}
static void __exit svm_exit(void)
{
kvm_exit();
}
module_init(svm_init)
module_exit(svm_exit)
| sleshepic/epic_touch_kernel | arch/x86/kvm/svm.c | C | gpl-2.0 | 111,159 |
/*
* Copyright (C) 2011
* Boaz Harrosh <bharrosh@panasas.com>
*
* This file is part of the objects raid engine (ore).
*
* It is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License
* along with "ore". If not, write to the Free Software Foundation, Inc:
* "Free Software Foundation <info@fsf.org>"
*/
#include <linux/gfp.h>
#include <linux/async_tx.h>
#include "ore_raid.h"
#undef ORE_DBGMSG2
#define ORE_DBGMSG2 ORE_DBGMSG
struct page *_raid_page_alloc(void)
{
return alloc_page(GFP_KERNEL);
}
void _raid_page_free(struct page *p)
{
__free_page(p);
}
/* This struct is forward declared in ore_io_state, but is private to here.
* It is put on ios->sp2d for RAID5/6 writes only. See _gen_xor_unit.
*
* __stripe_pages_2d is a 2d array of pages, and it is also a corner turn.
* Ascending page index access is sp2d(p-minor, c-major). But storage is
 * sp2d[p-minor][c-major], so it can be properly presented to the async-xor
* API.
*/
struct __stripe_pages_2d {
/* Cache some hot path repeated calculations */
unsigned parity;
unsigned data_devs;
unsigned pages_in_unit;
	bool needed;
/* Array size is pages_in_unit (layout->stripe_unit / PAGE_SIZE) */
struct __1_page_stripe {
bool alloc;
unsigned write_count;
struct async_submit_ctl submit;
struct dma_async_tx_descriptor *tx;
/* The size of this array is data_devs + parity */
struct page **pages;
struct page **scribble;
/* bool array, size of this array is data_devs */
char *page_is_read;
} _1p_stripes[];
};
/* This can get bigger than a page, so support multiple page allocations.
 * _sp2d_free should be called even if _sp2d_alloc fails (by returning
 * non-zero).
*/
static int _sp2d_alloc(unsigned pages_in_unit, unsigned group_width,
unsigned parity, struct __stripe_pages_2d **psp2d)
{
struct __stripe_pages_2d *sp2d;
unsigned data_devs = group_width - parity;
struct _alloc_all_bytes {
struct __alloc_stripe_pages_2d {
struct __stripe_pages_2d sp2d;
struct __1_page_stripe _1p_stripes[pages_in_unit];
} __asp2d;
struct __alloc_1p_arrays {
struct page *pages[group_width];
struct page *scribble[group_width];
char page_is_read[data_devs];
} __a1pa[pages_in_unit];
} *_aab;
struct __alloc_1p_arrays *__a1pa;
struct __alloc_1p_arrays *__a1pa_end;
const unsigned sizeof__a1pa = sizeof(_aab->__a1pa[0]);
unsigned num_a1pa, alloc_size, i;
/* FIXME: check these numbers in ore_verify_layout */
BUG_ON(sizeof(_aab->__asp2d) > PAGE_SIZE);
BUG_ON(sizeof__a1pa > PAGE_SIZE);
if (sizeof(*_aab) > PAGE_SIZE) {
num_a1pa = (PAGE_SIZE - sizeof(_aab->__asp2d)) / sizeof__a1pa;
alloc_size = sizeof(_aab->__asp2d) + sizeof__a1pa * num_a1pa;
} else {
num_a1pa = pages_in_unit;
alloc_size = sizeof(*_aab);
}
_aab = kzalloc(alloc_size, GFP_KERNEL);
if (unlikely(!_aab)) {
ORE_DBGMSG("!! Failed to alloc sp2d size=%d\n", alloc_size);
return -ENOMEM;
}
sp2d = &_aab->__asp2d.sp2d;
*psp2d = sp2d; /* From here Just call _sp2d_free */
__a1pa = _aab->__a1pa;
__a1pa_end = __a1pa + num_a1pa;
for (i = 0; i < pages_in_unit; ++i) {
if (unlikely(__a1pa >= __a1pa_end)) {
num_a1pa = min_t(unsigned, PAGE_SIZE / sizeof__a1pa,
pages_in_unit - i);
__a1pa = kzalloc(num_a1pa * sizeof__a1pa, GFP_KERNEL);
if (unlikely(!__a1pa)) {
ORE_DBGMSG("!! Failed to _alloc_1p_arrays=%d\n",
num_a1pa);
return -ENOMEM;
}
__a1pa_end = __a1pa + num_a1pa;
/* First *pages is marked for kfree of the buffer */
sp2d->_1p_stripes[i].alloc = true;
}
sp2d->_1p_stripes[i].pages = __a1pa->pages;
		sp2d->_1p_stripes[i].scribble = __a1pa->scribble;
sp2d->_1p_stripes[i].page_is_read = __a1pa->page_is_read;
++__a1pa;
}
sp2d->parity = parity;
sp2d->data_devs = data_devs;
sp2d->pages_in_unit = pages_in_unit;
return 0;
}
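/* Return any read-4-write pages to the caller (r4w->put_page) and clear the
 * per-page-row bookkeeping so the 2d stripe cache can be reused for the
 * next stripe.
 */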
static void _sp2d_reset(struct __stripe_pages_2d *sp2d,
const struct _ore_r4w_op *r4w, void *priv)
{
unsigned data_devs = sp2d->data_devs;
unsigned group_width = data_devs + sp2d->parity;
int p, c;
if (!sp2d->needed)
return;
for (c = data_devs - 1; c >= 0; --c)
for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
if (_1ps->page_is_read[c]) {
struct page *page = _1ps->pages[c];
r4w->put_page(priv, page);
_1ps->page_is_read[c] = false;
}
}
for (p = 0; p < sp2d->pages_in_unit; p++) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
_1ps->write_count = 0;
_1ps->tx = NULL;
}
sp2d->needed = false;
}
static void _sp2d_free(struct __stripe_pages_2d *sp2d)
{
unsigned i;
if (!sp2d)
return;
for (i = 0; i < sp2d->pages_in_unit; ++i) {
if (sp2d->_1p_stripes[i].alloc)
kfree(sp2d->_1p_stripes[i].pages);
}
kfree(sp2d);
}
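/* First/last page-row in the stripe that actually has pages queued for
 * write. Returns ~0 when no row is populated.
 */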
static unsigned _sp2d_min_pg(struct __stripe_pages_2d *sp2d)
{
unsigned p;
for (p = 0; p < sp2d->pages_in_unit; p++) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
if (_1ps->write_count)
return p;
}
return ~0;
}
static unsigned _sp2d_max_pg(struct __stripe_pages_2d *sp2d)
{
int p;
for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
if (_1ps->write_count)
return p;
}
return ~0;
}
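/* For every populated page-row, XOR the data-device pages into the parity
 * page through the async-xor API, then issue the pending transactions.
 */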
static void _gen_xor_unit(struct __stripe_pages_2d *sp2d)
{
unsigned p;
for (p = 0; p < sp2d->pages_in_unit; p++) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
if (!_1ps->write_count)
continue;
init_async_submit(&_1ps->submit,
ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
NULL,
NULL, NULL,
(addr_conv_t *)_1ps->scribble);
/* TODO: raid6 */
_1ps->tx = async_xor(_1ps->pages[sp2d->data_devs], _1ps->pages,
0, sp2d->data_devs, PAGE_SIZE,
&_1ps->submit);
}
for (p = 0; p < sp2d->pages_in_unit; p++) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
/* NOTE: We wait for HW synchronously (I don't have such HW
* to test with.) Is parallelism needed with today's multi
* cores?
*/
async_tx_issue_pending(_1ps->tx);
}
}
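/* Hang @page on the 2d stripe cache at the current (page-row, component)
 * position and advance the page-row cursor. Called for each to-be-written
 * page of the stripe.
 */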
void _ore_add_stripe_page(struct __stripe_pages_2d *sp2d,
struct ore_striping_info *si, struct page *page)
{
struct __1_page_stripe *_1ps;
sp2d->needed = true;
_1ps = &sp2d->_1p_stripes[si->cur_pg];
_1ps->pages[si->cur_comp] = page;
++_1ps->write_count;
si->cur_pg = (si->cur_pg + 1) % sp2d->pages_in_unit;
/* si->cur_comp is advanced outside at main loop */
}
void _ore_add_sg_seg(struct ore_per_dev_state *per_dev, unsigned cur_len,
bool not_last)
{
struct osd_sg_entry *sge;
ORE_DBGMSG("dev=%d cur_len=0x%x not_last=%d cur_sg=%d "
"offset=0x%llx length=0x%x last_sgs_total=0x%x\n",
per_dev->dev, cur_len, not_last, per_dev->cur_sg,
_LLU(per_dev->offset), per_dev->length,
per_dev->last_sgs_total);
if (!per_dev->cur_sg) {
sge = per_dev->sglist;
/* First time we prepare two entries */
if (per_dev->length) {
++per_dev->cur_sg;
sge->offset = per_dev->offset;
sge->len = per_dev->length;
} else {
/* Here the parity is the first unit of this object.
* This happens every time we reach a parity device on
* the same stripe as the per_dev->offset. We need to
* just skip this unit.
*/
per_dev->offset += cur_len;
return;
}
} else {
/* finalize the last one */
sge = &per_dev->sglist[per_dev->cur_sg - 1];
sge->len = per_dev->length - per_dev->last_sgs_total;
}
if (not_last) {
/* Partly prepare the next one */
struct osd_sg_entry *next_sge = sge + 1;
++per_dev->cur_sg;
next_sge->offset = sge->offset + sge->len + cur_len;
		/* Save cur len so we know how much was added next time */
per_dev->last_sgs_total = per_dev->length;
next_sge->len = 0;
} else if (!sge->len) {
/* Optimize for when the last unit is a parity */
--per_dev->cur_sg;
}
}
static int _alloc_read_4_write(struct ore_io_state *ios)
{
struct ore_layout *layout = ios->layout;
int ret;
/* We want to only read those pages not in cache so worst case
* is a stripe populated with every other page
*/
unsigned sgs_per_dev = ios->sp2d->pages_in_unit + 2;
ret = _ore_get_io_state(layout, ios->oc,
layout->group_width * layout->mirrors_p1,
sgs_per_dev, 0, &ios->ios_read_4_write);
return ret;
}
/* @si contains info of the to-be-inserted page. Update of @si should be
 * maintained by caller. Specifically si->dev, si->obj_offset, ...
*/
static int _add_to_r4w(struct ore_io_state *ios, struct ore_striping_info *si,
struct page *page, unsigned pg_len)
{
struct request_queue *q;
struct ore_per_dev_state *per_dev;
struct ore_io_state *read_ios;
unsigned first_dev = si->dev - (si->dev %
(ios->layout->group_width * ios->layout->mirrors_p1));
unsigned comp = si->dev - first_dev;
unsigned added_len;
if (!ios->ios_read_4_write) {
int ret = _alloc_read_4_write(ios);
if (unlikely(ret))
return ret;
}
read_ios = ios->ios_read_4_write;
read_ios->numdevs = ios->layout->group_width * ios->layout->mirrors_p1;
per_dev = &read_ios->per_dev[comp];
if (!per_dev->length) {
per_dev->bio = bio_kmalloc(GFP_KERNEL,
ios->sp2d->pages_in_unit);
if (unlikely(!per_dev->bio)) {
ORE_DBGMSG("Failed to allocate BIO size=%u\n",
ios->sp2d->pages_in_unit);
return -ENOMEM;
}
per_dev->offset = si->obj_offset;
per_dev->dev = si->dev;
} else if (si->obj_offset != (per_dev->offset + per_dev->length)) {
u64 gap = si->obj_offset - (per_dev->offset + per_dev->length);
_ore_add_sg_seg(per_dev, gap, true);
}
q = osd_request_queue(ore_comp_dev(read_ios->oc, per_dev->dev));
added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len,
si->obj_offset % PAGE_SIZE);
if (unlikely(added_len != pg_len)) {
ORE_DBGMSG("Failed to bio_add_pc_page bi_vcnt=%d\n",
per_dev->bio->bi_vcnt);
return -ENOMEM;
}
per_dev->length += pg_len;
return 0;
}
/* read the beginning of an unaligned first page */
static int _add_to_r4w_first_page(struct ore_io_state *ios, struct page *page)
{
struct ore_striping_info si;
unsigned pg_len;
ore_calc_stripe_info(ios->layout, ios->offset, 0, &si);
pg_len = si.obj_offset % PAGE_SIZE;
si.obj_offset -= pg_len;
ORE_DBGMSG("offset=0x%llx len=0x%x index=0x%lx dev=%x\n",
_LLU(si.obj_offset), pg_len, page->index, si.dev);
return _add_to_r4w(ios, &si, page, pg_len);
}
/* read the end of an incomplete last page */
static int _add_to_r4w_last_page(struct ore_io_state *ios, u64 *offset)
{
struct ore_striping_info si;
struct page *page;
unsigned pg_len, p, c;
ore_calc_stripe_info(ios->layout, *offset, 0, &si);
p = si.unit_off / PAGE_SIZE;
c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
ios->layout->mirrors_p1, si.par_dev, si.dev);
page = ios->sp2d->_1p_stripes[p].pages[c];
pg_len = PAGE_SIZE - (si.unit_off % PAGE_SIZE);
*offset += pg_len;
ORE_DBGMSG("p=%d, c=%d next-offset=0x%llx len=0x%x dev=%x par_dev=%d\n",
p, c, _LLU(*offset), pg_len, si.dev, si.par_dev);
BUG_ON(!page);
return _add_to_r4w(ios, &si, page, pg_len);
}
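/* After the read-4-write IO completes, mark every page that was read as
 * uptodate and clear any error flag left on it.
 */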
static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
{
struct bio_vec *bv;
unsigned i, d;
/* loop on all devices all pages */
for (d = 0; d < ios->numdevs; d++) {
struct bio *bio = ios->per_dev[d].bio;
if (!bio)
continue;
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
SetPageUptodate(page);
if (PageError(page))
ClearPageError(page);
}
}
}
/* read_4_write is hacked to read the start of the first stripe and/or
* the end of the last stripe. If needed, with an sg-gap at each device/page.
* It is assumed to be called after the to_be_written pages of the first stripe
* are populating ios->sp2d[][]
*
 * NOTE: We call ios->r4w->get_page for all pages needed for parity
 * calculations. These pages are held at sp2d[p].pages[c] but with
 * sp2d[p].page_is_read[c] = true. At _sp2d_reset these pages are returned
 * via ios->r4w->put_page. get_page might report the page as already
 * @uptodate=true, so we don't need to read it, only release it after IO.
*
 * TODO: The read_4_write should calc a need_to_read_pages_count, if bigger than
* to-be-written count, we should consider the xor-in-place mode.
* need_to_read_pages_count is the actual number of pages not present in cache.
* maybe "devs_in_group - ios->sp2d[p].write_count" is a good enough
* approximation? In this mode the read pages are put in the empty places of
* ios->sp2d[p][*], xor is calculated the same way. These pages are
* allocated/freed and don't go through cache
*/
static int _read_4_write_first_stripe(struct ore_io_state *ios)
{
struct ore_striping_info read_si;
struct __stripe_pages_2d *sp2d = ios->sp2d;
u64 offset = ios->si.first_stripe_start;
unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
if (offset == ios->offset) /* Go to start collect $200 */
goto read_last_stripe;
min_p = _sp2d_min_pg(sp2d);
max_p = _sp2d_max_pg(sp2d);
ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
offset, ios->offset, min_p, max_p);
for (c = 0; ; c++) {
ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
read_si.obj_offset += min_p * PAGE_SIZE;
offset += min_p * PAGE_SIZE;
for (p = min_p; p <= max_p; p++) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
struct page **pp = &_1ps->pages[c];
bool uptodate;
if (*pp) {
if (ios->offset % PAGE_SIZE)
/* Read the remainder of the page */
_add_to_r4w_first_page(ios, *pp);
/* to-be-written pages start here */
goto read_last_stripe;
}
*pp = ios->r4w->get_page(ios->private, offset,
&uptodate);
if (unlikely(!*pp))
return -ENOMEM;
if (!uptodate)
_add_to_r4w(ios, &read_si, *pp, PAGE_SIZE);
/* Mark read-pages to be cache_released */
_1ps->page_is_read[c] = true;
read_si.obj_offset += PAGE_SIZE;
offset += PAGE_SIZE;
}
offset += (sp2d->pages_in_unit - p) * PAGE_SIZE;
}
read_last_stripe:
return 0;
}
static int _read_4_write_last_stripe(struct ore_io_state *ios)
{
struct ore_striping_info read_si;
struct __stripe_pages_2d *sp2d = ios->sp2d;
u64 offset;
u64 last_stripe_end;
unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
offset = ios->offset + ios->length;
if (offset % PAGE_SIZE)
_add_to_r4w_last_page(ios, &offset);
/* offset will be aligned to next page */
last_stripe_end = div_u64(offset + bytes_in_stripe - 1, bytes_in_stripe)
* bytes_in_stripe;
if (offset == last_stripe_end) /* Optimize for the aligned case */
goto read_it;
ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
p = read_si.unit_off / PAGE_SIZE;
c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
if (min_p == sp2d->pages_in_unit) {
/* Didn't do it yet */
min_p = _sp2d_min_pg(sp2d);
max_p = _sp2d_max_pg(sp2d);
}
ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
offset, last_stripe_end, min_p, max_p);
while (offset < last_stripe_end) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
if ((min_p <= p) && (p <= max_p)) {
struct page *page;
bool uptodate;
BUG_ON(_1ps->pages[c]);
page = ios->r4w->get_page(ios->private, offset,
&uptodate);
if (unlikely(!page))
return -ENOMEM;
_1ps->pages[c] = page;
/* Mark read-pages to be cache_released */
_1ps->page_is_read[c] = true;
if (!uptodate)
_add_to_r4w(ios, &read_si, page, PAGE_SIZE);
}
offset += PAGE_SIZE;
if (p == (sp2d->pages_in_unit - 1)) {
++c;
p = 0;
ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
} else {
read_si.obj_offset += PAGE_SIZE;
++p;
}
}
read_it:
return 0;
}
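/* Submit the accumulated read-4-write IO (if any) synchronously, mark the
 * read pages uptodate and release the temporary io_state.
 */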
static int _read_4_write_execute(struct ore_io_state *ios)
{
struct ore_io_state *ios_read;
unsigned i;
int ret;
ios_read = ios->ios_read_4_write;
if (!ios_read)
return 0;
/* FIXME: Ugly to signal _sbi_read_mirror that we have bio(s). Change
* to check for per_dev->bio
*/
ios_read->pages = ios->pages;
/* Now read these devices */
for (i = 0; i < ios_read->numdevs; i += ios_read->layout->mirrors_p1) {
ret = _ore_read_mirror(ios_read, i);
if (unlikely(ret))
return ret;
}
	ret = ore_io_execute(ios_read); /* Synchronous execution */
if (unlikely(ret)) {
ORE_DBGMSG("!! ore_io_execute => %d\n", ret);
return ret;
}
_mark_read4write_pages_uptodate(ios_read, ret);
ore_put_io_state(ios_read);
ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
return 0;
}
/* In writes @cur_len means length left, i.e. cur_len==0 is the last parity U */
int _ore_add_parity_unit(struct ore_io_state *ios,
struct ore_striping_info *si,
struct ore_per_dev_state *per_dev,
unsigned cur_len)
{
if (ios->reading) {
if (per_dev->cur_sg >= ios->sgs_per_dev) {
ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n" ,
per_dev->cur_sg, ios->sgs_per_dev);
return -ENOMEM;
}
_ore_add_sg_seg(per_dev, cur_len, true);
} else {
struct __stripe_pages_2d *sp2d = ios->sp2d;
struct page **pages = ios->parity_pages + ios->cur_par_page;
unsigned num_pages;
unsigned array_start = 0;
unsigned i;
int ret;
si->cur_pg = _sp2d_min_pg(sp2d);
num_pages = _sp2d_max_pg(sp2d) + 1 - si->cur_pg;
if (!cur_len) /* If last stripe operate on parity comp */
si->cur_comp = sp2d->data_devs;
if (!per_dev->length) {
per_dev->offset += si->cur_pg * PAGE_SIZE;
/* If first stripe, Read in all read4write pages
* (if needed) before we calculate the first parity.
*/
_read_4_write_first_stripe(ios);
}
if (!cur_len) /* If last stripe r4w pages of last stripe */
_read_4_write_last_stripe(ios);
_read_4_write_execute(ios);
for (i = 0; i < num_pages; i++) {
pages[i] = _raid_page_alloc();
if (unlikely(!pages[i]))
return -ENOMEM;
++(ios->cur_par_page);
}
BUG_ON(si->cur_comp != sp2d->data_devs);
BUG_ON(si->cur_pg + num_pages > sp2d->pages_in_unit);
ret = _ore_add_stripe_unit(ios, &array_start, 0, pages,
per_dev, num_pages * PAGE_SIZE);
if (unlikely(ret))
return ret;
/* TODO: raid6 if (last_parity_dev) */
_gen_xor_unit(sp2d);
_sp2d_reset(sp2d, ios->r4w, ios->private);
}
return 0;
}
int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
{
if (ios->parity_pages) {
struct ore_layout *layout = ios->layout;
unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
if (_sp2d_alloc(pages_in_unit, layout->group_width,
layout->parity, &ios->sp2d)) {
return -ENOMEM;
}
}
return 0;
}
void _ore_free_raid_stuff(struct ore_io_state *ios)
{
if (ios->sp2d) { /* writing and raid */
unsigned i;
for (i = 0; i < ios->cur_par_page; i++) {
struct page *page = ios->parity_pages[i];
if (page)
_raid_page_free(page);
}
if (ios->extra_part_alloc)
kfree(ios->parity_pages);
/* If IO returned an error pages might need unlocking */
_sp2d_reset(ios->sp2d, ios->r4w, ios->private);
_sp2d_free(ios->sp2d);
} else {
/* Will only be set if raid reading && sglist is big */
if (ios->extra_part_alloc)
kfree(ios->per_dev[0].sglist);
}
if (ios->ios_read_4_write)
ore_put_io_state(ios->ios_read_4_write);
}
| AndroidGX/SimpleGX-MM-6.0_H815_20i | fs/exofs/ore_raid.c | C | gpl-2.0 | 19,507 |
/*
* IPv6 library code, needed by static components when full IPv6 support is
* not configured or static.
*/
#include <net/ipv6.h>
/*
* find out if nexthdr is a well-known extension header or a protocol
*/
int ipv6_ext_hdr(u8 nexthdr)
{
/*
* find out if nexthdr is an extension header or a protocol
*/
return (nexthdr == NEXTHDR_HOP) ||
(nexthdr == NEXTHDR_ROUTING) ||
(nexthdr == NEXTHDR_FRAGMENT) ||
(nexthdr == NEXTHDR_AUTH) ||
(nexthdr == NEXTHDR_NONE) ||
(nexthdr == NEXTHDR_DEST);
}
/*
* Skip any extension headers. This is used by the ICMP module.
*
* Note that strictly speaking this conflicts with RFC 2460 4.0:
* ...The contents and semantics of each extension header determine whether
* or not to proceed to the next header. Therefore, extension headers must
* be processed strictly in the order they appear in the packet; a
* receiver must not, for example, scan through a packet looking for a
* particular kind of extension header and process that header prior to
* processing all preceding ones.
*
 * We do exactly this. This is a protocol bug. We can't decide after
 * seeing an unknown discard-with-error flavour TLV option if it's an
 * ICMP error message or not (errors should never be sent in reply to
 * ICMP error messages).
*
* But I see no other way to do this. This might need to be reexamined
* when Linux implements ESP (and maybe AUTH) headers.
* --AK
*
* This function parses (probably truncated) exthdr set "hdr".
* "nexthdrp" initially points to some place,
* where type of the first header can be found.
*
* It skips all well-known exthdrs, and returns pointer to the start
* of unparsable area i.e. the first header with unknown type.
* If it is not NULL *nexthdr is updated by type/protocol of this header.
*
* NOTES: - if packet terminated with NEXTHDR_NONE it returns NULL.
* - it may return pointer pointing beyond end of packet,
* if the last recognized header is truncated in the middle.
* - if packet is truncated, so that all parsed headers are skipped,
* it returns NULL.
* - First fragment header is skipped, not-first ones
* are considered as unparsable.
* - ESP is unparsable for now and considered like
* normal payload protocol.
* - Note also special handling of AUTH header. Thanks to IPsec wizards.
*
* --ANK (980726)
*/
int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
{
u8 nexthdr = *nexthdrp;
while (ipv6_ext_hdr(nexthdr)) {
struct ipv6_opt_hdr _hdr, *hp;
int hdrlen;
if (nexthdr == NEXTHDR_NONE)
return -1;
hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
if (hp == NULL)
return -1;
if (nexthdr == NEXTHDR_FRAGMENT) {
__be16 _frag_off, *fp;
fp = skb_header_pointer(skb,
start+offsetof(struct frag_hdr,
frag_off),
sizeof(_frag_off),
&_frag_off);
if (fp == NULL)
return -1;
if (ntohs(*fp) & ~0x7)
break;
hdrlen = 8;
} else if (nexthdr == NEXTHDR_AUTH)
hdrlen = (hp->hdrlen+2)<<2;
else
hdrlen = ipv6_optlen(hp);
nexthdr = hp->nexthdr;
start += hdrlen;
}
*nexthdrp = nexthdr;
return start;
}
EXPORT_SYMBOL(ipv6_ext_hdr);
EXPORT_SYMBOL(ipv6_skip_exthdr);
| NooNameR/QSD3.0 | net/ipv6/exthdrs_core.c | C | gpl-2.0 | 3,239 |
/*
* linux/fs/9p/vfs_dentry.c
*
 * This file contains vfs dentry ops for the 9P2000 protocol.
*
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
/**
* v9fs_dentry_delete - called when dentry refcount equals 0
* @dentry: dentry in question
*
 * By returning 1 here we should remove caching of unused
* dentry components.
*
*/
static int v9fs_dentry_delete(const struct dentry *dentry)
{
P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
dentry);
return 1;
}
/**
* v9fs_cached_dentry_delete - called when dentry refcount equals 0
* @dentry: dentry in question
*
*/
static int v9fs_cached_dentry_delete(const struct dentry *dentry)
{
P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n",
dentry->d_name.name, dentry);
/* Don't cache negative dentries */
if (!dentry->d_inode)
return 1;
return 0;
}
/**
* v9fs_dentry_release - called when dentry is going to be freed
 * @dentry: dentry that is being released
*
*/
static void v9fs_dentry_release(struct dentry *dentry)
{
struct v9fs_dentry *dent;
struct p9_fid *temp, *current_fid;
P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
dentry);
dent = dentry->d_fsdata;
if (dent) {
list_for_each_entry_safe(current_fid, temp, &dent->fidlist,
dlist) {
p9_client_clunk(current_fid);
}
kfree(dent);
dentry->d_fsdata = NULL;
}
}
static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct p9_fid *fid;
struct inode *inode;
struct v9fs_inode *v9inode;
if (nd->flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
if (!inode)
goto out_valid;
v9inode = V9FS_I(inode);
if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
int retval;
struct v9fs_session_info *v9ses;
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
retval = v9fs_refresh_inode_dotl(fid, inode);
else
retval = v9fs_refresh_inode(fid, inode);
if (retval == -ENOENT)
return 0;
if (retval < 0)
return retval;
}
out_valid:
return 1;
}
const struct dentry_operations v9fs_cached_dentry_operations = {
.d_revalidate = v9fs_lookup_revalidate,
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
};
const struct dentry_operations v9fs_dentry_operations = {
.d_delete = v9fs_dentry_delete,
.d_release = v9fs_dentry_release,
};
| madmack/i747_kernel_ics | fs/9p/vfs_dentry.c | C | gpl-2.0 | 3,580 |
/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <media/msm/vidc_type.h>
#include "vcd.h"
static const struct vcd_dev_state_table *vcd_dev_state_table[];
static const struct vcd_dev_state_table vcd_dev_table_null;
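/* Return the single, statically allocated driver context. The device state
 * starts out as NULL with the corresponding state table.
 */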
struct vcd_drv_ctxt *vcd_get_drv_context(void)
{
static struct vcd_drv_ctxt drv_context = {
{&vcd_dev_table_null, VCD_DEVICE_STATE_NULL},
{0},
};
return &drv_context;
}
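/* Switch the device state machine to @to_state: run the current state's
 * exit handler, swap in the new state table and run its entry handler.
 */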
void vcd_do_device_state_transition(struct vcd_drv_ctxt *drv_ctxt,
enum vcd_dev_state_enum to_state, u32 ev_code)
{
struct vcd_dev_state_ctxt *state_ctxt;
if (!drv_ctxt || to_state >= VCD_DEVICE_STATE_MAX) {
VCD_MSG_ERROR("Bad parameters. drv_ctxt=%p, to_state=%d",
drv_ctxt, to_state);
return;
}
state_ctxt = &drv_ctxt->dev_state;
if (state_ctxt->state == to_state) {
VCD_MSG_HIGH("Device already in requested to_state=%d",
to_state);
return;
}
VCD_MSG_MED("vcd_do_device_state_transition: D%d -> D%d, for api %d",
(int)state_ctxt->state, (int)to_state, ev_code);
if (state_ctxt->state_table->exit)
state_ctxt->state_table->exit(drv_ctxt, ev_code);
state_ctxt->state = to_state;
state_ctxt->state_table = vcd_dev_state_table[to_state];
if (state_ctxt->state_table->entry)
state_ctxt->state_table->entry(drv_ctxt, ev_code);
}
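/* Hardware timeout callback: dispatch to the timeout handler of the current
 * device state, if one is defined, under the device mutex.
 */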
void vcd_hw_timeout_handler(void *user_data)
{
struct vcd_drv_ctxt *drv_ctxt;
VCD_MSG_HIGH("vcd_hw_timeout_handler:");
user_data = NULL;
drv_ctxt = vcd_get_drv_context();
mutex_lock(&drv_ctxt->dev_mutex);
if (drv_ctxt->dev_state.state_table->ev_hdlr.timeout)
drv_ctxt->dev_state.state_table->ev_hdlr.
timeout(drv_ctxt, user_data);
else
VCD_MSG_ERROR("hw_timeout unsupported in device state %d",
drv_ctxt->dev_state.state);
mutex_unlock(&drv_ctxt->dev_mutex);
}
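/* Callback from the DDL layer: route device-level events to the device
 * state handler and transaction-level events to the owning client's state
 * handler.
 */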
void vcd_ddl_callback(u32 event, u32 status, void *payload,
size_t sz, u32 *ddl_handle, void *const client_data)
{
struct vcd_drv_ctxt *drv_ctxt;
struct vcd_dev_ctxt *dev_ctxt;
struct vcd_dev_state_ctxt *dev_state;
struct vcd_clnt_ctxt *cctxt;
struct vcd_transc *transc;
VCD_MSG_LOW("vcd_ddl_callback:");
VCD_MSG_LOW("event=0x%x status=0x%x", event, status);
drv_ctxt = vcd_get_drv_context();
dev_ctxt = &drv_ctxt->dev_ctxt;
dev_state = &drv_ctxt->dev_state;
dev_ctxt->command_continue = true;
vcd_device_timer_stop(dev_ctxt);
switch (dev_state->state) {
case VCD_DEVICE_STATE_NULL:
{
VCD_MSG_HIGH("Callback unexpected in NULL state");
break;
}
case VCD_DEVICE_STATE_NOT_INIT:
{
VCD_MSG_HIGH("Callback unexpected in NOT_INIT state");
break;
}
case VCD_DEVICE_STATE_INITING:
{
if (dev_state->state_table->ev_hdlr.dev_cb) {
dev_state->state_table->ev_hdlr.
dev_cb(drv_ctxt, event, status,
payload, sz, ddl_handle,
client_data);
} else {
VCD_MSG_HIGH("No device handler in %d state",
dev_state->state);
}
break;
}
case VCD_DEVICE_STATE_READY:
{
transc = (struct vcd_transc *)client_data;
if (!transc || !transc->in_use || !transc->cctxt) {
VCD_MSG_ERROR("Invalid clientdata "
"received from DDL, transc = 0x%x\n",
(u32)transc);
if (transc) {
VCD_MSG_ERROR("transc->in_use = %u, "
"transc->cctxt = 0x%x\n",
transc->in_use,
(u32)transc->cctxt);
}
} else {
cctxt = transc->cctxt;
if (cctxt->clnt_state.state_table->ev_hdlr.
clnt_cb) {
cctxt->clnt_state.state_table->
ev_hdlr.clnt_cb(cctxt,
event, status, payload,
sz, ddl_handle,
client_data);
} else {
VCD_MSG_HIGH
("No client handler in"
" (dsm:READY, csm:%d) state",
(int)cctxt->clnt_state.state);
if (VCD_FAILED(status)) {
VCD_MSG_FATAL("DDL callback"
" returned failure 0x%x",
status);
}
}
}
break;
}
default:
{
VCD_MSG_ERROR("Unknown state");
break;
}
}
}
u32 vcd_init_device_context(struct vcd_drv_ctxt *drv_ctxt,
u32 ev_code)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
u32 rc;
struct ddl_init_config ddl_init;
VCD_MSG_LOW("vcd_init_device_context:");
dev_ctxt->pending_cmd = VCD_CMD_NONE;
rc = vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_INIT_BEGIN);
VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_INIT_BEGIN failed");
VCD_MSG_HIGH("Device powered ON and clocked");
rc = vcd_sched_create(&dev_ctxt->sched_clnt_list);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_sched_create", rc);
(void)vcd_power_event(dev_ctxt, NULL,
VCD_EVT_PWR_DEV_INIT_FAIL);
return rc;
}
VCD_MSG_HIGH("Created scheduler instance.");
ddl_init.core_virtual_base_addr = dev_ctxt->device_base_addr;
ddl_init.interrupt_clr = dev_ctxt->config.interrupt_clr;
ddl_init.ddl_callback = vcd_ddl_callback;
rc = ddl_device_init(&ddl_init, NULL);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR("rc = 0x%x. Failed: ddl_device_init", rc);
vcd_sched_destroy(&dev_ctxt->sched_clnt_list);
(void)vcd_power_event(dev_ctxt, NULL,
VCD_EVT_PWR_DEV_INIT_FAIL);
} else {
vcd_device_timer_start(dev_ctxt);
vcd_do_device_state_transition(drv_ctxt,
VCD_DEVICE_STATE_INITING,
ev_code);
}
return rc;
}
void vcd_handle_device_init_failed(struct vcd_drv_ctxt *drv_ctxt,
u32 status)
{
struct vcd_clnt_ctxt *client;
struct vcd_clnt_ctxt *tmp_client;
VCD_MSG_ERROR("Device init failed. status = %d", status);
client = drv_ctxt->dev_ctxt.cctxt_list_head;
while (client) {
client->callback(VCD_EVT_RESP_OPEN,
status, NULL, 0, 0, client->client_data);
tmp_client = client;
client = client->next;
vcd_destroy_client_context(tmp_client);
}
if (ddl_device_release(NULL))
VCD_MSG_ERROR("Failed: ddl_device_release");
vcd_sched_destroy(&drv_ctxt->dev_ctxt.sched_clnt_list);
if (vcd_power_event(&drv_ctxt->dev_ctxt,
NULL, VCD_EVT_PWR_DEV_INIT_FAIL))
VCD_MSG_ERROR("VCD_EVT_PWR_DEV_INIT_FAIL failed");
vcd_do_device_state_transition(drv_ctxt,
VCD_DEVICE_STATE_NOT_INIT,
DEVICE_STATE_EVENT_NUMBER(dev_cb));
}
u32 vcd_deinit_device_context(struct vcd_drv_ctxt *drv_ctxt,
u32 ev_code)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_deinit_device_context:");
rc = vcd_power_event(&drv_ctxt->dev_ctxt, NULL,
VCD_EVT_PWR_DEV_TERM_BEGIN);
VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_TERM_BEGIN failed");
rc = ddl_device_release(NULL);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR("rc = 0x%x. Failed: ddl_device_release", rc);
(void)vcd_power_event(dev_ctxt, NULL,
VCD_EVT_PWR_DEV_TERM_FAIL);
} else {
vcd_sched_destroy(&dev_ctxt->sched_clnt_list);
(void) vcd_power_event(dev_ctxt, NULL,
VCD_EVT_PWR_DEV_TERM_END);
vcd_do_device_state_transition(drv_ctxt,
VCD_DEVICE_STATE_NOT_INIT, ev_code);
}
return rc;
}
void vcd_term_driver_context(struct vcd_drv_ctxt *drv_ctxt)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
VCD_MSG_HIGH("All driver instances terminated");
if (dev_ctxt->config.deregister_isr)
dev_ctxt->config.deregister_isr();
if (dev_ctxt->config.un_map_dev_base_addr)
dev_ctxt->config.un_map_dev_base_addr();
if (dev_ctxt->config.timer_release)
dev_ctxt->config.timer_release(
dev_ctxt->hw_timer_handle);
kfree(dev_ctxt->trans_tbl);
memset(dev_ctxt, 0, sizeof(struct vcd_dev_ctxt));
vcd_do_device_state_transition(drv_ctxt,
VCD_DEVICE_STATE_NULL,
DEVICE_STATE_EVENT_NUMBER(term));
}
u32 vcd_reset_device_context(struct vcd_drv_ctxt *drv_ctxt,
u32 ev_code)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_reset_device_context:");
vcd_reset_device_channels(dev_ctxt);
rc = vcd_power_event(&drv_ctxt->dev_ctxt, NULL,
VCD_EVT_PWR_DEV_TERM_BEGIN);
VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_TERM_BEGIN failed");
if (ddl_reset_hw(0))
VCD_MSG_HIGH("HW Reset done");
else
VCD_MSG_FATAL("HW Reset failed");
(void)vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_TERM_END);
return VCD_S_SUCCESS;
}
void vcd_handle_device_err_fatal(struct vcd_dev_ctxt *dev_ctxt,
struct vcd_clnt_ctxt *trig_clnt)
{
struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head;
struct vcd_clnt_ctxt *tmp_clnt = NULL;
VCD_MSG_LOW("vcd_handle_device_err_fatal:");
while (cctxt) {
tmp_clnt = cctxt;
cctxt = cctxt->next;
if (tmp_clnt != trig_clnt)
vcd_clnt_handle_device_err_fatal(tmp_clnt,
tmp_clnt->status.last_evt);
}
dev_ctxt->pending_cmd = VCD_CMD_DEVICE_RESET;
if (!dev_ctxt->cctxt_list_head)
vcd_do_device_state_transition(vcd_get_drv_context(),
VCD_DEVICE_STATE_NOT_INIT,
DEVICE_STATE_EVENT_NUMBER(timeout));
else
vcd_do_device_state_transition(vcd_get_drv_context(),
VCD_DEVICE_STATE_INVALID,
DEVICE_STATE_EVENT_NUMBER(dev_cb));
}
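/* If the last client has closed, either tear down the device context now or
 * queue a VCD_CMD_DEVICE_TERM to be handled later from vcd_continue().
 */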
void vcd_handle_for_last_clnt_close(
struct vcd_dev_ctxt *dev_ctxt, u32 send_deinit)
{
if (!dev_ctxt->cctxt_list_head) {
VCD_MSG_HIGH("All clients are closed");
if (send_deinit)
(void) vcd_deinit_device_context(
vcd_get_drv_context(),
DEVICE_STATE_EVENT_NUMBER(close));
else
dev_ctxt->pending_cmd =
VCD_CMD_DEVICE_TERM;
}
}
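/* Main scheduling point: handle any pending device command (init/term/reset)
 * and otherwise drain the command and frame channels, notifying the power
 * module when the core goes idle.
 */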
void vcd_continue(void)
{
struct vcd_drv_ctxt *drv_ctxt;
struct vcd_dev_ctxt *dev_ctxt;
u32 command_continue;
struct vcd_transc *transc;
u32 rc;
VCD_MSG_LOW("vcd_continue:");
drv_ctxt = vcd_get_drv_context();
dev_ctxt = &drv_ctxt->dev_ctxt;
dev_ctxt->command_continue = false;
if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_INIT) {
VCD_MSG_HIGH("VCD_CMD_DEVICE_INIT is pending");
dev_ctxt->pending_cmd = VCD_CMD_NONE;
(void)vcd_init_device_context(drv_ctxt,
DEVICE_STATE_EVENT_NUMBER(open));
} else if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_TERM) {
VCD_MSG_HIGH("VCD_CMD_DEVICE_TERM is pending");
dev_ctxt->pending_cmd = VCD_CMD_NONE;
(void)vcd_deinit_device_context(drv_ctxt,
DEVICE_STATE_EVENT_NUMBER(close));
} else if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_RESET) {
VCD_MSG_HIGH("VCD_CMD_DEVICE_RESET is pending");
dev_ctxt->pending_cmd = VCD_CMD_NONE;
(void)vcd_reset_device_context(drv_ctxt,
DEVICE_STATE_EVENT_NUMBER(dev_cb));
} else {
if (dev_ctxt->set_perf_lvl_pending) {
rc = vcd_power_event(dev_ctxt, NULL,
VCD_EVT_PWR_DEV_SET_PERFLVL);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR
("VCD_EVT_PWR_CLNT_SET_PERFLVL failed");
VCD_MSG_HIGH
("Not running at desired perf level."
"curr=%d, reqd=%d",
dev_ctxt->curr_perf_lvl,
dev_ctxt->reqd_perf_lvl);
} else {
dev_ctxt->set_perf_lvl_pending = false;
}
}
do {
command_continue = false;
if (vcd_get_command_channel_in_loop
(dev_ctxt, &transc)) {
if (vcd_submit_command_in_continue(dev_ctxt,
transc))
command_continue = true;
else {
VCD_MSG_MED
("No more commands to submit");
vcd_release_command_channel(dev_ctxt,
transc);
vcd_release_interim_command_channels
(dev_ctxt);
}
}
} while (command_continue);
do {
command_continue = false;
if (vcd_get_frame_channel_in_loop
(dev_ctxt, &transc)) {
if (vcd_try_submit_frame_in_continue(dev_ctxt,
transc)) {
command_continue = true;
} else {
VCD_MSG_MED("No more frames to submit");
vcd_release_frame_channel(dev_ctxt,
transc);
vcd_release_interim_frame_channels
(dev_ctxt);
}
}
} while (command_continue);
if (!vcd_core_is_busy(dev_ctxt)) {
rc = vcd_power_event(dev_ctxt, NULL,
VCD_EVT_PWR_CLNT_CMD_END);
if (VCD_FAILED(rc))
VCD_MSG_ERROR("Failed:"
"VCD_EVT_PWR_CLNT_CMD_END");
}
}
}
static void vcd_pause_all_sessions(struct vcd_dev_ctxt *dev_ctxt)
{
struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head;
u32 rc;
while (cctxt) {
if (cctxt->clnt_state.state_table->ev_hdlr.pause) {
rc = cctxt->clnt_state.state_table->ev_hdlr.
pause(cctxt);
if (VCD_FAILED(rc))
VCD_MSG_ERROR("Client pause failed");
}
cctxt = cctxt->next;
}
}
static void vcd_resume_all_sessions(struct vcd_dev_ctxt *dev_ctxt)
{
struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head;
u32 rc;
while (cctxt) {
if (cctxt->clnt_state.state_table->ev_hdlr.resume) {
rc = cctxt->clnt_state.state_table->ev_hdlr.
resume(cctxt);
if (VCD_FAILED(rc))
VCD_MSG_ERROR("Client resume failed");
}
cctxt = cctxt->next;
}
}
static u32 vcd_init_cmn
(struct vcd_drv_ctxt *drv_ctxt,
struct vcd_init_config *config, s32 *driver_handle)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
s32 driver_id;
if (dev_ctxt->config.interrupt_clr !=
config->interrupt_clr
|| dev_ctxt->config.register_isr !=
config->register_isr
|| dev_ctxt->config.deregister_isr !=
config->deregister_isr
|| dev_ctxt->config.map_dev_base_addr !=
config->map_dev_base_addr
|| dev_ctxt->config.un_map_dev_base_addr !=
config->un_map_dev_base_addr) {
VCD_MSG_HIGH("Device config mismatch. "
"VCD will be using config from 1st vcd_init");
}
*driver_handle = 0;
driver_id = 0;
while (driver_id < VCD_DRIVER_CLIENTS_MAX &&
dev_ctxt->driver_ids[driver_id]) {
++driver_id;
}
if (driver_id == VCD_DRIVER_CLIENTS_MAX) {
VCD_MSG_ERROR("Max driver instances reached");
return VCD_ERR_FAIL;
}
++dev_ctxt->refs;
dev_ctxt->driver_ids[driver_id] = true;
*driver_handle = driver_id + 1;
VCD_MSG_HIGH("Driver_id = %d. No of driver instances = %d",
driver_id, dev_ctxt->refs);
return VCD_S_SUCCESS;
}
static u32 vcd_init_in_null
(struct vcd_drv_ctxt *drv_ctxt,
struct vcd_init_config *config, s32 *driver_handle) {
u32 rc = VCD_S_SUCCESS;
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
u32 done_create_timer = false;
VCD_MSG_LOW("vcd_init_in_dev_null:");
dev_ctxt->config = *config;
dev_ctxt->device_base_addr =
(u8 *)config->map_dev_base_addr(
dev_ctxt->config.device_name);
if (!dev_ctxt->device_base_addr) {
VCD_MSG_ERROR("NULL Device_base_addr");
return VCD_ERR_FAIL;
}
if (config->register_isr) {
config->register_isr(dev_ctxt->config.
device_name);
}
if (config->timer_create) {
if (config->timer_create(vcd_hw_timeout_handler,
NULL, &dev_ctxt->hw_timer_handle))
done_create_timer = true;
else {
VCD_MSG_ERROR("timercreate failed");
return VCD_ERR_FAIL;
}
}
rc = vcd_init_cmn(drv_ctxt, config, driver_handle);
if (!VCD_FAILED(rc)) {
vcd_do_device_state_transition(drv_ctxt,
VCD_DEVICE_STATE_NOT_INIT,
DEVICE_STATE_EVENT_NUMBER
(init));
} else {
if (dev_ctxt->config.un_map_dev_base_addr)
dev_ctxt->config.un_map_dev_base_addr();
if (dev_ctxt->config.deregister_isr)
dev_ctxt->config.deregister_isr();
if (done_create_timer && dev_ctxt->config.timer_release)
dev_ctxt->config.timer_release(dev_ctxt->
hw_timer_handle);
}
return rc;
}
static u32 vcd_init_in_not_init
(struct vcd_drv_ctxt *drv_ctxt,
struct vcd_init_config *config, s32 *driver_handle)
{
VCD_MSG_LOW("vcd_init_in_dev_not_init:");
return vcd_init_cmn(drv_ctxt, config, driver_handle);
}
static u32 vcd_init_in_initing
(struct vcd_drv_ctxt *drv_ctxt,
struct vcd_init_config *config, s32 *driver_handle) {
VCD_MSG_LOW("vcd_init_in_dev_initing:");
return vcd_init_cmn(drv_ctxt, config, driver_handle);
}
static u32 vcd_init_in_ready
(struct vcd_drv_ctxt *drv_ctxt,
struct vcd_init_config *config, s32 *driver_handle)
{
VCD_MSG_LOW("vcd_init_in_dev_ready:");
return vcd_init_cmn(drv_ctxt, config, driver_handle);
}
static u32 vcd_term_cmn
(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
if (!vcd_validate_driver_handle(dev_ctxt, driver_handle)) {
VCD_MSG_ERROR("Invalid driver handle = %d", driver_handle);
return VCD_ERR_BAD_HANDLE;
}
if (vcd_check_for_client_context(dev_ctxt,
driver_handle - 1)) {
VCD_MSG_ERROR("Driver has active client");
return VCD_ERR_BAD_STATE;
}
--dev_ctxt->refs;
dev_ctxt->driver_ids[driver_handle - 1] = false;
VCD_MSG_HIGH("Driver_id %d terminated. No of driver instances = %d",
driver_handle - 1, dev_ctxt->refs);
return VCD_S_SUCCESS;
}
static u32 vcd_term_in_not_init
(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
u32 rc;
VCD_MSG_LOW("vcd_term_in_dev_not_init:");
rc = vcd_term_cmn(drv_ctxt, driver_handle);
if (!VCD_FAILED(rc) && !dev_ctxt->refs)
vcd_term_driver_context(drv_ctxt);
return rc;
}
static u32 vcd_term_in_initing
(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle)
{
VCD_MSG_LOW("vcd_term_in_dev_initing:");
return vcd_term_cmn(drv_ctxt, driver_handle);
}
static u32 vcd_term_in_ready
(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle)
{
VCD_MSG_LOW("vcd_term_in_dev_ready:");
return vcd_term_cmn(drv_ctxt, driver_handle);
}
static u32 vcd_term_in_invalid(struct vcd_drv_ctxt *drv_ctxt,
s32 driver_handle)
{
u32 rc;
VCD_MSG_LOW("vcd_term_in_invalid:");
rc = vcd_term_cmn(drv_ctxt, driver_handle);
if (!VCD_FAILED(rc) && !drv_ctxt->dev_ctxt.refs)
vcd_term_driver_context(drv_ctxt);
return rc;
}
static u32 vcd_open_cmn
(struct vcd_drv_ctxt *drv_ctxt,
s32 driver_handle,
u32 decoding,
void (*callback) (u32 event, u32 status, void *info, size_t sz,
void *handle, void *const client_data),
void *client_data, struct vcd_clnt_ctxt ** clnt_cctxt)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
struct vcd_clnt_ctxt *cctxt;
struct vcd_clnt_ctxt *client;
if (!vcd_validate_driver_handle(dev_ctxt, driver_handle)) {
VCD_MSG_ERROR("Invalid driver handle = %d", driver_handle);
return VCD_ERR_BAD_HANDLE;
}
cctxt = (struct vcd_clnt_ctxt *)
kmalloc(sizeof(struct vcd_clnt_ctxt), GFP_KERNEL);
if (!cctxt) {
VCD_MSG_ERROR("No memory for client ctxt");
return VCD_ERR_ALLOC_FAIL;
}
memset(cctxt, 0, sizeof(struct vcd_clnt_ctxt));
cctxt->dev_ctxt = dev_ctxt;
cctxt->driver_id = driver_handle - 1;
cctxt->decoding = decoding;
cctxt->callback = callback;
cctxt->client_data = client_data;
cctxt->status.last_evt = VCD_EVT_RESP_OPEN;
INIT_LIST_HEAD(&cctxt->in_buf_pool.queue);
INIT_LIST_HEAD(&cctxt->out_buf_pool.queue);
client = dev_ctxt->cctxt_list_head;
dev_ctxt->cctxt_list_head = cctxt;
cctxt->next = client;
*clnt_cctxt = cctxt;
return VCD_S_SUCCESS;
}
static u32 vcd_open_in_not_init
(struct vcd_drv_ctxt *drv_ctxt,
s32 driver_handle,
u32 decoding,
void (*callback) (u32 event, u32 status, void *info, size_t sz,
void *handle, void *const client_data),
void *client_data)
{
struct vcd_clnt_ctxt *cctxt;
u32 rc;
VCD_MSG_LOW("vcd_open_in_dev_not_init:");
rc = vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback,
client_data, &cctxt);
VCD_FAILED_RETURN(rc, "Failed: vcd_open_cmn");
rc = vcd_init_device_context(drv_ctxt,
DEVICE_STATE_EVENT_NUMBER(open));
if (VCD_FAILED(rc))
vcd_destroy_client_context(cctxt);
return rc;
}
static u32 vcd_open_in_initing(struct vcd_drv_ctxt *drv_ctxt,
s32 driver_handle, u32 decoding,
void (*callback) (u32 event, u32 status, void *info, size_t sz,
void *handle, void *const client_data),
void *client_data)
{
struct vcd_clnt_ctxt *cctxt;
VCD_MSG_LOW("vcd_open_in_dev_initing:");
return vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback,
client_data, &cctxt);
}
static u32 vcd_open_in_ready
(struct vcd_drv_ctxt *drv_ctxt,
s32 driver_handle,
u32 decoding,
void (*callback) (u32 event, u32 status, void *info, size_t sz,
void *handle, void *const client_data),
void *client_data)
{
struct vcd_clnt_ctxt *cctxt;
struct vcd_handle_container container;
u32 rc;
VCD_MSG_LOW("vcd_open_in_dev_ready:");
rc = vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback,
client_data, &cctxt);
VCD_FAILED_RETURN(rc, "Failed: vcd_open_cmn");
rc = vcd_init_client_context(cctxt);
if (!VCD_FAILED(rc)) {
container.handle = (void *)cctxt;
callback(VCD_EVT_RESP_OPEN,
VCD_S_SUCCESS,
&container,
sizeof(container), container.handle, client_data);
} else {
VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_init_client_context", rc);
vcd_destroy_client_context(cctxt);
}
return rc;
}
static u32 vcd_close_in_ready
(struct vcd_drv_ctxt *drv_ctxt,
struct vcd_clnt_ctxt *cctxt) {
u32 rc;
VCD_MSG_LOW("vcd_close_in_dev_ready:");
if (cctxt->clnt_state.state_table->ev_hdlr.close) {
rc = cctxt->clnt_state.state_table->ev_hdlr.
close(cctxt);
} else {
VCD_MSG_ERROR("Unsupported API in client state %d",
cctxt->clnt_state.state);
vcd_destroy_client_context(cctxt);
rc = VCD_ERR_BAD_STATE;
}
if (!VCD_FAILED(rc))
vcd_handle_for_last_clnt_close(&drv_ctxt->dev_ctxt, true);
return rc;
}
static u32 vcd_close_in_dev_invalid(struct vcd_drv_ctxt *drv_ctxt,
struct vcd_clnt_ctxt *cctxt)
{
u32 rc;
VCD_MSG_LOW("vcd_close_in_dev_invalid:");
if (cctxt->clnt_state.state_table->ev_hdlr.close) {
rc = cctxt->clnt_state.state_table->
ev_hdlr.close(cctxt);
} else {
VCD_MSG_ERROR("Unsupported API in client state %d",
cctxt->clnt_state.state);
rc = VCD_ERR_BAD_STATE;
}
if (!VCD_FAILED(rc) && !drv_ctxt->dev_ctxt.
cctxt_list_head) {
VCD_MSG_HIGH("All INVALID clients are closed");
vcd_do_device_state_transition(drv_ctxt,
VCD_DEVICE_STATE_NOT_INIT,
DEVICE_STATE_EVENT_NUMBER(close));
}
return rc;
}
static u32 vcd_resume_in_ready
(struct vcd_drv_ctxt *drv_ctxt,
struct vcd_clnt_ctxt *cctxt) {
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_resume_in_ready:");
if (cctxt->clnt_state.state_table->ev_hdlr.resume) {
rc = cctxt->clnt_state.state_table->ev_hdlr.
resume(cctxt);
} else {
VCD_MSG_ERROR("Unsupported API in client state %d",
cctxt->clnt_state.state);
rc = VCD_ERR_BAD_STATE;
}
return rc;
}
static u32 vcd_set_dev_pwr_in_ready
(struct vcd_drv_ctxt *drv_ctxt,
enum vcd_power_state pwr_state)
{
u32 rc = VCD_S_SUCCESS;
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
VCD_MSG_LOW("vcd_set_dev_pwr_in_ready:");
switch (pwr_state) {
case VCD_PWR_STATE_SLEEP:
{
if (dev_ctxt->pwr_state == VCD_PWR_STATE_ON)
vcd_pause_all_sessions(dev_ctxt);
dev_ctxt->pwr_state = VCD_PWR_STATE_SLEEP;
break;
}
case VCD_PWR_STATE_ON:
{
if (dev_ctxt->pwr_state == VCD_PWR_STATE_SLEEP)
vcd_resume_all_sessions(dev_ctxt);
dev_ctxt->pwr_state = VCD_PWR_STATE_ON;
break;
}
default:
{
VCD_MSG_ERROR("Invalid power state requested %d",
pwr_state);
break;
}
}
return rc;
}
static void vcd_dev_cb_in_initing
(struct vcd_drv_ctxt *drv_ctxt,
u32 event,
u32 status,
void *payload, size_t sz, u32 *ddl_handle, void *const client_data)
{
struct vcd_dev_ctxt *dev_ctxt;
struct vcd_clnt_ctxt *client;
struct vcd_clnt_ctxt *tmp_client;
struct vcd_handle_container container;
u32 rc = VCD_S_SUCCESS;
u32 client_inited = false;
u32 fail_all_open = false;
struct ddl_context *ddl_context;
ddl_context = ddl_get_context();
VCD_MSG_LOW("vcd_dev_cb_in_initing:");
if (event != VCD_EVT_RESP_DEVICE_INIT) {
VCD_MSG_ERROR("vcd_dev_cb_in_initing: Unexpected event %d",
(int)event);
return;
}
dev_ctxt = &drv_ctxt->dev_ctxt;
dev_ctxt->command_continue = false;
if (VCD_FAILED(status)) {
vcd_handle_device_init_failed(drv_ctxt, status);
return;
}
vcd_do_device_state_transition(drv_ctxt,
VCD_DEVICE_STATE_READY,
DEVICE_STATE_EVENT_NUMBER(open));
if (!dev_ctxt->cctxt_list_head) {
VCD_MSG_HIGH("All clients are closed");
dev_ctxt->pending_cmd = VCD_CMD_DEVICE_TERM;
return;
}
if (!dev_ctxt->ddl_cmd_ch_depth
|| !dev_ctxt->trans_tbl)
rc = vcd_setup_with_ddl_capabilities(dev_ctxt);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR
("rc = 0x%x: Failed vcd_setup_with_ddl_capabilities",
rc);
fail_all_open = true;
}
client = dev_ctxt->cctxt_list_head;
while (client) {
if (!fail_all_open)
rc = vcd_init_client_context(client);
if (!VCD_FAILED(rc)) {
container.handle = (void *)client;
client->callback(VCD_EVT_RESP_OPEN,
VCD_S_SUCCESS,
&container,
sizeof(container),
container.handle,
client->client_data);
client = client->next;
client_inited = true;
} else {
VCD_MSG_ERROR
("rc = 0x%x, Failed: vcd_init_client_context",
rc);
client->callback(VCD_EVT_RESP_OPEN,
rc,
NULL, 0, 0, client->client_data);
tmp_client = client;
client = client->next;
if (tmp_client == dev_ctxt->cctxt_list_head)
fail_all_open = true;
vcd_destroy_client_context(tmp_client);
}
}
if (!client_inited || fail_all_open) {
VCD_MSG_ERROR("All client open requests failed");
DDL_IDLE(ddl_context);
vcd_handle_device_init_failed(drv_ctxt,
DEVICE_STATE_EVENT_NUMBER(close));
dev_ctxt->pending_cmd = VCD_CMD_DEVICE_TERM;
} else {
if (vcd_power_event(dev_ctxt, NULL,
VCD_EVT_PWR_DEV_INIT_END)) {
VCD_MSG_ERROR("VCD_EVT_PWR_DEV_INIT_END failed");
}
}
}
static void vcd_hw_timeout_cmn(struct vcd_drv_ctxt *drv_ctxt,
void *user_data)
{
struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt;
VCD_MSG_LOW("vcd_hw_timeout_cmn:");
vcd_device_timer_stop(dev_ctxt);
vcd_handle_device_err_fatal(dev_ctxt, NULL);
/* Reset HW. */
(void) vcd_reset_device_context(drv_ctxt,
DEVICE_STATE_EVENT_NUMBER(timeout));
}
static void vcd_dev_enter_null
(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
VCD_MSG_MED("Entering DEVICE_STATE_NULL on api %d", state_event);
}
static void vcd_dev_enter_not_init
(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
VCD_MSG_MED("Entering DEVICE_STATE_NOT_INIT on api %d",
state_event);
}
static void vcd_dev_enter_initing
(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
VCD_MSG_MED("Entering DEVICE_STATE_INITING on api %d",
state_event);
}
static void vcd_dev_enter_ready
(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
VCD_MSG_MED("Entering DEVICE_STATE_READY on api %d",
state_event);
}
static void vcd_dev_enter_invalid(struct vcd_drv_ctxt *drv_ctxt,
s32 state_event)
{
VCD_MSG_MED("Entering DEVICE_STATE_INVALID on api %d", state_event);
}
static void vcd_dev_exit_null
(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
VCD_MSG_MED("Exiting DEVICE_STATE_NULL on api %d", state_event);
}
static void vcd_dev_exit_not_init
(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
VCD_MSG_MED("Exiting DEVICE_STATE_NOT_INIT on api %d",
state_event);
}
static void vcd_dev_exit_initing
(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
VCD_MSG_MED("Exiting DEVICE_STATE_INITING on api %d",
state_event);
}
static void vcd_dev_exit_ready
(struct vcd_drv_ctxt *drv_ctxt, s32 state_event) {
VCD_MSG_MED("Exiting DEVICE_STATE_READY on api %d", state_event);
}
static void vcd_dev_exit_invalid(struct vcd_drv_ctxt *drv_ctxt,
s32 state_event)
{
VCD_MSG_MED("Exiting DEVICE_STATE_INVALID on api %d", state_event);
}
static const struct vcd_dev_state_table vcd_dev_table_null = {
{
vcd_init_in_null,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
},
vcd_dev_enter_null,
vcd_dev_exit_null
};
static const struct vcd_dev_state_table vcd_dev_table_not_init = {
{
vcd_init_in_not_init,
vcd_term_in_not_init,
vcd_open_in_not_init,
NULL,
NULL,
NULL,
NULL,
NULL,
},
vcd_dev_enter_not_init,
vcd_dev_exit_not_init
};
static const struct vcd_dev_state_table vcd_dev_table_initing = {
{
vcd_init_in_initing,
vcd_term_in_initing,
vcd_open_in_initing,
NULL,
NULL,
NULL,
vcd_dev_cb_in_initing,
vcd_hw_timeout_cmn,
},
vcd_dev_enter_initing,
vcd_dev_exit_initing
};
static const struct vcd_dev_state_table vcd_dev_table_ready = {
{
vcd_init_in_ready,
vcd_term_in_ready,
vcd_open_in_ready,
vcd_close_in_ready,
vcd_resume_in_ready,
vcd_set_dev_pwr_in_ready,
NULL,
vcd_hw_timeout_cmn,
},
vcd_dev_enter_ready,
vcd_dev_exit_ready
};
static const struct vcd_dev_state_table vcd_dev_table_in_invalid = {
{
NULL,
vcd_term_in_invalid,
NULL,
vcd_close_in_dev_invalid,
NULL,
NULL,
NULL,
NULL,
},
vcd_dev_enter_invalid,
vcd_dev_exit_invalid
};
static const struct vcd_dev_state_table *vcd_dev_state_table[] = {
&vcd_dev_table_null,
&vcd_dev_table_not_init,
&vcd_dev_table_initing,
&vcd_dev_table_ready,
&vcd_dev_table_in_invalid
};
| infected-lp/android_kernel_sony_msm8974 | drivers/video/msm/vidc/common/vcd/vcd_device_sm.c | C | gpl-2.0 | 28,704 |
/*******************************************************************************
*
* (c) 1998 by Computone Corporation
*
********************************************************************************
*
*
* PACKAGE: Linux tty Device Driver for IntelliPort family of multiport
* serial I/O controllers.
*
* DESCRIPTION: Low-level interface code for the device driver
* (This is included source code, not a separate compilation
* module.)
*
*******************************************************************************/
//---------------------------------------------
// Function declarations private to this module
//---------------------------------------------
// Functions called only indirectly through i2eBordStr entries.
static int iiWriteBuf16(i2eBordStrPtr, unsigned char *, int);
static int iiWriteBuf8(i2eBordStrPtr, unsigned char *, int);
static int iiReadBuf16(i2eBordStrPtr, unsigned char *, int);
static int iiReadBuf8(i2eBordStrPtr, unsigned char *, int);
static unsigned short iiReadWord16(i2eBordStrPtr);
static unsigned short iiReadWord8(i2eBordStrPtr);
static void iiWriteWord16(i2eBordStrPtr, unsigned short);
static void iiWriteWord8(i2eBordStrPtr, unsigned short);
static int iiWaitForTxEmptyII(i2eBordStrPtr, int);
static int iiWaitForTxEmptyIIEX(i2eBordStrPtr, int);
static int iiTxMailEmptyII(i2eBordStrPtr);
static int iiTxMailEmptyIIEX(i2eBordStrPtr);
static int iiTrySendMailII(i2eBordStrPtr, unsigned char);
static int iiTrySendMailIIEX(i2eBordStrPtr, unsigned char);
static unsigned short iiGetMailII(i2eBordStrPtr);
static unsigned short iiGetMailIIEX(i2eBordStrPtr);
static void iiEnableMailIrqII(i2eBordStrPtr);
static void iiEnableMailIrqIIEX(i2eBordStrPtr);
static void iiWriteMaskII(i2eBordStrPtr, unsigned char);
static void iiWriteMaskIIEX(i2eBordStrPtr, unsigned char);
static void ii2Nop(void);
//***************
//* Static Data *
//***************
static int ii2Safe; // Safe I/O address for delay routine
static int iiDelayed; // Set when the iiResetDelay function is
// called. Cleared when ANY board is reset.
static DEFINE_RWLOCK(Dl_spinlock);
//********
//* Code *
//********
//=======================================================
// Initialization Routines
//
// iiSetAddress
// iiReset
// iiResetDelay
// iiInitialize
//=======================================================
//******************************************************************************
// Function: iiSetAddress(pB, address, delay)
// Parameters: pB - pointer to the board structure
// address - the purported I/O address of the board
// delay - pointer to the 1-ms delay function to use
// in this and any future operations to this board
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// This routine (roughly) checks for address validity, sets the i2eValid OK and
// sets the state to II_STATE_COLD which means that we haven't even sent a reset
// yet.
//
//******************************************************************************
static int
iiSetAddress( i2eBordStrPtr pB, int address, delayFunc_t delay )
{
// Should any failure occur before init is finished...
pB->i2eValid = I2E_INCOMPLETE;
// Cannot check upper limit except extremely: Might be microchannel
// Address must be on an 8-byte boundary
if ((unsigned int)address <= 0x100
|| (unsigned int)address >= 0xfff8
|| (address & 0x7)
)
{
I2_COMPLETE(pB, I2EE_BADADDR);
}
// Initialize accelerators
pB->i2eBase = address;
pB->i2eData = address + FIFO_DATA;
pB->i2eStatus = address + FIFO_STATUS;
pB->i2ePointer = address + FIFO_PTR;
pB->i2eXMail = address + FIFO_MAIL;
pB->i2eXMask = address + FIFO_MASK;
// Initialize i/o address for ii2DelayIO
ii2Safe = address + FIFO_NOP;
// Initialize the delay routine
pB->i2eDelay = ((delay != (delayFunc_t)NULL) ? delay : (delayFunc_t)ii2Nop);
pB->i2eValid = I2E_MAGIC;
pB->i2eState = II_STATE_COLD;
I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: iiReset(pB)
// Parameters: pB - pointer to the board structure
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Attempts to reset the board (see also i2hw.h). Normally, we would use this to
// reset a board immediately after iiSetAddress(), but it is valid to reset a
// board from any state, say, in order to change or re-load loadware. (Under
// such circumstances, no reason to re-run iiSetAddress(), which is why it is a
// separate routine and not included in this routine.
//
//******************************************************************************
static int
iiReset(i2eBordStrPtr pB)
{
// Magic number should be set, else even the address is suspect
if (pB->i2eValid != I2E_MAGIC)
{
I2_COMPLETE(pB, I2EE_BADMAGIC);
}
outb(0, pB->i2eBase + FIFO_RESET); /* Any data will do */
iiDelay(pB, 50); // Pause between resets
outb(0, pB->i2eBase + FIFO_RESET); /* Second reset */
// We must wait before even attempting to read anything from the FIFO: the
// board's P.O.S.T may actually attempt to read and write its end of the
// FIFO in order to check flags, loop back (where supported), etc. On
// completion of this testing it would reset the FIFO, and on completion
	// of all P.O.S.T., write the message. We must not mistake data which
// might have been sent for testing as part of the reset message. To
// better utilize time, say, when resetting several boards, we allow the
// delay to be performed externally; in this way the caller can reset
// several boards, delay a single time, then call the initialization
// routine for all.
pB->i2eState = II_STATE_RESET;
iiDelayed = 0; // i.e., the delay routine hasn't been called since the most
// recent reset.
// Ensure anything which would have been of use to standard loadware is
// blanked out, since board has now forgotten everything!.
pB->i2eUsingIrq = I2_IRQ_UNDEFINED; /* to not use an interrupt so far */
pB->i2eWaitingForEmptyFifo = 0;
pB->i2eOutMailWaiting = 0;
pB->i2eChannelPtr = NULL;
pB->i2eChannelCnt = 0;
pB->i2eLeadoffWord[0] = 0;
pB->i2eFifoInInts = 0;
pB->i2eFifoOutInts = 0;
pB->i2eFatalTrap = NULL;
pB->i2eFatal = 0;
I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: iiResetDelay(pB)
// Parameters: pB - pointer to the board structure
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Using the delay defined in board structure, waits two seconds (for board to
// reset).
//
//******************************************************************************
static int
iiResetDelay(i2eBordStrPtr pB)
{
if (pB->i2eValid != I2E_MAGIC) {
I2_COMPLETE(pB, I2EE_BADMAGIC);
}
if (pB->i2eState != II_STATE_RESET) {
I2_COMPLETE(pB, I2EE_BADSTATE);
}
iiDelay(pB,2000); /* Now we wait for two seconds. */
iiDelayed = 1; /* Delay has been called: ok to initialize */
I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: iiInitialize(pB)
// Parameters: pB - pointer to the board structure
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Attempts to read the Power-on reset message. Initializes any remaining fields
// in the pB structure.
//
// This should be called as the third step of a process beginning with
// iiReset(), then iiResetDelay(). This routine checks to see that the structure
// is "valid" and in the reset state, also confirms that the delay routine has
// been called since the latest reset (to any board! overly strong!).
//
//******************************************************************************
static int
iiInitialize(i2eBordStrPtr pB)
{
int itemp;
unsigned char c;
unsigned short utemp;
unsigned int ilimit;
if (pB->i2eValid != I2E_MAGIC)
{
I2_COMPLETE(pB, I2EE_BADMAGIC);
}
if (pB->i2eState != II_STATE_RESET || !iiDelayed)
{
I2_COMPLETE(pB, I2EE_BADSTATE);
}
// In case there is a failure short of our completely reading the power-up
// message.
pB->i2eValid = I2E_INCOMPLETE;
// Now attempt to read the message.
for (itemp = 0; itemp < sizeof(porStr); itemp++)
{
// We expect the entire message is ready.
if (!I2_HAS_INPUT(pB)) {
pB->i2ePomSize = itemp;
I2_COMPLETE(pB, I2EE_PORM_SHORT);
}
pB->i2ePom.c[itemp] = c = inb(pB->i2eData);
// We check the magic numbers as soon as they are supposed to be read
// (rather than after) to minimize effect of reading something we
// already suspect can't be "us".
if ( (itemp == POR_1_INDEX && c != POR_MAGIC_1) ||
(itemp == POR_2_INDEX && c != POR_MAGIC_2))
{
pB->i2ePomSize = itemp+1;
I2_COMPLETE(pB, I2EE_BADMAGIC);
}
}
pB->i2ePomSize = itemp;
// Ensure that this was all the data...
if (I2_HAS_INPUT(pB))
I2_COMPLETE(pB, I2EE_PORM_LONG);
// For now, we'll fail to initialize if P.O.S.T reports bad chip mapper:
// Implying we will not be able to download any code either: That's ok: the
// condition is pretty explicit.
if (pB->i2ePom.e.porDiag1 & POR_BAD_MAPPER)
{
I2_COMPLETE(pB, I2EE_POSTERR);
}
// Determine anything which must be done differently depending on the family
// of boards!
switch (pB->i2ePom.e.porID & POR_ID_FAMILY)
{
case POR_ID_FII: // IntelliPort-II
pB->i2eFifoStyle = FIFO_II;
pB->i2eFifoSize = 512; // 512 bytes, always
pB->i2eDataWidth16 = false;
pB->i2eMaxIrq = 15; // Because board cannot tell us it is in an 8-bit
// slot, we do allow it to be done (documentation!)
pB->i2eGoodMap[1] =
pB->i2eGoodMap[2] =
pB->i2eGoodMap[3] =
pB->i2eChannelMap[1] =
pB->i2eChannelMap[2] =
pB->i2eChannelMap[3] = 0;
switch (pB->i2ePom.e.porID & POR_ID_SIZE)
{
case POR_ID_II_4:
pB->i2eGoodMap[0] =
pB->i2eChannelMap[0] = 0x0f; // four-port
// Since porPorts1 is based on the Hardware ID register, the numbers
// should always be consistent for IntelliPort-II. Ditto below...
if (pB->i2ePom.e.porPorts1 != 4)
{
I2_COMPLETE(pB, I2EE_INCONSIST);
}
break;
case POR_ID_II_8:
case POR_ID_II_8R:
pB->i2eGoodMap[0] =
pB->i2eChannelMap[0] = 0xff; // Eight port
if (pB->i2ePom.e.porPorts1 != 8)
{
I2_COMPLETE(pB, I2EE_INCONSIST);
}
break;
case POR_ID_II_6:
pB->i2eGoodMap[0] =
pB->i2eChannelMap[0] = 0x3f; // Six Port
if (pB->i2ePom.e.porPorts1 != 6)
{
I2_COMPLETE(pB, I2EE_INCONSIST);
}
break;
}
		// Fix up the "good channel" list based on any errors reported.
if (pB->i2ePom.e.porDiag1 & POR_BAD_UART1)
{
pB->i2eGoodMap[0] &= ~0x0f;
}
if (pB->i2ePom.e.porDiag1 & POR_BAD_UART2)
{
pB->i2eGoodMap[0] &= ~0xf0;
}
break; // POR_ID_FII case
case POR_ID_FIIEX: // IntelliPort-IIEX
pB->i2eFifoStyle = FIFO_IIEX;
itemp = pB->i2ePom.e.porFifoSize;
// Implicit assumption that fifo would not grow beyond 32k,
// nor would ever be less than 256.
if (itemp < 8 || itemp > 15)
{
I2_COMPLETE(pB, I2EE_INCONSIST);
}
pB->i2eFifoSize = (1 << itemp);
// These are based on what P.O.S.T thinks should be there, based on
// box ID registers
ilimit = pB->i2ePom.e.porNumBoxes;
if (ilimit > ABS_MAX_BOXES)
{
ilimit = ABS_MAX_BOXES;
}
// For as many boxes as EXIST, gives the type of box.
// Added 8/6/93: check for the ISA-4 (asic) which looks like an
// expandable but for whom "8 or 16?" is not the right question.
utemp = pB->i2ePom.e.porFlags;
if (utemp & POR_CEX4)
{
pB->i2eChannelMap[0] = 0x000f;
} else {
utemp &= POR_BOXES;
for (itemp = 0; itemp < ilimit; itemp++)
{
pB->i2eChannelMap[itemp] =
((utemp & POR_BOX_16) ? 0xffff : 0x00ff);
utemp >>= 1;
}
}
// These are based on what P.O.S.T actually found.
utemp = (pB->i2ePom.e.porPorts2 << 8) + pB->i2ePom.e.porPorts1;
for (itemp = 0; itemp < ilimit; itemp++)
{
pB->i2eGoodMap[itemp] = 0;
if (utemp & 1) pB->i2eGoodMap[itemp] |= 0x000f;
if (utemp & 2) pB->i2eGoodMap[itemp] |= 0x00f0;
if (utemp & 4) pB->i2eGoodMap[itemp] |= 0x0f00;
if (utemp & 8) pB->i2eGoodMap[itemp] |= 0xf000;
utemp >>= 4;
}
// Now determine whether we should transfer in 8 or 16-bit mode.
switch (pB->i2ePom.e.porBus & (POR_BUS_SLOT16 | POR_BUS_DIP16) )
{
case POR_BUS_SLOT16 | POR_BUS_DIP16:
pB->i2eDataWidth16 = true;
pB->i2eMaxIrq = 15;
break;
case POR_BUS_SLOT16:
pB->i2eDataWidth16 = false;
pB->i2eMaxIrq = 15;
break;
case 0:
case POR_BUS_DIP16: // In an 8-bit slot, DIP switch don't care.
default:
pB->i2eDataWidth16 = false;
pB->i2eMaxIrq = 7;
break;
}
break; // POR_ID_FIIEX case
default: // Unknown type of board
I2_COMPLETE(pB, I2EE_BAD_FAMILY);
break;
} // End the switch based on family
// Temporarily, claim there is no room in the outbound fifo.
// We will maintain this whenever we check for an empty outbound FIFO.
pB->i2eFifoRemains = 0;
// Now, based on the bus type, should we expect to be able to re-configure
// interrupts (say, for testing purposes).
switch (pB->i2ePom.e.porBus & POR_BUS_TYPE)
{
case POR_BUS_T_ISA:
case POR_BUS_T_UNK: // If the type of bus is undeclared, assume ok.
case POR_BUS_T_MCA:
case POR_BUS_T_EISA:
break;
default:
I2_COMPLETE(pB, I2EE_BADBUS);
}
if (pB->i2eDataWidth16)
{
pB->i2eWriteBuf = iiWriteBuf16;
pB->i2eReadBuf = iiReadBuf16;
pB->i2eWriteWord = iiWriteWord16;
pB->i2eReadWord = iiReadWord16;
} else {
pB->i2eWriteBuf = iiWriteBuf8;
pB->i2eReadBuf = iiReadBuf8;
pB->i2eWriteWord = iiWriteWord8;
pB->i2eReadWord = iiReadWord8;
}
switch(pB->i2eFifoStyle)
{
case FIFO_II:
pB->i2eWaitForTxEmpty = iiWaitForTxEmptyII;
pB->i2eTxMailEmpty = iiTxMailEmptyII;
pB->i2eTrySendMail = iiTrySendMailII;
pB->i2eGetMail = iiGetMailII;
pB->i2eEnableMailIrq = iiEnableMailIrqII;
pB->i2eWriteMask = iiWriteMaskII;
break;
case FIFO_IIEX:
pB->i2eWaitForTxEmpty = iiWaitForTxEmptyIIEX;
pB->i2eTxMailEmpty = iiTxMailEmptyIIEX;
pB->i2eTrySendMail = iiTrySendMailIIEX;
pB->i2eGetMail = iiGetMailIIEX;
pB->i2eEnableMailIrq = iiEnableMailIrqIIEX;
pB->i2eWriteMask = iiWriteMaskIIEX;
break;
default:
I2_COMPLETE(pB, I2EE_INCONSIST);
}
// Initialize state information.
pB->i2eState = II_STATE_READY; // Ready to load loadware.
// Some Final cleanup:
// For some boards, the bootstrap firmware may perform some sort of test
// resulting in a stray character pending in the incoming mailbox. If one is
// there, it should be read and discarded, especially since for the standard
// firmware, it's the mailbox that interrupts the host.
pB->i2eStartMail = iiGetMail(pB);
// Throw it away and clear the mailbox structure element
pB->i2eStartMail = NO_MAIL_HERE;
	// Everything is ok now, return with good status.
pB->i2eValid = I2E_MAGIC;
I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: ii2DelayTimer(mseconds)
// Parameters: mseconds - number of milliseconds to delay
//
// Returns: Nothing
//
// Description:
//
// This routine delays for approximately mseconds milliseconds and is intended
// to be called indirectly through i2Delay field in i2eBordStr. It uses the
// Linux timer_list mechanism.
//
// The Linux timers use a unit called "jiffies" which are 10mS in the Intel
// architecture. This function rounds the delay period up to the next "jiffy".
// In the Alpha architecture the "jiffy" is 1mS, but this driver is not intended
// for Alpha platforms at this time.
//
//******************************************************************************
static void
ii2DelayTimer(unsigned int mseconds)
{
msleep_interruptible(mseconds);
}
#if 0
//static void ii2DelayIO(unsigned int);
//******************************************************************************
// !!! Not Used, this is DOS crap, but some of you young folks may be interested
// in how things were done in the stone age of calculating machines !!!
// Function: ii2DelayIO(mseconds)
// Parameters: mseconds - number of milliseconds to delay
//
// Returns: Nothing
//
// Description:
//
// This routine delays for approximately mseconds milliseconds and is intended
// to be called indirectly through i2Delay field in i2eBordStr. It is intended
// for use where a clock-based function is impossible: for example, DOS drivers.
//
// This function uses the IN instruction to place bounds on the timing and
// assumes that ii2Safe has been set. This is because I/O instructions are not
// subject to caching and will therefore take a certain minimum time. To ensure
// the delay is at least long enough on fast machines, it is based on some
// fastest-case calculations. On slower machines this may cause VERY long
// delays. (3 x fastest case). In the fastest case, everything is cached except
// the I/O instruction itself.
//
// Timing calculations:
// The fastest bus speed for I/O operations is likely to be 10 MHz. The I/O
// operation in question is a byte operation to an odd address. For 8-bit
// operations, the architecture generally enforces two wait states. At 10 MHz, a
// single cycle time is 100nS. A read operation at two wait states takes 6
// cycles for a total time of 600nS. Therefore approximately 1666 iterations
// would be required to generate a single millisecond delay. The worst
// (reasonable) case would be an 8MHz system with no caching. In this case, the
// I/O instruction would take 125nS x 6 cycles = 750 nS. More importantly, code
// fetch of other instructions in the loop would take time (zero wait states,
// however) and would be hard to estimate. This is minimized by using in-line
// assembler for the inner loop of IN instructions. This consists of just a
// few bytes. So we'll guess about four code fetches per loop. Each code fetch
// should take four cycles, so we have 125nS * 8 = 1000nS. Worst case then is
// that what should have taken 1 mS takes instead 1666 * (1750) = 2.9 mS.
//
// So much for theoretical timings: results using 1666 value on some actual
// machines:
// IBM 286 6MHz 3.15 mS
// Zenith 386 33MHz 2.45 mS
// (brandX) 386 33MHz 1.90 mS (has cache)
// (brandY) 486 33MHz 2.35 mS
// NCR 486 ?? 1.65 mS (microchannel)
//
// For most machines, it is probably safe to scale this number back (remember,
// for robust operation use an actual timed delay if possible), so we are using
// a value of 1190. This yields 1.17 mS for the fastest machine in our sample,
// 1.75 mS for typical 386 machines, and 2.25 mS the absolute slowest machine.
//
// 1/29/93:
// The above timings are too slow. Actual cycle times might be faster. ISA cycle
// times could approach 500 nS, and ...
// The IBM model 77 being microchannel has no wait states for 8-bit reads and
// seems to be accessing the I/O at 440 nS per access (from start of one to
// start of next). This would imply we need 1000/.440 = 2272 iterations to
// guarantee we are fast enough. In actual testing, we see that 2 * 1190 are in
// fact enough. For diagnostics, we keep the level at 1190, but developers note
// this needs tuning.
//
// Safe assumption: 2270 i/o reads = 1 millisecond
//
//******************************************************************************
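// Worked example of the arithmetic above (illustrative only: the cycle times
// are the fastest-case assumptions quoted in the comment, not measurements of
// any particular machine):
//
//     10 MHz bus                  -> 100 nS per bus cycle
//     8-bit read, 2 wait states   -> 6 cycles = 600 nS per IN
//     iterations per millisecond  -> 1,000,000 nS / 600 nS ~= 1666
//
// The value 1190 below is that figure scaled back so that even the fastest
// machine sampled above stays a little over 1 mS per loop (1.17 mS), at the
// cost of proportionally longer delays on slower hardware.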
static int ii2DelValue = 1190; // See timing calculations above
// 1666 for fastest theoretical machine
// 1190 safe for most fast 386 machines
// 1000 for fastest machine tested here
// 540 (sic) for AT286/6Mhz
static void
ii2DelayIO(unsigned int mseconds)
{
if (!ii2Safe)
return; /* Do nothing if this variable uninitialized */
while(mseconds--) {
int i = ii2DelValue;
while ( i-- ) {
inb(ii2Safe);
}
}
}
#endif
//******************************************************************************
// Function: ii2Nop()
// Parameters: None
//
// Returns: Nothing
//
// Description:
//
// iiSetAddress() will set i2eDelay to this if the delay parameter is NULL. This
// saves checking for a NULL pointer at every call.
//******************************************************************************
static void
ii2Nop(void)
{
return; // no mystery here
}
//=======================================================
// Routines which are available in 8/16-bit versions, or
// in different fifo styles. These are ALL called
// indirectly through the board structure.
//=======================================================
//******************************************************************************
// Function: iiWriteBuf16(pB, address, count)
// Parameters: pB - pointer to board structure
// address - address of data to write
// count - number of data bytes to write
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Writes 'count' bytes from 'address' to the data fifo specified by the board
// structure pointer pB. Should count happen to be odd, an extra pad byte is
// sent (identity unknown...). Uses 16-bit (word) operations. Is called
// indirectly through pB->i2eWriteBuf.
//
//******************************************************************************
static int
iiWriteBuf16(i2eBordStrPtr pB, unsigned char *address, int count)
{
// Rudimentary sanity checking here.
if (pB->i2eValid != I2E_MAGIC)
I2_COMPLETE(pB, I2EE_INVALID);
I2_OUTSW(pB->i2eData, address, count);
I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: iiWriteBuf8(pB, address, count)
// Parameters: pB - pointer to board structure
// address - address of data to write
// count - number of data bytes to write
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Writes 'count' bytes from 'address' to the data fifo specified by the board
// structure pointer pB. Should count happen to be odd, an extra pad byte is
// sent (identity unknown...). This is to be consistent with the 16-bit version.
// Uses 8-bit (byte) operations. Is called indirectly through pB->i2eWriteBuf.
//
//******************************************************************************
static int
iiWriteBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
{
/* Rudimentary sanity checking here */
if (pB->i2eValid != I2E_MAGIC)
I2_COMPLETE(pB, I2EE_INVALID);
I2_OUTSB(pB->i2eData, address, count);
I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: iiReadBuf16(pB, address, count)
// Parameters: pB - pointer to board structure
// address - address to put data read
// count - number of data bytes to read
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Reads 'count' bytes into 'address' from the data fifo specified by the board
// structure pointer pB. Should count happen to be odd, an extra pad byte is
// received (identity unknown...). Uses 16-bit (word) operations. Is called
// indirectly through pB->i2eReadBuf.
//
//******************************************************************************
static int
iiReadBuf16(i2eBordStrPtr pB, unsigned char *address, int count)
{
// Rudimentary sanity checking here.
if (pB->i2eValid != I2E_MAGIC)
I2_COMPLETE(pB, I2EE_INVALID);
I2_INSW(pB->i2eData, address, count);
I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: iiReadBuf8(pB, address, count)
// Parameters: pB - pointer to board structure
// address - address to put data read
// count - number of data bytes to read
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Reads 'count' bytes into 'address' from the data fifo specified by the board
// structure pointer pB. Should count happen to be odd, an extra pad byte is
// received (identity unknown...). This to match the 16-bit behaviour. Uses
// 8-bit (byte) operations. Is called indirectly through pB->i2eReadBuf.
//
//******************************************************************************
static int
iiReadBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
{
// Rudimentary sanity checking here.
if (pB->i2eValid != I2E_MAGIC)
I2_COMPLETE(pB, I2EE_INVALID);
I2_INSB(pB->i2eData, address, count);
I2_COMPLETE(pB, I2EE_GOOD);
}
//******************************************************************************
// Function: iiReadWord16(pB)
// Parameters: pB - pointer to board structure
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Returns the word read from the data fifo specified by the board-structure
// pointer pB. Uses a 16-bit operation. Is called indirectly through
// pB->i2eReadWord.
//
//******************************************************************************
static unsigned short
iiReadWord16(i2eBordStrPtr pB)
{
return inw(pB->i2eData);
}
//******************************************************************************
// Function: iiReadWord8(pB)
// Parameters: pB - pointer to board structure
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Returns the word read from the data fifo specified by the board-structure
// pointer pB. Uses two 8-bit operations. Bytes are assumed to be LSB first. Is
// called indirectly through pB->i2eReadWord.
//
//******************************************************************************
static unsigned short
iiReadWord8(i2eBordStrPtr pB)
{
unsigned short urs;
urs = inb(pB->i2eData);
return (inb(pB->i2eData) << 8) | urs;
}
//******************************************************************************
// Function: iiWriteWord16(pB, value)
// Parameters: pB - pointer to board structure
// value - data to write
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Writes the word 'value' to the data fifo specified by the board-structure
// pointer pB. Uses 16-bit operation. Is called indirectly through
// pB->i2eWriteWord.
//
//******************************************************************************
static void
iiWriteWord16(i2eBordStrPtr pB, unsigned short value)
{
outw((int)value, pB->i2eData);
}
//******************************************************************************
// Function: iiWriteWord8(pB, value)
// Parameters: pB - pointer to board structure
// value - data to write
//
// Returns: True if everything appears copacetic.
// False if there is any error: the pB->i2eError field has the error
//
// Description:
//
// Writes the word 'value' to the data fifo specified by the board-structure
// pointer pB. Uses two 8-bit operations (writes LSB first). Is called
// indirectly through pB->i2eWriteWord.
//
//******************************************************************************
static void
iiWriteWord8(i2eBordStrPtr pB, unsigned short value)
{
outb((char)value, pB->i2eData);
outb((char)(value >> 8), pB->i2eData);
}
//******************************************************************************
// Function: iiWaitForTxEmptyII(pB, mSdelay)
// Parameters: pB - pointer to board structure
// mSdelay - period to wait before returning
//
// Returns: True if the FIFO is empty.
// False if it is not empty in the required time: the pB->i2eError
// field has the error.
//
// Description:
//
// Waits up to "mSdelay" milliseconds for the outgoing FIFO to become empty; if
// not empty by the required time, returns false and error in pB->i2eError,
// otherwise returns true.
//
// mSdelay == 0 is taken to mean must be empty on the first test.
//
// This version operates on IntelliPort-II - style FIFO's
//
// Note this routine is organized so that if status is ok there is no delay at
// all called either before or after the test. Is called indirectly through
// pB->i2eWaitForTxEmpty.
//
//******************************************************************************
static int
iiWaitForTxEmptyII(i2eBordStrPtr pB, int mSdelay)
{
unsigned long flags;
int itemp;
for (;;)
{
// This routine hinges on being able to see the "other" status register
// (as seen by the local processor). His incoming fifo is our outgoing
// FIFO.
//
// By the nature of this routine, you would be using this as part of a
// larger atomic context: i.e., you would use this routine to ensure the
// fifo empty, then act on this information. Between these two halves,
// you will generally not want to service interrupts or in any way
// disrupt the assumptions implicit in the larger context.
//
// Even worse, however, this routine "shifts" the status register to
// point to the local status register which is not the usual situation.
// Therefore for extra safety, we force the critical section to be
// completely atomic, and pick up after ourselves before allowing any
// interrupts of any kind.
write_lock_irqsave(&Dl_spinlock, flags);
outb(SEL_COMMAND, pB->i2ePointer);
outb(SEL_CMD_SH, pB->i2ePointer);
itemp = inb(pB->i2eStatus);
outb(SEL_COMMAND, pB->i2ePointer);
outb(SEL_CMD_UNSH, pB->i2ePointer);
if (itemp & ST_IN_EMPTY)
{
I2_UPDATE_FIFO_ROOM(pB);
write_unlock_irqrestore(&Dl_spinlock, flags);
I2_COMPLETE(pB, I2EE_GOOD);
}
write_unlock_irqrestore(&Dl_spinlock, flags);
if (mSdelay-- == 0)
break;
iiDelay(pB, 1); /* 1 mS granularity on checking condition */
}
I2_COMPLETE(pB, I2EE_TXE_TIME);
}
//******************************************************************************
// Function: iiWaitForTxEmptyIIEX(pB, mSdelay)
// Parameters: pB - pointer to board structure
// mSdelay - period to wait before returning
//
// Returns: True if the FIFO is empty.
// False if it is not empty in the required time: the pB->i2eError
// field has the error.
//
// Description:
//
// Waits up to "mSdelay" milliseconds for the outgoing FIFO to become empty; if
// not empty by the required time, returns false and error in pB->i2eError,
// otherwise returns true.
//
// mSdelay == 0 is taken to mean must be empty on the first test.
//
// This version operates on IntelliPort-IIEX - style FIFO's
//
// Note this routine is organized so that if status is ok there is no delay at
// all called either before or after the test. Is called indirectly through
// pB->i2eWaitForTxEmpty.
//
//******************************************************************************
static int
iiWaitForTxEmptyIIEX(i2eBordStrPtr pB, int mSdelay)
{
unsigned long flags;
for (;;)
{
// By the nature of this routine, you would be using this as part of a
// larger atomic context: i.e., you would use this routine to ensure the
// fifo empty, then act on this information. Between these two halves,
// you will generally not want to service interrupts or in any way
// disrupt the assumptions implicit in the larger context.
write_lock_irqsave(&Dl_spinlock, flags);
if (inb(pB->i2eStatus) & STE_OUT_MT) {
I2_UPDATE_FIFO_ROOM(pB);
write_unlock_irqrestore(&Dl_spinlock, flags);
I2_COMPLETE(pB, I2EE_GOOD);
}
write_unlock_irqrestore(&Dl_spinlock, flags);
if (mSdelay-- == 0)
break;
iiDelay(pB, 1); // 1 mS granularity on checking condition
}
I2_COMPLETE(pB, I2EE_TXE_TIME);
}
//******************************************************************************
// Function: iiTxMailEmptyII(pB)
// Parameters: pB - pointer to board structure
//
// Returns: True if the transmit mailbox is empty.
// False if it is not empty.
//
// Description:
//
// Returns true or false according to whether the transmit mailbox is empty (and
// therefore able to accept more mail)
//
// This version operates on IntelliPort-II - style FIFO's
//
//******************************************************************************
static int
iiTxMailEmptyII(i2eBordStrPtr pB)
{
int port = pB->i2ePointer;
outb(SEL_OUTMAIL, port);
return inb(port) == 0;
}
//******************************************************************************
// Function: iiTxMailEmptyIIEX(pB)
// Parameters: pB - pointer to board structure
//
// Returns: True if the transmit mailbox is empty.
// False if it is not empty.
//
// Description:
//
// Returns true or false according to whether the transmit mailbox is empty (and
// therefore able to accept more mail)
//
// This version operates on IntelliPort-IIEX - style FIFO's
//
//******************************************************************************
static int
iiTxMailEmptyIIEX(i2eBordStrPtr pB)
{
return !(inb(pB->i2eStatus) & STE_OUT_MAIL);
}
//******************************************************************************
// Function: iiTrySendMailII(pB,mail)
// Parameters: pB - pointer to board structure
// mail - value to write to mailbox
//
// Returns: True if the transmit mailbox is empty, and mail is sent.
// False if it is not empty.
//
// Description:
//
// If outgoing mailbox is empty, sends mail and returns true. If outgoing
// mailbox is not empty, returns false.
//
// This version operates on IntelliPort-II - style FIFO's
//
//******************************************************************************
static int
iiTrySendMailII(i2eBordStrPtr pB, unsigned char mail)
{
int port = pB->i2ePointer;
outb(SEL_OUTMAIL, port);
if (inb(port) == 0) {
outb(SEL_OUTMAIL, port);
outb(mail, port);
return 1;
}
return 0;
}
//******************************************************************************
// Function: iiTrySendMailIIEX(pB,mail)
// Parameters: pB - pointer to board structure
// mail - value to write to mailbox
//
// Returns: True if the transmit mailbox is empty, and mail is sent.
// False if it is not empty.
//
// Description:
//
// If outgoing mailbox is empty, sends mail and returns true. If outgoing
// mailbox is not empty, returns false.
//
// This version operates on IntelliPort-IIEX - style FIFO's
//
//******************************************************************************
static int
iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail)
{
if (inb(pB->i2eStatus) & STE_OUT_MAIL)
return 0;
outb(mail, pB->i2eXMail);
return 1;
}
//******************************************************************************
// Function: iiGetMailII(pB,mail)
// Parameters: pB - pointer to board structure
//
// Returns: Mailbox data or NO_MAIL_HERE.
//
// Description:
//
// If no mail available, returns NO_MAIL_HERE otherwise returns the data from
// the mailbox, which is guaranteed != NO_MAIL_HERE.
//
// This version operates on IntelliPort-II - style FIFO's
//
//******************************************************************************
static unsigned short
iiGetMailII(i2eBordStrPtr pB)
{
if (I2_HAS_MAIL(pB)) {
outb(SEL_INMAIL, pB->i2ePointer);
return inb(pB->i2ePointer);
} else {
return NO_MAIL_HERE;
}
}
//******************************************************************************
// Function: iiGetMailIIEX(pB,mail)
// Parameters: pB - pointer to board structure
//
// Returns: Mailbox data or NO_MAIL_HERE.
//
// Description:
//
// If no mail available, returns NO_MAIL_HERE otherwise returns the data from
// the mailbox, which is guaranteed != NO_MAIL_HERE.
//
// This version operates on IntelliPort-IIEX - style FIFO's
//
//******************************************************************************
static unsigned short
iiGetMailIIEX(i2eBordStrPtr pB)
{
if (I2_HAS_MAIL(pB))
return inb(pB->i2eXMail);
else
return NO_MAIL_HERE;
}
//******************************************************************************
// Function: iiEnableMailIrqII(pB)
// Parameters: pB - pointer to board structure
//
// Returns: Nothing
//
// Description:
//
// Enables board to interrupt host (only) by writing to host's in-bound mailbox.
//
// This version operates on IntelliPort-II - style FIFO's
//
//******************************************************************************
static void
iiEnableMailIrqII(i2eBordStrPtr pB)
{
outb(SEL_MASK, pB->i2ePointer);
outb(ST_IN_MAIL, pB->i2ePointer);
}
//******************************************************************************
// Function: iiEnableMailIrqIIEX(pB)
// Parameters: pB - pointer to board structure
//
// Returns: Nothing
//
// Description:
//
// Enables board to interrupt host (only) by writing to host's in-bound mailbox.
//
// This version operates on IntelliPort-IIEX - style FIFO's
//
//******************************************************************************
static void
iiEnableMailIrqIIEX(i2eBordStrPtr pB)
{
outb(MX_IN_MAIL, pB->i2eXMask);
}
//******************************************************************************
// Function: iiWriteMaskII(pB)
// Parameters: pB - pointer to board structure
//
// Returns: Nothing
//
// Description:
//
// Writes arbitrary value to the mask register.
//
// This version operates on IntelliPort-II - style FIFO's
//
//******************************************************************************
static void
iiWriteMaskII(i2eBordStrPtr pB, unsigned char value)
{
outb(SEL_MASK, pB->i2ePointer);
outb(value, pB->i2ePointer);
}
//******************************************************************************
// Function: iiWriteMaskIIEX(pB)
// Parameters: pB - pointer to board structure
//
// Returns: Nothing
//
// Description:
//
// Writes arbitrary value to the mask register.
//
// This version operates on IntelliPort-IIEX - style FIFO's
//
//******************************************************************************
static void
iiWriteMaskIIEX(i2eBordStrPtr pB, unsigned char value)
{
outb(value, pB->i2eXMask);
}
//******************************************************************************
// Function: iiDownloadBlock(pB, pSource, isStandard)
// Parameters: pB - pointer to board structure
// pSource - loadware block to download
// isStandard - True if "standard" loadware, else false.
//
// Returns: Success or Failure
//
// Description:
//
// Downloads a single block (at pSource) to the board referenced by pB. Caller
// sets isStandard to true/false according to whether the "standard" loadware is
// what's being loaded. The normal process, then, is to perform an iiInitialize
// to the board, then perform some number of iiDownloadBlocks using the returned
// state to determine when download is complete.
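// (iiDownloadAll(), defined just after this routine, packages exactly that
// loop for a complete loadware image.)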
//
// Possible return values: (see I2ELLIS.H)
// II_DOWN_BADVALID
// II_DOWN_BADFILE
// II_DOWN_CONTINUING
// II_DOWN_GOOD
// II_DOWN_BAD
// II_DOWN_BADSTATE
// II_DOWN_TIMEOUT
//
// Uses the i2eState and i2eToLoad fields (initialized at iiInitialize) to
// determine whether this is the first block, whether to check for magic
// numbers, how many blocks there are to go...
//
//******************************************************************************
static int
iiDownloadBlock ( i2eBordStrPtr pB, loadHdrStrPtr pSource, int isStandard)
{
int itemp;
int loadedFirst;
if (pB->i2eValid != I2E_MAGIC) return II_DOWN_BADVALID;
switch(pB->i2eState)
{
case II_STATE_READY:
// Loading the first block after reset. Must check the magic number of the
// loadfile, store the number of blocks we expect to load.
if (pSource->e.loadMagic != MAGIC_LOADFILE)
{
return II_DOWN_BADFILE;
}
// Next we store the total number of blocks to load, including this one.
pB->i2eToLoad = 1 + pSource->e.loadBlocksMore;
// Set the state, store the version numbers. ('Cause this may have come
// from a file - we might want to report these versions and revisions in
		// case of an error!)
pB->i2eState = II_STATE_LOADING;
pB->i2eLVersion = pSource->e.loadVersion;
pB->i2eLRevision = pSource->e.loadRevision;
pB->i2eLSub = pSource->e.loadSubRevision;
		// The time and date of compilation are also available, but don't bother
// storing it for normal purposes.
loadedFirst = 1;
break;
case II_STATE_LOADING:
loadedFirst = 0;
break;
default:
return II_DOWN_BADSTATE;
}
// Now we must be in the II_STATE_LOADING state, and we assume i2eToLoad
// must be positive still, because otherwise we would have cleaned up last
// time and set the state to II_STATE_LOADED.
if (!iiWaitForTxEmpty(pB, MAX_DLOAD_READ_TIME)) {
return II_DOWN_TIMEOUT;
}
if (!iiWriteBuf(pB, pSource->c, LOADWARE_BLOCK_SIZE)) {
return II_DOWN_BADVALID;
}
// If we just loaded the first block, wait for the fifo to empty an extra
// long time to allow for any special startup code in the firmware, like
// sending status messages to the LCD's.
if (loadedFirst) {
if (!iiWaitForTxEmpty(pB, MAX_DLOAD_START_TIME)) {
return II_DOWN_TIMEOUT;
}
}
// Determine whether this was our last block!
if (--(pB->i2eToLoad)) {
return II_DOWN_CONTINUING; // more to come...
}
// It WAS our last block: Clean up operations...
// ...Wait for last buffer to drain from the board...
if (!iiWaitForTxEmpty(pB, MAX_DLOAD_READ_TIME)) {
return II_DOWN_TIMEOUT;
}
// If there were only a single block written, this would come back
// immediately and be harmless, though not strictly necessary.
itemp = MAX_DLOAD_ACK_TIME/10;
while (--itemp) {
if (I2_HAS_INPUT(pB)) {
switch (inb(pB->i2eData)) {
case LOADWARE_OK:
pB->i2eState =
isStandard ? II_STATE_STDLOADED :II_STATE_LOADED;
// Some revisions of the bootstrap firmware (e.g. ISA-8 1.0.2)
				// will, if there is a debug port attached, require some
// time to send information to the debug port now. It will do
				// this before executing any of the code we just downloaded.
// It may take up to 700 milliseconds.
if (pB->i2ePom.e.porDiag2 & POR_DEBUG_PORT) {
iiDelay(pB, 700);
}
return II_DOWN_GOOD;
case LOADWARE_BAD:
default:
return II_DOWN_BAD;
}
}
iiDelay(pB, 10); // 10 mS granularity on checking condition
}
// Drop-through --> timed out waiting for firmware confirmation
pB->i2eState = II_STATE_BADLOAD;
return II_DOWN_TIMEOUT;
}
//******************************************************************************
// Function: iiDownloadAll(pB, pSource, isStandard, size)
// Parameters: pB - pointer to board structure
// pSource - loadware block to download
// isStandard - True if "standard" loadware, else false.
// size - size of data to download (in bytes)
//
// Returns: Success or Failure
//
// Description:
//
// Given a pointer to a board structure, a pointer to the beginning of some
// loadware, whether it is considered the "standard loadware", and the size of
// the array in bytes loads the entire array to the board as loadware.
//
// Assumes the board has been freshly reset and the power-up reset message read.
// (i.e., in II_STATE_READY). Complains if state is bad, or if there seems to be
// too much or too little data to load, or if iiDownloadBlock complains.
//******************************************************************************
static int
iiDownloadAll(i2eBordStrPtr pB, loadHdrStrPtr pSource, int isStandard, int size)
{
int status;
// We know (from context) board should be ready for the first block of
// download. Complain if not.
if (pB->i2eState != II_STATE_READY) return II_DOWN_BADSTATE;
while (size > 0) {
size -= LOADWARE_BLOCK_SIZE; // How much data should there be left to
// load after the following operation ?
// Note we just bump pSource by "one", because its size is actually that
// of an entire block, same as LOADWARE_BLOCK_SIZE.
status = iiDownloadBlock(pB, pSource++, isStandard);
switch(status)
{
case II_DOWN_GOOD:
return ( (size > 0) ? II_DOWN_OVER : II_DOWN_GOOD);
case II_DOWN_CONTINUING:
break;
default:
return status;
}
}
// We shouldn't drop out: it means "while" caught us with nothing left to
// download, yet the previous DownloadBlock did not return complete. Ergo,
// not enough data to match the size byte in the header.
return II_DOWN_UNDER;
}
| drod2169/drodspeed-cfs | drivers/char/ip2/i2ellis.c | C | gpl-2.0 | 45,410 |
/*
* signal quiesce handler
*
* Copyright IBM Corp. 1999, 2004
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include "sclp.h"
static void (*old_machine_restart)(char *);
static void (*old_machine_halt)(void);
static void (*old_machine_power_off)(void);
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void do_machine_quiesce(void)
{
psw_t quiesce_psw;
smp_send_stop();
quiesce_psw.mask =
PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
quiesce_psw.addr = 0xfff;
__load_psw(quiesce_psw);
}
/* Handler for quiesce event. Start shutdown procedure. */
static void sclp_quiesce_handler(struct evbuf_header *evbuf)
{
if (_machine_restart != (void *) do_machine_quiesce) {
old_machine_restart = _machine_restart;
old_machine_halt = _machine_halt;
old_machine_power_off = _machine_power_off;
_machine_restart = (void *) do_machine_quiesce;
_machine_halt = do_machine_quiesce;
_machine_power_off = do_machine_quiesce;
}
ctrl_alt_del();
}
/* Undo machine restart/halt/power_off modification on resume */
static void sclp_quiesce_pm_event(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
switch (sclp_pm_event) {
case SCLP_PM_EVENT_RESTORE:
if (old_machine_restart) {
_machine_restart = old_machine_restart;
_machine_halt = old_machine_halt;
_machine_power_off = old_machine_power_off;
old_machine_restart = NULL;
old_machine_halt = NULL;
old_machine_power_off = NULL;
}
break;
case SCLP_PM_EVENT_FREEZE:
case SCLP_PM_EVENT_THAW:
break;
}
}
static struct sclp_register sclp_quiesce_event = {
.receive_mask = EVTYP_SIGQUIESCE_MASK,
.receiver_fn = sclp_quiesce_handler,
.pm_event_fn = sclp_quiesce_pm_event
};
/* Initialize quiesce driver. */
static int __init sclp_quiesce_init(void)
{
return sclp_register(&sclp_quiesce_event);
}
module_init(sclp_quiesce_init);
| oliliango/linux-cedarview_gfx | drivers/s390/char/sclp_quiesce.c | C | gpl-2.0 | 2,202 |
/*
* cx18 ADEC VBI functions
*
* Derived from cx25840-vbi.c
*
* Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include "cx18-driver.h"
/*
* For sliced VBI output, we set up to use VIP-1.1, 8-bit mode,
* NN counts 1 byte Dwords, an IDID with the VBI line # in it.
* Thus, according to the VIP-2 Spec, our VBI ancillary data lines
* (should!) look like:
* 4 byte EAV code: 0xff 0x00 0x00 0xRP
* unknown number of possible idle bytes
* 3 byte Anc data preamble: 0x00 0xff 0xff
* 1 byte data identifier: ne010iii (parity bits, 010, DID bits)
* 1 byte secondary data id: nessssss (parity bits, SDID bits)
* 1 byte data word count: necccccc (parity bits, NN Dword count)
* 2 byte Internal DID: VBI-line-# 0x80
* NN data bytes
* 1 byte checksum
* Fill bytes needed to fil out to 4*NN bytes of payload
*
* The RP codes for EAVs when in VIP-1.1 mode, not in raw mode, &
* in the vertical blanking interval are:
* 0xb0 (Task 0 VerticalBlank HorizontalBlank 0 0 0 0)
* 0xf0 (Task EvenField VerticalBlank HorizontalBlank 0 0 0 0)
*
* Since the V bit is only allowed to toggle in the EAV RP code, just
* before the first active region line and for active lines, they are:
* 0x90 (Task 0 0 HorizontalBlank 0 0 0 0)
* 0xd0 (Task EvenField 0 HorizontalBlank 0 0 0 0)
*
* The user application DID bytes we care about are:
* 0x91 (1 0 010 0 !ActiveLine AncDataPresent)
* 0x55 (0 1 010 2ndField !ActiveLine AncDataPresent)
*
*/
static const u8 sliced_vbi_did[2] = { 0x91, 0x55 };
struct vbi_anc_data {
/* u8 eav[4]; */
/* u8 idle[]; Variable number of idle bytes */
u8 preamble[3];
u8 did;
u8 sdid;
u8 data_count;
u8 idid[2];
u8 payload[1]; /* data_count of payload */
/* u8 checksum; */
/* u8 fill[]; Variable number of fill bytes */
};
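/*
 * A purely hypothetical packet, not captured from hardware, to illustrate
 * how cx18_av_decode_vbi_line() below consumes the fields above:
 *
 *   preamble = 00 ff ff, did = 0x91 (first field), sdid low nibble = 6,
 *   idid[0] & 0x3f = 21, idid[1] = 0x80
 *
 * would be reported as V4L2_SLICED_CAPTION_525 on line 21 (plus the slicer
 * line offset), with vbi->p left pointing at the payload bytes, provided the
 * two closed caption bytes pass the odd-parity check.
 */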
static int odd_parity(u8 c)
{
c ^= (c >> 4);
c ^= (c >> 2);
c ^= (c >> 1);
return c & 1;
}
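/*
 * Decode the biphase-coded VPS payload: each pair of input bytes maps through
 * biphase_tbl to one output byte (high nibble from the first byte of the
 * pair, low nibble from the second), while any table bits in the 0xf0 mask
 * accumulate as an error indication, so a non-zero return means the line was
 * not a clean biphase signal.
 */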
static int decode_vps(u8 *dst, u8 *p)
{
static const u8 biphase_tbl[] = {
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
0xc3, 0x4b, 0x43, 0xc3, 0x87, 0x0f, 0x07, 0x87,
0x83, 0x0b, 0x03, 0x83, 0xc3, 0x4b, 0x43, 0xc3,
0xc1, 0x49, 0x41, 0xc1, 0x85, 0x0d, 0x05, 0x85,
0x81, 0x09, 0x01, 0x81, 0xc1, 0x49, 0x41, 0xc1,
0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
0xc2, 0x4a, 0x42, 0xc2, 0x86, 0x0e, 0x06, 0x86,
0x82, 0x0a, 0x02, 0x82, 0xc2, 0x4a, 0x42, 0xc2,
0xc0, 0x48, 0x40, 0xc0, 0x84, 0x0c, 0x04, 0x84,
0x80, 0x08, 0x00, 0x80, 0xc0, 0x48, 0x40, 0xc0,
0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
};
u8 c, err = 0;
int i;
for (i = 0; i < 2 * 13; i += 2) {
err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
c = (biphase_tbl[p[i + 1]] & 0xf) |
((biphase_tbl[p[i]] & 0xf) << 4);
dst[i / 2] = c;
}
return err & 0xf0;
}
int cx18_av_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
V4L2_SLICED_CAPTION_525, /* 6 */
0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */
0, 0, 0, 0
};
int is_pal = !(state->std & V4L2_STD_525_60);
int i;
memset(svbi->service_lines, 0, sizeof(svbi->service_lines));
svbi->service_set = 0;
/* we're done if raw VBI is active */
if ((cx18_av_read(cx, 0x404) & 0x10) == 0)
return 0;
if (is_pal) {
for (i = 7; i <= 23; i++) {
u8 v = cx18_av_read(cx, 0x424 + i - 7);
svbi->service_lines[0][i] = lcr2vbi[v >> 4];
svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
svbi->service_set |= svbi->service_lines[0][i] |
svbi->service_lines[1][i];
}
} else {
for (i = 10; i <= 21; i++) {
u8 v = cx18_av_read(cx, 0x424 + i - 10);
svbi->service_lines[0][i] = lcr2vbi[v >> 4];
svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
svbi->service_set |= svbi->service_lines[0][i] |
svbi->service_lines[1][i];
}
}
return 0;
}
int cx18_av_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
/* Setup standard */
cx18_av_std_setup(cx);
/* VBI Offset */
cx18_av_write(cx, 0x47f, state->slicer_line_delay);
cx18_av_write(cx, 0x404, 0x2e);
return 0;
}
int cx18_av_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
int is_pal = !(state->std & V4L2_STD_525_60);
int i, x;
u8 lcr[24];
for (x = 0; x <= 23; x++)
lcr[x] = 0x00;
/* Setup standard */
cx18_av_std_setup(cx);
/* Sliced VBI */
cx18_av_write(cx, 0x404, 0x32); /* Ancillary data */
cx18_av_write(cx, 0x406, 0x13);
cx18_av_write(cx, 0x47f, state->slicer_line_delay);
/* Force impossible lines to 0 */
if (is_pal) {
for (i = 0; i <= 6; i++)
svbi->service_lines[0][i] =
svbi->service_lines[1][i] = 0;
} else {
for (i = 0; i <= 9; i++)
svbi->service_lines[0][i] =
svbi->service_lines[1][i] = 0;
for (i = 22; i <= 23; i++)
svbi->service_lines[0][i] =
svbi->service_lines[1][i] = 0;
}
/* Build register values for requested service lines */
for (i = 7; i <= 23; i++) {
for (x = 0; x <= 1; x++) {
switch (svbi->service_lines[1-x][i]) {
case V4L2_SLICED_TELETEXT_B:
lcr[i] |= 1 << (4 * x);
break;
case V4L2_SLICED_WSS_625:
lcr[i] |= 4 << (4 * x);
break;
case V4L2_SLICED_CAPTION_525:
lcr[i] |= 6 << (4 * x);
break;
case V4L2_SLICED_VPS:
lcr[i] |= 9 << (4 * x);
break;
}
}
}
if (is_pal) {
for (x = 1, i = 0x424; i <= 0x434; i++, x++)
cx18_av_write(cx, i, lcr[6 + x]);
} else {
for (x = 1, i = 0x424; i <= 0x430; i++, x++)
cx18_av_write(cx, i, lcr[9 + x]);
for (i = 0x431; i <= 0x434; i++)
cx18_av_write(cx, i, 0);
}
cx18_av_write(cx, 0x43c, 0x16);
/* Should match vblank set in cx18_av_std_setup() */
cx18_av_write(cx, 0x474, is_pal ? 38 : 26);
return 0;
}
int cx18_av_decode_vbi_line(struct v4l2_subdev *sd,
struct v4l2_decode_vbi_line *vbi)
{
struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
struct vbi_anc_data *anc = (struct vbi_anc_data *)vbi->p;
u8 *p;
int did, sdid, l, err = 0;
/*
* Check for the ancillary data header for sliced VBI
*/
if (anc->preamble[0] ||
anc->preamble[1] != 0xff || anc->preamble[2] != 0xff ||
(anc->did != sliced_vbi_did[0] &&
anc->did != sliced_vbi_did[1])) {
vbi->line = vbi->type = 0;
return 0;
}
did = anc->did;
sdid = anc->sdid & 0xf;
l = anc->idid[0] & 0x3f;
l += state->slicer_line_offset;
p = anc->payload;
/* Decode the SDID set by the slicer */
switch (sdid) {
case 1:
sdid = V4L2_SLICED_TELETEXT_B;
break;
case 4:
sdid = V4L2_SLICED_WSS_625;
break;
case 6:
sdid = V4L2_SLICED_CAPTION_525;
err = !odd_parity(p[0]) || !odd_parity(p[1]);
break;
case 9:
sdid = V4L2_SLICED_VPS;
if (decode_vps(p, p) != 0)
err = 1;
break;
default:
sdid = 0;
err = 1;
break;
}
vbi->type = err ? 0 : sdid;
vbi->line = err ? 0 : l;
vbi->is_second_field = err ? 0 : (did == sliced_vbi_did[1]);
vbi->p = p;
return 0;
}
| AndroidGX/SimpleGX-L-5.0.2_BOE2_G901F | drivers/media/pci/cx18/cx18-av-vbi.c | C | gpl-2.0 | 8,979 |
/*
* arch/sh/kernel/cpu/sh2a/opcode_helper.c
*
* Helper for the SH-2A 32-bit opcodes.
*
* Copyright (C) 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <asm/system.h>
/*
* Instructions on SH are generally fixed at 16-bits, however, SH-2A
* introduces some 32-bit instructions. Since there are no real
* constraints on their use (and they can be mixed and matched), we need
* to check the instruction encoding to work out if it's a true 32-bit
* instruction or not.
*
* Presently, 32-bit opcodes have only slight variations in what the
* actual encoding looks like in the first-half of the instruction, which
* makes it fairly straightforward to differentiate from the 16-bit ones.
*
* First 16-bits of encoding Used by
*
* 0011nnnnmmmm0001 mov.b, mov.w, mov.l, fmov.d,
* fmov.s, movu.b, movu.w
*
* 0011nnnn0iii1001 bclr.b, bld.b, bset.b, bst.b, band.b,
* bandnot.b, bldnot.b, bor.b, bornot.b,
* bxor.b
*
* 0000nnnniiii0000 movi20
* 0000nnnniiii0001 movi20s
*/
unsigned int instruction_size(unsigned int insn)
{
/* Look for the common cases */
switch ((insn & 0xf00f)) {
case 0x0000: /* movi20 */
case 0x0001: /* movi20s */
case 0x3001: /* 32-bit mov/fmov/movu variants */
return 4;
}
/* And the special cases.. */
switch ((insn & 0xf08f)) {
case 0x3009: /* 32-bit b*.b bit operations */
return 4;
}
return 2;
}
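/*
 * Hedged usage sketch (not part of the original file): a caller stepping
 * through a mixed 16-bit/32-bit instruction stream would typically advance
 * by the size returned here, e.g.
 *
 *	u16 *addr = start;
 *	while (addr < end)
 *		addr += instruction_size(*addr) / sizeof(u16);
 *
 * where start, end and addr are placeholder names for illustration only.
 */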
| carbonsoft/kernel | arch/sh/kernel/cpu/sh2a/opcode_helper.c | C | gpl-2.0 | 1,572 |
/* conf.c
* Copyright (C) 2002-2005 PCSX2 Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*
* PCSX2 members can be contacted through their website at www.pcsx2.net.
*/
#include <errno.h> // errno
#include <stddef.h> // NULL
#include <stdio.h> // sprintf()
#include <stdlib.h> // getenv()
#include <string.h> // strerror()
#include <sys/stat.h> // mkdir(), stat()
#include <sys/types.h> // mkdir(), stat()
#include <unistd.h> // stat()
// #define CDVDdefs
// #include "../PS2Edefs.h"
#include "logfile.h"
#include "../ini.h"
#include "conf.h"
const char *cfgname[] = { \
"./cfg/cfgCDVDlinuz", \
"../cfg/cfgCDVDlinuz", \
"./plugins/cfgCDVDlinuz", \
"../plugins/cfgCDVDlinuz", \
"./cfgCDVDlinuz", \
"../cfgCDVDlinuz", \
NULL };
const char *confnames[] = { "Device", NULL };
const u8 defaultdevice[] = DEFAULT_DEVICE;
const char defaulthome[] = "../inis";
const char defaultdirectory[] = ".PS2E";
const char defaultfile[] = "CDVDlinuz.ini";
char confdirname[256];
char conffilename[256];
CDVDconf conf;
void ExecCfg(char *arg) {
int nameptr;
struct stat filestat;
char templine[256];
#ifdef VERBOSE_FUNCTION_CONF
PrintLog("CDVDiso interface: ExecCfg(%s)", arg);
#endif /* VERBOSE FUNCTION_CONF */
errno = 0;
nameptr = 0;
while((cfgname[nameptr] != NULL) &&
(stat(cfgname[nameptr], &filestat) == -1)) nameptr++;
errno = 0;
if(cfgname[nameptr] == NULL) {
#ifdef VERBOSE_FUNCTION_CONF
PrintLog("CDVDiso interface: Couldn't find configuration program!");
#endif /* VERBOSE_FUNCTION_CONF */
return;
} // ENDIF- Did not find the executable?
sprintf(templine, "%s %s", cfgname[nameptr], arg);
system(templine);
} // END ExecCfg()
void InitConf() {
int i;
int pos;
char *envptr;
#ifdef VERBOSE_FUNCTION_CONF
PrintLog("CDVD config: InitConf()");
#endif /* VERBOSE_FUNCTION_CONF */
i = 0;
while((i < 255) && defaultdevice[i] != 0) {
conf.devicename[i] = defaultdevice[i];
i++;
} // ENDWHILE- copying the default CD/DVD name in
conf.devicename[i] = 0; // 0-terminate the device name
// Locating directory and file positions
pos = 0;
envptr = getenv("HOME");
if(envptr == NULL) {
// = <Default Home>
i = 0;
while((pos < 253) && (defaulthome[i] != 0)) {
confdirname[pos] = defaulthome[i];
conffilename[pos] = defaulthome[i];
pos++;
i++;
} // NEXT- putting a default place to store configuration data
} else {
// = <Env Home>/<Default Directory>
i = 0;
while((pos < 253) && (*(envptr + i) != 0)) {
confdirname[pos] = *(envptr + i);
conffilename[pos] = *(envptr + i);
pos++;
i++;
} // ENDWHILE- copying home directory info in
if(confdirname[pos-1] != '/') {
confdirname[pos] = '/';
conffilename[pos] = '/';
pos++;
} // ENDIF- No directory separator here? Add one.
i = 0;
while((pos < 253) && (defaultdirectory[i] != 0)) {
confdirname[pos] = defaultdirectory[i];
conffilename[pos] = defaultdirectory[i];
pos++;
i++;
} // NEXT- putting a default place to store configuration data
} // ENDIF- No Home directory?
confdirname[pos] = 0; // Directory reference finished
// += /<Config File Name>
if(conffilename[pos-1] != '/') {
conffilename[pos] = '/';
pos++;
} // ENDIF- No directory separator here? Add one.
i = 0;
while((pos < 253) && (defaultfile[i] != 0)) {
conffilename[pos] = defaultfile[i];
pos++;
i++;
} // NEXT- putting a default place to store configuration data
conffilename[pos] = 0; // File reference finished
#ifdef VERBOSE_FUNCTION_CONF
PrintLog("CDVD config: Directory: %s\n", confdirname);
PrintLog("CDVD config: File: %s\n", conffilename);
#endif /* VERBOSE_FUNCTION_CONF */
} // END InitConf()
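// Illustrative example only (actual values depend on the environment): with
// HOME=/home/user the code above yields confdirname = "/home/user/.PS2E" and
// conffilename = "/home/user/.PS2E/CDVDlinuz.ini"; with HOME unset it falls
// back to "../inis" and "../inis/CDVDlinuz.ini".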
void LoadConf() {
int retval;
#ifdef VERBOSE_FUNCTION_CONF
PrintLog("CDVD config: LoadConf()\n");
#endif /* VERBOSE_FUNCTION_CONF */
retval = INILoadString(conffilename, "Settings", "Device", conf.devicename);
if(retval < 0) {
sprintf(conf.devicename, "/dev/dvd");
} // ENDIF- Couldn't find keyword? Fill in a default
} // END LoadConf()
void SaveConf() {
#ifdef VERBOSE_FUNCTION_CONF
PrintLog("CDVD config: SaveConf()\n");
#endif /* VERBOSE_FUNCTION_CONF */
mkdir(confdirname, 0755);
INISaveString(conffilename, "Settings", "Device", conf.devicename);
} // END SaveConf()
| adammenges/pcsx2 | plugins/CDVDlinuz/Src/Linux/conf.c | C | gpl-2.0 | 5,093 |
using System;
using System.Collections.Generic;
using System.Linq;
namespace MediaBrowser.Model.Extensions
{
public static class ListHelper
{
public static bool ContainsIgnoreCase(List<string> list, string value)
{
if (value == null)
{
throw new ArgumentNullException("value");
}
return list.Contains(value, StringComparer.OrdinalIgnoreCase);
}
public static bool ContainsIgnoreCase(string[] list, string value)
{
if (value == null)
{
throw new ArgumentNullException("value");
}
return list.Contains(value, StringComparer.OrdinalIgnoreCase);
}
public static bool ContainsAnyIgnoreCase(string[] list, string[] values)
{
if (values == null)
{
throw new ArgumentNullException("values");
}
foreach (string val in values)
{
if (ContainsIgnoreCase(list, val))
{
return true;
}
}
return false;
}
}
}
| paul-777/Emby | MediaBrowser.Model/Extensions/ListHelper.cs | C# | gpl-2.0 | 1,181 |
//
// Copyright(C) 1993-1996 Id Software, Inc.
// Copyright(C) 1993-2008 Raven Software
// Copyright(C) 2005-2014 Simon Howard
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// R_draw.c
#include "doomdef.h"
#include "deh_str.h"
#include "r_local.h"
#include "i_video.h"
#include "v_video.h"
/*
All drawing to the view buffer is accomplished in this file. The other refresh
files only know about coordinates, not the architecture of the frame buffer.
*/
byte *viewimage;
int viewwidth, scaledviewwidth, viewheight, viewwindowx, viewwindowy;
byte *ylookup[MAXHEIGHT];
int columnofs[MAXWIDTH];
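// ylookup[y] points at the start of view row y in the frame buffer and
// columnofs[x] is the byte offset of view column x within that row, so the
// drawers below address a single pixel as ylookup[dc_yl] + columnofs[dc_x].
// Both tables are filled in when the view window is established (see
// R_InitBuffer()).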
byte translations[3][256]; // color tables for different players
/*
==================
=
= R_DrawColumn
=
= Source is the top of the column to scale
=
==================
*/
lighttable_t *dc_colormap;
int dc_x;
int dc_yl;
int dc_yh;
fixed_t dc_iscale;
fixed_t dc_texturemid;
byte *dc_source; // first pixel in a column (possibly virtual)
int dccount; // just for profiling
void R_DrawColumn(void)
{
int count;
byte *dest;
fixed_t frac, fracstep;
count = dc_yh - dc_yl;
if (count < 0)
return;
#ifdef RANGECHECK
if ((unsigned) dc_x >= SCREENWIDTH || dc_yl < 0 || dc_yh >= SCREENHEIGHT)
I_Error("R_DrawColumn: %i to %i at %i", dc_yl, dc_yh, dc_x);
#endif
dest = ylookup[dc_yl] + columnofs[dc_x];
fracstep = dc_iscale;
frac = dc_texturemid + (dc_yl - centery) * fracstep;
do
{
*dest = dc_colormap[dc_source[(frac >> FRACBITS) & 127]];
dest += SCREENWIDTH;
frac += fracstep;
}
while (count--);
}
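/*
 * Illustrative sketch (not part of the original file): the 16.16
 * fixed-point stepping used by R_DrawColumn above.  frac advances by
 * dc_iscale once per destination pixel; shifting right by FRACBITS
 * recovers the integer texel row, and the "& 127" wraps within the
 * 128-texel column height.  The helper name is hypothetical.
 */
#if 0
static int example_texel_row(fixed_t frac)
{
    return (frac >> FRACBITS) & 127;    // texel index, wrapped to 0..127
}
#endif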
void R_DrawColumnLow(void)
{
int count;
byte *dest;
fixed_t frac, fracstep;
count = dc_yh - dc_yl;
if (count < 0)
return;
#ifdef RANGECHECK
if ((unsigned) dc_x >= SCREENWIDTH || dc_yl < 0 || dc_yh >= SCREENHEIGHT)
I_Error("R_DrawColumn: %i to %i at %i", dc_yl, dc_yh, dc_x);
// dccount++;
#endif
dest = ylookup[dc_yl] + columnofs[dc_x];
fracstep = dc_iscale;
frac = dc_texturemid + (dc_yl - centery) * fracstep;
do
{
*dest = dc_colormap[dc_source[(frac >> FRACBITS) & 127]];
dest += SCREENWIDTH;
frac += fracstep;
}
while (count--);
}
// Translucent column draw - blended with background using tinttable.
void R_DrawTLColumn(void)
{
int count;
byte *dest;
fixed_t frac, fracstep;
if (!dc_yl)
dc_yl = 1;
if (dc_yh == viewheight - 1)
dc_yh = viewheight - 2;
count = dc_yh - dc_yl;
if (count < 0)
return;
#ifdef RANGECHECK
if ((unsigned) dc_x >= SCREENWIDTH || dc_yl < 0 || dc_yh >= SCREENHEIGHT)
I_Error("R_DrawTLColumn: %i to %i at %i", dc_yl, dc_yh, dc_x);
#endif
dest = ylookup[dc_yl] + columnofs[dc_x];
fracstep = dc_iscale;
frac = dc_texturemid + (dc_yl - centery) * fracstep;
do
{
*dest =
tinttable[((*dest) << 8) +
dc_colormap[dc_source[(frac >> FRACBITS) & 127]]];
dest += SCREENWIDTH;
frac += fracstep;
}
while (count--);
}
/*
========================
=
= R_DrawTranslatedColumn
=
========================
*/
byte *dc_translation;
byte *translationtables;
void R_DrawTranslatedColumn(void)
{
int count;
byte *dest;
fixed_t frac, fracstep;
count = dc_yh - dc_yl;
if (count < 0)
return;
#ifdef RANGECHECK
if ((unsigned) dc_x >= SCREENWIDTH || dc_yl < 0 || dc_yh >= SCREENHEIGHT)
I_Error("R_DrawColumn: %i to %i at %i", dc_yl, dc_yh, dc_x);
#endif
dest = ylookup[dc_yl] + columnofs[dc_x];
fracstep = dc_iscale;
frac = dc_texturemid + (dc_yl - centery) * fracstep;
do
{
*dest = dc_colormap[dc_translation[dc_source[frac >> FRACBITS]]];
dest += SCREENWIDTH;
frac += fracstep;
}
while (count--);
}
void R_DrawTranslatedTLColumn(void)
{
int count;
byte *dest;
fixed_t frac, fracstep;
count = dc_yh - dc_yl;
if (count < 0)
return;
#ifdef RANGECHECK
if ((unsigned) dc_x >= SCREENWIDTH || dc_yl < 0 || dc_yh >= SCREENHEIGHT)
I_Error("R_DrawColumn: %i to %i at %i", dc_yl, dc_yh, dc_x);
#endif
dest = ylookup[dc_yl] + columnofs[dc_x];
fracstep = dc_iscale;
frac = dc_texturemid + (dc_yl - centery) * fracstep;
do
{
*dest = tinttable[((*dest) << 8)
+
dc_colormap[dc_translation
[dc_source[frac >> FRACBITS]]]];
dest += SCREENWIDTH;
frac += fracstep;
}
while (count--);
}
//--------------------------------------------------------------------------
//
// PROC R_InitTranslationTables
//
//--------------------------------------------------------------------------
void R_InitTranslationTables(void)
{
int i;
V_LoadTintTable();
// Allocate translation tables
translationtables = Z_Malloc(256 * 3, PU_STATIC, 0);
// Fill out the translation tables
for (i = 0; i < 256; i++)
{
if (i >= 225 && i <= 240)
{
translationtables[i] = 114 + (i - 225); // yellow
translationtables[i + 256] = 145 + (i - 225); // red
translationtables[i + 512] = 190 + (i - 225); // blue
}
else
{
translationtables[i] = translationtables[i + 256]
= translationtables[i + 512] = i;
}
}
}
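/*
 * Illustrative sketch (not part of the original file): picking one of the
 * three 256-byte remap tables filled in above before drawing a translated
 * column.  Index 0 selects the yellow range, 1 the red range, 2 the blue
 * range, following the fill order in R_InitTranslationTables; the helper
 * name is hypothetical.
 */
#if 0
static void example_select_translation(int which)
{
    dc_translation = translationtables + which * 256;
}
#endif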
/*
================
=
= R_DrawSpan
=
================
*/
int ds_y;
int ds_x1;
int ds_x2;
lighttable_t *ds_colormap;
fixed_t ds_xfrac;
fixed_t ds_yfrac;
fixed_t ds_xstep;
fixed_t ds_ystep;
byte *ds_source; // start of a 64*64 tile image
int dscount; // just for profiling
void R_DrawSpan(void)
{
fixed_t xfrac, yfrac;
byte *dest;
int count, spot;
#ifdef RANGECHECK
if (ds_x2 < ds_x1 || ds_x1 < 0 || ds_x2 >= SCREENWIDTH
|| (unsigned) ds_y > SCREENHEIGHT)
I_Error("R_DrawSpan: %i to %i at %i", ds_x1, ds_x2, ds_y);
// dscount++;
#endif
xfrac = ds_xfrac;
yfrac = ds_yfrac;
dest = ylookup[ds_y] + columnofs[ds_x1];
count = ds_x2 - ds_x1;
do
{
spot = ((yfrac >> (16 - 6)) & (63 * 64)) + ((xfrac >> 16) & 63);
*dest++ = ds_colormap[ds_source[spot]];
xfrac += ds_xstep;
yfrac += ds_ystep;
}
while (count--);
}
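/*
 * Illustrative sketch (not part of the original file): the flat lookup in
 * R_DrawSpan above indexes a 64*64 texture stored row-major.  Shifting
 * yfrac right by (16 - 6) leaves the integer y already multiplied by 64,
 * so masking with 63*64 and adding the masked x yields y*64 + x; the
 * helper below computes the same value the long way.
 */
#if 0
static int example_flat_spot(fixed_t xfrac, fixed_t yfrac)
{
    int x = (xfrac >> 16) & 63;
    int y = (yfrac >> 16) & 63;

    return y * 64 + x;
}
#endif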
void R_DrawSpanLow(void)
{
fixed_t xfrac, yfrac;
byte *dest;
int count, spot;
#ifdef RANGECHECK
if (ds_x2 < ds_x1 || ds_x1 < 0 || ds_x2 >= SCREENWIDTH
|| (unsigned) ds_y > SCREENHEIGHT)
I_Error("R_DrawSpan: %i to %i at %i", ds_x1, ds_x2, ds_y);
// dscount++;
#endif
xfrac = ds_xfrac;
yfrac = ds_yfrac;
dest = ylookup[ds_y] + columnofs[ds_x1];
count = ds_x2 - ds_x1;
do
{
spot = ((yfrac >> (16 - 6)) & (63 * 64)) + ((xfrac >> 16) & 63);
*dest++ = ds_colormap[ds_source[spot]];
xfrac += ds_xstep;
yfrac += ds_ystep;
}
while (count--);
}
/*
================
=
= R_InitBuffer
=
=================
*/
void R_InitBuffer(int width, int height)
{
int i;
viewwindowx = (SCREENWIDTH - width) >> 1;
for (i = 0; i < width; i++)
columnofs[i] = viewwindowx + i;
if (width == SCREENWIDTH)
viewwindowy = 0;
else
viewwindowy = (SCREENHEIGHT - SBARHEIGHT - height) >> 1;
for (i = 0; i < height; i++)
ylookup[i] = I_VideoBuffer + (i + viewwindowy) * SCREENWIDTH;
}
/*
==================
=
= R_DrawViewBorder
=
= Draws the border around the view for different size windows
==================
*/
boolean BorderNeedRefresh;
void R_DrawViewBorder(void)
{
byte *src, *dest;
int x, y;
if (scaledviewwidth == SCREENWIDTH)
return;
if (gamemode == shareware)
{
src = W_CacheLumpName(DEH_String("FLOOR04"), PU_CACHE);
}
else
{
src = W_CacheLumpName(DEH_String("FLAT513"), PU_CACHE);
}
dest = I_VideoBuffer;
for (y = 0; y < SCREENHEIGHT - SBARHEIGHT; y++)
{
for (x = 0; x < SCREENWIDTH / 64; x++)
{
memcpy(dest, src + ((y & 63) << 6), 64);
dest += 64;
}
if (SCREENWIDTH & 63)
{
memcpy(dest, src + ((y & 63) << 6), SCREENWIDTH & 63);
dest += (SCREENWIDTH & 63);
}
}
for (x = viewwindowx; x < viewwindowx + viewwidth; x += 16)
{
V_DrawPatch(x, viewwindowy - 4,
W_CacheLumpName(DEH_String("bordt"), PU_CACHE));
V_DrawPatch(x, viewwindowy + viewheight,
W_CacheLumpName(DEH_String("bordb"), PU_CACHE));
}
for (y = viewwindowy; y < viewwindowy + viewheight; y += 16)
{
V_DrawPatch(viewwindowx - 4, y,
W_CacheLumpName(DEH_String("bordl"), PU_CACHE));
V_DrawPatch(viewwindowx + viewwidth, y,
W_CacheLumpName(DEH_String("bordr"), PU_CACHE));
}
V_DrawPatch(viewwindowx - 4, viewwindowy - 4,
W_CacheLumpName(DEH_String("bordtl"), PU_CACHE));
V_DrawPatch(viewwindowx + viewwidth, viewwindowy - 4,
W_CacheLumpName(DEH_String("bordtr"), PU_CACHE));
V_DrawPatch(viewwindowx + viewwidth, viewwindowy + viewheight,
W_CacheLumpName(DEH_String("bordbr"), PU_CACHE));
V_DrawPatch(viewwindowx - 4, viewwindowy + viewheight,
W_CacheLumpName(DEH_String("bordbl"), PU_CACHE));
}
/*
==================
=
= R_DrawTopBorder
=
= Draws the top border around the view for different size windows
==================
*/
boolean BorderTopRefresh;
void R_DrawTopBorder(void)
{
byte *src, *dest;
int x, y;
if (scaledviewwidth == SCREENWIDTH)
return;
if (gamemode == shareware)
{
src = W_CacheLumpName(DEH_String("FLOOR04"), PU_CACHE);
}
else
{
src = W_CacheLumpName(DEH_String("FLAT513"), PU_CACHE);
}
dest = I_VideoBuffer;
for (y = 0; y < 30; y++)
{
for (x = 0; x < SCREENWIDTH / 64; x++)
{
memcpy(dest, src + ((y & 63) << 6), 64);
dest += 64;
}
if (SCREENWIDTH & 63)
{
memcpy(dest, src + ((y & 63) << 6), SCREENWIDTH & 63);
dest += (SCREENWIDTH & 63);
}
}
if (viewwindowy < 25)
{
for (x = viewwindowx; x < viewwindowx + viewwidth; x += 16)
{
V_DrawPatch(x, viewwindowy - 4,
W_CacheLumpName(DEH_String("bordt"), PU_CACHE));
}
V_DrawPatch(viewwindowx - 4, viewwindowy,
W_CacheLumpName(DEH_String("bordl"), PU_CACHE));
V_DrawPatch(viewwindowx + viewwidth, viewwindowy,
W_CacheLumpName(DEH_String("bordr"), PU_CACHE));
V_DrawPatch(viewwindowx - 4, viewwindowy + 16,
W_CacheLumpName(DEH_String("bordl"), PU_CACHE));
V_DrawPatch(viewwindowx + viewwidth, viewwindowy + 16,
W_CacheLumpName(DEH_String("bordr"), PU_CACHE));
V_DrawPatch(viewwindowx - 4, viewwindowy - 4,
W_CacheLumpName(DEH_String("bordtl"), PU_CACHE));
V_DrawPatch(viewwindowx + viewwidth, viewwindowy - 4,
W_CacheLumpName(DEH_String("bordtr"), PU_CACHE));
}
}
| Ch0wW/chocolate-doom | src/heretic/r_draw.c | C | gpl-2.0 | 11,833 |
/*
* Xen mmu operations
*
* This file contains the various mmu fetch and update operations.
* The most important job they must perform is the mapping between the
* domain's pfn and the overall machine mfns.
*
* Xen allows guests to directly update the pagetable, in a controlled
* fashion. In other words, the guest modifies the same pagetable
* that the CPU actually uses, which eliminates the overhead of having
* a separate shadow pagetable.
*
* In order to allow this, it falls on the guest domain to map its
* notion of a "physical" pfn - which is just a domain-local linear
* address - into a real "machine address" which the CPU's MMU can
* use.
*
* A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
* inserted directly into the pagetable. When creating a new
* pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
* when reading the content back with __(pgd|pmd|pte)_val, it converts
* the mfn back into a pfn.
*
* The other constraint is that all pages which make up a pagetable
* must be mapped read-only in the guest. This prevents uncontrolled
* guest updates to the pagetable. Xen strictly enforces this, and
* will disallow any pagetable update which will end up mapping a
* pagetable page RW, and will disallow using any writable page as a
* pagetable.
*
* Naively, when loading %cr3 with the base of a new pagetable, Xen
* would need to validate the whole pagetable before going on.
* Naturally, this is quite slow. The solution is to "pin" a
* pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
* that it is still valid when you do load it into %cr3, and doesn't
* need to revalidate it.
*
* Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
*/
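/*
 * Illustrative sketch (not part of the original file): how a guest-local
 * pfn becomes a machine frame before it can be written into a pagetable
 * entry, and the reverse on read.  pfn_to_mfn(), mfn_to_pfn() and
 * mfn_pte() are the real helpers used later in this file; the wrapper
 * itself is hypothetical and never compiled.
 */
#if 0
static pte_t example_make_guest_pte(unsigned long pfn, pgprot_t prot)
{
        unsigned long mfn = pfn_to_mfn(pfn);    /* domain pfn -> machine frame */

        return mfn_pte(mfn, prot);              /* the pte stores the mfn, not the pfn */
}
#endif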
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif
#include <trace/events/xen.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820/api.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"
#ifdef CONFIG_X86_32
/*
* Identity map, in addition to plain kernel map. This needs to be
* large enough to allocate page table pages to allocate the rest.
* Each page can map 2MB.
*/
#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */
/*
* Note about cr3 (pagetable base) values:
*
* xen_cr3 contains the current logical cr3 value; it contains the
* last set cr3. This may not be the current effective cr3, because
* its update may be being lazily deferred. However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
* be self-consistent.
*
* xen_current_cr3 contains the actual vcpu cr3; it is set once the
* hypercall to set the vcpu cr3 is complete (so it may be a little
* out of date, but it will never be set early). If one vcpu is
* looking at another vcpu's cr3 value, it should use this variable.
*/
DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
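/*
 * Illustrative sketch (not part of the original file): reading the two cr3
 * shadows declared above.  A vcpu looking at itself may use the lazily
 * updated xen_cr3; when inspecting another vcpu it must use
 * xen_current_cr3, which is only updated once the hypercall has actually
 * completed.  The helper names are hypothetical and never compiled.
 */
#if 0
static unsigned long example_own_cr3(void)
{
        return this_cpu_read(xen_cr3);
}

static unsigned long example_other_vcpu_cr3(int cpu)
{
        return per_cpu(xen_current_cr3, cpu);
}
#endif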
static phys_addr_t xen_pt_base, xen_pt_size __initdata;
/*
* Just beyond the highest usermode address. STACK_TOP_MAX has a
* redzone above it, so round it up to a PGD boundary.
*/
#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
void make_lowmem_page_readonly(void *vaddr)
{
pte_t *pte, ptev;
unsigned long address = (unsigned long)vaddr;
unsigned int level;
pte = lookup_address(address, &level);
if (pte == NULL)
return; /* vaddr missing */
ptev = pte_wrprotect(*pte);
if (HYPERVISOR_update_va_mapping(address, ptev, 0))
BUG();
}
void make_lowmem_page_readwrite(void *vaddr)
{
pte_t *pte, ptev;
unsigned long address = (unsigned long)vaddr;
unsigned int level;
pte = lookup_address(address, &level);
if (pte == NULL)
return; /* vaddr missing */
ptev = pte_mkwrite(*pte);
if (HYPERVISOR_update_va_mapping(address, ptev, 0))
BUG();
}
static bool xen_page_pinned(void *ptr)
{
struct page *page = virt_to_page(ptr);
return PagePinned(page);
}
void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
struct multicall_space mcs;
struct mmu_update *u;
trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
mcs = xen_mc_entry(sizeof(*u));
u = mcs.args;
/* ptep might be kmapped when using 32-bit HIGHPTE */
u->ptr = virt_to_machine(ptep).maddr;
u->val = pte_val_ma(pteval);
MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);
static void xen_extend_mmu_update(const struct mmu_update *update)
{
struct multicall_space mcs;
struct mmu_update *u;
mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
if (mcs.mc != NULL) {
mcs.mc->args[1]++;
} else {
mcs = __xen_mc_entry(sizeof(*u));
MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}
u = mcs.args;
*u = *update;
}
static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
struct multicall_space mcs;
struct mmuext_op *u;
mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
if (mcs.mc != NULL) {
mcs.mc->args[1]++;
} else {
mcs = __xen_mc_entry(sizeof(*u));
MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}
u = mcs.args;
*u = *op;
}
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
struct mmu_update u;
preempt_disable();
xen_mc_batch();
/* ptr may be ioremapped for 64-bit pagetable setup */
u.ptr = arbitrary_virt_to_machine(ptr).maddr;
u.val = pmd_val_ma(val);
xen_extend_mmu_update(&u);
xen_mc_issue(PARAVIRT_LAZY_MMU);
preempt_enable();
}
static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
trace_xen_mmu_set_pmd(ptr, val);
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
*ptr = val;
return;
}
xen_set_pmd_hyper(ptr, val);
}
/*
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
struct mmu_update u;
if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
return false;
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
u.val = pte_val_ma(pteval);
xen_extend_mmu_update(&u);
xen_mc_issue(PARAVIRT_LAZY_MMU);
return true;
}
static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
if (!xen_batched_set_pte(ptep, pteval)) {
/*
* Could call native_set_pte() here and trap and
* emulate the PTE write but with 32-bit guests this
* needs two traps (one for each of the two 32-bit
* words in the PTE) so do one hypercall directly
* instead.
*/
struct mmu_update u;
u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
u.val = pte_val_ma(pteval);
HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}
}
static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
trace_xen_mmu_set_pte(ptep, pteval);
__xen_set_pte(ptep, pteval);
}
static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
__xen_set_pte(ptep, pteval);
}
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
/* Just return the pte as-is. We preserve the bits on commit */
trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
return *ptep;
}
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
struct mmu_update u;
trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
u.val = pte_val_ma(pte);
xen_extend_mmu_update(&u);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
pteval_t flags = val & PTE_FLAGS_MASK;
if (unlikely(pfn == ~0))
val = flags & ~_PAGE_PRESENT;
else
val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
}
return val;
}
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
mfn = __pfn_to_mfn(pfn);
/*
* If there's no mfn for the pfn, then just create an
* empty non-present pte. Unfortunately this loses
* information about the original pfn, so
* pte_mfn_to_pfn is asymmetric.
*/
if (unlikely(mfn == INVALID_P2M_ENTRY)) {
mfn = 0;
flags = 0;
} else
mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
return val;
}
__visible pteval_t xen_pte_val(pte_t pte)
{
pteval_t pteval = pte.pte;
return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
__visible pte_t xen_make_pte(pteval_t pte)
{
pte = pte_pfn_to_mfn(pte);
return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
pgd = pte_pfn_to_mfn(pgd);
return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
struct mmu_update u;
preempt_disable();
xen_mc_batch();
/* ptr may be ioremapped for 64-bit pagetable setup */
u.ptr = arbitrary_virt_to_machine(ptr).maddr;
u.val = pud_val_ma(val);
xen_extend_mmu_update(&u);
xen_mc_issue(PARAVIRT_LAZY_MMU);
preempt_enable();
}
static void xen_set_pud(pud_t *ptr, pud_t val)
{
trace_xen_mmu_set_pud(ptr, val);
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
*ptr = val;
return;
}
xen_set_pud_hyper(ptr, val);
}
#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
trace_xen_mmu_set_pte_atomic(ptep, pte);
set_64bit((u64 *)ptep, native_pte_val(pte));
}
static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
trace_xen_mmu_pte_clear(mm, addr, ptep);
if (!xen_batched_set_pte(ptep, native_make_pte(0)))
native_pte_clear(mm, addr, ptep);
}
static void xen_pmd_clear(pmd_t *pmdp)
{
trace_xen_mmu_pmd_clear(pmdp);
set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */
__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
pmd = pte_pfn_to_mfn(pmd);
return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
#if CONFIG_PGTABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
__visible pud_t xen_make_pud(pudval_t pud)
{
pud = pte_pfn_to_mfn(pud);
return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
unsigned offset = pgd - pgd_page;
pgd_t *user_ptr = NULL;
if (offset < pgd_index(USER_LIMIT)) {
struct page *page = virt_to_page(pgd_page);
user_ptr = (pgd_t *)page->private;
if (user_ptr)
user_ptr += offset;
}
return user_ptr;
}
static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
struct mmu_update u;
u.ptr = virt_to_machine(ptr).maddr;
u.val = p4d_val_ma(val);
xen_extend_mmu_update(&u);
}
/*
 * Raw hypercall-based set_p4d, intended for use in early boot before
* there's a page structure. This implies:
* 1. The only existing pagetable is the kernel's
* 2. It is always pinned
* 3. It has no user pagetable attached to it
*/
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
preempt_disable();
xen_mc_batch();
__xen_set_p4d_hyper(ptr, val);
xen_mc_issue(PARAVIRT_LAZY_MMU);
preempt_enable();
}
static void xen_set_p4d(p4d_t *ptr, p4d_t val)
{
pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
pgd_t pgd_val;
trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
*ptr = val;
if (user_ptr) {
WARN_ON(xen_page_pinned(user_ptr));
pgd_val.pgd = p4d_val_ma(val);
*user_ptr = pgd_val;
}
return;
}
/* If it's pinned, then we can at least batch the kernel and
user updates together. */
xen_mc_batch();
__xen_set_p4d_hyper(ptr, val);
if (user_ptr)
__xen_set_p4d_hyper((p4d_t *)user_ptr, val);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif /* CONFIG_PGTABLE_LEVELS == 4 */
static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
bool last, unsigned long limit)
{
int i, nr, flush = 0;
nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
for (i = 0; i < nr; i++) {
if (!pmd_none(pmd[i]))
flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
}
return flush;
}
static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
bool last, unsigned long limit)
{
int i, nr, flush = 0;
nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
for (i = 0; i < nr; i++) {
pmd_t *pmd;
if (pud_none(pud[i]))
continue;
pmd = pmd_offset(&pud[i], 0);
if (PTRS_PER_PMD > 1)
flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
flush |= xen_pmd_walk(mm, pmd, func,
last && i == nr - 1, limit);
}
return flush;
}
static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
bool last, unsigned long limit)
{
int i, nr, flush = 0;
nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
for (i = 0; i < nr; i++) {
pud_t *pud;
if (p4d_none(p4d[i]))
continue;
pud = pud_offset(&p4d[i], 0);
if (PTRS_PER_PUD > 1)
flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
flush |= xen_pud_walk(mm, pud, func,
last && i == nr - 1, limit);
}
return flush;
}
/*
* (Yet another) pagetable walker. This one is intended for pinning a
* pagetable. This means that it walks a pagetable and calls the
* callback function on each page it finds making up the page table,
* at every level. It walks the entire pagetable, but it only bothers
* pinning pte pages which are below limit. In the normal case this
* will be STACK_TOP_MAX, but at boot we need to pin up to
* FIXADDR_TOP.
*
* For 32-bit the important bit is that we don't pin beyond there,
* because then we start getting into Xen's ptes.
*
* For 64-bit, we must skip the Xen hole in the middle of the address
* space, just after the big x86-64 virtual hole.
*/
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
int (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
unsigned long limit)
{
int i, nr, flush = 0;
unsigned hole_low, hole_high;
/* The limit is the last byte to be touched */
limit--;
BUG_ON(limit >= FIXADDR_TOP);
/*
* 64-bit has a great big hole in the middle of the address
* space, which contains the Xen mappings. On 32-bit these
 * will end up making a zero-sized hole, so this is a no-op.
*/
hole_low = pgd_index(USER_LIMIT);
hole_high = pgd_index(PAGE_OFFSET);
nr = pgd_index(limit) + 1;
for (i = 0; i < nr; i++) {
p4d_t *p4d;
if (i >= hole_low && i < hole_high)
continue;
if (pgd_none(pgd[i]))
continue;
p4d = p4d_offset(&pgd[i], 0);
if (PTRS_PER_P4D > 1)
flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
}
/* Do the top level last, so that the callbacks can use it as
a cue to do final things like tlb flushes. */
flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
return flush;
}
static int xen_pgd_walk(struct mm_struct *mm,
int (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
unsigned long limit)
{
return __xen_pgd_walk(mm, mm->pgd, func, limit);
}
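/*
 * Illustrative sketch (not part of the original file): the walker above is
 * driven by a callback invoked on every pagetable page below 'limit'.
 * xen_mark_pinned() and xen_pin_page() below are the real users; this
 * hypothetical callback merely counts pages and never requests a TLB
 * flush (a zero return means "no flush needed").
 */
#if 0
static int example_count_pt_page(struct mm_struct *mm, struct page *page,
                                 enum pt_level level)
{
        static unsigned long seen;

        seen++;                 /* one pagetable page visited */
        return 0;               /* no flush required */
}

/* Typical use: xen_pgd_walk(mm, example_count_pt_page, STACK_TOP_MAX); */
#endif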
/* If we're using split pte locks, then take the page's lock and
return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
spinlock_t *ptl = NULL;
#if USE_SPLIT_PTE_PTLOCKS
ptl = ptlock_ptr(page);
spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif
return ptl;
}
static void xen_pte_unlock(void *v)
{
spinlock_t *ptl = v;
spin_unlock(ptl);
}
static void xen_do_pin(unsigned level, unsigned long pfn)
{
struct mmuext_op op;
op.cmd = level;
op.arg1.mfn = pfn_to_mfn(pfn);
xen_extend_mmuext_op(&op);
}
static int xen_pin_page(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
unsigned pgfl = TestSetPagePinned(page);
int flush;
if (pgfl)
flush = 0; /* already pinned */
else if (PageHighMem(page))
/* kmaps need flushing if we found an unpinned
highpage */
flush = 1;
else {
void *pt = lowmem_page_address(page);
unsigned long pfn = page_to_pfn(page);
struct multicall_space mcs = __xen_mc_entry(0);
spinlock_t *ptl;
flush = 0;
/*
* We need to hold the pagetable lock between the time
* we make the pagetable RO and when we actually pin
* it. If we don't, then other users may come in and
* attempt to update the pagetable by writing it,
* which will fail because the memory is RO but not
* pinned, so Xen won't do the trap'n'emulate.
*
* If we're using split pte locks, we can't hold the
* entire pagetable's worth of locks during the
* traverse, because we may wrap the preempt count (8
* bits). The solution is to mark RO and pin each PTE
* page while holding the lock. This means the number
* of locks we end up holding is never more than a
* batch size (~32 entries, at present).
*
* If we're not using split pte locks, we needn't pin
* the PTE pages independently, because we're
* protected by the overall pagetable lock.
*/
ptl = NULL;
if (level == PT_PTE)
ptl = xen_pte_lock(page, mm);
MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
pfn_pte(pfn, PAGE_KERNEL_RO),
level == PT_PGD ? UVMF_TLB_FLUSH : 0);
if (ptl) {
xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
/* Queue a deferred unlock for when this batch
is completed. */
xen_mc_callback(xen_pte_unlock, ptl);
}
}
return flush;
}
/* This is called just after a mm has been created, but it has not
been used yet. We need to make sure that its pagetable is all
read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
trace_xen_mmu_pgd_pin(mm, pgd);
xen_mc_batch();
if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
/* re-enable interrupts for flushing */
xen_mc_issue(0);
kmap_flush_unused();
xen_mc_batch();
}
#ifdef CONFIG_X86_64
{
pgd_t *user_pgd = xen_get_user_pgd(pgd);
xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
if (user_pgd) {
xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
xen_do_pin(MMUEXT_PIN_L4_TABLE,
PFN_DOWN(__pa(user_pgd)));
}
}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
/* Need to make sure unshared kernel PMD is pinnable */
xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
PT_PMD);
#endif
xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
xen_mc_issue(0);
}
static void xen_pgd_pin(struct mm_struct *mm)
{
__xen_pgd_pin(mm, mm->pgd);
}
/*
* On save, we need to pin all pagetables to make sure they get their
* mfns turned into pfns. Search the list for any unpinned pgds and pin
* them (unpinned pgds are not currently in use, probably because the
* process is under construction or destruction).
*
* Expected to be called in stop_machine() ("equivalent to taking
* every spinlock in the system"), so the locking doesn't really
* matter all that much.
*/
void xen_mm_pin_all(void)
{
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
if (!PagePinned(page)) {
__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
SetPageSavePinned(page);
}
}
spin_unlock(&pgd_lock);
}
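/*
 * Illustrative sketch (not part of the original file): the save/resume
 * pairing described above.  xen_mm_pin_all() and xen_mm_unpin_all() are
 * the real entry points; the wrapper showing their ordering is
 * hypothetical and never compiled.
 */
#if 0
static void example_save_restore_cycle(void)
{
        xen_mm_pin_all();       /* before save: every pgd pinned */
        /* ... the domain is saved and later resumed ... */
        xen_mm_unpin_all();     /* after resume: undo only the save-time pins */
}
#endif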
/*
 * The init_mm pagetable is really pinned as soon as it's created, but
* that's before we have page structures to store the bits. So do all
* the book-keeping now.
*/
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
SetPagePinned(page);
return 0;
}
static void __init xen_mark_init_mm_pinned(void)
{
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}
static int xen_unpin_page(struct mm_struct *mm, struct page *page,
enum pt_level level)
{
unsigned pgfl = TestClearPagePinned(page);
if (pgfl && !PageHighMem(page)) {
void *pt = lowmem_page_address(page);
unsigned long pfn = page_to_pfn(page);
spinlock_t *ptl = NULL;
struct multicall_space mcs;
/*
* Do the converse to pin_page. If we're using split
 * pte locks, we must be holding the lock while
* the pte page is unpinned but still RO to prevent
* concurrent updates from seeing it in this
* partially-pinned state.
*/
if (level == PT_PTE) {
ptl = xen_pte_lock(page, mm);
if (ptl)
xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
}
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
pfn_pte(pfn, PAGE_KERNEL),
level == PT_PGD ? UVMF_TLB_FLUSH : 0);
if (ptl) {
/* unlock when batch completed */
xen_mc_callback(xen_pte_unlock, ptl);
}
}
return 0; /* never need to flush on unpin */
}
/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
trace_xen_mmu_pgd_unpin(mm, pgd);
xen_mc_batch();
xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
#ifdef CONFIG_X86_64
{
pgd_t *user_pgd = xen_get_user_pgd(pgd);
if (user_pgd) {
xen_do_pin(MMUEXT_UNPIN_TABLE,
PFN_DOWN(__pa(user_pgd)));
xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
}
}
#endif
#ifdef CONFIG_X86_PAE
/* Need to make sure unshared kernel PMD is unpinned */
xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
PT_PMD);
#endif
__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
xen_mc_issue(0);
}
static void xen_pgd_unpin(struct mm_struct *mm)
{
__xen_pgd_unpin(mm, mm->pgd);
}
/*
* On resume, undo any pinning done at save, so that the rest of the
* kernel doesn't see any unexpected pinned pagetables.
*/
void xen_mm_unpin_all(void)
{
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
if (PageSavePinned(page)) {
BUG_ON(!PagePinned(page));
__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
ClearPageSavePinned(page);
}
}
spin_unlock(&pgd_lock);
}
static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
spin_lock(&next->page_table_lock);
xen_pgd_pin(next);
spin_unlock(&next->page_table_lock);
}
static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
spin_lock(&mm->page_table_lock);
xen_pgd_pin(mm);
spin_unlock(&mm->page_table_lock);
}
#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
struct mm_struct *mm = info;
struct mm_struct *active_mm;
active_mm = this_cpu_read(cpu_tlbstate.active_mm);
if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
leave_mm(smp_processor_id());
/* If this cpu still has a stale cr3 reference, then make sure
it has been flushed. */
if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
load_cr3(swapper_pg_dir);
}
static void xen_drop_mm_ref(struct mm_struct *mm)
{
cpumask_var_t mask;
unsigned cpu;
if (current->active_mm == mm) {
if (current->mm == mm)
load_cr3(swapper_pg_dir);
else
leave_mm(smp_processor_id());
}
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
&& per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
}
return;
}
cpumask_copy(mask, mm_cpumask(mm));
/* It's possible that a vcpu may have a stale reference to our
cr3, because it's in lazy mode and hasn't yet flushed
its set of pending hypercalls. In this case, we can
look at its actual current cr3 value, and force it to flush
if needed. */
for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpumask_set_cpu(cpu, mask);
}
if (!cpumask_empty(mask))
smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
if (current->active_mm == mm)
load_cr3(swapper_pg_dir);
}
#endif
/*
* While a process runs, Xen pins its pagetables, which means that the
* hypervisor forces it to be read-only, and it controls all updates
* to it. This means that all pagetable updates have to go via the
* hypervisor, which is moderately expensive.
*
* Since we're pulling the pagetable down, we switch to use init_mm,
* unpin old process pagetable and mark it all read-write, which
* allows further operations on it to be simple memory accesses.
*
* The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing. This means we need to
* switch all CPUs off this pagetable before we can unpin it.
*/
static void xen_exit_mmap(struct mm_struct *mm)
{
get_cpu(); /* make sure we don't move around */
xen_drop_mm_ref(mm);
put_cpu();
spin_lock(&mm->page_table_lock);
/* pgd may not be pinned in the error exit path of execve */
if (xen_page_pinned(mm->pgd))
xen_pgd_unpin(mm);
spin_unlock(&mm->page_table_lock);
}
static void xen_post_allocator_init(void);
static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
struct mmuext_op op;
op.cmd = cmd;
op.arg1.mfn = pfn_to_mfn(pfn);
if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
BUG();
}
#ifdef CONFIG_X86_64
static void __init xen_cleanhighmap(unsigned long vaddr,
unsigned long vaddr_end)
{
unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
/* NOTE: The loop is more greedy than the cleanup_highmap variant.
* We include the PMD passed in on _both_ boundaries. */
for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
pmd++, vaddr += PMD_SIZE) {
if (pmd_none(*pmd))
continue;
if (vaddr < (unsigned long) _text || vaddr > kernel_end)
set_pmd(pmd, __pmd(0));
}
/* In case we did something silly, we should crash in this function
* instead of somewhere later and be confusing. */
xen_mc_flush();
}
/*
* Make a page range writeable and free it.
*/
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
void *vaddr = __va(paddr);
void *vaddr_end = vaddr + size;
for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
make_lowmem_page_readwrite(vaddr);
memblock_free(paddr, size);
}
static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
if (unpin)
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
ClearPagePinned(virt_to_page(__va(pa)));
xen_free_ro_pages(pa, PAGE_SIZE);
}
static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
{
unsigned long pa;
pte_t *pte_tbl;
int i;
if (pmd_large(*pmd)) {
pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PMD_SIZE);
return;
}
pte_tbl = pte_offset_kernel(pmd, 0);
for (i = 0; i < PTRS_PER_PTE; i++) {
if (pte_none(pte_tbl[i]))
continue;
pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
xen_free_ro_pages(pa, PAGE_SIZE);
}
set_pmd(pmd, __pmd(0));
xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
}
static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
{
unsigned long pa;
pmd_t *pmd_tbl;
int i;
if (pud_large(*pud)) {
pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PUD_SIZE);
return;
}
pmd_tbl = pmd_offset(pud, 0);
for (i = 0; i < PTRS_PER_PMD; i++) {
if (pmd_none(pmd_tbl[i]))
continue;
xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
}
set_pud(pud, __pud(0));
xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
}
static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
{
unsigned long pa;
pud_t *pud_tbl;
int i;
if (p4d_large(*p4d)) {
pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, P4D_SIZE);
return;
}
pud_tbl = pud_offset(p4d, 0);
for (i = 0; i < PTRS_PER_PUD; i++) {
if (pud_none(pud_tbl[i]))
continue;
xen_cleanmfnmap_pud(pud_tbl + i, unpin);
}
set_p4d(p4d, __p4d(0));
xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
}
/*
* Since it is well isolated we can (and since it is perhaps large we should)
* also free the page tables mapping the initial P->M table.
*/
static void __init xen_cleanmfnmap(unsigned long vaddr)
{
pgd_t *pgd;
p4d_t *p4d;
unsigned int i;
bool unpin;
unpin = (vaddr == 2 * PGDIR_SIZE);
vaddr &= PMD_MASK;
pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(pgd, 0);
for (i = 0; i < PTRS_PER_P4D; i++) {
if (p4d_none(p4d[i]))
continue;
xen_cleanmfnmap_p4d(p4d + i, unpin);
}
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
set_pgd(pgd, __pgd(0));
xen_cleanmfnmap_free_pgtbl(p4d, unpin);
}
}
static void __init xen_pagetable_p2m_free(void)
{
unsigned long size;
unsigned long addr;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
/* No memory or already called. */
if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
return;
/* using __ka address and sticking INVALID_P2M_ENTRY! */
memset((void *)xen_start_info->mfn_list, 0xff, size);
addr = xen_start_info->mfn_list;
/*
* We could be in __ka space.
 * We round up to the PMD, which means that if anybody at this stage is
* using the __ka address of xen_start_info or
 * xen_start_info->shared_info they are going to crash. Fortunately
* we have already revectored in xen_setup_kernel_pagetable and in
* xen_setup_shared_info.
*/
size = roundup(size, PMD_SIZE);
if (addr >= __START_KERNEL_map) {
xen_cleanhighmap(addr, addr + size);
size = PAGE_ALIGN(xen_start_info->nr_pages *
sizeof(unsigned long));
memblock_free(__pa(addr), size);
} else {
xen_cleanmfnmap(addr);
}
}
static void __init xen_pagetable_cleanhighmap(void)
{
unsigned long size;
unsigned long addr;
/* At this stage, cleanup_highmap has already cleaned __ka space
* from _brk_limit way up to the max_pfn_mapped (which is the end of
* the ramdisk). We continue on, erasing PMD entries that point to page
* tables - do note that they are accessible at this stage via __va.
* For good measure we also round up to the PMD - which means that if
 * anybody is using a __ka address to the initial boot-stack and tries
 * to use it, they are going to crash. The xen_start_info has been
* taken care of already in xen_setup_kernel_pagetable. */
addr = xen_start_info->pt_base;
size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
xen_cleanhighmap(addr, addr + size);
xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
#ifdef DEBUG
/* This is superfluous and is not necessary, but you know what
 * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
* anything at this stage. */
xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
#endif
}
#endif
static void __init xen_pagetable_p2m_setup(void)
{
xen_vmalloc_p2m_tree();
#ifdef CONFIG_X86_64
xen_pagetable_p2m_free();
xen_pagetable_cleanhighmap();
#endif
/* And revector! Bye bye old array */
xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
}
static void __init xen_pagetable_init(void)
{
paging_init();
xen_post_allocator_init();
xen_pagetable_p2m_setup();
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
/* Remap memory freed due to conflicts with E820 map */
xen_remap_memory();
xen_setup_shared_info();
}
static void xen_write_cr2(unsigned long cr2)
{
this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}
static unsigned long xen_read_cr2(void)
{
return this_cpu_read(xen_vcpu)->arch.cr2;
}
unsigned long xen_read_cr2_direct(void)
{
return this_cpu_read(xen_vcpu_info.arch.cr2);
}
static void xen_flush_tlb(void)
{
struct mmuext_op *op;
struct multicall_space mcs;
trace_xen_mmu_flush_tlb(0);
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_MMU);
preempt_enable();
}
static void xen_flush_tlb_single(unsigned long addr)
{
struct mmuext_op *op;
struct multicall_space mcs;
trace_xen_mmu_flush_tlb_single(addr);
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_INVLPG_LOCAL;
op->arg1.linear_addr = addr & PAGE_MASK;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_MMU);
preempt_enable();
}
static void xen_flush_tlb_others(const struct cpumask *cpus,
struct mm_struct *mm, unsigned long start,
unsigned long end)
{
struct {
struct mmuext_op op;
#ifdef CONFIG_SMP
DECLARE_BITMAP(mask, num_processors);
#else
DECLARE_BITMAP(mask, NR_CPUS);
#endif
} *args;
struct multicall_space mcs;
trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
if (cpumask_empty(cpus))
return; /* nothing to do */
mcs = xen_mc_entry(sizeof(*args));
args = mcs.args;
args->op.arg2.vcpumask = to_cpumask(args->mask);
/* Remove us, and any offline CPUS. */
cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
args->op.arg1.linear_addr = start;
}
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static unsigned long xen_read_cr3(void)
{
return this_cpu_read(xen_cr3);
}
static void set_current_cr3(void *v)
{
this_cpu_write(xen_current_cr3, (unsigned long)v);
}
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
struct mmuext_op op;
unsigned long mfn;
trace_xen_mmu_write_cr3(kernel, cr3);
if (cr3)
mfn = pfn_to_mfn(PFN_DOWN(cr3));
else
mfn = 0;
WARN_ON(mfn == 0 && kernel);
op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
op.arg1.mfn = mfn;
xen_extend_mmuext_op(&op);
if (kernel) {
this_cpu_write(xen_cr3, cr3);
/* Update xen_current_cr3 once the batch has actually
been submitted. */
xen_mc_callback(set_current_cr3, (void *)cr3);
}
}
static void xen_write_cr3(unsigned long cr3)
{
BUG_ON(preemptible());
xen_mc_batch(); /* disables interrupts */
/* Update while interrupts are disabled, so it's atomic with
respect to ipis */
this_cpu_write(xen_cr3, cr3);
__xen_write_cr3(true, cr3);
#ifdef CONFIG_X86_64
{
pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
if (user_pgd)
__xen_write_cr3(false, __pa(user_pgd));
else
__xen_write_cr3(false, 0);
}
#endif
xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
}
#ifdef CONFIG_X86_64
/*
* At the start of the day - when Xen launches a guest, it has already
* built pagetables for the guest. We diligently look over them
 * in xen_setup_kernel_pagetable and graft them as appropriate into the
* init_level4_pgt and its friends. Then when we are happy we load
* the new init_level4_pgt - and continue on.
*
* The generic code starts (start_kernel) and 'init_mem_mapping' sets
* up the rest of the pagetables. When it has completed it loads the cr3.
* N.B. that baremetal would start at 'start_kernel' (and the early
* #PF handler would create bootstrap pagetables) - so we are running
* with the same assumptions as what to do when write_cr3 is executed
* at this point.
*
* Since there are no user-page tables at all, we have two variants
* of xen_write_cr3 - the early bootup (this one), and the late one
* (xen_write_cr3). The reason we have to do that is that in 64-bit
* the Linux kernel and user-space are both in ring 3 while the
* hypervisor is in ring 0.
*/
static void __init xen_write_cr3_init(unsigned long cr3)
{
BUG_ON(preemptible());
xen_mc_batch(); /* disables interrupts */
/* Update while interrupts are disabled, so it's atomic with
respect to ipis */
this_cpu_write(xen_cr3, cr3);
__xen_write_cr3(true, cr3);
xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
}
#endif
static int xen_pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd = mm->pgd;
int ret = 0;
BUG_ON(PagePinned(virt_to_page(pgd)));
#ifdef CONFIG_X86_64
{
struct page *page = virt_to_page(pgd);
pgd_t *user_pgd;
BUG_ON(page->private != 0);
ret = -ENOMEM;
user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
page->private = (unsigned long)user_pgd;
if (user_pgd != NULL) {
#ifdef CONFIG_X86_VSYSCALL_EMULATION
user_pgd[pgd_index(VSYSCALL_ADDR)] =
__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
#endif
ret = 0;
}
BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
}
#endif
return ret;
}
static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
pgd_t *user_pgd = xen_get_user_pgd(pgd);
if (user_pgd)
free_page((unsigned long)user_pgd);
#endif
}
/*
* Init-time set_pte while constructing initial pagetables, which
* doesn't allow RO page table pages to be remapped RW.
*
* If there is no MFN for this PFN then this page is initially
* ballooned out so clear the PTE (as in decrease_reservation() in
* drivers/xen/balloon.c).
*
* Many of these PTE updates are done on unpinned and writable pages
* and doing a hypercall for these is unnecessary and expensive. At
* this point it is not possible to tell if a page is pinned or not,
* so always write the PTE directly and rely on Xen trapping and
* emulating any updates as necessary.
*/
__visible pte_t xen_make_pte_init(pteval_t pte)
{
#ifdef CONFIG_X86_64
unsigned long pfn;
/*
* Pages belonging to the initial p2m list mapped outside the default
* address range must be mapped read-only. This region contains the
* page tables for mapping the p2m list, too, and page tables MUST be
* mapped read-only.
*/
pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
if (xen_start_info->mfn_list < __START_KERNEL_map &&
pfn >= xen_start_info->first_p2m_pfn &&
pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
pte &= ~_PAGE_RW;
#endif
pte = pte_pfn_to_mfn(pte);
return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_X86_32
/* If there's an existing pte, then don't allow _PAGE_RW to be set */
if (pte_mfn(pte) != INVALID_P2M_ENTRY
&& pte_val_ma(*ptep) & _PAGE_PRESENT)
pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
pte_val_ma(pte));
#endif
native_set_pte(ptep, pte);
}
/* Early in boot, while setting up the initial pagetable, assume
everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
BUG_ON(mem_map); /* should only be used early */
#endif
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}
/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
BUG_ON(mem_map); /* should only be used early */
#endif
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}
/* Early release_pte assumes that all pts are pinned, since there's
only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
static void __init xen_release_pmd_init(unsigned long pfn)
{
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
struct multicall_space mcs;
struct mmuext_op *op;
mcs = __xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = cmd;
op->arg1.mfn = pfn_to_mfn(pfn);
MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}
static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
struct multicall_space mcs;
unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
pfn_pte(pfn, prot), 0);
}
/* This needs to make sure the new pte page is pinned iff it's being
attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
unsigned level)
{
bool pinned = PagePinned(virt_to_page(mm->pgd));
trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
if (pinned) {
struct page *page = pfn_to_page(pfn);
SetPagePinned(page);
if (!PageHighMem(page)) {
xen_mc_batch();
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
xen_mc_issue(PARAVIRT_LAZY_MMU);
} else {
/* make sure there are no stray mappings of
this page */
kmap_flush_unused();
}
}
}
static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PTE);
}
static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PMD);
}
/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
struct page *page = pfn_to_page(pfn);
bool pinned = PagePinned(page);
trace_xen_mmu_release_ptpage(pfn, level, pinned);
if (pinned) {
if (!PageHighMem(page)) {
xen_mc_batch();
if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
__set_pfn_prot(pfn, PAGE_KERNEL);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
ClearPagePinned(page);
}
}
static void xen_release_pte(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PTE);
}
static void xen_release_pmd(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PMD);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PUD);
}
static void xen_release_pud(unsigned long pfn)
{
xen_release_ptpage(pfn, PT_PUD);
}
#endif
void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
unsigned long top = HYPERVISOR_VIRT_START;
struct xen_platform_parameters pp;
if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
top = pp.virt_start;
reserve_top_address(-top);
#endif /* CONFIG_X86_32 */
}
/*
 * Like __va(), but returns the address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
*/
static void * __init __ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
return (void *)(paddr + __START_KERNEL_map);
#else
return __va(paddr);
#endif
}
/* Convert a machine address to physical address */
static unsigned long __init m2p(phys_addr_t maddr)
{
phys_addr_t paddr;
maddr &= PTE_PFN_MASK;
paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
return paddr;
}
/* Convert a machine address to kernel virtual */
static void * __init m2v(phys_addr_t maddr)
{
return __ka(m2p(maddr));
}
/* Set the page permissions on identity-mapped pages */
static void __init set_page_prot_flags(void *addr, pgprot_t prot,
unsigned long flags)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
BUG();
}
static void __init set_page_prot(void *addr, pgprot_t prot)
{
return set_page_prot_flags(addr, prot, UVMF_NONE);
}
#ifdef CONFIG_X86_32
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
unsigned pmdidx, pteidx;
unsigned ident_pte;
unsigned long pfn;
level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
PAGE_SIZE);
ident_pte = 0;
pfn = 0;
for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
pte_t *pte_page;
/* Reuse or allocate a page of ptes */
if (pmd_present(pmd[pmdidx]))
pte_page = m2v(pmd[pmdidx].pmd);
else {
/* Check for free pte pages */
if (ident_pte == LEVEL1_IDENT_ENTRIES)
break;
pte_page = &level1_ident_pgt[ident_pte];
ident_pte += PTRS_PER_PTE;
pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
}
/* Install mappings */
for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
pte_t pte;
if (pfn > max_pfn_mapped)
max_pfn_mapped = pfn;
if (!pte_none(pte_page[pteidx]))
continue;
pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
pte_page[pteidx] = pte;
}
}
for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
set_page_prot(pmd, PAGE_KERNEL_RO);
}
#endif
void __init xen_setup_machphys_mapping(void)
{
struct xen_machphys_mapping mapping;
if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
machine_to_phys_mapping = (unsigned long *)mapping.v_start;
machine_to_phys_nr = mapping.max_mfn + 1;
} else {
machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
}
#ifdef CONFIG_X86_32
WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
< machine_to_phys_mapping);
#endif
}
#ifdef CONFIG_X86_64
static void __init convert_pfn_mfn(void *v)
{
pte_t *pte = v;
int i;
/* All levels are converted the same way, so just treat them
as ptes. */
for (i = 0; i < PTRS_PER_PTE; i++)
pte[i] = xen_make_pte(pte[i].pte);
}
static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
unsigned long addr)
{
if (*pt_base == PFN_DOWN(__pa(addr))) {
set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
clear_page((void *)addr);
(*pt_base)++;
}
if (*pt_end == PFN_DOWN(__pa(addr))) {
set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
clear_page((void *)addr);
(*pt_end)--;
}
}
/*
* Set up the initial kernel pagetable.
*
* We can construct this by grafting the Xen provided pagetable into
* head_64.S's preconstructed pagetables. We copy the Xen L2's into
* level2_ident_pgt, and level2_kernel_pgt. This means that only the
* kernel has a physical mapping to start with - but that's enough to
* get __va working. We need to fill in the rest of the physical
* mapping once some sort of allocator has been set up.
*/
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
pud_t *l3;
pmd_t *l2;
unsigned long addr[3];
unsigned long pt_base, pt_end;
unsigned i;
/* max_pfn_mapped is the last pfn mapped in the initial memory
* mappings. Considering that on Xen after the kernel mappings we
* have the mappings of some pages that don't exist in pfn space, we
* set max_pfn_mapped to the last real pfn mapped. */
if (xen_start_info->mfn_list < __START_KERNEL_map)
max_pfn_mapped = xen_start_info->first_p2m_pfn;
else
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
pt_end = pt_base + xen_start_info->nr_pt_frames;
/* Zap identity mapping */
init_level4_pgt[0] = __pgd(0);
/* Pre-constructed entries are in pfn, so convert to mfn */
/* L4[272] -> level3_ident_pgt */
/* L4[511] -> level3_kernel_pgt */
convert_pfn_mfn(init_level4_pgt);
/* L3_i[0] -> level2_ident_pgt */
convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt */
/* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
/* L3_k[511][506] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
addr[0] = (unsigned long)pgd;
addr[1] = (unsigned long)l3;
addr[2] = (unsigned long)l2;
/* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
* Both L4[272][0] and L4[511][510] have entries that point to the same
* L2 (PMD) tables. Meaning that if you modify it in __va space
* it will be also modified in the __ka space! (But if you just
* modify the PMD table to point to other PTE's or none, then you
* are OK - which is what cleanup_highmap does) */
copy_page(level2_ident_pgt, l2);
/* Graft it onto L4[511][510] */
copy_page(level2_kernel_pgt, l2);
/* Copy the initial P->M table mappings if necessary. */
i = pgd_index(xen_start_info->mfn_list);
if (i && i < pgd_index(__START_KERNEL_map))
init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
/* Make pagetable pieces RO */
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
PFN_DOWN(__pa_symbol(init_level4_pgt)));
/* Unpin Xen-provided one */
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
/*
* At this stage there can be no user pgd, and no page structure to
* attach it to, so make sure we just set kernel pgd.
*/
xen_mc_batch();
__xen_write_cr3(true, __pa(init_level4_pgt));
xen_mc_issue(PARAVIRT_LAZY_CPU);
	/* We can't easily rip out L3 and L2, as the Xen pagetables are
	 * laid out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
	 * the initial domain. For guests using the toolstack, they are in
	 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
	 * rip out the [L4] (pgd), but for guests we shave off three pages.
	 */
for (i = 0; i < ARRAY_SIZE(addr); i++)
check_pt_base(&pt_base, &pt_end, addr[i]);
	/* The Xen pagetable, now up to three pages smaller, that we keep using */
xen_pt_base = PFN_PHYS(pt_base);
xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
memblock_reserve(xen_pt_base, xen_pt_size);
/* Revector the xen_start_info */
xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
}
/*
* Read a value from a physical address.
*/
static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
{
unsigned long *vaddr;
unsigned long val;
vaddr = early_memremap_ro(addr, sizeof(val));
val = *vaddr;
early_memunmap(vaddr, sizeof(val));
return val;
}
/*
* Translate a virtual address to a physical one without relying on mapped
* page tables. Don't rely on big pages being aligned in (guest) physical
* space!
*/
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
phys_addr_t pa;
pgd_t pgd;
pud_t pud;
pmd_t pmd;
pte_t pte;
pa = read_cr3();
pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
sizeof(pgd)));
if (!pgd_present(pgd))
return 0;
pa = pgd_val(pgd) & PTE_PFN_MASK;
pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
sizeof(pud)));
if (!pud_present(pud))
return 0;
pa = pud_val(pud) & PTE_PFN_MASK;
if (pud_large(pud))
return pa + (vaddr & ~PUD_MASK);
pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
sizeof(pmd)));
if (!pmd_present(pmd))
return 0;
pa = pmd_val(pmd) & PTE_PFN_MASK;
if (pmd_large(pmd))
return pa + (vaddr & ~PMD_MASK);
pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
sizeof(pte)));
if (!pte_present(pte))
return 0;
pa = pte_pfn(pte) << PAGE_SHIFT;
return pa | (vaddr & ~PAGE_MASK);
}
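/*
 * Note that the walk above copes with 1GB (pud_large) and 2MB (pmd_large)
 * mappings by adding the offset within the large page directly instead of
 * descending to the next level.
 */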
/*
* Find a new area for the hypervisor supplied p2m list and relocate the p2m to
* this area.
*/
void __init xen_relocate_p2m(void)
{
phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
pte_t *pt;
pmd_t *pmd;
pud_t *pud;
p4d_t *p4d = NULL;
pgd_t *pgd;
unsigned long *new_p2m;
int save_pud;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
if (PTRS_PER_P4D > 1)
n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
else
n_p4d = 0;
n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
new_area = xen_find_free_area(PFN_PHYS(n_frames));
if (!new_area) {
xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
BUG();
}
/*
* Setup the page tables for addressing the new p2m list.
* We have asked the hypervisor to map the p2m list at the user address
* PUD_SIZE. It may have done so, or it may have used a kernel space
* address depending on the Xen version.
	 * To avoid any possible virtual address collision, just use
	 * 2 * PGDIR_SIZE (the address given to new_p2m below) for the new
	 * area.
*/
p4d_phys = new_area;
pud_phys = p4d_phys + PFN_PHYS(n_p4d);
pmd_phys = pud_phys + PFN_PHYS(n_pud);
pt_phys = pmd_phys + PFN_PHYS(n_pmd);
p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
pgd = __va(read_cr3());
new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
idx_p4d = 0;
save_pud = n_pud;
do {
if (n_p4d > 0) {
p4d = early_memremap(p4d_phys, PAGE_SIZE);
clear_page(p4d);
n_pud = min(save_pud, PTRS_PER_P4D);
}
for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
pud = early_memremap(pud_phys, PAGE_SIZE);
clear_page(pud);
for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
idx_pmd++) {
pmd = early_memremap(pmd_phys, PAGE_SIZE);
clear_page(pmd);
for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
idx_pt++) {
pt = early_memremap(pt_phys, PAGE_SIZE);
clear_page(pt);
for (idx_pte = 0;
idx_pte < min(n_pte, PTRS_PER_PTE);
idx_pte++) {
set_pte(pt + idx_pte,
pfn_pte(p2m_pfn, PAGE_KERNEL));
p2m_pfn++;
}
n_pte -= PTRS_PER_PTE;
early_memunmap(pt, PAGE_SIZE);
make_lowmem_page_readonly(__va(pt_phys));
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
PFN_DOWN(pt_phys));
set_pmd(pmd + idx_pt,
__pmd(_PAGE_TABLE | pt_phys));
pt_phys += PAGE_SIZE;
}
n_pt -= PTRS_PER_PMD;
early_memunmap(pmd, PAGE_SIZE);
make_lowmem_page_readonly(__va(pmd_phys));
pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
PFN_DOWN(pmd_phys));
set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
pmd_phys += PAGE_SIZE;
}
n_pmd -= PTRS_PER_PUD;
early_memunmap(pud, PAGE_SIZE);
make_lowmem_page_readonly(__va(pud_phys));
pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
if (n_p4d > 0)
set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
else
set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
pud_phys += PAGE_SIZE;
}
if (n_p4d > 0) {
save_pud -= PTRS_PER_P4D;
early_memunmap(p4d, PAGE_SIZE);
make_lowmem_page_readonly(__va(p4d_phys));
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
p4d_phys += PAGE_SIZE;
}
} while (++idx_p4d < n_p4d);
/* Now copy the old p2m info to the new area. */
memcpy(new_p2m, xen_p2m_addr, size);
xen_p2m_addr = new_p2m;
/* Release the old p2m list and set new list info. */
p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
BUG_ON(!p2m_pfn);
p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
if (xen_start_info->mfn_list < __START_KERNEL_map) {
pfn = xen_start_info->first_p2m_pfn;
pfn_end = xen_start_info->first_p2m_pfn +
xen_start_info->nr_p2m_frames;
set_pgd(pgd + 1, __pgd(0));
} else {
pfn = p2m_pfn;
pfn_end = p2m_pfn_end;
}
memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
while (pfn < pfn_end) {
if (pfn == p2m_pfn) {
pfn = p2m_pfn_end;
continue;
}
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
pfn++;
}
xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
xen_start_info->nr_p2m_frames = n_frames;
}
#else /* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
static void __init xen_write_cr3_init(unsigned long cr3)
{
unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
BUG_ON(read_cr3() != __pa(initial_page_table));
BUG_ON(cr3 != __pa(swapper_pg_dir));
/*
* We are switching to swapper_pg_dir for the first time (from
* initial_page_table) and therefore need to mark that page
* read-only and then pin it.
*
* Xen disallows sharing of kernel PMDs for PAE
* guests. Therefore we must copy the kernel PMD from
* initial_page_table into a new kernel PMD to be used in
* swapper_pg_dir.
*/
swapper_kernel_pmd =
extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
copy_page(swapper_kernel_pmd, initial_kernel_pmd);
swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
xen_write_cr3(cr3);
pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
PFN_DOWN(__pa(initial_page_table)));
set_page_prot(initial_page_table, PAGE_KERNEL);
set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
pv_mmu_ops.write_cr3 = &xen_write_cr3;
}
/*
 * For 32-bit domains xen_start_info->pt_base is the pgd address, which might
 * not be the first page table in the page table pool.
* Iterate through the initial page tables to find the real page table base.
*/
static phys_addr_t xen_find_pt_base(pmd_t *pmd)
{
phys_addr_t pt_base, paddr;
unsigned pmdidx;
pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
paddr = m2p(pmd[pmdidx].pmd);
pt_base = min(pt_base, paddr);
}
return pt_base;
}
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
pmd_t *kernel_pmd;
kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
xen_pt_base = xen_find_pt_base(kernel_pmd);
xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
initial_kernel_pmd =
extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
copy_page(initial_kernel_pmd, kernel_pmd);
xen_map_identity_early(initial_kernel_pmd, max_pfn);
copy_page(initial_page_table, pgd);
initial_page_table[KERNEL_PGD_BOUNDARY] =
__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
set_page_prot(initial_page_table, PAGE_KERNEL_RO);
set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
PFN_DOWN(__pa(initial_page_table)));
xen_write_cr3(__pa(initial_page_table));
memblock_reserve(xen_pt_base, xen_pt_size);
}
#endif /* CONFIG_X86_64 */
void __init xen_reserve_special_pages(void)
{
phys_addr_t paddr;
memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
if (xen_start_info->store_mfn) {
paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
memblock_reserve(paddr, PAGE_SIZE);
}
if (!xen_initial_domain()) {
paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
memblock_reserve(paddr, PAGE_SIZE);
}
}
void __init xen_pt_check_e820(void)
{
if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
BUG();
}
}
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
pte_t pte;
phys >>= PAGE_SHIFT;
switch (idx) {
case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
case FIX_RO_IDT:
#ifdef CONFIG_X86_32
case FIX_WP_TEST:
# ifdef CONFIG_HIGHMEM
case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
case VSYSCALL_PAGE:
#endif
case FIX_TEXT_POKE0:
case FIX_TEXT_POKE1:
case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
/* All local page mappings */
pte = pfn_pte(phys, prot);
break;
#ifdef CONFIG_X86_LOCAL_APIC
case FIX_APIC_BASE: /* maps dummy local APIC */
pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
break;
#endif
#ifdef CONFIG_X86_IO_APIC
case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
/*
* We just don't map the IO APIC - all access is via
* hypercalls. Keep the address in the pte for reference.
*/
pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
break;
#endif
case FIX_PARAVIRT_BOOTMAP:
/* This is an MFN, but it isn't an IO mapping from the
IO domain */
pte = mfn_pte(phys, prot);
break;
default:
/* By default, set_fixmap is used for hardware mappings */
pte = mfn_pte(phys, prot);
break;
}
__native_set_fixmap(idx, pte);
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* Replicate changes to map the vsyscall page into the user
pagetable vsyscall mapping. */
if (idx == VSYSCALL_PAGE) {
unsigned long vaddr = __fix_to_virt(idx);
set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
}
#endif
}
static void __init xen_post_allocator_init(void)
{
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
#if CONFIG_PGTABLE_LEVELS >= 4
pv_mmu_ops.set_p4d = xen_set_p4d;
#endif
/* This will work as long as patching hasn't happened yet
(which it hasn't) */
pv_mmu_ops.alloc_pte = xen_alloc_pte;
pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
pv_mmu_ops.release_pte = xen_release_pte;
pv_mmu_ops.release_pmd = xen_release_pmd;
#if CONFIG_PGTABLE_LEVELS >= 4
pv_mmu_ops.alloc_pud = xen_alloc_pud;
pv_mmu_ops.release_pud = xen_release_pud;
#endif
pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
#ifdef CONFIG_X86_64
pv_mmu_ops.write_cr3 = &xen_write_cr3;
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_mark_init_mm_pinned();
}
static void xen_leave_lazy_mmu(void)
{
preempt_disable();
xen_mc_flush();
paravirt_leave_lazy_mmu();
preempt_enable();
}
static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.read_cr2 = xen_read_cr2,
.write_cr2 = xen_write_cr2,
.read_cr3 = xen_read_cr3,
.write_cr3 = xen_write_cr3_init,
.flush_tlb_user = xen_flush_tlb,
.flush_tlb_kernel = xen_flush_tlb,
.flush_tlb_single = xen_flush_tlb_single,
.flush_tlb_others = xen_flush_tlb_others,
.pte_update = paravirt_nop,
.pgd_alloc = xen_pgd_alloc,
.pgd_free = xen_pgd_free,
.alloc_pte = xen_alloc_pte_init,
.release_pte = xen_release_pte_init,
.alloc_pmd = xen_alloc_pmd_init,
.release_pmd = xen_release_pmd_init,
.set_pte = xen_set_pte_init,
.set_pte_at = xen_set_pte_at,
.set_pmd = xen_set_pmd_hyper,
.ptep_modify_prot_start = __ptep_modify_prot_start,
.ptep_modify_prot_commit = __ptep_modify_prot_commit,
.pte_val = PV_CALLEE_SAVE(xen_pte_val),
.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
#ifdef CONFIG_X86_PAE
.set_pte_atomic = xen_set_pte_atomic,
.pte_clear = xen_pte_clear,
.pmd_clear = xen_pmd_clear,
#endif /* CONFIG_X86_PAE */
.set_pud = xen_set_pud_hyper,
.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
#if CONFIG_PGTABLE_LEVELS >= 4
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_p4d = xen_set_p4d_hyper,
.alloc_pud = xen_alloc_pmd_init,
.release_pud = xen_release_pmd_init,
#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
.activate_mm = xen_activate_mm,
.dup_mmap = xen_dup_mmap,
.exit_mmap = xen_exit_mmap,
.lazy_mode = {
.enter = paravirt_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
.flush = paravirt_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
};
void __init xen_init_mmu_ops(void)
{
x86_init.paging.pagetable_init = xen_pagetable_init;
pv_mmu_ops = xen_mmu_ops;
memset(dummy_mapping, 0xff, PAGE_SIZE);
}
/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
unsigned long *in_frames,
unsigned long *out_frames)
{
int i;
struct multicall_space mcs;
xen_mc_batch();
for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
mcs = __xen_mc_entry(0);
if (in_frames)
in_frames[i] = virt_to_mfn(vaddr);
MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
if (out_frames)
out_frames[i] = virt_to_pfn(vaddr);
}
xen_mc_issue(0);
}
/*
* Update the pfn-to-mfn mappings for a virtual address range, either to
* point to an array of mfns, or contiguously from a single starting
* mfn.
*/
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
unsigned long *mfns,
unsigned long first_mfn)
{
unsigned i, limit;
unsigned long mfn;
xen_mc_batch();
limit = 1u << order;
for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
struct multicall_space mcs;
unsigned flags;
mcs = __xen_mc_entry(0);
if (mfns)
mfn = mfns[i];
else
mfn = first_mfn + i;
if (i < (limit - 1))
flags = 0;
else {
if (order == 0)
flags = UVMF_INVLPG | UVMF_ALL;
else
flags = UVMF_TLB_FLUSH | UVMF_ALL;
}
MULTI_update_va_mapping(mcs.mc, vaddr,
mfn_pte(mfn, PAGE_KERNEL), flags);
set_phys_to_machine(virt_to_pfn(vaddr), mfn);
}
xen_mc_issue(0);
}
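/*
 * Note that the remap loop above requests a TLB flush only on the final
 * multicall entry (UVMF_INVLPG for an order-0 region, UVMF_TLB_FLUSH
 * otherwise, in both cases on all CPUs), so remapping a whole extent costs
 * a single flush rather than one per page.
 */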
/*
* Perform the hypercall to exchange a region of our pfns to point to
* memory with the required contiguous alignment. Takes the pfns as
* input, and populates mfns as output.
*
* Returns a success code indicating whether the hypervisor was able to
* satisfy the request or not.
*/
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
unsigned long *pfns_in,
unsigned long extents_out,
unsigned int order_out,
unsigned long *mfns_out,
unsigned int address_bits)
{
long rc;
int success;
struct xen_memory_exchange exchange = {
.in = {
.nr_extents = extents_in,
.extent_order = order_in,
.extent_start = pfns_in,
.domid = DOMID_SELF
},
.out = {
.nr_extents = extents_out,
.extent_order = order_out,
.extent_start = mfns_out,
.address_bits = address_bits,
.domid = DOMID_SELF
}
};
BUG_ON(extents_in << order_in != extents_out << order_out);
rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
success = (exchange.nr_exchanged == extents_in);
BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
BUG_ON(success && (rc != 0));
return success;
}
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
{
unsigned long *in_frames = discontig_frames, out_frame;
unsigned long flags;
int success;
unsigned long vstart = (unsigned long)phys_to_virt(pstart);
/*
* Currently an auto-translated guest will not perform I/O, nor will
* it require PAE page directories below 4GB. Therefore any calls to
* this function are redundant and can be ignored.
*/
if (unlikely(order > MAX_CONTIG_ORDER))
return -ENOMEM;
memset((void *) vstart, 0, PAGE_SIZE << order);
spin_lock_irqsave(&xen_reservation_lock, flags);
/* 1. Zap current PTEs, remembering MFNs. */
xen_zap_pfn_range(vstart, order, in_frames, NULL);
/* 2. Get a new contiguous memory extent. */
out_frame = virt_to_pfn(vstart);
success = xen_exchange_memory(1UL << order, 0, in_frames,
1, order, &out_frame,
address_bits);
/* 3. Map the new extent in place of old pages. */
if (success)
xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
else
xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
spin_unlock_irqrestore(&xen_reservation_lock, flags);
*dma_handle = virt_to_machine(vstart).maddr;
return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
unsigned long *out_frames = discontig_frames, in_frame;
unsigned long flags;
int success;
unsigned long vstart;
if (unlikely(order > MAX_CONTIG_ORDER))
return;
vstart = (unsigned long)phys_to_virt(pstart);
memset((void *) vstart, 0, PAGE_SIZE << order);
spin_lock_irqsave(&xen_reservation_lock, flags);
/* 1. Find start MFN of contiguous extent. */
in_frame = virt_to_mfn(vstart);
/* 2. Zap current PTEs. */
xen_zap_pfn_range(vstart, order, NULL, out_frames);
/* 3. Do the exchange for non-contiguous MFNs. */
success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
0, out_frames, 0);
/* 4. Map new pages in place of old pages. */
if (success)
xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
else
xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
#ifdef CONFIG_KEXEC_CORE
phys_addr_t paddr_vmcoreinfo_note(void)
{
if (xen_pv_domain())
return virt_to_machine(&vmcoreinfo_note).maddr;
else
return __pa_symbol(&vmcoreinfo_note);
}
#endif /* CONFIG_KEXEC_CORE */
| oldzhu/linux | arch/x86/xen/mmu_pv.c | C | gpl-2.0 | 70,932 |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Creates audit record for dropped/accepted packets
*
* (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
* (C) 2010-2011 Red Hat, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/audit.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_AUDIT.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <net/ipv6.h>
#include <net/ip.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>");
MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
MODULE_ALIAS("ipt_AUDIT");
MODULE_ALIAS("ip6t_AUDIT");
MODULE_ALIAS("ebt_AUDIT");
MODULE_ALIAS("arpt_AUDIT");
static bool audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
{
struct iphdr _iph;
const struct iphdr *ih;
ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_iph), &_iph);
if (!ih)
return false;
audit_log_format(ab, " saddr=%pI4 daddr=%pI4 proto=%hhu",
&ih->saddr, &ih->daddr, ih->protocol);
return true;
}
static bool audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
{
struct ipv6hdr _ip6h;
const struct ipv6hdr *ih;
u8 nexthdr;
__be16 frag_off;
ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
if (!ih)
return false;
nexthdr = ih->nexthdr;
ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h), &nexthdr, &frag_off);
audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
&ih->saddr, &ih->daddr, nexthdr);
return true;
}
static unsigned int
audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct audit_buffer *ab;
int fam = -1;
if (audit_enabled == AUDIT_OFF)
goto errout;
ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
if (ab == NULL)
goto errout;
audit_log_format(ab, "mark=%#x", skb->mark);
switch (xt_family(par)) {
case NFPROTO_BRIDGE:
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
break;
case htons(ETH_P_IPV6):
fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
break;
}
break;
case NFPROTO_IPV4:
fam = audit_ip4(ab, skb) ? NFPROTO_IPV4 : -1;
break;
case NFPROTO_IPV6:
fam = audit_ip6(ab, skb) ? NFPROTO_IPV6 : -1;
break;
}
if (fam == -1)
audit_log_format(ab, " saddr=? daddr=? proto=-1");
audit_log_end(ab);
errout:
return XT_CONTINUE;
}
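/*
 * The resulting AUDIT_NETFILTER_PKT record therefore carries the packet
 * mark plus a layer 3 summary, e.g. (illustrative values only):
 *
 *	mark=0x0 saddr=192.0.2.1 daddr=198.51.100.7 proto=6
 *
 * and degrades to "saddr=? daddr=? proto=-1" when the address family
 * could not be parsed.
 */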
static unsigned int
audit_tg_ebt(struct sk_buff *skb, const struct xt_action_param *par)
{
audit_tg(skb, par);
return EBT_CONTINUE;
}
static int audit_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_audit_info *info = par->targinfo;
if (info->type > XT_AUDIT_TYPE_MAX) {
pr_info_ratelimited("Audit type out of range (valid range: 0..%u)\n",
XT_AUDIT_TYPE_MAX);
return -ERANGE;
}
return 0;
}
static struct xt_target audit_tg_reg[] __read_mostly = {
{
.name = "AUDIT",
.family = NFPROTO_UNSPEC,
.target = audit_tg,
.targetsize = sizeof(struct xt_audit_info),
.checkentry = audit_tg_check,
.me = THIS_MODULE,
},
{
.name = "AUDIT",
.family = NFPROTO_BRIDGE,
.target = audit_tg_ebt,
.targetsize = sizeof(struct xt_audit_info),
.checkentry = audit_tg_check,
.me = THIS_MODULE,
},
};
static int __init audit_tg_init(void)
{
return xt_register_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg));
}
static void __exit audit_tg_exit(void)
{
xt_unregister_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg));
}
module_init(audit_tg_init);
module_exit(audit_tg_exit);
| rperier/linux | net/netfilter/xt_AUDIT.c | C | gpl-2.0 | 3,623 |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000, 2006 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/suspend.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/compat-signal.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/fpu.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include "signal-common.h"
static int (*save_fp_context32)(struct sigcontext32 __user *sc);
static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
#define __NR_O32_restart_syscall 4253
/* 32-bit compatibility types */
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
struct sigaction32 {
unsigned int sa_flags;
__sighandler32_t sa_handler;
compat_sigset_t sa_mask;
};
/* IRIX compatible stack_t */
typedef struct sigaltstack32 {
s32 ss_sp;
compat_size_t ss_size;
int ss_flags;
} stack32_t;
struct ucontext32 {
u32 uc_flags;
s32 uc_link;
stack32_t uc_stack;
struct sigcontext32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct sigframe32 {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
struct sigcontext32 sf_sc;
compat_sigset_t sf_mask;
};
struct rt_sigframe32 {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
compat_siginfo_t rs_info;
struct ucontext32 rs_uc;
};
/*
* sigcontext handlers
*/
static int protected_save_fp_context32(struct sigcontext32 __user *sc)
{
int err;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(1);
err = save_fp_context32(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __put_user(0, &sc->sc_fpregs[0]) |
__put_user(0, &sc->sc_fpregs[31]) |
__put_user(0, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
{
int err, tmp __maybe_unused;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
err = restore_fp_context32(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __get_user(tmp, &sc->sc_fpregs[0]) |
__get_user(tmp, &sc->sc_fpregs[31]) |
__get_user(tmp, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
static int setup_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
int err = 0;
int i;
u32 used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
err |= __put_user(0, &sc->sc_regs[0]);
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mfhi2(), &sc->sc_hi2);
err |= __put_user(mflo2(), &sc->sc_lo2);
err |= __put_user(mfhi3(), &sc->sc_hi3);
err |= __put_user(mflo3(), &sc->sc_lo3);
}
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math) {
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
err |= protected_save_fp_context32(sc);
}
return err;
}
static int
check_and_restore_fp_context32(struct sigcontext32 __user *sc)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= protected_restore_fp_context32(sc);
return err ?: sig;
}
static int restore_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
u32 used_math;
int err = 0;
s32 treg;
int i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
err |= __get_user(used_math, &sc->sc_used_math);
conditional_used_math(used_math);
if (used_math) {
/* restore fpu context if we have used it before */
if (!err)
err = check_and_restore_fp_context32(sc);
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
}
return err;
}
/*
 * These helpers are intentionally never defined: referencing them in the
 * default cases of the switches below turns an unexpected _NSIG_WORDS
 * value into a link-time error instead of silently mishandling the set.
 */
extern void __put_sigset_unknown_nsig(void);
extern void __get_sigset_unknown_nsig(void);
static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
{
int err = 0;
if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
return -EFAULT;
switch (_NSIG_WORDS) {
default:
__put_sigset_unknown_nsig();
case 2:
err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
case 1:
err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
}
return err;
}
static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
{
int err = 0;
unsigned long sig[4];
if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
return -EFAULT;
switch (_NSIG_WORDS) {
default:
__get_sigset_unknown_nsig();
case 2:
err |= __get_user(sig[3], &ubuf->sig[3]);
err |= __get_user(sig[2], &ubuf->sig[2]);
kbuf->sig[1] = sig[2] | (sig[3] << 32);
case 1:
err |= __get_user(sig[1], &ubuf->sig[1]);
err |= __get_user(sig[0], &ubuf->sig[0]);
kbuf->sig[0] = sig[0] | (sig[1] << 32);
}
return err;
}
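/*
 * Both helpers split each 64-bit sigset_t word across two consecutive
 * 32-bit compat words, low half first: kbuf->sig[0] maps to ubuf->sig[0]
 * (bits 0-31) and ubuf->sig[1] (bits 32-63), and likewise for sig[1].
 */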
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
compat_sigset_t __user *uset;
sigset_t newset;
uset = (compat_sigset_t __user *) regs.regs[4];
if (get_sigset(&newset, uset))
return -EFAULT;
return sigsuspend(&newset);
}
asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
compat_sigset_t __user *uset;
sigset_t newset;
size_t sigsetsize;
/* XXX Don't preclude handling different sized sigset_t's. */
sigsetsize = regs.regs[5];
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
uset = (compat_sigset_t __user *) regs.regs[4];
if (get_sigset(&newset, uset))
return -EFAULT;
return sigsuspend(&newset);
}
SYSCALL_DEFINE3(32_sigaction, long, sig, const struct sigaction32 __user *, act,
struct sigaction32 __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int err = 0;
if (act) {
old_sigset_t mask;
s32 handler;
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
err |= __get_user(handler, &act->sa_handler);
new_ka.sa.sa_handler = (void __user *)(s64)handler;
err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
err |= __get_user(mask, &act->sa_mask.sig[0]);
if (err)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
&oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
err |= __put_user(0, &oact->sa_mask.sig[3]);
if (err)
return -EFAULT;
}
return ret;
}
asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
const stack32_t __user *uss = (const stack32_t __user *) regs.regs[4];
stack32_t __user *uoss = (stack32_t __user *) regs.regs[5];
unsigned long usp = regs.regs[29];
stack_t kss, koss;
int ret, err = 0;
mm_segment_t old_fs = get_fs();
s32 sp;
if (uss) {
if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
return -EFAULT;
err |= __get_user(sp, &uss->ss_sp);
kss.ss_sp = (void __user *) (long) sp;
err |= __get_user(kss.ss_size, &uss->ss_size);
err |= __get_user(kss.ss_flags, &uss->ss_flags);
if (err)
return -EFAULT;
}
set_fs(KERNEL_DS);
ret = do_sigaltstack(uss ? (stack_t __user *)&kss : NULL,
uoss ? (stack_t __user *)&koss : NULL, usp);
set_fs(old_fs);
if (!ret && uoss) {
if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
return -EFAULT;
sp = (int) (unsigned long) koss.ss_sp;
err |= __put_user(sp, &uoss->ss_sp);
err |= __put_user(koss.ss_size, &uoss->ss_size);
err |= __put_user(koss.ss_flags, &uoss->ss_flags);
if (err)
return -EFAULT;
}
return ret;
}
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err;
if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
/* If you change siginfo_t structure, please be sure
this code is fixed accordingly.
It should never copy any pad contained in the structure
to avoid security leaks, but must copy the generic
3 ints plus the relevant union member.
This routine must convert siginfo from 64bit to 32bit as well
at the same time. */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
if (from->si_code < 0)
err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (from->si_code >> 16) {
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_int, &to->si_int);
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
default:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_FAULT >> 16:
err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
break;
}
}
return err;
}
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
memset(to, 0, sizeof *to);
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
return -EFAULT;
return 0;
}
asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe32 __user *frame;
sigset_t blocked;
int sig;
frame = (struct sigframe32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
goto badframe;
set_current_blocked(&blocked);
sig = restore_sigcontext32(®s, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (®s));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe32 __user *frame;
mm_segment_t old_fs;
sigset_t set;
stack_t st;
s32 sp;
int sig;
frame = (struct rt_sigframe32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
set_current_blocked(&set);
sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/* The ucontext contains a stack32_t, so we must convert! */
if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
goto badframe;
st.ss_sp = (void __user *)(long) sp;
if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size))
goto badframe;
if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags))
goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
old_fs = get_fs();
set_fs(KERNEL_DS);
do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
set_fs(old_fs);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (®s));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
static int setup_frame_32(void *sig_return, struct k_sigaction *ka,
struct pt_regs *regs, int signr, sigset_t *set)
{
struct sigframe32 __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
err |= setup_sigcontext32(regs, &frame->sf_sc);
err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
if (err)
goto give_sigsegv;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to struct sigcontext
*
* $25 and c0_epc point to the signal handler, $29 points to the
* struct sigframe.
*/
regs->regs[ 4] = signr;
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
give_sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka,
struct pt_regs *regs, int signr, sigset_t *set,
siginfo_t *info)
{
struct rt_sigframe32 __user *frame;
int err = 0;
s32 sp;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
/* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
sp = (int) (long) current->sas_ss_sp;
err |= __put_user(sp,
&frame->rs_uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->regs[29]),
&frame->rs_uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size,
&frame->rs_uc.uc_stack.ss_size);
err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
if (err)
goto give_sigsegv;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe32.
*/
regs->regs[ 4] = signr;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
give_sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
/*
* o32 compatibility on 64-bit kernels, without DSP ASE
*/
struct mips_abi mips_abi_32 = {
.setup_frame = setup_frame_32,
.signal_return_offset =
offsetof(struct mips_vdso, o32_signal_trampoline),
.setup_rt_frame = setup_rt_frame_32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, o32_rt_signal_trampoline),
.restart = __NR_O32_restart_syscall
};
SYSCALL_DEFINE4(32_rt_sigaction, int, sig,
const struct sigaction32 __user *, act,
struct sigaction32 __user *, oact, unsigned int, sigsetsize)
{
struct k_sigaction new_sa, old_sa;
int ret = -EINVAL;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
goto out;
if (act) {
s32 handler;
int err = 0;
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
err |= __get_user(handler, &act->sa_handler);
new_sa.sa.sa_handler = (void __user *)(s64)handler;
err |= __get_user(new_sa.sa.sa_flags, &act->sa_flags);
err |= get_sigset(&new_sa.sa.sa_mask, &act->sa_mask);
if (err)
return -EFAULT;
}
ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
if (!ret && oact) {
int err = 0;
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
return -EFAULT;
err |= __put_user((u32)(u64)old_sa.sa.sa_handler,
&oact->sa_handler);
err |= __put_user(old_sa.sa.sa_flags, &oact->sa_flags);
err |= put_sigset(&old_sa.sa.sa_mask, &oact->sa_mask);
if (err)
return -EFAULT;
}
out:
return ret;
}
SYSCALL_DEFINE4(32_rt_sigprocmask, int, how, compat_sigset_t __user *, set,
compat_sigset_t __user *, oset, unsigned int, sigsetsize)
{
sigset_t old_set, new_set;
int ret;
mm_segment_t old_fs = get_fs();
if (set && get_sigset(&new_set, set))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *)&new_set : NULL,
oset ? (sigset_t __user *)&old_set : NULL,
sigsetsize);
set_fs(old_fs);
if (!ret && oset && put_sigset(&old_set, oset))
return -EFAULT;
return ret;
}
SYSCALL_DEFINE2(32_rt_sigpending, compat_sigset_t __user *, uset,
unsigned int, sigsetsize)
{
int ret;
sigset_t set;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_rt_sigpending((sigset_t __user *)&set, sigsetsize);
set_fs(old_fs);
if (!ret && put_sigset(&set, uset))
return -EFAULT;
return ret;
}
SYSCALL_DEFINE3(32_rt_sigqueueinfo, int, pid, int, sig,
compat_siginfo_t __user *, uinfo)
{
siginfo_t info;
int ret;
mm_segment_t old_fs = get_fs();
if (copy_from_user(&info, uinfo, 3*sizeof(int)) ||
copy_from_user(info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
set_fs(old_fs);
return ret;
}
SYSCALL_DEFINE5(32_waitid, int, which, compat_pid_t, pid,
compat_siginfo_t __user *, uinfo, int, options,
struct compat_rusage __user *, uru)
{
siginfo_t info;
struct rusage ru;
long ret;
mm_segment_t old_fs = get_fs();
info.si_signo = 0;
set_fs(KERNEL_DS);
ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options,
uru ? (struct rusage __user *) &ru : NULL);
set_fs(old_fs);
if (ret < 0 || info.si_signo == 0)
return ret;
if (uru && (ret = put_compat_rusage(&ru, uru)))
return ret;
BUG_ON(info.si_code & __SI_MASK);
info.si_code |= __SI_CHLD;
return copy_siginfo_to_user32(uinfo, &info);
}
static int signal32_init(void)
{
if (cpu_has_fpu) {
save_fp_context32 = _save_fp_context32;
restore_fp_context32 = _restore_fp_context32;
} else {
save_fp_context32 = fpu_emulator_save_context32;
restore_fp_context32 = fpu_emulator_restore_context32;
}
return 0;
}
arch_initcall(signal32_init);
| markushx/linux | arch/mips/kernel/signal32.c | C | gpl-2.0 | 20,829 |
/*
* arch/arm/kernel/unwind.c
*
* Copyright (C) 2008 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* Stack unwinding support for ARM
*
* An ARM EABI version of gcc is required to generate the unwind
* tables. For information about the structure of the unwind tables,
* see "Exception Handling ABI for the ARM Architecture" at:
*
* http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
*/
#if !defined (__ARM_EABI__)
#warning Your compiler does not have EABI support.
#warning ARM unwind is known to compile only with EABI compilers.
#warning Change compiler or disable ARM_UNWIND option.
#elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2)
#warning Your compiler is too buggy; it is known to not compile ARM unwind support.
#warning Change compiler or disable ARM_UNWIND option.
#endif
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <asm/unwind.h>
/* Dummy functions to avoid linker complaints */
void __aeabi_unwind_cpp_pr0(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);
void __aeabi_unwind_cpp_pr1(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);
void __aeabi_unwind_cpp_pr2(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
unsigned long *insn; /* pointer to the current instructions word */
int entries; /* number of entries left to interpret */
int byte; /* current byte number in the instructions word */
};
enum regs {
#ifdef CONFIG_THUMB2_KERNEL
FP = 7,
#else
FP = 11,
#endif
SP = 13,
LR = 14,
PC = 15
};
extern struct unwind_idx __start_unwind_idx[];
extern struct unwind_idx __stop_unwind_idx[];
static DEFINE_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
#define prel31_to_addr(ptr) \
({ \
/* sign-extend to 32 bits */ \
long offset = (((long)*(ptr)) << 1) >> 1; \
(unsigned long)(ptr) + offset; \
})
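/*
 * Worked example of the decoding above: the stored word is a 31-bit
 * place-relative signed offset, so a value of 0x7ffffffc held at address
 * 0x1000 sign-extends to -4 and resolves to the address 0x0ffc.
 */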
/*
 * Binary search in the unwind index. The index entries are
* guaranteed to be sorted in ascending order by the linker.
*/
static struct unwind_idx *search_index(unsigned long addr,
struct unwind_idx *first,
struct unwind_idx *last)
{
pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
if (addr < first->addr) {
pr_warning("unwind: Unknown symbol address %08lx\n", addr);
return NULL;
} else if (addr >= last->addr)
return last;
while (first < last - 1) {
struct unwind_idx *mid = first + ((last - first + 1) >> 1);
if (addr < mid->addr)
last = mid;
else
first = mid;
}
return first;
}
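/*
 * On return "first" is the last index entry whose start address is <= addr
 * (or the final entry when addr lies beyond the table), i.e. the entry
 * expected to describe the function containing addr.
 */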
static struct unwind_idx *unwind_find_idx(unsigned long addr)
{
struct unwind_idx *idx = NULL;
unsigned long flags;
pr_debug("%s(%08lx)\n", __func__, addr);
if (core_kernel_text(addr))
/* main unwind table */
idx = search_index(addr, __start_unwind_idx,
__stop_unwind_idx - 1);
else {
/* module unwind tables */
struct unwind_table *table;
spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
idx = search_index(addr, table->start,
table->stop - 1);
break;
}
}
spin_unlock_irqrestore(&unwind_lock, flags);
}
pr_debug("%s: idx = %p\n", __func__, idx);
return idx;
}
static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
{
unsigned long ret;
if (ctrl->entries <= 0) {
pr_warning("unwind: Corrupt unwind table\n");
return 0;
}
ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;
if (ctrl->byte == 0) {
ctrl->insn++;
ctrl->entries--;
ctrl->byte = 3;
} else
ctrl->byte--;
return ret;
}
/*
* Execute the current unwind instruction.
*/
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
unsigned long insn = unwind_get_byte(ctrl);
pr_debug("%s: insn = %08lx\n", __func__, insn);
if ((insn & 0xc0) == 0x00)
ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
else if ((insn & 0xc0) == 0x40)
ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
else if ((insn & 0xf0) == 0x80) {
unsigned long mask;
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int load_sp, reg = 4;
insn = (insn << 8) | unwind_get_byte(ctrl);
mask = insn & 0x0fff;
if (mask == 0) {
pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n",
insn);
return -URC_FAILURE;
}
/* pop R4-R15 according to mask */
load_sp = mask & (1 << (13 - 4));
while (mask) {
if (mask & 1)
ctrl->vrs[reg] = *vsp++;
mask >>= 1;
reg++;
}
if (!load_sp)
ctrl->vrs[SP] = (unsigned long)vsp;
} else if ((insn & 0xf0) == 0x90 &&
(insn & 0x0d) != 0x0d)
ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
else if ((insn & 0xf0) == 0xa0) {
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int reg;
/* pop R4-R[4+bbb] */
for (reg = 4; reg <= 4 + (insn & 7); reg++)
ctrl->vrs[reg] = *vsp++;
		if (insn & 0x08)
ctrl->vrs[14] = *vsp++;
ctrl->vrs[SP] = (unsigned long)vsp;
} else if (insn == 0xb0) {
if (ctrl->vrs[PC] == 0)
ctrl->vrs[PC] = ctrl->vrs[LR];
/* no further processing */
ctrl->entries = 0;
} else if (insn == 0xb1) {
unsigned long mask = unwind_get_byte(ctrl);
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int reg = 0;
if (mask == 0 || mask & 0xf0) {
pr_warning("unwind: Spare encoding %04lx\n",
(insn << 8) | mask);
return -URC_FAILURE;
}
/* pop R0-R3 according to mask */
while (mask) {
if (mask & 1)
ctrl->vrs[reg] = *vsp++;
mask >>= 1;
reg++;
}
ctrl->vrs[SP] = (unsigned long)vsp;
} else if (insn == 0xb2) {
unsigned long uleb128 = unwind_get_byte(ctrl);
ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
} else {
pr_warning("unwind: Unhandled instruction %02lx\n", insn);
return -URC_FAILURE;
}
pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);
return URC_OK;
}
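/*
 * Summary of the ARM EHABI unwind opcodes interpreted above:
 *
 *	00xxxxxx           vsp += (xxxxxx << 2) + 4
 *	01xxxxxx           vsp -= (xxxxxx << 2) + 4
 *	1000iiii iiiiiiii  pop r4-r15 under mask (zero mask: refuse to unwind)
 *	1001nnnn           vsp = r[nnnn]          (nnnn != 13, 15)
 *	10100nnn           pop r4-r[4+nnn]
 *	10101nnn           pop r4-r[4+nnn] and r14
 *	10110000           finish (pc defaults to lr if not restored)
 *	10110001 0000iiii  pop r0-r3 under mask
 *	10110010 uleb128   vsp += 0x204 + (uleb128 << 2) (only a single
 *	                   uleb128 byte is handled here)
 *
 * Any other encoding is reported as unhandled and aborts the unwind.
 */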
/*
* Unwind a single frame starting with *sp for the symbol at *pc. It
* updates the *pc and *sp with the new values.
*/
int unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
/* only go to a higher address on the stack */
low = frame->sp;
high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
frame->pc, frame->lr, frame->sp);
if (!kernel_text_address(frame->pc))
return -URC_FAILURE;
idx = unwind_find_idx(frame->pc);
if (!idx) {
pr_warning("unwind: Index not found %08lx\n", frame->pc);
return -URC_FAILURE;
}
ctrl.vrs[FP] = frame->fp;
ctrl.vrs[SP] = frame->sp;
ctrl.vrs[LR] = frame->lr;
ctrl.vrs[PC] = 0;
if (idx->insn == 1)
/* can't unwind */
return -URC_FAILURE;
else if ((idx->insn & 0x80000000) == 0)
/* prel31 to the unwind table */
ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
else if ((idx->insn & 0xff000000) == 0x80000000)
/* only personality routine 0 supported in the index */
ctrl.insn = &idx->insn;
else {
pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n",
idx->insn, idx);
return -URC_FAILURE;
}
/* check the personality routine */
if ((*ctrl.insn & 0xff000000) == 0x80000000) {
ctrl.byte = 2;
ctrl.entries = 1;
} else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
ctrl.byte = 1;
ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
} else {
pr_warning("unwind: Unsupported personality routine %08lx at %p\n",
*ctrl.insn, ctrl.insn);
return -URC_FAILURE;
}
while (ctrl.entries > 0) {
int urc = unwind_exec_insn(&ctrl);
if (urc < 0)
return urc;
if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
return -URC_FAILURE;
}
if (ctrl.vrs[PC] == 0)
ctrl.vrs[PC] = ctrl.vrs[LR];
/* check for infinite loop */
if (frame->pc == ctrl.vrs[PC])
return -URC_FAILURE;
frame->fp = ctrl.vrs[FP];
frame->sp = ctrl.vrs[SP];
frame->lr = ctrl.vrs[LR];
frame->pc = ctrl.vrs[PC];
return URC_OK;
}
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)
tsk = current;
if (regs) {
frame.fp = regs->ARM_fp;
frame.sp = regs->ARM_sp;
frame.lr = regs->ARM_lr;
frame.pc = regs->ARM_pc;
} else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)unwind_backtrace;
} else {
/* task blocked in __switch_to */
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
/*
* The function calling __switch_to cannot be a leaf function
* so LR is recovered from the stack.
*/
frame.lr = 0;
frame.pc = thread_saved_pc(tsk);
}
while (1) {
int urc;
unsigned long where = frame.pc;
urc = unwind_frame(&frame);
if (urc < 0)
break;
dump_backtrace_entry(where, frame.pc, frame.sp - 4);
}
}
struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
unsigned long text_addr,
unsigned long text_size)
{
unsigned long flags;
struct unwind_idx *idx;
struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
text_addr, text_size);
if (!tab)
return tab;
tab->start = (struct unwind_idx *)start;
tab->stop = (struct unwind_idx *)(start + size);
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
/* Convert the symbol addresses to absolute values */
for (idx = tab->start; idx < tab->stop; idx++)
idx->addr = prel31_to_addr(&idx->addr);
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
return tab;
}
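/*
 * unwind_table_add()/unwind_table_del() exist so that code added at
 * runtime can be unwound too; typically the ARM module loader registers
 * each module's .ARM.exidx section through this interface and removes it
 * again when the module is unloaded.
 */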
void unwind_table_del(struct unwind_table *tab)
{
unsigned long flags;
if (!tab)
return;
spin_lock_irqsave(&unwind_lock, flags);
list_del(&tab->list);
spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}
int __init unwind_init(void)
{
struct unwind_idx *idx;
/* Convert the symbol addresses to absolute values */
for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
idx->addr = prel31_to_addr(&idx->addr);
pr_debug("unwind: ARM stack unwinding initialised\n");
return 0;
}
| CyanogenMod/htc-kernel-msm7227 | arch/arm/kernel/unwind.c | C | gpl-2.0 | 11,205 |
/* ZD1211 USB-WLAN driver for Linux
*
* Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
* Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* This file implements all the hardware specific functions for the ZD1211
* and ZD1211B chips. Support for the ZD1211B was possible after Timothy
* Legge sent me a ZD1211B device. Thank you Tim. -- Uli
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include "zd_def.h"
#include "zd_chip.h"
#include "zd_mac.h"
#include "zd_rf.h"
void zd_chip_init(struct zd_chip *chip,
struct ieee80211_hw *hw,
struct usb_interface *intf)
{
memset(chip, 0, sizeof(*chip));
mutex_init(&chip->mutex);
zd_usb_init(&chip->usb, hw, intf);
zd_rf_init(&chip->rf);
}
void zd_chip_clear(struct zd_chip *chip)
{
ZD_ASSERT(!mutex_is_locked(&chip->mutex));
zd_usb_clear(&chip->usb);
zd_rf_clear(&chip->rf);
mutex_destroy(&chip->mutex);
ZD_MEMCLEAR(chip, sizeof(*chip));
}
static int scnprint_mac_oui(struct zd_chip *chip, char *buffer, size_t size)
{
u8 *addr = zd_mac_get_perm_addr(zd_chip_to_mac(chip));
return scnprintf(buffer, size, "%02x-%02x-%02x",
addr[0], addr[1], addr[2]);
}
/* Prints an identifier line, which will support debugging. */
static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size)
{
int i = 0;
i = scnprintf(buffer, size, "zd1211%s chip ",
zd_chip_is_zd1211b(chip) ? "b" : "");
i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i);
i += scnprintf(buffer+i, size-i, " ");
i += scnprint_mac_oui(chip, buffer+i, size-i);
i += scnprintf(buffer+i, size-i, " ");
i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i);
i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c%c%c", chip->pa_type,
chip->patch_cck_gain ? 'g' : '-',
chip->patch_cr157 ? '7' : '-',
chip->patch_6m_band_edge ? '6' : '-',
chip->new_phy_layout ? 'N' : '-',
chip->al2230s_bit ? 'S' : '-');
return i;
}
static void print_id(struct zd_chip *chip)
{
char buffer[80];
scnprint_id(chip, buffer, sizeof(buffer));
buffer[sizeof(buffer)-1] = 0;
dev_info(zd_chip_dev(chip), "%s\n", buffer);
}
static zd_addr_t inc_addr(zd_addr_t addr)
{
u16 a = (u16)addr;
/* Control registers use byte addressing, but everything else uses word
* addressing. */
if ((a & 0xf000) == CR_START)
a += 2;
else
a += 1;
return (zd_addr_t)a;
}
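/*
 * inc_addr() therefore yields the location of the upper 16-bit half of a
 * 32-bit register: +2 for the byte-addressed control registers in the
 * CR_START range, +1 for everything that is word addressed.
 */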
/* Read a variable number of 32-bit values. Parameter count is not allowed to
* exceed USB_MAX_IOREAD32_COUNT.
*/
int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr,
unsigned int count)
{
int r;
int i;
zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2];
u16 v16[USB_MAX_IOREAD32_COUNT * 2];
unsigned int count16;
if (count > USB_MAX_IOREAD32_COUNT)
return -EINVAL;
/* Use stack for values and addresses. */
count16 = 2 * count;
BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16));
BUG_ON(count16 * sizeof(u16) > sizeof(v16));
for (i = 0; i < count; i++) {
int j = 2*i;
/* We read the high word always first. */
a16[j] = inc_addr(addr[i]);
a16[j+1] = addr[i];
}
r = zd_ioread16v_locked(chip, v16, a16, count16);
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error: zd_ioread16v_locked. Error number %d\n", r);
return r;
}
for (i = 0; i < count; i++) {
int j = 2*i;
values[i] = (v16[j] << 16) | v16[j+1];
}
return 0;
}
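/*
 * Example: a single 32-bit read of register A is issued as two 16-bit
 * reads, inc_addr(A) first (high word) and then A (low word), and the
 * result is reassembled as (high << 16) | low.
 */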
static int _zd_iowrite32v_async_locked(struct zd_chip *chip,
const struct zd_ioreq32 *ioreqs,
unsigned int count)
{
int i, j, r;
struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2];
unsigned int count16;
/* Use stack for values and addresses. */
ZD_ASSERT(mutex_is_locked(&chip->mutex));
if (count == 0)
return 0;
if (count > USB_MAX_IOWRITE32_COUNT)
return -EINVAL;
count16 = 2 * count;
BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16));
for (i = 0; i < count; i++) {
j = 2*i;
/* We always write the high word first. */
ioreqs16[j].value = ioreqs[i].value >> 16;
ioreqs16[j].addr = inc_addr(ioreqs[i].addr);
ioreqs16[j+1].value = ioreqs[i].value;
ioreqs16[j+1].addr = ioreqs[i].addr;
}
r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16);
#ifdef DEBUG
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error %d in zd_usb_write16v\n", r);
}
#endif /* DEBUG */
return r;
}
int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
unsigned int count)
{
int r;
zd_usb_iowrite16v_async_start(&chip->usb);
r = _zd_iowrite32v_async_locked(chip, ioreqs, count);
if (r) {
zd_usb_iowrite16v_async_end(&chip->usb, 0);
return r;
}
return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
}
int zd_iowrite16a_locked(struct zd_chip *chip,
const struct zd_ioreq16 *ioreqs, unsigned int count)
{
int r;
unsigned int i, j, t, max;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
zd_usb_iowrite16v_async_start(&chip->usb);
for (i = 0; i < count; i += j + t) {
t = 0;
max = count-i;
if (max > USB_MAX_IOWRITE16_COUNT)
max = USB_MAX_IOWRITE16_COUNT;
for (j = 0; j < max; j++) {
if (!ioreqs[i+j].addr) {
t = 1;
break;
}
}
r = zd_usb_iowrite16v_async(&chip->usb, &ioreqs[i], j);
if (r) {
zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
"error zd_usb_iowrite16v. Error number %d\n",
r);
return r;
}
}
return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
}
/* Writes a variable number of 32-bit registers. The function will split
 * them into several USB requests. A split can be forced by inserting an I/O
 * request with a zero address field.
 */
int zd_iowrite32a_locked(struct zd_chip *chip,
const struct zd_ioreq32 *ioreqs, unsigned int count)
{
int r;
unsigned int i, j, t, max;
zd_usb_iowrite16v_async_start(&chip->usb);
for (i = 0; i < count; i += j + t) {
t = 0;
max = count-i;
if (max > USB_MAX_IOWRITE32_COUNT)
max = USB_MAX_IOWRITE32_COUNT;
for (j = 0; j < max; j++) {
if (!ioreqs[i+j].addr) {
t = 1;
break;
}
}
r = _zd_iowrite32v_async_locked(chip, &ioreqs[i], j);
if (r) {
zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
"error _zd_iowrite32v_locked."
" Error number %d\n", r);
return r;
}
}
return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
}
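/*
 * Usage sketch (illustrative, not part of the original driver): the empty
 * entry in the middle forces everything after it into a separate USB
 * request, which some register sequences require.  CR_EXAMPLE_* are
 * hypothetical register names.
 *
 *     static const struct zd_ioreq32 ioreqs[] = {
 *         { CR_EXAMPLE_A, 0x00000001 },
 *         { CR_EXAMPLE_B, 0x00000002 },
 *         { },
 *         { CR_EXAMPLE_C, 0x00000003 },
 *     };
 *     r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
 */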
int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
{
int r;
mutex_lock(&chip->mutex);
r = zd_ioread16_locked(chip, value, addr);
mutex_unlock(&chip->mutex);
return r;
}
int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value)
{
int r;
mutex_lock(&chip->mutex);
r = zd_ioread32_locked(chip, value, addr);
mutex_unlock(&chip->mutex);
return r;
}
int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value)
{
int r;
mutex_lock(&chip->mutex);
r = zd_iowrite16_locked(chip, value, addr);
mutex_unlock(&chip->mutex);
return r;
}
int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value)
{
int r;
mutex_lock(&chip->mutex);
r = zd_iowrite32_locked(chip, value, addr);
mutex_unlock(&chip->mutex);
return r;
}
int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses,
u32 *values, unsigned int count)
{
int r;
mutex_lock(&chip->mutex);
r = zd_ioread32v_locked(chip, values, addresses, count);
mutex_unlock(&chip->mutex);
return r;
}
int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
unsigned int count)
{
int r;
mutex_lock(&chip->mutex);
r = zd_iowrite32a_locked(chip, ioreqs, count);
mutex_unlock(&chip->mutex);
return r;
}
static int read_pod(struct zd_chip *chip, u8 *rf_type)
{
int r;
u32 value;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
r = zd_ioread32_locked(chip, &value, E2P_POD);
if (r)
goto error;
dev_dbg_f(zd_chip_dev(chip), "E2P_POD %#010x\n", value);
/* FIXME: AL2230 handling (Bit 7 in POD) */
*rf_type = value & 0x0f;
chip->pa_type = (value >> 16) & 0x0f;
chip->patch_cck_gain = (value >> 8) & 0x1;
chip->patch_cr157 = (value >> 13) & 0x1;
chip->patch_6m_band_edge = (value >> 21) & 0x1;
chip->new_phy_layout = (value >> 31) & 0x1;
chip->al2230s_bit = (value >> 7) & 0x1;
chip->link_led = ((value >> 4) & 1) ? LED1 : LED2;
chip->supports_tx_led = 1;
if (value & (1 << 24)) { /* LED scenario */
if (value & (1 << 29))
chip->supports_tx_led = 0;
}
dev_dbg_f(zd_chip_dev(chip),
"RF %s %#01x PA type %#01x patch CCK %d patch CR157 %d "
"patch 6M %d new PHY %d link LED%d tx led %d\n",
zd_rf_name(*rf_type), *rf_type,
chip->pa_type, chip->patch_cck_gain,
chip->patch_cr157, chip->patch_6m_band_edge,
chip->new_phy_layout,
chip->link_led == LED1 ? 1 : 2,
chip->supports_tx_led);
return 0;
error:
*rf_type = 0;
chip->pa_type = 0;
chip->patch_cck_gain = 0;
chip->patch_cr157 = 0;
chip->patch_6m_band_edge = 0;
chip->new_phy_layout = 0;
return r;
}
static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr,
const struct zd_ioreq32 *in_reqs,
const char *type)
{
int r;
struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
if (mac_addr) {
reqs[0].value = (mac_addr[3] << 24)
| (mac_addr[2] << 16)
| (mac_addr[1] << 8)
| mac_addr[0];
reqs[1].value = (mac_addr[5] << 8)
| mac_addr[4];
dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr);
} else {
dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type);
}
mutex_lock(&chip->mutex);
r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
mutex_unlock(&chip->mutex);
return r;
}
/* MAC address: if custom MAC addresses are to be used, CR_MAC_ADDR_P1 and
 * CR_MAC_ADDR_P2 must be overwritten.
*/
int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
{
static const struct zd_ioreq32 reqs[2] = {
[0] = { .addr = CR_MAC_ADDR_P1 },
[1] = { .addr = CR_MAC_ADDR_P2 },
};
return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
}
int zd_write_bssid(struct zd_chip *chip, const u8 *bssid)
{
static const struct zd_ioreq32 reqs[2] = {
[0] = { .addr = CR_BSSID_P1 },
[1] = { .addr = CR_BSSID_P2 },
};
return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
}
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
{
int r;
u32 value;
mutex_lock(&chip->mutex);
r = zd_ioread32_locked(chip, &value, E2P_SUBID);
mutex_unlock(&chip->mutex);
if (r)
return r;
*regdomain = value >> 16;
dev_dbg_f(zd_chip_dev(chip), "regdomain: %#04x\n", *regdomain);
return 0;
}
static int read_values(struct zd_chip *chip, u8 *values, size_t count,
zd_addr_t e2p_addr, u32 guard)
{
int r;
int i;
u32 v;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
for (i = 0;;) {
r = zd_ioread32_locked(chip, &v,
(zd_addr_t)((u16)e2p_addr+i/2));
if (r)
return r;
v -= guard;
if (i+4 < count) {
values[i++] = v;
values[i++] = v >> 8;
values[i++] = v >> 16;
values[i++] = v >> 24;
continue;
}
for (;i < count; i++)
values[i] = v >> (8*(i%3));
return 0;
}
}
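/*
 * Illustrative note (not part of the original driver): each 32-bit EEPROM
 * word read above carries four consecutive byte values (after the optional
 * guard is subtracted), unpacked least significant byte first, so one
 * 32-bit read covers four table entries.
 */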
static int read_pwr_cal_values(struct zd_chip *chip)
{
return read_values(chip, chip->pwr_cal_values,
E2P_CHANNEL_COUNT, E2P_PWR_CAL_VALUE1,
0);
}
static int read_pwr_int_values(struct zd_chip *chip)
{
return read_values(chip, chip->pwr_int_values,
E2P_CHANNEL_COUNT, E2P_PWR_INT_VALUE1,
E2P_PWR_INT_GUARD);
}
static int read_ofdm_cal_values(struct zd_chip *chip)
{
int r;
int i;
static const zd_addr_t addresses[] = {
E2P_36M_CAL_VALUE1,
E2P_48M_CAL_VALUE1,
E2P_54M_CAL_VALUE1,
};
for (i = 0; i < 3; i++) {
r = read_values(chip, chip->ofdm_cal_values[i],
E2P_CHANNEL_COUNT, addresses[i], 0);
if (r)
return r;
}
return 0;
}
static int read_cal_int_tables(struct zd_chip *chip)
{
int r;
r = read_pwr_cal_values(chip);
if (r)
return r;
r = read_pwr_int_values(chip);
if (r)
return r;
r = read_ofdm_cal_values(chip);
if (r)
return r;
return 0;
}
/* phy means physical registers */
int zd_chip_lock_phy_regs(struct zd_chip *chip)
{
int r;
u32 tmp;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
r = zd_ioread32_locked(chip, &tmp, CR_REG1);
if (r) {
dev_err(zd_chip_dev(chip), "error ioread32(CR_REG1): %d\n", r);
return r;
}
tmp &= ~UNLOCK_PHY_REGS;
r = zd_iowrite32_locked(chip, tmp, CR_REG1);
if (r)
dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r);
return r;
}
int zd_chip_unlock_phy_regs(struct zd_chip *chip)
{
int r;
u32 tmp;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
r = zd_ioread32_locked(chip, &tmp, CR_REG1);
if (r) {
dev_err(zd_chip_dev(chip),
"error ioread32(CR_REG1): %d\n", r);
return r;
}
tmp |= UNLOCK_PHY_REGS;
r = zd_iowrite32_locked(chip, tmp, CR_REG1);
if (r)
dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r);
return r;
}
/* ZD_CR157 can be optionally patched by the EEPROM for original ZD1211 */
static int patch_cr157(struct zd_chip *chip)
{
int r;
u16 value;
if (!chip->patch_cr157)
return 0;
r = zd_ioread16_locked(chip, &value, E2P_PHY_REG);
if (r)
return r;
dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value >> 8);
return zd_iowrite32_locked(chip, value >> 8, ZD_CR157);
}
/*
 * 6M band edge can be optionally overwritten for certain RFs.
* Vendor driver says: for FCC regulation, enabled per HWFeature 6M band edge
* bit (for AL2230, AL2230S)
*/
static int patch_6m_band_edge(struct zd_chip *chip, u8 channel)
{
ZD_ASSERT(mutex_is_locked(&chip->mutex));
if (!chip->patch_6m_band_edge)
return 0;
return zd_rf_patch_6m_band_edge(&chip->rf, channel);
}
/* Generic implementation of 6M band edge patching, used by most RFs via
* zd_rf_generic_patch_6m() */
int zd_chip_generic_patch_6m_band(struct zd_chip *chip, int channel)
{
struct zd_ioreq16 ioreqs[] = {
{ ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 },
{ ZD_CR47, 0x1e },
};
/* FIXME: Channel 11 is not the edge for all regulatory domains. */
if (channel == 1 || channel == 11)
ioreqs[0].value = 0x12;
dev_dbg_f(zd_chip_dev(chip), "patching for channel %d\n", channel);
return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
static int zd1211_hw_reset_phy(struct zd_chip *chip)
{
static const struct zd_ioreq16 ioreqs[] = {
{ ZD_CR0, 0x0a }, { ZD_CR1, 0x06 }, { ZD_CR2, 0x26 },
{ ZD_CR3, 0x38 }, { ZD_CR4, 0x80 }, { ZD_CR9, 0xa0 },
{ ZD_CR10, 0x81 }, { ZD_CR11, 0x00 }, { ZD_CR12, 0x7f },
{ ZD_CR13, 0x8c }, { ZD_CR14, 0x80 }, { ZD_CR15, 0x3d },
{ ZD_CR16, 0x20 }, { ZD_CR17, 0x1e }, { ZD_CR18, 0x0a },
{ ZD_CR19, 0x48 }, { ZD_CR20, 0x0c }, { ZD_CR21, 0x0c },
{ ZD_CR22, 0x23 }, { ZD_CR23, 0x90 }, { ZD_CR24, 0x14 },
{ ZD_CR25, 0x40 }, { ZD_CR26, 0x10 }, { ZD_CR27, 0x19 },
{ ZD_CR28, 0x7f }, { ZD_CR29, 0x80 }, { ZD_CR30, 0x4b },
{ ZD_CR31, 0x60 }, { ZD_CR32, 0x43 }, { ZD_CR33, 0x08 },
{ ZD_CR34, 0x06 }, { ZD_CR35, 0x0a }, { ZD_CR36, 0x00 },
{ ZD_CR37, 0x00 }, { ZD_CR38, 0x38 }, { ZD_CR39, 0x0c },
{ ZD_CR40, 0x84 }, { ZD_CR41, 0x2a }, { ZD_CR42, 0x80 },
{ ZD_CR43, 0x10 }, { ZD_CR44, 0x12 }, { ZD_CR46, 0xff },
{ ZD_CR47, 0x1E }, { ZD_CR48, 0x26 }, { ZD_CR49, 0x5b },
{ ZD_CR64, 0xd0 }, { ZD_CR65, 0x04 }, { ZD_CR66, 0x58 },
{ ZD_CR67, 0xc9 }, { ZD_CR68, 0x88 }, { ZD_CR69, 0x41 },
{ ZD_CR70, 0x23 }, { ZD_CR71, 0x10 }, { ZD_CR72, 0xff },
{ ZD_CR73, 0x32 }, { ZD_CR74, 0x30 }, { ZD_CR75, 0x65 },
{ ZD_CR76, 0x41 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x30 },
{ ZD_CR79, 0x68 }, { ZD_CR80, 0x64 }, { ZD_CR81, 0x64 },
{ ZD_CR82, 0x00 }, { ZD_CR83, 0x00 }, { ZD_CR84, 0x00 },
{ ZD_CR85, 0x02 }, { ZD_CR86, 0x00 }, { ZD_CR87, 0x00 },
{ ZD_CR88, 0xff }, { ZD_CR89, 0xfc }, { ZD_CR90, 0x00 },
{ ZD_CR91, 0x00 }, { ZD_CR92, 0x00 }, { ZD_CR93, 0x08 },
{ ZD_CR94, 0x00 }, { ZD_CR95, 0x00 }, { ZD_CR96, 0xff },
{ ZD_CR97, 0xe7 }, { ZD_CR98, 0x00 }, { ZD_CR99, 0x00 },
{ ZD_CR100, 0x00 }, { ZD_CR101, 0xae }, { ZD_CR102, 0x02 },
{ ZD_CR103, 0x00 }, { ZD_CR104, 0x03 }, { ZD_CR105, 0x65 },
{ ZD_CR106, 0x04 }, { ZD_CR107, 0x00 }, { ZD_CR108, 0x0a },
{ ZD_CR109, 0xaa }, { ZD_CR110, 0xaa }, { ZD_CR111, 0x25 },
{ ZD_CR112, 0x25 }, { ZD_CR113, 0x00 }, { ZD_CR119, 0x1e },
{ ZD_CR125, 0x90 }, { ZD_CR126, 0x00 }, { ZD_CR127, 0x00 },
{ },
{ ZD_CR5, 0x00 }, { ZD_CR6, 0x00 }, { ZD_CR7, 0x00 },
{ ZD_CR8, 0x00 }, { ZD_CR9, 0x20 }, { ZD_CR12, 0xf0 },
{ ZD_CR20, 0x0e }, { ZD_CR21, 0x0e }, { ZD_CR27, 0x10 },
{ ZD_CR44, 0x33 }, { ZD_CR47, 0x1E }, { ZD_CR83, 0x24 },
{ ZD_CR84, 0x04 }, { ZD_CR85, 0x00 }, { ZD_CR86, 0x0C },
{ ZD_CR87, 0x12 }, { ZD_CR88, 0x0C }, { ZD_CR89, 0x00 },
{ ZD_CR90, 0x10 }, { ZD_CR91, 0x08 }, { ZD_CR93, 0x00 },
{ ZD_CR94, 0x01 }, { ZD_CR95, 0x00 }, { ZD_CR96, 0x50 },
{ ZD_CR97, 0x37 }, { ZD_CR98, 0x35 }, { ZD_CR101, 0x13 },
{ ZD_CR102, 0x27 }, { ZD_CR103, 0x27 }, { ZD_CR104, 0x18 },
{ ZD_CR105, 0x12 }, { ZD_CR109, 0x27 }, { ZD_CR110, 0x27 },
{ ZD_CR111, 0x27 }, { ZD_CR112, 0x27 }, { ZD_CR113, 0x27 },
{ ZD_CR114, 0x27 }, { ZD_CR115, 0x26 }, { ZD_CR116, 0x24 },
{ ZD_CR117, 0xfc }, { ZD_CR118, 0xfa }, { ZD_CR120, 0x4f },
{ ZD_CR125, 0xaa }, { ZD_CR127, 0x03 }, { ZD_CR128, 0x14 },
{ ZD_CR129, 0x12 }, { ZD_CR130, 0x10 }, { ZD_CR131, 0x0C },
{ ZD_CR136, 0xdf }, { ZD_CR137, 0x40 }, { ZD_CR138, 0xa0 },
{ ZD_CR139, 0xb0 }, { ZD_CR140, 0x99 }, { ZD_CR141, 0x82 },
{ ZD_CR142, 0x54 }, { ZD_CR143, 0x1c }, { ZD_CR144, 0x6c },
{ ZD_CR147, 0x07 }, { ZD_CR148, 0x4c }, { ZD_CR149, 0x50 },
{ ZD_CR150, 0x0e }, { ZD_CR151, 0x18 }, { ZD_CR160, 0xfe },
{ ZD_CR161, 0xee }, { ZD_CR162, 0xaa }, { ZD_CR163, 0xfa },
{ ZD_CR164, 0xfa }, { ZD_CR165, 0xea }, { ZD_CR166, 0xbe },
{ ZD_CR167, 0xbe }, { ZD_CR168, 0x6a }, { ZD_CR169, 0xba },
{ ZD_CR170, 0xba }, { ZD_CR171, 0xba },
/* Note: ZD_CR204 must be written before ZD_CR203 */
{ ZD_CR204, 0x7d },
{ },
{ ZD_CR203, 0x30 },
};
int r, t;
dev_dbg_f(zd_chip_dev(chip), "\n");
r = zd_chip_lock_phy_regs(chip);
if (r)
goto out;
r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
if (r)
goto unlock;
r = patch_cr157(chip);
unlock:
t = zd_chip_unlock_phy_regs(chip);
if (t && !r)
r = t;
out:
return r;
}
static int zd1211b_hw_reset_phy(struct zd_chip *chip)
{
static const struct zd_ioreq16 ioreqs[] = {
{ ZD_CR0, 0x14 }, { ZD_CR1, 0x06 }, { ZD_CR2, 0x26 },
{ ZD_CR3, 0x38 }, { ZD_CR4, 0x80 }, { ZD_CR9, 0xe0 },
{ ZD_CR10, 0x81 },
/* power control { { ZD_CR11, 1 << 6 }, */
{ ZD_CR11, 0x00 },
{ ZD_CR12, 0xf0 }, { ZD_CR13, 0x8c }, { ZD_CR14, 0x80 },
{ ZD_CR15, 0x3d }, { ZD_CR16, 0x20 }, { ZD_CR17, 0x1e },
{ ZD_CR18, 0x0a }, { ZD_CR19, 0x48 },
{ ZD_CR20, 0x10 }, /* Org:0x0E, ComTrend:RalLink AP */
{ ZD_CR21, 0x0e }, { ZD_CR22, 0x23 }, { ZD_CR23, 0x90 },
{ ZD_CR24, 0x14 }, { ZD_CR25, 0x40 }, { ZD_CR26, 0x10 },
{ ZD_CR27, 0x10 }, { ZD_CR28, 0x7f }, { ZD_CR29, 0x80 },
{ ZD_CR30, 0x4b }, /* ASIC/FWT, no jointly decoder */
{ ZD_CR31, 0x60 }, { ZD_CR32, 0x43 }, { ZD_CR33, 0x08 },
{ ZD_CR34, 0x06 }, { ZD_CR35, 0x0a }, { ZD_CR36, 0x00 },
{ ZD_CR37, 0x00 }, { ZD_CR38, 0x38 }, { ZD_CR39, 0x0c },
{ ZD_CR40, 0x84 }, { ZD_CR41, 0x2a }, { ZD_CR42, 0x80 },
{ ZD_CR43, 0x10 }, { ZD_CR44, 0x33 }, { ZD_CR46, 0xff },
{ ZD_CR47, 0x1E }, { ZD_CR48, 0x26 }, { ZD_CR49, 0x5b },
{ ZD_CR64, 0xd0 }, { ZD_CR65, 0x04 }, { ZD_CR66, 0x58 },
{ ZD_CR67, 0xc9 }, { ZD_CR68, 0x88 }, { ZD_CR69, 0x41 },
{ ZD_CR70, 0x23 }, { ZD_CR71, 0x10 }, { ZD_CR72, 0xff },
{ ZD_CR73, 0x32 }, { ZD_CR74, 0x30 }, { ZD_CR75, 0x65 },
{ ZD_CR76, 0x41 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x30 },
{ ZD_CR79, 0xf0 }, { ZD_CR80, 0x64 }, { ZD_CR81, 0x64 },
{ ZD_CR82, 0x00 }, { ZD_CR83, 0x24 }, { ZD_CR84, 0x04 },
{ ZD_CR85, 0x00 }, { ZD_CR86, 0x0c }, { ZD_CR87, 0x12 },
{ ZD_CR88, 0x0c }, { ZD_CR89, 0x00 }, { ZD_CR90, 0x58 },
{ ZD_CR91, 0x04 }, { ZD_CR92, 0x00 }, { ZD_CR93, 0x00 },
{ ZD_CR94, 0x01 },
{ ZD_CR95, 0x20 }, /* ZD1211B */
{ ZD_CR96, 0x50 }, { ZD_CR97, 0x37 }, { ZD_CR98, 0x35 },
{ ZD_CR99, 0x00 }, { ZD_CR100, 0x01 }, { ZD_CR101, 0x13 },
{ ZD_CR102, 0x27 }, { ZD_CR103, 0x27 }, { ZD_CR104, 0x18 },
{ ZD_CR105, 0x12 }, { ZD_CR106, 0x04 }, { ZD_CR107, 0x00 },
{ ZD_CR108, 0x0a }, { ZD_CR109, 0x27 }, { ZD_CR110, 0x27 },
{ ZD_CR111, 0x27 }, { ZD_CR112, 0x27 }, { ZD_CR113, 0x27 },
{ ZD_CR114, 0x27 }, { ZD_CR115, 0x26 }, { ZD_CR116, 0x24 },
{ ZD_CR117, 0xfc }, { ZD_CR118, 0xfa }, { ZD_CR119, 0x1e },
{ ZD_CR125, 0x90 }, { ZD_CR126, 0x00 }, { ZD_CR127, 0x00 },
{ ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 },
{ ZD_CR131, 0x0c }, { ZD_CR136, 0xdf }, { ZD_CR137, 0xa0 },
{ ZD_CR138, 0xa8 }, { ZD_CR139, 0xb4 }, { ZD_CR140, 0x98 },
{ ZD_CR141, 0x82 }, { ZD_CR142, 0x53 }, { ZD_CR143, 0x1c },
{ ZD_CR144, 0x6c }, { ZD_CR147, 0x07 }, { ZD_CR148, 0x40 },
{ ZD_CR149, 0x40 }, /* Org:0x50 ComTrend:RalLink AP */
{ ZD_CR150, 0x14 }, /* Org:0x0E ComTrend:RalLink AP */
{ ZD_CR151, 0x18 }, { ZD_CR159, 0x70 }, { ZD_CR160, 0xfe },
{ ZD_CR161, 0xee }, { ZD_CR162, 0xaa }, { ZD_CR163, 0xfa },
{ ZD_CR164, 0xfa }, { ZD_CR165, 0xea }, { ZD_CR166, 0xbe },
{ ZD_CR167, 0xbe }, { ZD_CR168, 0x6a }, { ZD_CR169, 0xba },
{ ZD_CR170, 0xba }, { ZD_CR171, 0xba },
/* Note: ZD_CR204 must be written before ZD_CR203 */
{ ZD_CR204, 0x7d },
{},
{ ZD_CR203, 0x30 },
};
int r, t;
dev_dbg_f(zd_chip_dev(chip), "\n");
r = zd_chip_lock_phy_regs(chip);
if (r)
goto out;
r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
t = zd_chip_unlock_phy_regs(chip);
if (t && !r)
r = t;
out:
return r;
}
static int hw_reset_phy(struct zd_chip *chip)
{
return zd_chip_is_zd1211b(chip) ? zd1211b_hw_reset_phy(chip) :
zd1211_hw_reset_phy(chip);
}
static int zd1211_hw_init_hmac(struct zd_chip *chip)
{
static const struct zd_ioreq32 ioreqs[] = {
{ CR_ZD1211_RETRY_MAX, ZD1211_RETRY_COUNT },
{ CR_RX_THRESHOLD, 0x000c0640 },
};
dev_dbg_f(zd_chip_dev(chip), "\n");
ZD_ASSERT(mutex_is_locked(&chip->mutex));
return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
static int zd1211b_hw_init_hmac(struct zd_chip *chip)
{
static const struct zd_ioreq32 ioreqs[] = {
{ CR_ZD1211B_RETRY_MAX, ZD1211B_RETRY_COUNT },
{ CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f },
{ CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f },
{ CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f },
{ CR_ZD1211B_CWIN_MAX_MIN_AC3, 0x001f000f },
{ CR_ZD1211B_AIFS_CTL1, 0x00280028 },
{ CR_ZD1211B_AIFS_CTL2, 0x008C003C },
{ CR_ZD1211B_TXOP, 0x01800824 },
{ CR_RX_THRESHOLD, 0x000c0eff, },
};
dev_dbg_f(zd_chip_dev(chip), "\n");
ZD_ASSERT(mutex_is_locked(&chip->mutex));
return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
static int hw_init_hmac(struct zd_chip *chip)
{
int r;
static const struct zd_ioreq32 ioreqs[] = {
{ CR_ACK_TIMEOUT_EXT, 0x20 },
{ CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
{ CR_SNIFFER_ON, 0 },
{ CR_RX_FILTER, STA_RX_FILTER },
{ CR_GROUP_HASH_P1, 0x00 },
{ CR_GROUP_HASH_P2, 0x80000000 },
{ CR_REG1, 0xa4 },
{ CR_ADDA_PWR_DWN, 0x7f },
{ CR_BCN_PLCP_CFG, 0x00f00401 },
{ CR_PHY_DELAY, 0x00 },
{ CR_ACK_TIMEOUT_EXT, 0x80 },
{ CR_ADDA_PWR_DWN, 0x00 },
{ CR_ACK_TIME_80211, 0x100 },
{ CR_RX_PE_DELAY, 0x70 },
{ CR_PS_CTRL, 0x10000000 },
{ CR_RTS_CTS_RATE, 0x02030203 },
{ CR_AFTER_PNP, 0x1 },
{ CR_WEP_PROTECT, 0x114 },
{ CR_IFS_VALUE, IFS_VALUE_DEFAULT },
{ CR_CAM_MODE, MODE_AP_WDS},
};
ZD_ASSERT(mutex_is_locked(&chip->mutex));
r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
if (r)
return r;
return zd_chip_is_zd1211b(chip) ?
zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip);
}
struct aw_pt_bi {
u32 atim_wnd_period;
u32 pre_tbtt;
u32 beacon_interval;
};
static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
{
int r;
static const zd_addr_t aw_pt_bi_addr[] =
{ CR_ATIM_WND_PERIOD, CR_PRE_TBTT, CR_BCN_INTERVAL };
u32 values[3];
r = zd_ioread32v_locked(chip, values, (const zd_addr_t *)aw_pt_bi_addr,
ARRAY_SIZE(aw_pt_bi_addr));
if (r) {
memset(s, 0, sizeof(*s));
return r;
}
s->atim_wnd_period = values[0];
s->pre_tbtt = values[1];
s->beacon_interval = values[2];
return 0;
}
static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
{
struct zd_ioreq32 reqs[3];
u16 b_interval = s->beacon_interval & 0xffff;
if (b_interval <= 5)
b_interval = 5;
if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval)
s->pre_tbtt = b_interval - 1;
if (s->atim_wnd_period >= s->pre_tbtt)
s->atim_wnd_period = s->pre_tbtt - 1;
reqs[0].addr = CR_ATIM_WND_PERIOD;
reqs[0].value = s->atim_wnd_period;
reqs[1].addr = CR_PRE_TBTT;
reqs[1].value = s->pre_tbtt;
reqs[2].addr = CR_BCN_INTERVAL;
reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
}
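/*
 * Illustrative note (not from the original driver): the clamping above
 * keeps atim_wnd_period < pre_tbtt < beacon interval, with the beacon
 * interval never going below 5.  For example, a stored pre_tbtt of 200
 * combined with a beacon interval of 100 is rewritten as pre_tbtt = 99.
 */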
static int set_beacon_interval(struct zd_chip *chip, u16 interval,
u8 dtim_period, int type)
{
int r;
struct aw_pt_bi s;
u32 b_interval, mode_flag;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
if (interval > 0) {
switch (type) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
mode_flag = BCN_MODE_IBSS;
break;
case NL80211_IFTYPE_AP:
mode_flag = BCN_MODE_AP;
break;
default:
mode_flag = 0;
break;
}
} else {
dtim_period = 0;
mode_flag = 0;
}
b_interval = mode_flag | (dtim_period << 16) | interval;
r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL);
if (r)
return r;
r = get_aw_pt_bi(chip, &s);
if (r)
return r;
return set_aw_pt_bi(chip, &s);
}
int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
int type)
{
int r;
mutex_lock(&chip->mutex);
r = set_beacon_interval(chip, interval, dtim_period, type);
mutex_unlock(&chip->mutex);
return r;
}
static int hw_init(struct zd_chip *chip)
{
int r;
dev_dbg_f(zd_chip_dev(chip), "\n");
ZD_ASSERT(mutex_is_locked(&chip->mutex));
r = hw_reset_phy(chip);
if (r)
return r;
r = hw_init_hmac(chip);
if (r)
return r;
return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED);
}
static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
{
return (zd_addr_t)((u16)chip->fw_regs_base + offset);
}
#ifdef DEBUG
static int dump_cr(struct zd_chip *chip, const zd_addr_t addr,
const char *addr_string)
{
int r;
u32 value;
r = zd_ioread32_locked(chip, &value, addr);
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error reading %s. Error number %d\n", addr_string, r);
return r;
}
dev_dbg_f(zd_chip_dev(chip), "%s %#010x\n",
addr_string, (unsigned int)value);
return 0;
}
static int test_init(struct zd_chip *chip)
{
int r;
r = dump_cr(chip, CR_AFTER_PNP, "CR_AFTER_PNP");
if (r)
return r;
r = dump_cr(chip, CR_GPI_EN, "CR_GPI_EN");
if (r)
return r;
return dump_cr(chip, CR_INTERRUPT, "CR_INTERRUPT");
}
static void dump_fw_registers(struct zd_chip *chip)
{
const zd_addr_t addr[4] = {
fw_reg_addr(chip, FW_REG_FIRMWARE_VER),
fw_reg_addr(chip, FW_REG_USB_SPEED),
fw_reg_addr(chip, FW_REG_FIX_TX_RATE),
fw_reg_addr(chip, FW_REG_LED_LINK_STATUS),
};
int r;
u16 values[4];
r = zd_ioread16v_locked(chip, values, (const zd_addr_t*)addr,
ARRAY_SIZE(addr));
if (r) {
dev_dbg_f(zd_chip_dev(chip), "error %d zd_ioread16v_locked\n",
r);
return;
}
dev_dbg_f(zd_chip_dev(chip), "FW_FIRMWARE_VER %#06hx\n", values[0]);
dev_dbg_f(zd_chip_dev(chip), "FW_USB_SPEED %#06hx\n", values[1]);
dev_dbg_f(zd_chip_dev(chip), "FW_FIX_TX_RATE %#06hx\n", values[2]);
dev_dbg_f(zd_chip_dev(chip), "FW_LINK_STATUS %#06hx\n", values[3]);
}
#endif /* DEBUG */
static int print_fw_version(struct zd_chip *chip)
{
struct wiphy *wiphy = zd_chip_to_mac(chip)->hw->wiphy;
int r;
u16 version;
r = zd_ioread16_locked(chip, &version,
fw_reg_addr(chip, FW_REG_FIRMWARE_VER));
if (r)
return r;
dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version);
snprintf(wiphy->fw_version, sizeof(wiphy->fw_version),
"%04hx", version);
return 0;
}
static int set_mandatory_rates(struct zd_chip *chip, int gmode)
{
u32 rates;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
/* This sets the mandatory rates, which depend only on the standard
 * that the device supports. Until further notice we should try
* to support 802.11g also for full speed USB.
*/
if (!gmode)
rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M;
else
rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M|
CR_RATE_6M|CR_RATE_12M|CR_RATE_24M;
return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL);
}
int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip,
int preamble)
{
u32 value = 0;
dev_dbg_f(zd_chip_dev(chip), "preamble=%x\n", preamble);
value |= preamble << RTSCTS_SH_RTS_PMB_TYPE;
value |= preamble << RTSCTS_SH_CTS_PMB_TYPE;
/* We always send 11M RTS/self-CTS messages, like the vendor driver. */
value |= ZD_PURE_RATE(ZD_CCK_RATE_11M) << RTSCTS_SH_RTS_RATE;
value |= ZD_RX_CCK << RTSCTS_SH_RTS_MOD_TYPE;
value |= ZD_PURE_RATE(ZD_CCK_RATE_11M) << RTSCTS_SH_CTS_RATE;
value |= ZD_RX_CCK << RTSCTS_SH_CTS_MOD_TYPE;
return zd_iowrite32_locked(chip, value, CR_RTS_CTS_RATE);
}
int zd_chip_enable_hwint(struct zd_chip *chip)
{
int r;
mutex_lock(&chip->mutex);
r = zd_iowrite32_locked(chip, HWINT_ENABLED, CR_INTERRUPT);
mutex_unlock(&chip->mutex);
return r;
}
static int disable_hwint(struct zd_chip *chip)
{
return zd_iowrite32_locked(chip, HWINT_DISABLED, CR_INTERRUPT);
}
int zd_chip_disable_hwint(struct zd_chip *chip)
{
int r;
mutex_lock(&chip->mutex);
r = disable_hwint(chip);
mutex_unlock(&chip->mutex);
return r;
}
static int read_fw_regs_offset(struct zd_chip *chip)
{
int r;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
r = zd_ioread16_locked(chip, (u16*)&chip->fw_regs_base,
FWRAW_REGS_ADDR);
if (r)
return r;
dev_dbg_f(zd_chip_dev(chip), "fw_regs_base: %#06hx\n",
(u16)chip->fw_regs_base);
return 0;
}
/* Read the MAC address using the pre-firmware interface */
int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr)
{
dev_dbg_f(zd_chip_dev(chip), "\n");
return zd_usb_read_fw(&chip->usb, E2P_MAC_ADDR_P1, addr,
ETH_ALEN);
}
int zd_chip_init_hw(struct zd_chip *chip)
{
int r;
u8 rf_type;
dev_dbg_f(zd_chip_dev(chip), "\n");
mutex_lock(&chip->mutex);
#ifdef DEBUG
r = test_init(chip);
if (r)
goto out;
#endif
r = zd_iowrite32_locked(chip, 1, CR_AFTER_PNP);
if (r)
goto out;
r = read_fw_regs_offset(chip);
if (r)
goto out;
/* GPI is always disabled, as it is in the other driver.
*/
r = zd_iowrite32_locked(chip, 0, CR_GPI_EN);
if (r)
goto out;
r = zd_iowrite32_locked(chip, CWIN_SIZE, CR_CWMIN_CWMAX);
if (r)
goto out;
/* Currently we support IEEE 802.11g for full and high speed USB.
 * It is open to discussion whether we should support pure 802.11b
 * mode for full speed USB.
*/
r = set_mandatory_rates(chip, 1);
if (r)
goto out;
/* Disabling interrupts is certainly a smart thing here.
*/
r = disable_hwint(chip);
if (r)
goto out;
r = read_pod(chip, &rf_type);
if (r)
goto out;
r = hw_init(chip);
if (r)
goto out;
r = zd_rf_init_hw(&chip->rf, rf_type);
if (r)
goto out;
r = print_fw_version(chip);
if (r)
goto out;
#ifdef DEBUG
dump_fw_registers(chip);
r = test_init(chip);
if (r)
goto out;
#endif /* DEBUG */
r = read_cal_int_tables(chip);
if (r)
goto out;
print_id(chip);
out:
mutex_unlock(&chip->mutex);
return r;
}
static int update_pwr_int(struct zd_chip *chip, u8 channel)
{
u8 value = chip->pwr_int_values[channel - 1];
return zd_iowrite16_locked(chip, value, ZD_CR31);
}
static int update_pwr_cal(struct zd_chip *chip, u8 channel)
{
u8 value = chip->pwr_cal_values[channel-1];
return zd_iowrite16_locked(chip, value, ZD_CR68);
}
static int update_ofdm_cal(struct zd_chip *chip, u8 channel)
{
struct zd_ioreq16 ioreqs[3];
ioreqs[0].addr = ZD_CR67;
ioreqs[0].value = chip->ofdm_cal_values[OFDM_36M_INDEX][channel-1];
ioreqs[1].addr = ZD_CR66;
ioreqs[1].value = chip->ofdm_cal_values[OFDM_48M_INDEX][channel-1];
ioreqs[2].addr = ZD_CR65;
ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1];
return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
static int update_channel_integration_and_calibration(struct zd_chip *chip,
u8 channel)
{
int r;
if (!zd_rf_should_update_pwr_int(&chip->rf))
return 0;
r = update_pwr_int(chip, channel);
if (r)
return r;
if (zd_chip_is_zd1211b(chip)) {
static const struct zd_ioreq16 ioreqs[] = {
{ ZD_CR69, 0x28 },
{},
{ ZD_CR69, 0x2a },
};
r = update_ofdm_cal(chip, channel);
if (r)
return r;
r = update_pwr_cal(chip, channel);
if (r)
return r;
r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
if (r)
return r;
}
return 0;
}
/* The CCK baseband gain can be optionally patched by the EEPROM */
static int patch_cck_gain(struct zd_chip *chip)
{
int r;
u32 value;
if (!chip->patch_cck_gain || !zd_rf_should_patch_cck_gain(&chip->rf))
return 0;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
r = zd_ioread32_locked(chip, &value, E2P_PHY_REG);
if (r)
return r;
dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value & 0xff);
return zd_iowrite16_locked(chip, value & 0xff, ZD_CR47);
}
int zd_chip_set_channel(struct zd_chip *chip, u8 channel)
{
int r, t;
mutex_lock(&chip->mutex);
r = zd_chip_lock_phy_regs(chip);
if (r)
goto out;
r = zd_rf_set_channel(&chip->rf, channel);
if (r)
goto unlock;
r = update_channel_integration_and_calibration(chip, channel);
if (r)
goto unlock;
r = patch_cck_gain(chip);
if (r)
goto unlock;
r = patch_6m_band_edge(chip, channel);
if (r)
goto unlock;
r = zd_iowrite32_locked(chip, 0, CR_CONFIG_PHILIPS);
unlock:
t = zd_chip_unlock_phy_regs(chip);
if (t && !r)
r = t;
out:
mutex_unlock(&chip->mutex);
return r;
}
u8 zd_chip_get_channel(struct zd_chip *chip)
{
u8 channel;
mutex_lock(&chip->mutex);
channel = chip->rf.channel;
mutex_unlock(&chip->mutex);
return channel;
}
int zd_chip_control_leds(struct zd_chip *chip, enum led_status status)
{
const zd_addr_t a[] = {
fw_reg_addr(chip, FW_REG_LED_LINK_STATUS),
CR_LED,
};
int r;
u16 v[ARRAY_SIZE(a)];
struct zd_ioreq16 ioreqs[ARRAY_SIZE(a)] = {
[0] = { fw_reg_addr(chip, FW_REG_LED_LINK_STATUS) },
[1] = { CR_LED },
};
u16 other_led;
mutex_lock(&chip->mutex);
r = zd_ioread16v_locked(chip, v, (const zd_addr_t *)a, ARRAY_SIZE(a));
if (r)
goto out;
other_led = chip->link_led == LED1 ? LED2 : LED1;
switch (status) {
case ZD_LED_OFF:
ioreqs[0].value = FW_LINK_OFF;
ioreqs[1].value = v[1] & ~(LED1|LED2);
break;
case ZD_LED_SCANNING:
ioreqs[0].value = FW_LINK_OFF;
ioreqs[1].value = v[1] & ~other_led;
if (get_seconds() % 3 == 0) {
ioreqs[1].value &= ~chip->link_led;
} else {
ioreqs[1].value |= chip->link_led;
}
break;
case ZD_LED_ASSOCIATED:
ioreqs[0].value = FW_LINK_TX;
ioreqs[1].value = v[1] & ~other_led;
ioreqs[1].value |= chip->link_led;
break;
default:
r = -EINVAL;
goto out;
}
if (v[0] != ioreqs[0].value || v[1] != ioreqs[1].value) {
r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
if (r)
goto out;
}
r = 0;
out:
mutex_unlock(&chip->mutex);
return r;
}
int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
{
int r;
if (cr_rates & ~(CR_RATES_80211B|CR_RATES_80211G))
return -EINVAL;
mutex_lock(&chip->mutex);
r = zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL);
mutex_unlock(&chip->mutex);
return r;
}
static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
{
return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame);
}
/**
* zd_rx_rate - report zd-rate
 * @rx_frame: received frame
 * @status: rx_status as given by the device
 *
 * This function converts the rate as encoded in the received packet to the
 * zd-rate that we use in other places in the driver.
*/
u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status)
{
u8 zd_rate;
if (status->frame_status & ZD_RX_OFDM) {
zd_rate = zd_rate_from_ofdm_plcp_header(rx_frame);
} else {
switch (zd_cck_plcp_header_signal(rx_frame)) {
case ZD_CCK_PLCP_SIGNAL_1M:
zd_rate = ZD_CCK_RATE_1M;
break;
case ZD_CCK_PLCP_SIGNAL_2M:
zd_rate = ZD_CCK_RATE_2M;
break;
case ZD_CCK_PLCP_SIGNAL_5M5:
zd_rate = ZD_CCK_RATE_5_5M;
break;
case ZD_CCK_PLCP_SIGNAL_11M:
zd_rate = ZD_CCK_RATE_11M;
break;
default:
zd_rate = 0;
}
}
return zd_rate;
}
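/*
 * Worked example (illustrative, not from the original driver): the CCK
 * PLCP SIGNAL field encodes the rate in units of 100 kbit/s, so a SIGNAL
 * value of 0x37 (55 decimal, i.e. 5.5 Mbit/s) is reported as
 * ZD_CCK_RATE_5_5M, while an OFDM frame is reported as ZD_OFDM or'ed with
 * the rate bits taken straight from its PLCP header.
 */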
int zd_chip_switch_radio_on(struct zd_chip *chip)
{
int r;
mutex_lock(&chip->mutex);
r = zd_switch_radio_on(&chip->rf);
mutex_unlock(&chip->mutex);
return r;
}
int zd_chip_switch_radio_off(struct zd_chip *chip)
{
int r;
mutex_lock(&chip->mutex);
r = zd_switch_radio_off(&chip->rf);
mutex_unlock(&chip->mutex);
return r;
}
int zd_chip_enable_int(struct zd_chip *chip)
{
int r;
mutex_lock(&chip->mutex);
r = zd_usb_enable_int(&chip->usb);
mutex_unlock(&chip->mutex);
return r;
}
void zd_chip_disable_int(struct zd_chip *chip)
{
mutex_lock(&chip->mutex);
zd_usb_disable_int(&chip->usb);
mutex_unlock(&chip->mutex);
/* cancel pending interrupt work */
cancel_work_sync(&zd_chip_to_mac(chip)->process_intr);
}
int zd_chip_enable_rxtx(struct zd_chip *chip)
{
int r;
mutex_lock(&chip->mutex);
zd_usb_enable_tx(&chip->usb);
r = zd_usb_enable_rx(&chip->usb);
zd_tx_watchdog_enable(&chip->usb);
mutex_unlock(&chip->mutex);
return r;
}
void zd_chip_disable_rxtx(struct zd_chip *chip)
{
mutex_lock(&chip->mutex);
zd_tx_watchdog_disable(&chip->usb);
zd_usb_disable_rx(&chip->usb);
zd_usb_disable_tx(&chip->usb);
mutex_unlock(&chip->mutex);
}
int zd_rfwritev_locked(struct zd_chip *chip,
const u32* values, unsigned int count, u8 bits)
{
int r;
unsigned int i;
for (i = 0; i < count; i++) {
r = zd_rfwrite_locked(chip, values[i], bits);
if (r)
return r;
}
return 0;
}
/*
* We can optionally program the RF directly through CR regs, if supported by
* the hardware. This is much faster than the older method.
*/
int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value)
{
const struct zd_ioreq16 ioreqs[] = {
{ ZD_CR244, (value >> 16) & 0xff },
{ ZD_CR243, (value >> 8) & 0xff },
{ ZD_CR242, value & 0xff },
};
ZD_ASSERT(mutex_is_locked(&chip->mutex));
return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
int zd_rfwritev_cr_locked(struct zd_chip *chip,
const u32 *values, unsigned int count)
{
int r;
unsigned int i;
for (i = 0; i < count; i++) {
r = zd_rfwrite_cr_locked(chip, values[i]);
if (r)
return r;
}
return 0;
}
int zd_chip_set_multicast_hash(struct zd_chip *chip,
struct zd_mc_hash *hash)
{
const struct zd_ioreq32 ioreqs[] = {
{ CR_GROUP_HASH_P1, hash->low },
{ CR_GROUP_HASH_P2, hash->high },
};
return zd_iowrite32a(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
u64 zd_chip_get_tsf(struct zd_chip *chip)
{
int r;
static const zd_addr_t aw_pt_bi_addr[] =
{ CR_TSF_LOW_PART, CR_TSF_HIGH_PART };
u32 values[2];
u64 tsf;
mutex_lock(&chip->mutex);
r = zd_ioread32v_locked(chip, values, (const zd_addr_t *)aw_pt_bi_addr,
ARRAY_SIZE(aw_pt_bi_addr));
mutex_unlock(&chip->mutex);
if (r)
return 0;
tsf = values[1];
tsf = (tsf << 32) | values[0];
return tsf;
}
| n-aizu/linux-linaro-stable-mx6 | drivers/net/wireless/zd1211rw/zd_chip.c | C | gpl-2.0 | 40,175 |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\Process;
use Symfony\Component\Process\Exception\InvalidArgumentException;
/**
* ProcessUtils is a bunch of utility methods.
*
* This class contains static methods only and is not meant to be instantiated.
*
* @author Martin Hasoň <martin.hason@gmail.com>
*/
class ProcessUtils
{
/**
* This class should not be instantiated.
*/
private function __construct()
{
}
/**
* Escapes a string to be used as a shell argument.
*
* @param string $argument The argument that will be escaped
*
* @return string The escaped argument
*/
public static function escapeArgument($argument)
{
//Fix for PHP bug #43784 escapeshellarg removes % from given string
//Fix for PHP bug #49446 escapeshellarg doesn't work on Windows
//@see https://bugs.php.net/bug.php?id=43784
//@see https://bugs.php.net/bug.php?id=49446
if ('\\' === DIRECTORY_SEPARATOR) {
if ('' === $argument) {
return escapeshellarg($argument);
}
$escapedArgument = '';
$quote = false;
foreach (preg_split('/(")/', $argument, -1, PREG_SPLIT_NO_EMPTY | PREG_SPLIT_DELIM_CAPTURE) as $part) {
if ('"' === $part) {
$escapedArgument .= '\\"';
} elseif (self::isSurroundedBy($part, '%')) {
// Avoid environment variable expansion
$escapedArgument .= '^%"'.substr($part, 1, -1).'"^%';
} else {
// escape trailing backslash
if ('\\' === substr($part, -1)) {
$part .= '\\';
}
$quote = true;
$escapedArgument .= $part;
}
}
if ($quote) {
$escapedArgument = '"'.$escapedArgument.'"';
}
return $escapedArgument;
}
return escapeshellarg($argument);
}
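    /*
     * Usage sketch (illustrative only, not part of the original class):
     *
     *     $arg = ProcessUtils::escapeArgument('%PATH%');
     *     // on Windows:  ^%"PATH"^%  (avoids cmd.exe variable expansion)
     *     // elsewhere:   '%PATH%'    (plain escapeshellarg() result)
     */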
/**
* Validates and normalizes a Process input.
*
 * @param string $caller The name of the calling method (used in the exception message)
* @param mixed $input The input to validate
*
* @return string The validated input
*
* @throws InvalidArgumentException In case the input is not valid
*
* Passing an object as an input is deprecated since version 2.5 and will be removed in 3.0.
*/
public static function validateInput($caller, $input)
{
if (null !== $input) {
if (is_resource($input)) {
return $input;
}
if (is_scalar($input)) {
return (string) $input;
}
// deprecated as of Symfony 2.5, to be removed in 3.0
if (is_object($input) && method_exists($input, '__toString')) {
@trigger_error('Passing an object as an input is deprecated since version 2.5 and will be removed in 3.0.', E_USER_DEPRECATED);
return (string) $input;
}
throw new InvalidArgumentException(sprintf('%s only accepts strings or stream resources.', $caller));
}
return $input;
}
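    /*
     * Illustrative sketch (not part of the original class): resources are
     * returned untouched, scalars are cast to string, and anything else
     * (apart from the deprecated __toString() objects) throws:
     *
     *     ProcessUtils::validateInput(__METHOD__, 42);      // "42"
     *     ProcessUtils::validateInput(__METHOD__, array()); // InvalidArgumentException
     */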
private static function isSurroundedBy($arg, $char)
{
return 2 < strlen($arg) && $char === $arg[0] && $char === $arg[strlen($arg) - 1];
}
}
| issaint/D8 | vendor/symfony/process/ProcessUtils.php | PHP | gpl-2.0 | 3,637 |
/*
* This file is part of wl12xx
*
* Copyright (C) 2012 Texas Instruments. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/ieee80211.h>
#include "scan.h"
#include "../wlcore/debug.h"
#include "../wlcore/tx.h"
static int wl1271_get_scan_channels(struct wl1271 *wl,
struct cfg80211_scan_request *req,
struct basic_scan_channel_params *channels,
enum ieee80211_band band, bool passive)
{
struct conf_scan_settings *c = &wl->conf.scan;
int i, j;
u32 flags;
for (i = 0, j = 0;
i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
i++) {
flags = req->channels[i]->flags;
if (!test_bit(i, wl->scan.scanned_ch) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
(req->channels[i]->band == band) &&
/*
* In passive scans, we scan all remaining
* channels, even if not marked as such.
* In active scans, we only scan channels not
* marked as passive.
*/
(passive || !(flags & IEEE80211_CHAN_NO_IR))) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
req->channels[i]->band,
req->channels[i]->center_freq);
wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
req->channels[i]->hw_value,
req->channels[i]->flags);
wl1271_debug(DEBUG_SCAN,
"max_antenna_gain %d, max_power %d",
req->channels[i]->max_antenna_gain,
req->channels[i]->max_power);
wl1271_debug(DEBUG_SCAN, "beacon_found %d",
req->channels[i]->beacon_found);
if (!passive) {
channels[j].min_duration =
cpu_to_le32(c->min_dwell_time_active);
channels[j].max_duration =
cpu_to_le32(c->max_dwell_time_active);
} else {
channels[j].min_duration =
cpu_to_le32(c->dwell_time_passive);
channels[j].max_duration =
cpu_to_le32(c->dwell_time_passive);
}
channels[j].early_termination = 0;
channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
memset(&channels[j].bssid_lsb, 0xff, 4);
memset(&channels[j].bssid_msb, 0xff, 2);
/* Mark the channels we already used */
set_bit(i, wl->scan.scanned_ch);
j++;
}
}
return j;
}
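/*
 * Illustrative note (not from the original driver): only channels that are
 * actually programmed get marked in wl->scan.scanned_ch, so a channel
 * flagged IEEE80211_CHAN_NO_IR is skipped by the active pass and picked up
 * again by the later passive pass over the same band.
 */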
#define WL1271_NOTHING_TO_SCAN 1
static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum ieee80211_band band,
bool passive, u32 basic_rate)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl1271_cmd_scan *cmd;
struct wl1271_cmd_trigger_scan_to *trigger;
int ret;
u16 scan_options = 0;
/* skip active scans if we don't have SSIDs */
if (!passive && wl->scan.req->n_ssids == 0)
return WL1271_NOTHING_TO_SCAN;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
if (!cmd || !trigger) {
ret = -ENOMEM;
goto out;
}
if (wl->conf.scan.split_scan_timeout)
scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
cmd->params.role_id = wlvif->role_id;
if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
cmd->params.scan_options = cpu_to_le16(scan_options);
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
cmd->channels,
band, passive);
if (cmd->params.n_ch == 0) {
ret = WL1271_NOTHING_TO_SCAN;
goto out;
}
cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
if (band == IEEE80211_BAND_2GHZ)
cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
else
cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
if (wl->scan.ssid_len && wl->scan.ssid) {
cmd->params.ssid_len = wl->scan.ssid_len;
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
memcpy(cmd->addr, vif->addr, ETH_ALEN);
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->params.role_id, band,
wl->scan.ssid, wl->scan.ssid_len,
wl->scan.req->ie,
wl->scan.req->ie_len, NULL, 0, false);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
}
trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
sizeof(*trigger), 0);
if (ret < 0) {
wl1271_error("trigger scan to failed for hw scan");
goto out;
}
wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("SCAN failed");
goto out;
}
out:
kfree(cmd);
kfree(trigger);
return ret;
}
int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_header *cmd = NULL;
int ret = 0;
if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
return -EINVAL;
wl1271_debug(DEBUG_CMD, "cmd scan stop");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("cmd stop_scan failed");
goto out;
}
out:
kfree(cmd);
return ret;
}
void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
enum ieee80211_band band;
u32 rate, mask;
switch (wl->scan.state) {
case WL1271_SCAN_STATE_IDLE:
break;
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
band = IEEE80211_BAND_2GHZ;
mask = wlvif->bitrate_masks[band];
if (wl->scan.req->no_cck) {
mask &= ~CONF_TX_CCK_RATES;
if (!mask)
mask = CONF_TX_RATE_MASK_BASIC_P2P;
}
rate = wl1271_tx_min_rate_get(wl, mask);
ret = wl1271_scan_send(wl, wlvif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
wl1271_scan_stm(wl, wlvif);
}
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
band = IEEE80211_BAND_2GHZ;
mask = wlvif->bitrate_masks[band];
if (wl->scan.req->no_cck) {
mask &= ~CONF_TX_CCK_RATES;
if (!mask)
mask = CONF_TX_RATE_MASK_BASIC_P2P;
}
rate = wl1271_tx_min_rate_get(wl, mask);
ret = wl1271_scan_send(wl, wlvif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
if (wl->enable_11a)
wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
else
wl->scan.state = WL1271_SCAN_STATE_DONE;
wl1271_scan_stm(wl, wlvif);
}
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
band = IEEE80211_BAND_5GHZ;
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
ret = wl1271_scan_send(wl, wlvif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
wl1271_scan_stm(wl, wlvif);
}
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
band = IEEE80211_BAND_5GHZ;
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
ret = wl1271_scan_send(wl, wlvif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_DONE;
wl1271_scan_stm(wl, wlvif);
}
break;
case WL1271_SCAN_STATE_DONE:
wl->scan.failed = false;
cancel_delayed_work(&wl->scan_complete_work);
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(0));
break;
default:
wl1271_error("invalid scan state");
break;
}
if (ret < 0) {
cancel_delayed_work(&wl->scan_complete_work);
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(0));
}
}
static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
struct wlcore_scan_channels *cmd_channels)
{
memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
cmd->dfs = cmd_channels->dfs;
cmd->n_pactive_ch = cmd_channels->passive_active;
memcpy(cmd->channels_2, cmd_channels->channels_2,
sizeof(cmd->channels_2));
memcpy(cmd->channels_5, cmd_channels->channels_5,
sizeof(cmd->channels_5));
/* channels_4 are not supported, so no need to copy them */
}
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
struct wl1271_cmd_sched_scan_config *cfg = NULL;
struct wlcore_scan_channels *cfg_channels = NULL;
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, ret;
bool force_passive = !req->n_ssids;
wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
cfg->role_id = wlvif->role_id;
cfg->rssi_threshold = c->rssi_threshold;
cfg->snr_threshold = c->snr_threshold;
cfg->n_probe_reqs = c->num_probe_reqs;
/* cycles set to 0 means infinite (until manually stopped) */
cfg->cycles = 0;
/* report APs when at least 1 is found */
cfg->report_after = 1;
/* don't stop scanning automatically when something is found */
cfg->terminate = 0;
cfg->tag = WL1271_SCAN_DEFAULT_TAG;
/* don't filter on BSS type */
cfg->bss_type = SCAN_BSS_TYPE_ANY;
/* currently NL80211 supports only a single interval */
for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
cfg->intervals[i] = cpu_to_le32(req->interval);
cfg->ssid_len = 0;
ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
if (ret < 0)
goto out;
cfg->filter_type = ret;
wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
cfg_channels = kzalloc(sizeof(*cfg_channels), GFP_KERNEL);
if (!cfg_channels) {
ret = -ENOMEM;
goto out;
}
if (!wlcore_set_scan_chan_params(wl, cfg_channels, req->channels,
req->n_channels, req->n_ssids,
SCAN_TYPE_PERIODIC)) {
wl1271_error("scan channel list is empty");
ret = -EINVAL;
goto out;
}
wl12xx_adjust_channels(cfg, cfg_channels);
if (!force_passive && cfg->active[0]) {
u8 band = IEEE80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
wlvif->role_id, band,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ies[band],
ies->len[band],
ies->common_ies,
ies->common_ie_len,
true);
if (ret < 0) {
wl1271_error("2.4GHz PROBE request template failed");
goto out;
}
}
if (!force_passive && cfg->active[1]) {
u8 band = IEEE80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
wlvif->role_id, band,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ies[band],
ies->len[band],
ies->common_ies,
ies->common_ie_len,
true);
if (ret < 0) {
wl1271_error("5GHz PROBE request template failed");
goto out;
}
}
wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
sizeof(*cfg), 0);
if (ret < 0) {
wl1271_error("SCAN configuration failed");
goto out;
}
out:
kfree(cfg_channels);
kfree(cfg);
return ret;
}
int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_start *start;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EOPNOTSUPP;
if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return -EBUSY;
start = kzalloc(sizeof(*start), GFP_KERNEL);
if (!start)
return -ENOMEM;
start->role_id = wlvif->role_id;
start->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
sizeof(*start), 0);
if (ret < 0) {
wl1271_error("failed to send scan start command");
goto out_free;
}
out_free:
kfree(start);
return ret;
}
int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
int ret;
ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
if (ret < 0)
return ret;
return wl1271_scan_sched_scan_start(wl, wlvif);
}
void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_stop *stop;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
/* FIXME: what to do if alloc'ing to stop fails? */
stop = kzalloc(sizeof(*stop), GFP_KERNEL);
if (!stop) {
wl1271_error("failed to alloc memory to send sched scan stop");
return;
}
stop->role_id = wlvif->role_id;
stop->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
sizeof(*stop), 0);
if (ret < 0) {
wl1271_error("failed to send sched scan stop command");
goto out_free;
}
out_free:
kfree(stop);
}
int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct cfg80211_scan_request *req)
{
wl1271_scan_stm(wl, wlvif);
return 0;
}
void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
wl1271_scan_stm(wl, wlvif);
}
| seem-sky/linux | drivers/net/wireless/ti/wl12xx/scan.c | C | gpl-2.0 | 13,424 |
/*
* drivers/ata/pata_mpc52xx.c
*
* libata driver for the Freescale MPC52xx on-chip IDE interface
*
* Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt
*
* UDMA support based on patches by Freescale (Bernard Kuhn, John Rigby),
* Domen Puncer and Tim Yamin.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/libata.h>
#include <linux/of_platform.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>
#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/bestcomm_priv.h>
#include <linux/fsl/bestcomm/ata.h>
#define DRV_NAME "mpc52xx_ata"
/* Private structures used by the driver */
struct mpc52xx_ata_timings {
u32 pio1;
u32 pio2;
u32 mdma1;
u32 mdma2;
u32 udma1;
u32 udma2;
u32 udma3;
u32 udma4;
u32 udma5;
int using_udma;
};
struct mpc52xx_ata_priv {
unsigned int ipb_period;
struct mpc52xx_ata __iomem *ata_regs;
phys_addr_t ata_regs_pa;
int ata_irq;
struct mpc52xx_ata_timings timings[2];
int csel;
/* DMA */
struct bcom_task *dmatsk;
const struct udmaspec *udmaspec;
const struct mdmaspec *mdmaspec;
int mpc52xx_ata_dma_last_write;
int waiting_for_dma;
};
/* ATAPI-4 PIO specs (in ns) */
static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120};
static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25};
static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70};
static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70};
static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25};
static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10};
static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35};
#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
/* ======================================================================== */
/* ATAPI-4 MDMA specs (in clocks) */
struct mdmaspec {
u8 t0M;
u8 td;
u8 th;
u8 tj;
u8 tkw;
u8 tm;
u8 tn;
};
static const struct mdmaspec mdmaspec66[3] = {
{ .t0M = 32, .td = 15, .th = 2, .tj = 2, .tkw = 15, .tm = 4, .tn = 1 },
{ .t0M = 10, .td = 6, .th = 1, .tj = 1, .tkw = 4, .tm = 2, .tn = 1 },
{ .t0M = 8, .td = 5, .th = 1, .tj = 1, .tkw = 2, .tm = 2, .tn = 1 },
};
static const struct mdmaspec mdmaspec132[3] = {
{ .t0M = 64, .td = 29, .th = 3, .tj = 3, .tkw = 29, .tm = 7, .tn = 2 },
{ .t0M = 20, .td = 11, .th = 2, .tj = 1, .tkw = 7, .tm = 4, .tn = 1 },
{ .t0M = 16, .td = 10, .th = 2, .tj = 1, .tkw = 4, .tm = 4, .tn = 1 },
};
/* ATAPI-4 UDMA specs (in clocks) */
struct udmaspec {
u8 tcyc;
u8 t2cyc;
u8 tds;
u8 tdh;
u8 tdvs;
u8 tdvh;
u8 tfs;
u8 tli;
u8 tmli;
u8 taz;
u8 tzah;
u8 tenv;
u8 tsr;
u8 trfs;
u8 trp;
u8 tack;
u8 tss;
};
static const struct udmaspec udmaspec66[6] = {
{ .tcyc = 8, .t2cyc = 16, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1,
.tfs = 16, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 3, .trfs = 5, .trp = 11, .tack = 2, .tss = 4,
},
{ .tcyc = 5, .t2cyc = 11, .tds = 1, .tdh = 1, .tdvs = 4, .tdvh = 1,
.tfs = 14, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 5, .trp = 9, .tack = 2, .tss = 4,
},
{ .tcyc = 4, .t2cyc = 8, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1,
.tfs = 12, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
},
{ .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 2, .tdvh = 1,
.tfs = 9, .tli = 7, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
},
{ .tcyc = 2, .t2cyc = 4, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
.tfs = 8, .tli = 8, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
},
{ .tcyc = 2, .t2cyc = 2, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
.tfs = 6, .tli = 5, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 4, .trp = 6, .tack = 2, .tss = 4,
},
};
static const struct udmaspec udmaspec132[6] = {
{ .tcyc = 15, .t2cyc = 31, .tds = 2, .tdh = 1, .tdvs = 10, .tdvh = 1,
.tfs = 30, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 7, .trfs = 10, .trp = 22, .tack = 3, .tss = 7,
},
{ .tcyc = 10, .t2cyc = 21, .tds = 2, .tdh = 1, .tdvs = 7, .tdvh = 1,
.tfs = 27, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 4, .trfs = 10, .trp = 17, .tack = 3, .tss = 7,
},
{ .tcyc = 6, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1,
.tfs = 23, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7,
},
{ .tcyc = 7, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1,
.tfs = 15, .tli = 13, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7,
},
{ .tcyc = 2, .t2cyc = 5, .tds = 0, .tdh = 0, .tdvs = 1, .tdvh = 1,
.tfs = 16, .tli = 14, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
.tsr = 2, .trfs = 7, .trp = 13, .tack = 2, .tss = 6,
},
{ .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
.tfs = 12, .tli = 10, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
.tsr = 3, .trfs = 7, .trp = 12, .tack = 3, .tss = 7,
},
};
/* ======================================================================== */
/* Bit definitions inside the registers */
#define MPC52xx_ATA_HOSTCONF_SMR 0x80000000UL /* State machine reset */
#define MPC52xx_ATA_HOSTCONF_FR 0x40000000UL /* FIFO Reset */
#define MPC52xx_ATA_HOSTCONF_IE 0x02000000UL /* Enable interrupt in PIO */
#define MPC52xx_ATA_HOSTCONF_IORDY 0x01000000UL /* Drive supports IORDY protocol */
#define MPC52xx_ATA_HOSTSTAT_TIP 0x80000000UL /* Transaction in progress */
#define MPC52xx_ATA_HOSTSTAT_UREP 0x40000000UL /* UDMA Read Extended Pause */
#define MPC52xx_ATA_HOSTSTAT_RERR 0x02000000UL /* Read Error */
#define MPC52xx_ATA_HOSTSTAT_WERR 0x01000000UL /* Write Error */
#define MPC52xx_ATA_FIFOSTAT_EMPTY 0x01 /* FIFO Empty */
#define MPC52xx_ATA_FIFOSTAT_ERROR 0x40 /* FIFO Error */
#define MPC52xx_ATA_DMAMODE_WRITE 0x01 /* Write DMA */
#define MPC52xx_ATA_DMAMODE_READ 0x02 /* Read DMA */
#define MPC52xx_ATA_DMAMODE_UDMA 0x04 /* UDMA enabled */
#define MPC52xx_ATA_DMAMODE_IE 0x08 /* Enable drive interrupt to CPU in DMA mode */
#define MPC52xx_ATA_DMAMODE_FE 0x10 /* FIFO Flush enable in Rx mode */
#define MPC52xx_ATA_DMAMODE_FR 0x20 /* FIFO Reset */
#define MPC52xx_ATA_DMAMODE_HUT 0x40 /* Host UDMA burst terminate */
#define MAX_DMA_BUFFERS 128
#define MAX_DMA_BUFFER_SIZE 0x20000u
/* Structure of the hardware registers */
struct mpc52xx_ata {
/* Host interface registers */
u32 config; /* ATA + 0x00 Host configuration */
u32 host_status; /* ATA + 0x04 Host controller status */
u32 pio1; /* ATA + 0x08 PIO Timing 1 */
u32 pio2; /* ATA + 0x0c PIO Timing 2 */
u32 mdma1; /* ATA + 0x10 MDMA Timing 1 */
u32 mdma2; /* ATA + 0x14 MDMA Timing 2 */
u32 udma1; /* ATA + 0x18 UDMA Timing 1 */
u32 udma2; /* ATA + 0x1c UDMA Timing 2 */
u32 udma3; /* ATA + 0x20 UDMA Timing 3 */
u32 udma4; /* ATA + 0x24 UDMA Timing 4 */
u32 udma5; /* ATA + 0x28 UDMA Timing 5 */
u32 share_cnt; /* ATA + 0x2c ATA share counter */
u32 reserved0[3];
/* FIFO registers */
u32 fifo_data; /* ATA + 0x3c */
u8 fifo_status_frame; /* ATA + 0x40 */
u8 fifo_status; /* ATA + 0x41 */
u16 reserved7[1];
u8 fifo_control; /* ATA + 0x44 */
u8 reserved8[5];
u16 fifo_alarm; /* ATA + 0x4a */
u16 reserved9;
u16 fifo_rdp; /* ATA + 0x4e */
u16 reserved10;
u16 fifo_wrp; /* ATA + 0x52 */
u16 reserved11;
u16 fifo_lfrdp; /* ATA + 0x56 */
u16 reserved12;
u16 fifo_lfwrp; /* ATA + 0x5a */
/* Drive TaskFile registers */
u8 tf_control; /* ATA + 0x5c TASKFILE Control/Alt Status */
u8 reserved13[3];
u16 tf_data; /* ATA + 0x60 TASKFILE Data */
u16 reserved14;
u8 tf_features; /* ATA + 0x64 TASKFILE Features/Error */
u8 reserved15[3];
u8 tf_sec_count; /* ATA + 0x68 TASKFILE Sector Count */
u8 reserved16[3];
u8 tf_sec_num; /* ATA + 0x6c TASKFILE Sector Number */
u8 reserved17[3];
u8 tf_cyl_low; /* ATA + 0x70 TASKFILE Cylinder Low */
u8 reserved18[3];
u8 tf_cyl_high; /* ATA + 0x74 TASKFILE Cylinder High */
u8 reserved19[3];
u8 tf_dev_head; /* ATA + 0x78 TASKFILE Device/Head */
u8 reserved20[3];
u8 tf_command; /* ATA + 0x7c TASKFILE Command/Status */
u8 dma_mode; /* ATA + 0x7d ATA Host DMA Mode configuration */
u8 reserved21[2];
};
/* ======================================================================== */
/* Aux fns */
/* ======================================================================== */
/* MPC52xx low level hw control */
static int
mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
{
struct mpc52xx_ata_timings *timing = &priv->timings[dev];
unsigned int ipb_period = priv->ipb_period;
u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
if ((pio < 0) || (pio > 4))
return -EINVAL;
t0 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]);
t1 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t1[pio]);
t2_8 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_8[pio]);
t2_16 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_16[pio]);
t2i = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2i[pio]);
t4 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]);
ta = CALC_CLKCYC(ipb_period, 1000 * ataspec_ta[pio]);
timing->pio1 = (t0 << 24) | (t2_8 << 16) | (t2_16 << 8) | (t2i);
timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8);
return 0;
}
static int
mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
int speed)
{
struct mpc52xx_ata_timings *t = &priv->timings[dev];
const struct mdmaspec *s = &priv->mdmaspec[speed];
if (speed < 0 || speed > 2)
return -EINVAL;
t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
t->using_udma = 0;
return 0;
}
static int
mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
int speed)
{
struct mpc52xx_ata_timings *t = &priv->timings[dev];
const struct udmaspec *s = &priv->udmaspec[speed];
if (speed < 0 || speed > 2)
return -EINVAL;
t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
t->udma5 = (u32)s->tzah << 24;
t->using_udma = 1;
return 0;
}
static void
mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device)
{
struct mpc52xx_ata __iomem *regs = priv->ata_regs;
struct mpc52xx_ata_timings *timing = &priv->timings[device];
out_be32(®s->pio1, timing->pio1);
out_be32(®s->pio2, timing->pio2);
out_be32(®s->mdma1, timing->mdma1);
out_be32(®s->mdma2, timing->mdma2);
out_be32(®s->udma1, timing->udma1);
out_be32(®s->udma2, timing->udma2);
out_be32(®s->udma3, timing->udma3);
out_be32(®s->udma4, timing->udma4);
out_be32(®s->udma5, timing->udma5);
priv->csel = device;
}
static int
mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv)
{
struct mpc52xx_ata __iomem *regs = priv->ata_regs;
int tslot;
	/* Clear share_cnt (all sample code does this ...) */
out_be32(®s->share_cnt, 0);
/* Configure and reset host */
out_be32(®s->config,
MPC52xx_ATA_HOSTCONF_IE |
MPC52xx_ATA_HOSTCONF_IORDY |
MPC52xx_ATA_HOSTCONF_SMR |
MPC52xx_ATA_HOSTCONF_FR);
udelay(10);
out_be32(®s->config,
MPC52xx_ATA_HOSTCONF_IE |
MPC52xx_ATA_HOSTCONF_IORDY);
/* Set the time slot to 1us */
tslot = CALC_CLKCYC(priv->ipb_period, 1000000);
out_be32(®s->share_cnt, tslot << 16);
/* Init timings to PIO0 */
memset(priv->timings, 0x00, 2*sizeof(struct mpc52xx_ata_timings));
mpc52xx_ata_compute_pio_timings(priv, 0, 0);
mpc52xx_ata_compute_pio_timings(priv, 1, 0);
mpc52xx_ata_apply_timings(priv, 0);
return 0;
}
/* ======================================================================== */
/* libata driver */
/* ======================================================================== */
static void
mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct mpc52xx_ata_priv *priv = ap->host->private_data;
int pio, rv;
pio = adev->pio_mode - XFER_PIO_0;
rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio);
if (rv) {
dev_err(ap->dev, "error: invalid PIO mode: %d\n", pio);
return;
}
mpc52xx_ata_apply_timings(priv, adev->devno);
}
static void
mpc52xx_ata_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct mpc52xx_ata_priv *priv = ap->host->private_data;
int rv;
if (adev->dma_mode >= XFER_UDMA_0) {
int dma = adev->dma_mode - XFER_UDMA_0;
rv = mpc52xx_ata_compute_udma_timings(priv, adev->devno, dma);
} else {
int dma = adev->dma_mode - XFER_MW_DMA_0;
rv = mpc52xx_ata_compute_mdma_timings(priv, adev->devno, dma);
}
if (rv) {
dev_alert(ap->dev,
"Trying to select invalid DMA mode %d\n",
adev->dma_mode);
return;
}
mpc52xx_ata_apply_timings(priv, adev->devno);
}
static void
mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
{
struct mpc52xx_ata_priv *priv = ap->host->private_data;
if (device != priv->csel)
mpc52xx_ata_apply_timings(priv, device);
ata_sff_dev_select(ap, device);
}
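/*
 * Build the BestComm buffer descriptor list from the command's
 * scatter-gather list, splitting entries larger than MAX_DMA_BUFFER_SIZE.
 * Returns 1 on success, 0 if the descriptor table would overflow and the
 * caller should fall back to PIO.
 */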
static int
mpc52xx_ata_build_dmatable(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mpc52xx_ata_priv *priv = ap->host->private_data;
struct bcom_ata_bd *bd;
unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE), si;
struct scatterlist *sg;
int count = 0;
if (read)
bcom_ata_rx_prepare(priv->dmatsk);
else
bcom_ata_tx_prepare(priv->dmatsk);
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t cur_addr = sg_dma_address(sg);
u32 cur_len = sg_dma_len(sg);
while (cur_len) {
unsigned int tc = min(cur_len, MAX_DMA_BUFFER_SIZE);
bd = (struct bcom_ata_bd *)
bcom_prepare_next_buffer(priv->dmatsk);
if (read) {
bd->status = tc;
bd->src_pa = (__force u32) priv->ata_regs_pa +
offsetof(struct mpc52xx_ata, fifo_data);
bd->dst_pa = (__force u32) cur_addr;
} else {
bd->status = tc;
bd->src_pa = (__force u32) cur_addr;
bd->dst_pa = (__force u32) priv->ata_regs_pa +
offsetof(struct mpc52xx_ata, fifo_data);
}
bcom_submit_next_buffer(priv->dmatsk, NULL);
cur_addr += tc;
cur_len -= tc;
count++;
if (count > MAX_DMA_BUFFERS) {
dev_alert(ap->dev, "dma table"
"too small\n");
goto use_pio_instead;
}
}
}
return 1;
use_pio_instead:
bcom_ata_reset_bd(priv->dmatsk);
return 0;
}
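/*
 * Program the host's DMA mode register for the queued command and
 * reconfigure the FIFO whenever the transfer direction changes;
 * MPC52xx_ATA_DMAMODE_UDMA is OR'ed in when the current timings were
 * computed for UDMA rather than MWDMA.
 */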
static void
mpc52xx_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mpc52xx_ata_priv *priv = ap->host->private_data;
struct mpc52xx_ata __iomem *regs = priv->ata_regs;
unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE);
u8 dma_mode;
if (!mpc52xx_ata_build_dmatable(qc))
dev_alert(ap->dev, "%s: %i, return 1?\n",
__func__, __LINE__);
/* Check FIFO is OK... */
if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
__func__, in_8(&priv->ata_regs->fifo_status));
if (read) {
dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_READ |
MPC52xx_ATA_DMAMODE_FE;
/* Setup FIFO if direction changed */
if (priv->mpc52xx_ata_dma_last_write != 0) {
priv->mpc52xx_ata_dma_last_write = 0;
/* Configure FIFO with granularity to 7 */
out_8(®s->fifo_control, 7);
out_be16(®s->fifo_alarm, 128);
/* Set FIFO Reset bit (FR) */
out_8(®s->dma_mode, MPC52xx_ATA_DMAMODE_FR);
}
} else {
dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_WRITE;
/* Setup FIFO if direction changed */
if (priv->mpc52xx_ata_dma_last_write != 1) {
priv->mpc52xx_ata_dma_last_write = 1;
/* Configure FIFO with granularity to 4 */
out_8(®s->fifo_control, 4);
out_be16(®s->fifo_alarm, 128);
}
}
if (priv->timings[qc->dev->devno].using_udma)
dma_mode |= MPC52xx_ATA_DMAMODE_UDMA;
out_8(®s->dma_mode, dma_mode);
priv->waiting_for_dma = ATA_DMA_ACTIVE;
ata_wait_idle(ap);
ap->ops->sff_exec_command(ap, &qc->tf);
}
static void
mpc52xx_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mpc52xx_ata_priv *priv = ap->host->private_data;
bcom_set_task_auto_start(priv->dmatsk->tasknum, priv->dmatsk->tasknum);
bcom_enable(priv->dmatsk);
}
static void
mpc52xx_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mpc52xx_ata_priv *priv = ap->host->private_data;
bcom_disable(priv->dmatsk);
bcom_ata_reset_bd(priv->dmatsk);
priv->waiting_for_dma = 0;
/* Check FIFO is OK... */
if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
__func__, in_8(&priv->ata_regs->fifo_status));
}
static u8
mpc52xx_bmdma_status(struct ata_port *ap)
{
struct mpc52xx_ata_priv *priv = ap->host->private_data;
/* Check FIFO is OK... */
if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR) {
dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
__func__, in_8(&priv->ata_regs->fifo_status));
return priv->waiting_for_dma | ATA_DMA_ERR;
}
return priv->waiting_for_dma;
}
static irqreturn_t
mpc52xx_ata_task_irq(int irq, void *vpriv)
{
struct mpc52xx_ata_priv *priv = vpriv;
while (bcom_buffer_done(priv->dmatsk))
bcom_retrieve_buffer(priv->dmatsk, NULL, NULL);
priv->waiting_for_dma |= ATA_DMA_INTR;
return IRQ_HANDLED;
}
static struct scsi_host_template mpc52xx_ata_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations mpc52xx_ata_port_ops = {
.inherits = &ata_bmdma_port_ops,
.sff_dev_select = mpc52xx_ata_dev_select,
.set_piomode = mpc52xx_ata_set_piomode,
.set_dmamode = mpc52xx_ata_set_dmamode,
.bmdma_setup = mpc52xx_bmdma_setup,
.bmdma_start = mpc52xx_bmdma_start,
.bmdma_stop = mpc52xx_bmdma_stop,
.bmdma_status = mpc52xx_bmdma_status,
.qc_prep = ata_noop_qc_prep,
};
static int mpc52xx_ata_init_one(struct device *dev,
struct mpc52xx_ata_priv *priv,
unsigned long raw_ata_regs,
int mwdma_mask, int udma_mask)
{
struct ata_host *host;
struct ata_port *ap;
struct ata_ioports *aio;
host = ata_host_alloc(dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->pio_mask = ATA_PIO4;
ap->mwdma_mask = mwdma_mask;
ap->udma_mask = udma_mask;
ap->ops = &mpc52xx_ata_port_ops;
host->private_data = priv;
aio = &ap->ioaddr;
aio->cmd_addr = NULL; /* Don't have a classic reg block */
aio->altstatus_addr = &priv->ata_regs->tf_control;
aio->ctl_addr = &priv->ata_regs->tf_control;
aio->data_addr = &priv->ata_regs->tf_data;
aio->error_addr = &priv->ata_regs->tf_features;
aio->feature_addr = &priv->ata_regs->tf_features;
aio->nsect_addr = &priv->ata_regs->tf_sec_count;
aio->lbal_addr = &priv->ata_regs->tf_sec_num;
aio->lbam_addr = &priv->ata_regs->tf_cyl_low;
aio->lbah_addr = &priv->ata_regs->tf_cyl_high;
aio->device_addr = &priv->ata_regs->tf_dev_head;
aio->status_addr = &priv->ata_regs->tf_command;
aio->command_addr = &priv->ata_regs->tf_command;
ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
/* activate host */
return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0,
&mpc52xx_ata_sht);
}
/* ======================================================================== */
/* OF Platform driver */
/* ======================================================================== */
static int mpc52xx_ata_probe(struct platform_device *op)
{
unsigned int ipb_freq;
struct resource res_mem;
int ata_irq = 0;
struct mpc52xx_ata __iomem *ata_regs;
struct mpc52xx_ata_priv *priv = NULL;
int rv, task_irq;
int mwdma_mask = 0, udma_mask = 0;
const __be32 *prop;
int proplen;
struct bcom_task *dmatsk;
/* Get ipb frequency */
ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
}
/* Get device base address from device tree, request the region
* and ioremap it. */
rv = of_address_to_resource(op->dev.of_node, 0, &res_mem);
if (rv) {
dev_err(&op->dev, "could not determine device base address\n");
return rv;
}
if (!devm_request_mem_region(&op->dev, res_mem.start,
sizeof(*ata_regs), DRV_NAME)) {
dev_err(&op->dev, "error requesting register region\n");
return -EBUSY;
}
ata_regs = devm_ioremap(&op->dev, res_mem.start, sizeof(*ata_regs));
if (!ata_regs) {
dev_err(&op->dev, "error mapping device registers\n");
return -ENOMEM;
}
/*
* By default, all DMA modes are disabled for the MPC5200. Some
* boards don't have the required signals routed to make DMA work.
* Also, the MPC5200B has a silicon bug that causes data corruption
* with UDMA if it is used at the same time as the LocalPlus bus.
*
* Instead of trying to guess what modes are usable, check the
* ATA device tree node to find out what DMA modes work on the board.
* UDMA/MWDMA modes can also be forced by adding "libata.force=<mode>"
* to the kernel boot parameters.
*
* The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
* UDMA modes 0, 1 and 2.
*/
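	/*
	 * Illustrative only (not taken from a real board file): a DTS node
	 * that wants MWDMA2 and UDMA2 would carry something like
	 *	mwdma-mode = <2>;
	 *	udma-mode = <2>;
	 * and the masks computed below then enable modes 0 through the
	 * given value, capped at mode 2.
	 */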
prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen);
if ((prop) && (proplen >= 4))
mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1);
prop = of_get_property(op->dev.of_node, "udma-mode", &proplen);
if ((prop) && (proplen >= 4))
udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1);
ata_irq = irq_of_parse_and_map(op->dev.of_node, 0);
if (ata_irq == NO_IRQ) {
dev_err(&op->dev, "error mapping irq\n");
return -EINVAL;
}
/* Prepare our private structure */
priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_ATOMIC);
if (!priv) {
dev_err(&op->dev, "error allocating private structure\n");
rv = -ENOMEM;
goto err1;
}
priv->ipb_period = 1000000000 / (ipb_freq / 1000);
priv->ata_regs = ata_regs;
priv->ata_regs_pa = res_mem.start;
priv->ata_irq = ata_irq;
priv->csel = -1;
priv->mpc52xx_ata_dma_last_write = -1;
if (ipb_freq/1000000 == 66) {
priv->mdmaspec = mdmaspec66;
priv->udmaspec = udmaspec66;
} else {
priv->mdmaspec = mdmaspec132;
priv->udmaspec = udmaspec132;
}
/* Allocate a BestComm task for DMA */
dmatsk = bcom_ata_init(MAX_DMA_BUFFERS, MAX_DMA_BUFFER_SIZE);
if (!dmatsk) {
dev_err(&op->dev, "bestcomm initialization failed\n");
rv = -ENOMEM;
goto err1;
}
task_irq = bcom_get_task_irq(dmatsk);
rv = devm_request_irq(&op->dev, task_irq, &mpc52xx_ata_task_irq, 0,
"ATA task", priv);
if (rv) {
dev_err(&op->dev, "error requesting DMA IRQ\n");
goto err2;
}
priv->dmatsk = dmatsk;
/* Init the hw */
rv = mpc52xx_ata_hw_init(priv);
if (rv) {
dev_err(&op->dev, "error initializing hardware\n");
goto err2;
}
/* Register ourselves to libata */
rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start,
mwdma_mask, udma_mask);
if (rv) {
dev_err(&op->dev, "error registering with ATA layer\n");
goto err2;
}
return 0;
err2:
irq_dispose_mapping(task_irq);
bcom_ata_release(dmatsk);
err1:
irq_dispose_mapping(ata_irq);
return rv;
}
static int
mpc52xx_ata_remove(struct platform_device *op)
{
struct ata_host *host = platform_get_drvdata(op);
struct mpc52xx_ata_priv *priv = host->private_data;
int task_irq;
/* Deregister the ATA interface */
ata_platform_remove_one(op);
/* Clean up DMA */
task_irq = bcom_get_task_irq(priv->dmatsk);
irq_dispose_mapping(task_irq);
bcom_ata_release(priv->dmatsk);
irq_dispose_mapping(priv->ata_irq);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int
mpc52xx_ata_suspend(struct platform_device *op, pm_message_t state)
{
struct ata_host *host = platform_get_drvdata(op);
return ata_host_suspend(host, state);
}
static int
mpc52xx_ata_resume(struct platform_device *op)
{
struct ata_host *host = platform_get_drvdata(op);
struct mpc52xx_ata_priv *priv = host->private_data;
int rv;
rv = mpc52xx_ata_hw_init(priv);
if (rv) {
dev_err(host->dev, "error initializing hardware\n");
return rv;
}
ata_host_resume(host);
return 0;
}
#endif
static struct of_device_id mpc52xx_ata_of_match[] = {
{ .compatible = "fsl,mpc5200-ata", },
{ .compatible = "mpc5200-ata", },
{},
};
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
.remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
#endif
.driver = {
.name = DRV_NAME,
.of_match_table = mpc52xx_ata_of_match,
},
};
module_platform_driver(mpc52xx_ata_of_platform_driver);
MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match);
| daler3/smack-next | drivers/ata/pata_mpc52xx.c | C | gpl-2.0 | 25,620 |
/*
* drivers/gpu/drm/omapdrm/omap_plane.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kfifo.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/* some hackery because omapdss has an 'enum omap_plane' (which would be
* better named omap_plane_id).. and compiler seems unhappy about having
* both a 'struct omap_plane' and 'enum omap_plane'
*/
#define omap_plane _omap_plane
/*
* plane funcs
*/
struct callback {
void (*fxn)(void *);
void *arg;
};
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane {
struct drm_plane base;
int id; /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
const char *name;
struct omap_overlay_info info;
struct omap_drm_apply apply;
/* position/orientation of scanout within the fb: */
struct omap_drm_window win;
bool enabled;
/* last fb that we pinned: */
struct drm_framebuffer *pinned_fb;
uint32_t nformats;
uint32_t formats[32];
struct omap_drm_irq error_irq;
/* set of bo's pending unpin until next post_apply() */
DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
// XXX maybe get rid of this and handle vblank in crtc too?
struct callback apply_done_cb;
};
static void unpin(void *arg, struct drm_gem_object *bo)
{
struct drm_plane *plane = arg;
struct omap_plane *omap_plane = to_omap_plane(plane);
if (kfifo_put(&omap_plane->unpin_fifo,
(const struct drm_gem_object **)&bo)) {
/* also hold a ref so it isn't free'd while pinned */
drm_gem_object_reference(bo);
} else {
dev_err(plane->dev->dev, "unpin fifo full!\n");
omap_gem_put_paddr(bo);
}
}
/* update which fb (if any) is pinned for scanout */
static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
if (pinned_fb != fb) {
int ret;
DBG("%p -> %p", pinned_fb, fb);
if (fb)
drm_framebuffer_reference(fb);
ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
if (pinned_fb)
drm_framebuffer_unreference(pinned_fb);
if (ret) {
dev_err(plane->dev->dev, "could not swap %p -> %p\n",
omap_plane->pinned_fb, fb);
if (fb)
drm_framebuffer_unreference(fb);
omap_plane->pinned_fb = NULL;
return ret;
}
omap_plane->pinned_fb = fb;
}
return 0;
}
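/*
 * Called from the CRTC apply machinery before the registers are
 * committed: pin the framebuffer that is about to be scanned out,
 * convert the plane window into an omap_overlay_info and program the
 * DISPC overlay, or simply disable the overlay if the plane is off.
 */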
static void omap_plane_pre_apply(struct omap_drm_apply *apply)
{
struct omap_plane *omap_plane =
container_of(apply, struct omap_plane, apply);
struct omap_drm_window *win = &omap_plane->win;
struct drm_plane *plane = &omap_plane->base;
struct drm_device *dev = plane->dev;
struct omap_overlay_info *info = &omap_plane->info;
struct drm_crtc *crtc = plane->crtc;
enum omap_channel channel;
bool enabled = omap_plane->enabled && crtc;
bool ilace, replication;
int ret;
DBG("%s, enabled=%d", omap_plane->name, enabled);
/* if fb has changed, pin new fb: */
update_pin(plane, enabled ? plane->fb : NULL);
if (!enabled) {
dispc_ovl_enable(omap_plane->id, false);
return;
}
channel = omap_crtc_channel(crtc);
/* update scanout: */
omap_framebuffer_update_scanout(plane->fb, win, info);
DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
info->out_width, info->out_height,
info->screen_width);
DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
info->paddr, info->p_uv_addr);
/* TODO: */
ilace = false;
replication = false;
/* and finally, update omapdss: */
ret = dispc_ovl_setup(omap_plane->id, info,
replication, omap_crtc_timings(crtc), false);
if (ret) {
dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
return;
}
dispc_ovl_enable(omap_plane->id, true);
dispc_ovl_set_channel_out(omap_plane->id, channel);
}
static void omap_plane_post_apply(struct omap_drm_apply *apply)
{
struct omap_plane *omap_plane =
container_of(apply, struct omap_plane, apply);
struct drm_plane *plane = &omap_plane->base;
struct omap_overlay_info *info = &omap_plane->info;
struct drm_gem_object *bo = NULL;
struct callback cb;
cb = omap_plane->apply_done_cb;
omap_plane->apply_done_cb.fxn = NULL;
while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
omap_gem_put_paddr(bo);
drm_gem_object_unreference_unlocked(bo);
}
if (cb.fxn)
cb.fxn(cb.arg);
if (omap_plane->enabled) {
omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
info->out_width, info->out_height);
}
}
static int apply(struct drm_plane *plane)
{
if (plane->crtc) {
struct omap_plane *omap_plane = to_omap_plane(plane);
return omap_crtc_apply(plane->crtc, &omap_plane->apply);
}
return 0;
}
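/*
 * Program a new scanout configuration. Source coordinates arrive in
 * Q16.16 fixed point from DRM and are truncated to integers here; the
 * optional fxn/arg callback is stashed in apply_done_cb and invoked
 * from post_apply once the update has taken effect (page-flip
 * completion).
 */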
int omap_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
void (*fxn)(void *), void *arg)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_drm_window *win = &omap_plane->win;
win->crtc_x = crtc_x;
win->crtc_y = crtc_y;
win->crtc_w = crtc_w;
win->crtc_h = crtc_h;
/* src values are in Q16 fixed point, convert to integer: */
win->src_x = src_x >> 16;
win->src_y = src_y >> 16;
win->src_w = src_w >> 16;
win->src_h = src_h >> 16;
if (fxn) {
/* omap_crtc should ensure that a new page flip
* isn't permitted while there is one pending:
*/
BUG_ON(omap_plane->apply_done_cb.fxn);
omap_plane->apply_done_cb.fxn = fxn;
omap_plane->apply_done_cb.arg = arg;
}
plane->fb = fb;
plane->crtc = crtc;
return apply(plane);
}
static int omap_plane_update(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
omap_plane->enabled = true;
if (plane->fb)
drm_framebuffer_unreference(plane->fb);
drm_framebuffer_reference(fb);
return omap_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h,
NULL, NULL);
}
static int omap_plane_disable(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
omap_plane->win.rotation = BIT(DRM_ROTATE_0);
return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
}
static void omap_plane_destroy(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
DBG("%s", omap_plane->name);
omap_irq_unregister(plane->dev, &omap_plane->error_irq);
omap_plane_disable(plane);
drm_plane_cleanup(plane);
WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
kfifo_free(&omap_plane->unpin_fifo);
kfree(omap_plane);
}
int omap_plane_dpms(struct drm_plane *plane, int mode)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
bool enabled = (mode == DRM_MODE_DPMS_ON);
int ret = 0;
if (enabled != omap_plane->enabled) {
omap_plane->enabled = enabled;
ret = apply(plane);
}
return ret;
}
/* helper to install properties which are common to planes and crtcs */
void omap_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj)
{
struct drm_device *dev = plane->dev;
struct omap_drm_private *priv = dev->dev_private;
struct drm_property *prop;
if (priv->has_dmm) {
prop = priv->rotation_prop;
if (!prop) {
const struct drm_prop_enum_list props[] = {
{ DRM_ROTATE_0, "rotate-0" },
{ DRM_ROTATE_90, "rotate-90" },
{ DRM_ROTATE_180, "rotate-180" },
{ DRM_ROTATE_270, "rotate-270" },
{ DRM_REFLECT_X, "reflect-x" },
{ DRM_REFLECT_Y, "reflect-y" },
};
prop = drm_property_create_bitmask(dev, 0, "rotation",
props, ARRAY_SIZE(props));
if (prop == NULL)
return;
priv->rotation_prop = prop;
}
drm_object_attach_property(obj, prop, 0);
}
prop = priv->zorder_prop;
if (!prop) {
prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
if (prop == NULL)
return;
priv->zorder_prop = prop;
}
drm_object_attach_property(obj, prop, 0);
}
int omap_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_drm_private *priv = plane->dev->dev_private;
int ret = -EINVAL;
if (property == priv->rotation_prop) {
DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
omap_plane->win.rotation = val;
ret = apply(plane);
} else if (property == priv->zorder_prop) {
DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
omap_plane->info.zorder = val;
ret = apply(plane);
}
return ret;
}
static const struct drm_plane_funcs omap_plane_funcs = {
.update_plane = omap_plane_update,
.disable_plane = omap_plane_disable,
.destroy = omap_plane_destroy,
.set_property = omap_plane_set_property,
};
static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
{
struct omap_plane *omap_plane =
container_of(irq, struct omap_plane, error_irq);
DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
}
static const char *plane_names[] = {
[OMAP_DSS_GFX] = "gfx",
[OMAP_DSS_VIDEO1] = "vid1",
[OMAP_DSS_VIDEO2] = "vid2",
[OMAP_DSS_VIDEO3] = "vid3",
};
static const uint32_t error_irqs[] = {
[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
int id, bool private_plane)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_plane *plane = NULL;
struct omap_plane *omap_plane;
struct omap_overlay_info *info;
int ret;
DBG("%s: priv=%d", plane_names[id], private_plane);
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane)
goto fail;
ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
if (ret) {
dev_err(dev->dev, "could not allocate unpin FIFO\n");
goto fail;
}
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
dss_feat_get_supported_color_modes(id));
omap_plane->id = id;
omap_plane->name = plane_names[id];
plane = &omap_plane->base;
omap_plane->apply.pre_apply = omap_plane_pre_apply;
omap_plane->apply.post_apply = omap_plane_post_apply;
omap_plane->error_irq.irqmask = error_irqs[id];
omap_plane->error_irq.irq = omap_plane_error_irq;
omap_irq_register(dev, &omap_plane->error_irq);
drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
omap_plane->formats, omap_plane->nformats, private_plane);
omap_plane_install_properties(plane, &plane->base);
/* get our starting configuration, set defaults for parameters
* we don't currently use, etc:
*/
info = &omap_plane->info;
info->rotation_type = OMAP_DSS_ROT_DMA;
info->rotation = OMAP_DSS_ROT_0;
info->global_alpha = 0xff;
info->mirror = 0;
/* Set defaults depending on whether we are a CRTC or overlay
* layer.
* TODO add ioctl to give userspace an API to change this.. this
* will come in a subsequent patch.
*/
if (private_plane)
omap_plane->info.zorder = 0;
else
omap_plane->info.zorder = id;
return plane;
fail:
if (plane)
omap_plane_destroy(plane);
return NULL;
}
| ghosteyezz/my-linux-kernel-study | drivers/gpu/drm/omapdrm/omap_plane.c | C | gpl-2.0 | 11,949 |
/* DVB USB compliant Linux driver for the Friio USB2.0 ISDB-T receiver.
*
* Copyright (C) 2009 Akihiro Tsukada <tskd2@yahoo.co.jp>
*
 * This module is based on the gl861 and vp702x modules.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation, version 2.
*
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "friio.h"
/* debug */
int dvb_usb_friio_debug;
module_param_named(debug, dvb_usb_friio_debug, int, 0644);
MODULE_PARM_DESC(debug,
"set debugging level (1=info,2=xfer,4=rc,8=fe (or-able))."
DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/**
* Indirect I2C access to the PLL via FE.
 * The whole I2C protocol data to the PLL is sent via the FE's I2C register.
 * This is done with a control msg to the FE carrying the I2C data, and
 * a specific USB request number is assigned for that purpose.
 *
 * This function sends wbuf[1..] to the I2C register wbuf[0] at addr (i.e. the FE).
 * TODO: refactor into smarter i2c functions.
*/
static int gl861_i2c_ctrlmsg_data(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
u16 index = wbuf[0]; /* must be JDVBT90502_2ND_I2C_REG(=0xFE) */
u16 value = addr << (8 + 1);
int wo = (rbuf == NULL || rlen == 0); /* write only */
u8 req, type;
deb_xfer("write to PLL:0x%02x via FE reg:0x%02x, len:%d\n",
wbuf[1], wbuf[0], wlen - 1);
if (wo && wlen >= 2) {
req = GL861_REQ_I2C_DATA_CTRL_WRITE;
type = GL861_WRITE;
udelay(20);
return usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
req, type, value, index,
&wbuf[1], wlen - 1, 2000);
}
deb_xfer("not supported ctrl-msg, aborting.");
return -EINVAL;
}
/* normal I2C access (without extra data arguments).
* write to the register wbuf[0] at I2C address addr with the value wbuf[1],
* or read from the register wbuf[0].
* register address can be 16bit (wbuf[2]<<8 | wbuf[0]) if wlen==3
*/
static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
u16 index;
u16 value = addr << (8 + 1);
int wo = (rbuf == NULL || rlen == 0); /* write-only */
u8 req, type;
unsigned int pipe;
/* special case for the indirect I2C access to the PLL via FE, */
if (addr == friio_fe_config.demod_address &&
wbuf[0] == JDVBT90502_2ND_I2C_REG)
return gl861_i2c_ctrlmsg_data(d, addr, wbuf, wlen, rbuf, rlen);
if (wo) {
req = GL861_REQ_I2C_WRITE;
type = GL861_WRITE;
pipe = usb_sndctrlpipe(d->udev, 0);
} else { /* rw */
req = GL861_REQ_I2C_READ;
type = GL861_READ;
pipe = usb_rcvctrlpipe(d->udev, 0);
}
switch (wlen) {
case 1:
index = wbuf[0];
break;
case 2:
index = wbuf[0];
value = value + wbuf[1];
break;
case 3:
/* special case for 16bit register-address */
index = (wbuf[2] << 8) | wbuf[0];
value = value + wbuf[1];
break;
default:
deb_xfer("wlen = %x, aborting.", wlen);
return -EINVAL;
}
msleep(1);
return usb_control_msg(d->udev, pipe, req, type,
value, index, rbuf, rlen, 2000);
}
/* I2C */
static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int i;
if (num > 2)
return -EINVAL;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
for (i = 0; i < num; i++) {
/* write/read request */
if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
if (gl861_i2c_msg(d, msg[i].addr,
msg[i].buf, msg[i].len,
msg[i + 1].buf, msg[i + 1].len) < 0)
break;
i++;
} else
if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
msg[i].len, NULL, 0) < 0)
break;
}
mutex_unlock(&d->i2c_mutex);
return i;
}
static u32 gl861_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
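/*
 * Bit-bang the front panel LED colour/saturation word (and the LNB
 * control bit) through register 0x00 of the device at I2C address 0x00:
 * a 2-bit header followed by 32 data bits, each clocked in with a
 * strobe/clock pair. Returns 1 only when all 70 I2C transfers succeeded.
 */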
static int friio_ext_ctl(struct dvb_usb_adapter *adap,
u32 sat_color, int lnb_on)
{
int i;
int ret;
struct i2c_msg msg;
u8 buf[2];
u32 mask;
u8 lnb = (lnb_on) ? FRIIO_CTL_LNB : 0;
msg.addr = 0x00;
msg.flags = 0;
msg.len = 2;
msg.buf = buf;
buf[0] = 0x00;
	/* send the 2-bit header (binary 10) */
buf[1] = lnb | FRIIO_CTL_LED | FRIIO_CTL_STROBE;
ret = gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
buf[1] |= FRIIO_CTL_CLK;
ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
buf[1] = lnb | FRIIO_CTL_STROBE;
ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
buf[1] |= FRIIO_CTL_CLK;
ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
	/* send the 32-bit (saturation, R, G, B) word serially */
mask = 1 << 31;
for (i = 0; i < 32; i++) {
buf[1] = lnb | FRIIO_CTL_STROBE;
if (sat_color & mask)
buf[1] |= FRIIO_CTL_LED;
ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
buf[1] |= FRIIO_CTL_CLK;
ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
mask >>= 1;
}
/* set the strobe off */
buf[1] = lnb;
ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
buf[1] |= FRIIO_CTL_CLK;
ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
return (ret == 70);
}
static int friio_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);
/* TODO: move these init cmds to the FE's init routine? */
static u8 streaming_init_cmds[][2] = {
{0x33, 0x08},
{0x37, 0x40},
{0x3A, 0x1F},
{0x3B, 0xFF},
{0x3C, 0x1F},
{0x3D, 0xFF},
{0x38, 0x00},
{0x35, 0x00},
{0x39, 0x00},
{0x36, 0x00},
};
static int cmdlen = sizeof(streaming_init_cmds) / 2;
/*
* Command sequence in this init function is a replay
* of the captured USB commands from the Windows proprietary driver.
*/
static int friio_initialize(struct dvb_usb_device *d)
{
int ret;
int i;
int retry = 0;
u8 rbuf[2];
u8 wbuf[3];
deb_info("%s called.\n", __func__);
/* use gl861_i2c_msg instead of gl861_i2c_xfer(), */
/* because the i2c device is not set up yet. */
wbuf[0] = 0x11;
wbuf[1] = 0x02;
ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
if (ret < 0)
goto error;
msleep(2);
wbuf[0] = 0x11;
wbuf[1] = 0x00;
ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
if (ret < 0)
goto error;
msleep(1);
/* following msgs should be in the FE's init code? */
/* cmd sequence to identify the device type? (friio black/white) */
wbuf[0] = 0x03;
wbuf[1] = 0x80;
/* can't use gl861_i2c_cmd, as the register-addr is 16bit(0x0100) */
ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
GL861_REQ_I2C_DATA_CTRL_WRITE, GL861_WRITE,
0x1200, 0x0100, wbuf, 2, 2000);
if (ret < 0)
goto error;
msleep(2);
wbuf[0] = 0x00;
wbuf[2] = 0x01; /* reg.0x0100 */
wbuf[1] = 0x00;
ret = gl861_i2c_msg(d, 0x12 >> 1, wbuf, 3, rbuf, 2);
/* my Friio White returns 0xffff. */
if (ret < 0 || rbuf[0] != 0xff || rbuf[1] != 0xff)
goto error;
msleep(2);
wbuf[0] = 0x03;
wbuf[1] = 0x80;
ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
GL861_REQ_I2C_DATA_CTRL_WRITE, GL861_WRITE,
0x9000, 0x0100, wbuf, 2, 2000);
if (ret < 0)
goto error;
msleep(2);
wbuf[0] = 0x00;
wbuf[2] = 0x01; /* reg.0x0100 */
wbuf[1] = 0x00;
ret = gl861_i2c_msg(d, 0x90 >> 1, wbuf, 3, rbuf, 2);
/* my Friio White returns 0xffff again. */
if (ret < 0 || rbuf[0] != 0xff || rbuf[1] != 0xff)
goto error;
msleep(1);
restart:
/* ============ start DEMOD init cmds ================== */
/* read PLL status to clear the POR bit */
wbuf[0] = JDVBT90502_2ND_I2C_REG;
wbuf[1] = (FRIIO_PLL_ADDR << 1) + 1; /* +1 for reading */
ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 2, NULL, 0);
if (ret < 0)
goto error;
msleep(5);
/* note: DEMODULATOR has 16bit register-address. */
wbuf[0] = 0x00;
wbuf[2] = 0x01; /* reg addr: 0x0100 */
wbuf[1] = 0x00; /* val: not used */
ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 3, rbuf, 1);
if (ret < 0)
goto error;
/*
msleep(1);
wbuf[0] = 0x80;
wbuf[1] = 0x00;
ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 2, rbuf, 1);
if (ret < 0)
goto error;
*/
if (rbuf[0] & 0x80) { /* still in PowerOnReset state? */
if (++retry > 3) {
deb_info("failed to get the correct"
" FE demod status:0x%02x\n", rbuf[0]);
goto error;
}
msleep(100);
goto restart;
}
/* TODO: check return value in rbuf */
/* =========== end DEMOD init cmds ===================== */
msleep(1);
wbuf[0] = 0x30;
wbuf[1] = 0x04;
ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
if (ret < 0)
goto error;
msleep(2);
/* following 2 cmds unnecessary? */
wbuf[0] = 0x00;
wbuf[1] = 0x01;
ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
if (ret < 0)
goto error;
wbuf[0] = 0x06;
wbuf[1] = 0x0F;
ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
if (ret < 0)
goto error;
/* some streaming ctl cmds (maybe) */
msleep(10);
for (i = 0; i < cmdlen; i++) {
ret = gl861_i2c_msg(d, 0x00, streaming_init_cmds[i], 2,
NULL, 0);
if (ret < 0)
goto error;
msleep(1);
}
msleep(20);
/* change the LED color etc. */
ret = friio_streaming_ctrl(&d->adapter[0], 0);
if (ret < 0)
goto error;
return 0;
error:
deb_info("%s:ret == %d\n", __func__, ret);
return -EIO;
}
/* Callbacks for DVB USB */
static int friio_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
int ret;
deb_info("%s called.(%d)\n", __func__, onoff);
/* set the LED color and saturation (and LNB on) */
if (onoff)
ret = friio_ext_ctl(adap, 0x6400ff64, 1);
else
ret = friio_ext_ctl(adap, 0x96ff00ff, 1);
if (ret != 1) {
deb_info("%s failed to send cmdx. ret==%d\n", __func__, ret);
return -EREMOTEIO;
}
return 0;
}
static int friio_frontend_attach(struct dvb_usb_adapter *adap)
{
if (friio_initialize(adap->dev) < 0)
return -EIO;
adap->fe = jdvbt90502_attach(adap->dev);
if (adap->fe == NULL)
return -EIO;
return 0;
}
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties friio_properties;
static int friio_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct dvb_usb_device *d;
struct usb_host_interface *alt;
int ret;
if (intf->num_altsetting < GL861_ALTSETTING_COUNT)
return -ENODEV;
alt = usb_altnum_to_altsetting(intf, FRIIO_BULK_ALTSETTING);
if (alt == NULL) {
deb_rc("not alt found!\n");
return -ENODEV;
}
ret = usb_set_interface(interface_to_usbdev(intf),
alt->desc.bInterfaceNumber,
alt->desc.bAlternateSetting);
if (ret != 0) {
deb_rc("failed to set alt-setting!\n");
return ret;
}
ret = dvb_usb_device_init(intf, &friio_properties,
THIS_MODULE, &d, adapter_nr);
if (ret == 0)
friio_streaming_ctrl(&d->adapter[0], 1);
return ret;
}
struct jdvbt90502_config friio_fe_config = {
.demod_address = FRIIO_DEMOD_ADDR,
.pll_address = FRIIO_PLL_ADDR,
};
static struct i2c_algorithm gl861_i2c_algo = {
.master_xfer = gl861_i2c_xfer,
.functionality = gl861_i2c_func,
};
static struct usb_device_id friio_table[] = {
{ USB_DEVICE(USB_VID_774, USB_PID_FRIIO_WHITE) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, friio_table);
static struct dvb_usb_device_properties friio_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.size_of_priv = 0,
.num_adapters = 1,
.adapter = {
/* caps:0 => no pid filter, 188B TS packet */
/* GL861 has a HW pid filter, but no info available. */
{
.caps = 0,
.frontend_attach = friio_frontend_attach,
.streaming_ctrl = friio_streaming_ctrl,
.stream = {
.type = USB_BULK,
/* count <= MAX_NO_URBS_FOR_DATA_STREAM(10) */
.count = 8,
.endpoint = 0x01,
.u = {
/* GL861 has 6KB buf inside */
.bulk = {
.buffersize = 16384,
}
}
},
}
},
.i2c_algo = &gl861_i2c_algo,
.num_device_descs = 1,
.devices = {
{
.name = "774 Friio ISDB-T USB2.0",
.cold_ids = { NULL },
.warm_ids = { &friio_table[0], NULL },
},
}
};
static struct usb_driver friio_driver = {
.name = "dvb_usb_friio",
.probe = friio_probe,
.disconnect = dvb_usb_device_exit,
.id_table = friio_table,
};
/* module stuff */
static int __init friio_module_init(void)
{
int ret;
ret = usb_register(&friio_driver);
if (ret)
err("usb_register failed. Error number %d", ret);
return ret;
}
static void __exit friio_module_exit(void)
{
/* deregister this driver from the USB subsystem */
usb_deregister(&friio_driver);
}
module_init(friio_module_init);
module_exit(friio_module_exit);
MODULE_AUTHOR("Akihiro Tsukada <tskd2@yahoo.co.jp>");
MODULE_DESCRIPTION("Driver for Friio ISDB-T USB2.0 Receiver");
MODULE_VERSION("0.2");
MODULE_LICENSE("GPL");
| rothnic/Adam_Kernel | drivers/media/dvb/dvb-usb/friio.c | C | gpl-2.0 | 12,478 |
/*
* STMicroelectronics sensors trigger library driver
*
* Copyright 2012-2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
*
* Licensed under the GPL-2.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/interrupt.h>
#include <linux/iio/common/st_sensors.h>
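/*
 * Allocate the "<name>-trigger" IIO trigger for an ST sensor and bind
 * the sensor's data-ready interrupt line to it via
 * iio_trigger_generic_data_rdy_poll(), so that buffered reads are
 * driven directly by the hardware DRDY signal.
 */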
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
int err;
struct st_sensor_data *sdata = iio_priv(indio_dev);
sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
if (sdata->trig == NULL) {
err = -ENOMEM;
dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
goto iio_trigger_alloc_error;
}
err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
iio_trigger_generic_data_rdy_poll,
NULL,
IRQF_TRIGGER_RISING,
sdata->trig->name,
sdata->trig);
if (err)
goto request_irq_error;
iio_trigger_set_drvdata(sdata->trig, indio_dev);
sdata->trig->ops = trigger_ops;
sdata->trig->dev.parent = sdata->dev;
err = iio_trigger_register(sdata->trig);
if (err < 0) {
dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
goto iio_trigger_register_error;
}
indio_dev->trig = iio_trigger_get(sdata->trig);
return 0;
iio_trigger_register_error:
free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
request_irq_error:
iio_trigger_free(sdata->trig);
iio_trigger_alloc_error:
return err;
}
EXPORT_SYMBOL(st_sensors_allocate_trigger);
void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
iio_trigger_unregister(sdata->trig);
free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
iio_trigger_free(sdata->trig);
}
EXPORT_SYMBOL(st_sensors_deallocate_trigger);
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
MODULE_LICENSE("GPL v2");
| Digilent/linux-Digilent-Dev | drivers/iio/common/st_sensors/st_sensors_trigger.c | C | gpl-2.0 | 1,998 |
/*
* MCT (Magic Control Technology Corp.) USB RS232 Converter Driver
*
* Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is largely derived from the Belkin USB Serial Adapter Driver
* (see belkin_sa.[ch]). All of the information about the device was acquired
* by using SniffUSB on Windows98. For technical details see mct_u232.h.
*
* William G. Greathouse and Greg Kroah-Hartman provided great help on how to
* do the reverse engineering and how to write a USB serial device driver.
*
* TO BE DONE, TO BE CHECKED:
* DTR/RTS signal handling may be incomplete or incorrect. I have mainly
* implemented what I have seen with SniffUSB or found in belkin_sa.c.
* For further TODOs check also belkin_sa.c.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
#include "mct_u232.h"
#define DRIVER_AUTHOR "Wolfgang Grandegger <wolfgang@ces.ch>"
#define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver"
/*
* Function prototypes
*/
static int mct_u232_port_probe(struct usb_serial_port *port);
static int mct_u232_port_remove(struct usb_serial_port *port);
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port);
static void mct_u232_close(struct usb_serial_port *port);
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
static void mct_u232_read_int_callback(struct urb *urb);
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state);
static int mct_u232_tiocmget(struct tty_struct *tty);
static int mct_u232_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static void mct_u232_throttle(struct tty_struct *tty);
static void mct_u232_unthrottle(struct tty_struct *tty);
/*
* All of the device info needed for the MCT USB-RS232 converter.
*/
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(MCT_U232_VID, MCT_U232_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) },
{ USB_DEVICE(MCT_U232_BELKIN_F5U109_VID, MCT_U232_BELKIN_F5U109_PID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver mct_u232_device = {
.driver = {
.owner = THIS_MODULE,
.name = "mct_u232",
},
.description = "MCT U232",
.id_table = id_table,
.num_ports = 1,
.open = mct_u232_open,
.close = mct_u232_close,
.dtr_rts = mct_u232_dtr_rts,
.throttle = mct_u232_throttle,
.unthrottle = mct_u232_unthrottle,
.read_int_callback = mct_u232_read_int_callback,
.set_termios = mct_u232_set_termios,
.break_ctl = mct_u232_break_ctl,
.tiocmget = mct_u232_tiocmget,
.tiocmset = mct_u232_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.port_probe = mct_u232_port_probe,
.port_remove = mct_u232_port_remove,
.get_icount = usb_serial_generic_get_icount,
};
static struct usb_serial_driver * const serial_drivers[] = {
&mct_u232_device, NULL
};
struct mct_u232_private {
struct urb *read_urb;
spinlock_t lock;
unsigned int control_state; /* Modem Line Setting (TIOCM) */
unsigned char last_lcr; /* Line Control Register */
unsigned char last_lsr; /* Line Status Register */
unsigned char last_msr; /* Modem Status Register */
unsigned int rx_flags; /* Throttling flags */
};
#define THROTTLED 0x01
/*
* Handle vendor specific USB requests
*/
#define WDR_TIMEOUT 5000 /* default urb timeout */
/*
 * Latter-day 2.6.0-test kernels have new baud rates like B230400 which
* we do not know how to support. We ignore them for the moment.
*/
static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
speed_t value, speed_t *result)
{
*result = value;
if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID
|| le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_BELKIN_F5U109_PID) {
switch (value) {
case 300:
return 0x01;
case 600:
return 0x02; /* this one not tested */
case 1200:
return 0x03;
case 2400:
return 0x04;
case 4800:
return 0x06;
case 9600:
return 0x08;
case 19200:
return 0x09;
case 38400:
return 0x0a;
case 57600:
return 0x0b;
case 115200:
return 0x0c;
default:
*result = 9600;
return 0x08;
}
} else {
/* FIXME: Can we use any divider - should we do
divider = 115200/value;
real baud = 115200/divider */
switch (value) {
case 300: break;
case 600: break;
case 1200: break;
case 2400: break;
case 4800: break;
case 9600: break;
case 19200: break;
case 38400: break;
case 57600: break;
case 115200: break;
default:
value = 9600;
*result = 9600;
}
return 115200/value;
}
}
static int mct_u232_set_baud_rate(struct tty_struct *tty,
struct usb_serial *serial, struct usb_serial_port *port, speed_t value)
{
unsigned int divisor;
int rc;
unsigned char *buf;
unsigned char cts_enable_byte = 0;
speed_t speed;
buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
	put_unaligned_le32(divisor, buf); /* helper stores little-endian; don't pre-swap */
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_BAUD_RATE_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE,
WDR_TIMEOUT);
	if (rc < 0) /* FIXME: what speed actually results on error? */
dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n",
value, rc);
else
tty_encode_baud_rate(tty, speed, speed);
dev_dbg(&port->dev, "set_baud_rate: value: 0x%x, divisor: 0x%x\n", value, divisor);
/* Mimic the MCT-supplied Windows driver (version 1.21P.0104), which
always sends two extra USB 'device request' messages after the
'baud rate change' message. The actual functionality of the
request codes in these messages is not fully understood but these
particular codes are never seen in any operation besides a baud
rate change. Both of these messages send a single byte of data.
In the first message, the value of this byte is always zero.
The second message has been determined experimentally to control
whether data will be transmitted to a device which is not asserting
the 'CTS' signal. If the second message's data byte is zero, data
will be transmitted even if 'CTS' is not asserted (i.e. no hardware
	   flow control). If the second message's data byte is nonzero (a
value of 1 is used by this driver), data will not be transmitted to
a device which is not asserting 'CTS'.
*/
buf[0] = 0;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_UNKNOWN1_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
"failed (error = %d)\n", MCT_U232_SET_UNKNOWN1_REQUEST,
rc);
if (port && C_CRTSCTS(tty))
cts_enable_byte = 1;
dev_dbg(&port->dev, "set_baud_rate: send second control message, data = %02X\n",
cts_enable_byte);
buf[0] = cts_enable_byte;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_CTS_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_CTS_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
"failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc);
kfree(buf);
return rc;
} /* mct_u232_set_baud_rate */
static int mct_u232_set_line_ctrl(struct usb_serial_port *port,
unsigned char lcr)
{
int rc;
unsigned char *buf;
buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
buf[0] = lcr;
rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0),
MCT_U232_SET_LINE_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc);
dev_dbg(&port->dev, "set_line_ctrl: 0x%x\n", lcr);
kfree(buf);
return rc;
} /* mct_u232_set_line_ctrl */
static int mct_u232_set_modem_ctrl(struct usb_serial_port *port,
unsigned int control_state)
{
int rc;
unsigned char mcr;
unsigned char *buf;
buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
mcr = MCT_U232_MCR_NONE;
if (control_state & TIOCM_DTR)
mcr |= MCT_U232_MCR_DTR;
if (control_state & TIOCM_RTS)
mcr |= MCT_U232_MCR_RTS;
buf[0] = mcr;
rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0),
MCT_U232_SET_MODEM_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
WDR_TIMEOUT);
kfree(buf);
dev_dbg(&port->dev, "set_modem_ctrl: state=0x%x ==> mcr=0x%x\n", control_state, mcr);
if (rc < 0) {
dev_err(&port->dev, "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
return rc;
}
return 0;
} /* mct_u232_set_modem_ctrl */
static int mct_u232_get_modem_stat(struct usb_serial_port *port,
unsigned char *msr)
{
int rc;
unsigned char *buf;
buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
if (buf == NULL) {
*msr = 0;
return -ENOMEM;
}
rc = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0),
MCT_U232_GET_MODEM_STAT_REQUEST,
MCT_U232_GET_REQUEST_TYPE,
0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
WDR_TIMEOUT);
if (rc < 0) {
dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc);
*msr = 0;
} else {
*msr = buf[0];
}
dev_dbg(&port->dev, "get_modem_stat: 0x%x\n", *msr);
kfree(buf);
return rc;
} /* mct_u232_get_modem_stat */
static void mct_u232_msr_to_icount(struct async_icount *icount,
unsigned char msr)
{
/* Translate Control Line states */
if (msr & MCT_U232_MSR_DDSR)
icount->dsr++;
if (msr & MCT_U232_MSR_DCTS)
icount->cts++;
if (msr & MCT_U232_MSR_DRI)
icount->rng++;
if (msr & MCT_U232_MSR_DCD)
icount->dcd++;
} /* mct_u232_msr_to_icount */
static void mct_u232_msr_to_state(struct usb_serial_port *port,
unsigned int *control_state, unsigned char msr)
{
/* Translate Control Line states */
if (msr & MCT_U232_MSR_DSR)
*control_state |= TIOCM_DSR;
else
*control_state &= ~TIOCM_DSR;
if (msr & MCT_U232_MSR_CTS)
*control_state |= TIOCM_CTS;
else
*control_state &= ~TIOCM_CTS;
if (msr & MCT_U232_MSR_RI)
*control_state |= TIOCM_RI;
else
*control_state &= ~TIOCM_RI;
if (msr & MCT_U232_MSR_CD)
*control_state |= TIOCM_CD;
else
*control_state &= ~TIOCM_CD;
dev_dbg(&port->dev, "msr_to_state: msr=0x%x ==> state=0x%x\n", msr, *control_state);
} /* mct_u232_msr_to_state */
/*
* Driver's tty interface functions
*/
static int mct_u232_port_probe(struct usb_serial_port *port)
{
struct mct_u232_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
priv->read_urb = port->serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
return 0;
}
static int mct_u232_port_remove(struct usb_serial_port *port)
{
struct mct_u232_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
return 0;
}
static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
int retval = 0;
unsigned int control_state;
unsigned long flags;
unsigned char last_lcr;
unsigned char last_msr;
/* Compensate for a hardware bug: although the Sitecom U232-P25
* device reports a maximum output packet size of 32 bytes,
* it seems to be able to accept only 16 bytes (and that's what
* SniffUSB says too...)
*/
if (le16_to_cpu(serial->dev->descriptor.idProduct)
== MCT_U232_SITECOM_PID)
port->bulk_out_size = 16;
/* Do a defined restart: the normal serial device seems to
* always turn on DTR and RTS here, so do the same. I'm not
* sure if this is really necessary. But it should not harm
* either.
*/
spin_lock_irqsave(&priv->lock, flags);
if (tty && (tty->termios.c_cflag & CBAUD))
priv->control_state = TIOCM_DTR | TIOCM_RTS;
else
priv->control_state = 0;
priv->last_lcr = (MCT_U232_DATA_BITS_8 |
MCT_U232_PARITY_NONE |
MCT_U232_STOP_BITS_1);
control_state = priv->control_state;
last_lcr = priv->last_lcr;
spin_unlock_irqrestore(&priv->lock, flags);
mct_u232_set_modem_ctrl(port, control_state);
mct_u232_set_line_ctrl(port, last_lcr);
/* Read modem status and update control state */
mct_u232_get_modem_stat(port, &last_msr);
spin_lock_irqsave(&priv->lock, flags);
priv->last_msr = last_msr;
mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr);
spin_unlock_irqrestore(&priv->lock, flags);
retval = usb_submit_urb(priv->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&port->dev,
"usb_submit_urb(read) failed pipe 0x%x err %d\n",
port->read_urb->pipe, retval);
goto error;
}
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
usb_kill_urb(priv->read_urb);
dev_err(&port->dev,
"usb_submit_urb(read int) failed pipe 0x%x err %d",
port->interrupt_in_urb->pipe, retval);
goto error;
}
return 0;
error:
return retval;
} /* mct_u232_open */
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
{
unsigned int control_state;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
spin_lock_irq(&priv->lock);
if (on)
priv->control_state |= TIOCM_DTR | TIOCM_RTS;
else
priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
control_state = priv->control_state;
spin_unlock_irq(&priv->lock);
mct_u232_set_modem_ctrl(port, control_state);
}
static void mct_u232_close(struct usb_serial_port *port)
{
struct mct_u232_private *priv = usb_get_serial_port_data(port);
usb_kill_urb(priv->read_urb);
usb_kill_urb(port->interrupt_in_urb);
usb_serial_generic_close(port);
} /* mct_u232_close */
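/*
 * Completion handler for both interrupt-in endpoints: payloads larger
 * than two bytes come from the endpoint that carries the actual
 * received characters and are pushed straight to the tty layer; the
 * two-byte messages are modem/line status updates handled below.
 */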
static void mct_u232_read_int_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int retval;
int status = urb->status;
unsigned long flags;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
/*
	 * Workaround: handle the 'usual' bulk-in pipe here
*/
if (urb->transfer_buffer_length > 2) {
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data,
urb->actual_length);
tty_flip_buffer_push(&port->port);
}
goto exit;
}
/*
* The interrupt-in pipe signals exceptional conditions (modem line
* signal changes and errors). data[0] holds MSR, data[1] holds LSR.
*/
spin_lock_irqsave(&priv->lock, flags);
priv->last_msr = data[MCT_U232_MSR_INDEX];
/* Record Control Line states */
mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr);
mct_u232_msr_to_icount(&port->icount, priv->last_msr);
#if 0
/* Not yet handled. See belkin_sa.c for further information */
/* Now to report any errors */
priv->last_lsr = data[MCT_U232_LSR_INDEX];
/*
* fill in the flip buffer here, but I do not know the relation
* to the current/next receive buffer or characters. I need
* to look in to this before committing any code.
*/
if (priv->last_lsr & MCT_U232_LSR_ERR) {
tty = tty_port_tty_get(&port->port);
/* Overrun Error */
if (priv->last_lsr & MCT_U232_LSR_OE) {
}
/* Parity Error */
if (priv->last_lsr & MCT_U232_LSR_PE) {
}
/* Framing Error */
if (priv->last_lsr & MCT_U232_LSR_FE) {
}
/* Break Indicator */
if (priv->last_lsr & MCT_U232_LSR_BI) {
}
tty_kref_put(tty);
}
#endif
wake_up_interruptible(&port->port.delta_msr_wait);
spin_unlock_irqrestore(&priv->lock, flags);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
} /* mct_u232_read_int_callback */
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = &tty->termios;
unsigned int cflag = termios->c_cflag;
unsigned int old_cflag = old_termios->c_cflag;
unsigned long flags;
unsigned int control_state;
unsigned char last_lcr;
/* get a local copy of the current port settings */
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
spin_unlock_irqrestore(&priv->lock, flags);
last_lcr = 0;
/*
* Update baud rate.
* Do not attempt to cache old rates and skip settings,
* disconnects screw such tricks up completely.
* Premature optimization is the root of all evil.
*/
/* reassert DTR and RTS on transition from B0 */
if ((old_cflag & CBAUD) == B0) {
dev_dbg(&port->dev, "%s: baud was B0\n", __func__);
control_state |= TIOCM_DTR | TIOCM_RTS;
mct_u232_set_modem_ctrl(port, control_state);
}
mct_u232_set_baud_rate(tty, serial, port, tty_get_baud_rate(tty));
if ((cflag & CBAUD) == B0) {
dev_dbg(&port->dev, "%s: baud is B0\n", __func__);
/* Drop RTS and DTR */
control_state &= ~(TIOCM_DTR | TIOCM_RTS);
mct_u232_set_modem_ctrl(port, control_state);
}
/*
* Update line control register (LCR)
*/
/* set the parity */
if (cflag & PARENB)
last_lcr |= (cflag & PARODD) ?
MCT_U232_PARITY_ODD : MCT_U232_PARITY_EVEN;
else
last_lcr |= MCT_U232_PARITY_NONE;
/* set the number of data bits */
switch (cflag & CSIZE) {
case CS5:
last_lcr |= MCT_U232_DATA_BITS_5; break;
case CS6:
last_lcr |= MCT_U232_DATA_BITS_6; break;
case CS7:
last_lcr |= MCT_U232_DATA_BITS_7; break;
case CS8:
last_lcr |= MCT_U232_DATA_BITS_8; break;
default:
dev_err(&port->dev,
"CSIZE was not CS5-CS8, using default of 8\n");
last_lcr |= MCT_U232_DATA_BITS_8;
break;
}
termios->c_cflag &= ~CMSPAR;
/* set the number of stop bits */
last_lcr |= (cflag & CSTOPB) ?
MCT_U232_STOP_BITS_2 : MCT_U232_STOP_BITS_1;
mct_u232_set_line_ctrl(port, last_lcr);
/* save off the modified port settings */
spin_lock_irqsave(&priv->lock, flags);
priv->control_state = control_state;
priv->last_lcr = last_lcr;
spin_unlock_irqrestore(&priv->lock, flags);
} /* mct_u232_set_termios */
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned char lcr;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
lcr = priv->last_lcr;
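	/* Clearing break simply resends the last LCR without the break bit */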
if (break_state)
lcr |= MCT_U232_SET_BREAK;
spin_unlock_irqrestore(&priv->lock, flags);
mct_u232_set_line_ctrl(port, lcr);
} /* mct_u232_break_ctl */
static int mct_u232_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned int control_state;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
spin_unlock_irqrestore(&priv->lock, flags);
return control_state;
}
static int mct_u232_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned int control_state;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
control_state = priv->control_state;
if (set & TIOCM_RTS)
control_state |= TIOCM_RTS;
if (set & TIOCM_DTR)
control_state |= TIOCM_DTR;
if (clear & TIOCM_RTS)
control_state &= ~TIOCM_RTS;
if (clear & TIOCM_DTR)
control_state &= ~TIOCM_DTR;
priv->control_state = control_state;
spin_unlock_irqrestore(&priv->lock, flags);
return mct_u232_set_modem_ctrl(port, control_state);
}
static void mct_u232_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned int control_state;
spin_lock_irq(&priv->lock);
priv->rx_flags |= THROTTLED;
if (C_CRTSCTS(tty)) {
priv->control_state &= ~TIOCM_RTS;
control_state = priv->control_state;
spin_unlock_irq(&priv->lock);
mct_u232_set_modem_ctrl(port, control_state);
} else {
spin_unlock_irq(&priv->lock);
}
}
static void mct_u232_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
unsigned int control_state;
spin_lock_irq(&priv->lock);
if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) {
priv->rx_flags &= ~THROTTLED;
priv->control_state |= TIOCM_RTS;
control_state = priv->control_state;
spin_unlock_irq(&priv->lock);
mct_u232_set_modem_ctrl(port, control_state);
} else {
spin_unlock_irq(&priv->lock);
}
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| Grarak/android_kernel_oneplus_msm8994 | drivers/usb/serial/mct_u232.c | C | gpl-2.0 | 22,148 |
/*
* Copyright (c) 2008-2011 Atheros Communications Inc.
* Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
* Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>
#include "ath9k.h"
static const struct platform_device_id ath9k_platform_id_table[] = {
{
.name = "ath9k",
.driver_data = AR5416_AR9100_DEVID,
},
{
.name = "ar934x_wmac",
.driver_data = AR9300_DEVID_AR9340,
},
{},
};
/* return bus cachesize in 4B word units */
static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
{
*csz = L1_CACHE_BYTES >> 2;
}
static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_softc *sc = (struct ath_softc *)common->priv;
struct platform_device *pdev = to_platform_device(sc->dev);
struct ath9k_platform_data *pdata;
pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
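	/* The EEPROM contents are supplied through platform data here rather
	 * than read from the chip, so simply index into that array.
	 */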
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: flash read failed, offset %08x is out of range\n",
__func__, off);
return false;
}
*data = pdata->eeprom_data[off];
return true;
}
static struct ath_bus_ops ath_ahb_bus_ops = {
.ath_bus_type = ATH_AHB,
.read_cachesize = ath_ahb_read_cachesize,
.eeprom_read = ath_ahb_eeprom_read,
};
static int ath_ahb_probe(struct platform_device *pdev)
{
void __iomem *mem;
struct ath_softc *sc;
struct ieee80211_hw *hw;
struct resource *res;
const struct platform_device_id *id = platform_get_device_id(pdev);
int irq;
int ret = 0;
struct ath_hw *ah;
char hw_name[64];
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "no platform data specified\n");
ret = -EINVAL;
goto err_out;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no memory resource found\n");
ret = -ENXIO;
goto err_out;
}
mem = ioremap_nocache(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
goto err_out;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no IRQ resource found\n");
ret = -ENXIO;
goto err_iounmap;
}
irq = res->start;
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
ret = -ENOMEM;
goto err_iounmap;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
platform_set_drvdata(pdev, hw);
sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = mem;
sc->irq = irq;
/* Will be cleared in ath9k_start() */
sc->sc_flags |= SC_OP_INVALID;
ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
goto err_free_hw;
}
ret = ath9k_init_device(id->driver_data, sc, 0x0, &ath_ahb_bus_ops);
if (ret) {
dev_err(&pdev->dev, "failed to initialize device\n");
goto err_irq;
}
ah = sc->sc_ah;
ath9k_hw_name(ah, hw_name, sizeof(hw_name));
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)mem, irq);
return 0;
err_irq:
free_irq(irq, sc);
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
err_iounmap:
iounmap(mem);
err_out:
return ret;
}
static int ath_ahb_remove(struct platform_device *pdev)
{
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
if (hw) {
struct ath_softc *sc = hw->priv;
void __iomem *mem = sc->mem;
ath9k_deinit_device(sc);
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
iounmap(mem);
platform_set_drvdata(pdev, NULL);
}
return 0;
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
.remove = ath_ahb_remove,
.driver = {
.name = "ath9k",
.owner = THIS_MODULE,
},
.id_table = ath9k_platform_id_table,
};
MODULE_DEVICE_TABLE(platform, ath9k_platform_id_table);
int ath_ahb_init(void)
{
return platform_driver_register(&ath_ahb_driver);
}
void ath_ahb_exit(void)
{
platform_driver_unregister(&ath_ahb_driver);
}
| BlownFuze/i717_TW_JBkernel | drivers/net/wireless/ath/ath9k/ahb.c | C | gpl-2.0 | 4,768 |
/*
* drivers/mtd/nand/au1550nd.c
*
* Copyright (C) 2004 Embedded Edge, LLC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1550nd.h>
struct au1550nd_ctx {
struct mtd_info info;
struct nand_chip chip;
int cs;
void __iomem *base;
void (*write_byte)(struct mtd_info *, u_char);
};
/**
* au_read_byte - read one byte from the chip
* @mtd: MTD device structure
*
* read function for 8bit buswidth
*/
static u_char au_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u_char ret = readb(this->IO_ADDR_R);
au_sync();
return ret;
}
/**
* au_write_byte - write one byte to the chip
* @mtd: MTD device structure
* @byte: pointer to data byte to write
*
 * write function for 8bit buswidth
*/
static void au_write_byte(struct mtd_info *mtd, u_char byte)
{
struct nand_chip *this = mtd->priv;
writeb(byte, this->IO_ADDR_W);
au_sync();
}
/**
* au_read_byte16 - read one byte endianness aware from the chip
* @mtd: MTD device structure
*
* read function for 16bit buswidth with endianness conversion
*/
static u_char au_read_byte16(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
au_sync();
return ret;
}
/**
* au_write_byte16 - write one byte endianness aware to the chip
* @mtd: MTD device structure
* @byte: pointer to data byte to write
*
* write function for 16bit buswidth with endianness conversion
*/
static void au_write_byte16(struct mtd_info *mtd, u_char byte)
{
struct nand_chip *this = mtd->priv;
writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
au_sync();
}
/**
* au_read_word - read one word from the chip
* @mtd: MTD device structure
*
* read function for 16bit buswidth without endianness conversion
*/
static u16 au_read_word(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u16 ret = readw(this->IO_ADDR_R);
au_sync();
return ret;
}
/**
* au_write_buf - write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*
* write function for 8bit buswidth
*/
static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
for (i = 0; i < len; i++) {
writeb(buf[i], this->IO_ADDR_W);
au_sync();
}
}
/**
* au_read_buf - read chip data into buffer
* @mtd: MTD device structure
 * @buf:	buffer to store data
* @len: number of bytes to read
*
* read function for 8bit buswidth
*/
static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
for (i = 0; i < len; i++) {
buf[i] = readb(this->IO_ADDR_R);
au_sync();
}
}
/**
* au_write_buf16 - write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*
* write function for 16bit buswidth
*/
static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
u16 *p = (u16 *) buf;
len >>= 1;
for (i = 0; i < len; i++) {
writew(p[i], this->IO_ADDR_W);
au_sync();
}
}
/**
* au_read_buf16 - read chip data into buffer
* @mtd: MTD device structure
 * @buf:	buffer to store data
* @len: number of bytes to read
*
* read function for 16bit buswidth
*/
static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
u16 *p = (u16 *) buf;
len >>= 1;
for (i = 0; i < len; i++) {
p[i] = readw(this->IO_ADDR_R);
au_sync();
}
}
/* Select the chip by setting nCE to low */
#define NAND_CTL_SETNCE 1
/* Deselect the chip by setting nCE to high */
#define NAND_CTL_CLRNCE 2
/* Select the command latch by setting CLE to high */
#define NAND_CTL_SETCLE 3
/* Deselect the command latch by setting CLE to low */
#define NAND_CTL_CLRCLE 4
/* Select the address latch by setting ALE to high */
#define NAND_CTL_SETALE 5
/* Deselect the address latch by setting ALE to low */
#define NAND_CTL_CLRALE 6
static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
{
struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
struct nand_chip *this = mtd->priv;
switch (cmd) {
case NAND_CTL_SETCLE:
this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
break;
case NAND_CTL_CLRCLE:
this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
break;
case NAND_CTL_SETALE:
this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
break;
case NAND_CTL_CLRALE:
this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
/* FIXME: Nobody knows why this is necessary,
* but it works only that way */
udelay(1);
break;
case NAND_CTL_SETNCE:
/* assert (force assert) chip enable */
au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL);
break;
case NAND_CTL_CLRNCE:
/* deassert chip enable */
au_writel(0, MEM_STNDCTL);
break;
}
this->IO_ADDR_R = this->IO_ADDR_W;
/* Drain the writebuffer */
au_sync();
}
int au1550_device_ready(struct mtd_info *mtd)
{
int ret = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0;
au_sync();
return ret;
}
/**
* au1550_select_chip - control -CE line
 *	Forbid driving -CE manually, permitting the NAND controller to do this.
* Keeping -CE asserted during the whole sector reads interferes with the
* NOR flash and PCMCIA drivers as it causes contention on the static bus.
* We only have to hold -CE low for the NAND read commands since the flash
* chip needs it to be asserted during chip not ready time but the NAND
* controller keeps it released.
*
* @mtd: MTD device structure
* @chip: chipnumber to select, -1 for deselect
*/
static void au1550_select_chip(struct mtd_info *mtd, int chip)
{
}
/**
* au1550_command - Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
* @column: the column address for this command, -1 if none
* @page_addr: the page address for this command, -1 if none
*/
static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
{
struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
struct nand_chip *this = mtd->priv;
int ce_override = 0, i;
unsigned long flags = 0;
/* Begin command latch cycle */
au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
/*
* Write out the command to the device.
*/
if (command == NAND_CMD_SEQIN) {
int readcmd;
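		/*
		 * Small-page devices select the 256-byte half page or the
		 * OOB area with a preceding read command, so pick the one
		 * matching the target column before issuing SEQIN.
		 */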
if (column >= mtd->writesize) {
/* OOB area */
column -= mtd->writesize;
readcmd = NAND_CMD_READOOB;
} else if (column < 256) {
/* First 256 bytes --> READ0 */
readcmd = NAND_CMD_READ0;
} else {
column -= 256;
readcmd = NAND_CMD_READ1;
}
ctx->write_byte(mtd, readcmd);
}
ctx->write_byte(mtd, command);
/* Set ALE and clear CLE to start address cycle */
au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
if (column != -1 || page_addr != -1) {
au1550_hwcontrol(mtd, NAND_CTL_SETALE);
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
if (this->options & NAND_BUSWIDTH_16)
column >>= 1;
ctx->write_byte(mtd, column);
}
if (page_addr != -1) {
ctx->write_byte(mtd, (u8)(page_addr & 0xff));
if (command == NAND_CMD_READ0 ||
command == NAND_CMD_READ1 ||
command == NAND_CMD_READOOB) {
/*
* NAND controller will release -CE after
* the last address byte is written, so we'll
* have to forcibly assert it. No interrupts
* are allowed while we do this as we don't
* want the NOR flash or PCMCIA drivers to
* steal our precious bytes of data...
*/
ce_override = 1;
local_irq_save(flags);
au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
}
ctx->write_byte(mtd, (u8)(page_addr >> 8));
/* One more address cycle for devices > 32MiB */
if (this->chipsize > (32 << 20))
ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f));
}
/* Latch in address */
au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
}
/*
* Program and erase have their own busy handlers.
* Status and sequential in need no delay.
*/
switch (command) {
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_SEQIN:
case NAND_CMD_STATUS:
return;
case NAND_CMD_RESET:
break;
case NAND_CMD_READ0:
case NAND_CMD_READ1:
case NAND_CMD_READOOB:
/* Check if we're really driving -CE low (just in case) */
if (unlikely(!ce_override))
break;
/* Apply a short delay always to ensure that we do wait tWB. */
ndelay(100);
/* Wait for a chip to become ready... */
for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
udelay(1);
/* Release -CE and re-enable interrupts. */
au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
local_irq_restore(flags);
return;
}
/* Apply this short delay always to ensure that we do wait tWB. */
ndelay(100);
	while (!this->dev_ready(mtd))
		;
}
static int find_nand_cs(unsigned long nand_base)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
unsigned long addr, staddr, start, mask, end;
int i;
for (i = 0; i < 4; i++) {
addr = 0x1000 + (i * 0x10); /* CSx */
staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
/* figure out the decoded range of this CS */
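		/*
		 * STADDRx packs the window base address in its upper bits and
		 * the address mask in its lower bits; 'end' is the last
		 * address this chip select decodes.
		 */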
start = (staddr << 4) & 0xfffc0000;
mask = (staddr << 18) & 0xfffc0000;
end = (start | (start - 1)) & ~(start ^ mask);
if ((nand_base >= start) && (nand_base < end))
return i;
}
return -ENODEV;
}
static int au1550nd_probe(struct platform_device *pdev)
{
struct au1550nd_platdata *pd;
struct au1550nd_ctx *ctx;
struct nand_chip *this;
struct resource *r;
int ret, cs;
pd = pdev->dev.platform_data;
if (!pd) {
dev_err(&pdev->dev, "missing platform data\n");
return -ENODEV;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
dev_err(&pdev->dev, "no memory for NAND context\n");
return -ENOMEM;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no NAND memory resource\n");
ret = -ENODEV;
goto out1;
}
	if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
dev_err(&pdev->dev, "cannot claim NAND memory area\n");
ret = -ENOMEM;
goto out1;
}
ctx->base = ioremap_nocache(r->start, 0x1000);
if (!ctx->base) {
dev_err(&pdev->dev, "cannot remap NAND memory area\n");
ret = -ENODEV;
goto out2;
}
this = &ctx->chip;
ctx->info.priv = this;
ctx->info.owner = THIS_MODULE;
/* figure out which CS# r->start belongs to */
cs = find_nand_cs(r->start);
if (cs < 0) {
dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
ret = -ENODEV;
goto out3;
}
ctx->cs = cs;
this->dev_ready = au1550_device_ready;
this->select_chip = au1550_select_chip;
this->cmdfunc = au1550_command;
/* 30 us command delay time */
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
this->read_word = au_read_word;
this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
ret = nand_scan(&ctx->info, 1);
if (ret) {
dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
goto out3;
}
mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
return 0;
out3:
iounmap(ctx->base);
out2:
release_mem_region(r->start, resource_size(r));
out1:
kfree(ctx);
return ret;
}
static int au1550nd_remove(struct platform_device *pdev)
{
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&ctx->info);
iounmap(ctx->base);
release_mem_region(r->start, 0x1000);
kfree(ctx);
return 0;
}
static struct platform_driver au1550nd_driver = {
.driver = {
.name = "au1550-nand",
.owner = THIS_MODULE,
},
.probe = au1550nd_probe,
.remove = au1550nd_remove,
};
module_platform_driver(au1550nd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Embedded Edge, LLC");
MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
| teto/mptcp-old | drivers/mtd/nand/au1550nd.c | C | gpl-2.0 | 12,518 |
/*
* gpiolib support for Wolfson WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/regmap.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/gpio.h>
#include <linux/mfd/wm8994/registers.h>
struct wm8994_gpio {
struct wm8994 *wm8994;
struct gpio_chip gpio_chip;
};
static inline struct wm8994_gpio *to_wm8994_gpio(struct gpio_chip *chip)
{
return container_of(chip, struct wm8994_gpio, gpio_chip);
}
static int wm8994_gpio_request(struct gpio_chip *chip, unsigned offset)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
switch (wm8994->type) {
case WM8958:
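		/* These pins cannot be used as GPIOs on the WM8958 */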
switch (offset) {
case 1:
case 2:
case 3:
case 4:
case 6:
return -EINVAL;
}
break;
default:
break;
}
return 0;
}
static int wm8994_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
WM8994_GPN_DIR, WM8994_GPN_DIR);
}
static int wm8994_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
int ret;
ret = wm8994_reg_read(wm8994, WM8994_GPIO_1 + offset);
if (ret < 0)
return ret;
if (ret & WM8994_GPN_LVL)
return 1;
else
return 0;
}
static int wm8994_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
if (value)
value = WM8994_GPN_LVL;
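	/* Clearing GPN_DIR switches the pin to output, GPN_LVL sets its level */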
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
WM8994_GPN_DIR | WM8994_GPN_LVL, value);
}
static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
if (value)
value = WM8994_GPN_LVL;
wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
}
static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
return regmap_irq_get_virq(wm8994->irq_data, offset);
}
#ifdef CONFIG_DEBUG_FS
static const char *wm8994_gpio_fn(u16 fn)
{
switch (fn) {
case WM8994_GP_FN_PIN_SPECIFIC:
return "pin-specific";
case WM8994_GP_FN_GPIO:
return "GPIO";
case WM8994_GP_FN_SDOUT:
return "SDOUT";
case WM8994_GP_FN_IRQ:
return "IRQ";
case WM8994_GP_FN_TEMPERATURE:
return "Temperature";
case WM8994_GP_FN_MICBIAS1_DET:
return "MICBIAS1 detect";
case WM8994_GP_FN_MICBIAS1_SHORT:
return "MICBIAS1 short";
case WM8994_GP_FN_MICBIAS2_DET:
return "MICBIAS2 detect";
case WM8994_GP_FN_MICBIAS2_SHORT:
return "MICBIAS2 short";
case WM8994_GP_FN_FLL1_LOCK:
return "FLL1 lock";
case WM8994_GP_FN_FLL2_LOCK:
return "FLL2 lock";
case WM8994_GP_FN_SRC1_LOCK:
return "SRC1 lock";
case WM8994_GP_FN_SRC2_LOCK:
return "SRC2 lock";
case WM8994_GP_FN_DRC1_ACT:
return "DRC1 activity";
case WM8994_GP_FN_DRC2_ACT:
return "DRC2 activity";
case WM8994_GP_FN_DRC3_ACT:
return "DRC3 activity";
case WM8994_GP_FN_WSEQ_STATUS:
return "Write sequencer";
case WM8994_GP_FN_FIFO_ERROR:
return "FIFO error";
case WM8994_GP_FN_OPCLK:
return "OPCLK";
case WM8994_GP_FN_THW:
return "Thermal warning";
case WM8994_GP_FN_DCS_DONE:
return "DC servo";
case WM8994_GP_FN_FLL1_OUT:
return "FLL1 output";
case WM8994_GP_FN_FLL2_OUT:
		return "FLL2 output";
default:
return "Unknown";
}
}
static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
int i;
for (i = 0; i < chip->ngpio; i++) {
int gpio = i + chip->base;
int reg;
const char *label;
/* We report the GPIO even if it's not requested since
* we're also reporting things like alternate
* functions which apply even when the GPIO is not in
* use as a GPIO.
*/
label = gpiochip_is_requested(chip, i);
if (!label)
label = "Unrequested";
seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label);
reg = wm8994_reg_read(wm8994, WM8994_GPIO_1 + i);
if (reg < 0) {
dev_err(wm8994->dev,
"GPIO control %d read failed: %d\n",
gpio, reg);
seq_printf(s, "\n");
continue;
}
if (reg & WM8994_GPN_DIR)
seq_printf(s, "in ");
else
seq_printf(s, "out ");
if (reg & WM8994_GPN_PU)
seq_printf(s, "pull up ");
if (reg & WM8994_GPN_PD)
seq_printf(s, "pull down ");
if (reg & WM8994_GPN_POL)
seq_printf(s, "inverted ");
else
seq_printf(s, "noninverted ");
if (reg & WM8994_GPN_OP_CFG)
seq_printf(s, "open drain ");
else
seq_printf(s, "CMOS ");
seq_printf(s, "%s (%x)\n",
wm8994_gpio_fn(reg & WM8994_GPN_FN_MASK), reg);
}
}
#else
#define wm8994_gpio_dbg_show NULL
#endif
static struct gpio_chip template_chip = {
.label = "wm8994",
.owner = THIS_MODULE,
.request = wm8994_gpio_request,
.direction_input = wm8994_gpio_direction_in,
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
.set = wm8994_gpio_set,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
.can_sleep = 1,
};
static int wm8994_gpio_probe(struct platform_device *pdev)
{
struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
struct wm8994_pdata *pdata = wm8994->dev->platform_data;
struct wm8994_gpio *wm8994_gpio;
int ret;
wm8994_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8994_gpio),
GFP_KERNEL);
if (wm8994_gpio == NULL)
return -ENOMEM;
wm8994_gpio->wm8994 = wm8994;
wm8994_gpio->gpio_chip = template_chip;
wm8994_gpio->gpio_chip.ngpio = WM8994_GPIO_MAX;
wm8994_gpio->gpio_chip.dev = &pdev->dev;
if (pdata && pdata->gpio_base)
wm8994_gpio->gpio_chip.base = pdata->gpio_base;
else
wm8994_gpio->gpio_chip.base = -1;
ret = gpiochip_add(&wm8994_gpio->gpio_chip);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
goto err;
}
platform_set_drvdata(pdev, wm8994_gpio);
return ret;
err:
return ret;
}
static int wm8994_gpio_remove(struct platform_device *pdev)
{
struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev);
return gpiochip_remove(&wm8994_gpio->gpio_chip);
}
static struct platform_driver wm8994_gpio_driver = {
.driver.name = "wm8994-gpio",
.driver.owner = THIS_MODULE,
.probe = wm8994_gpio_probe,
.remove = wm8994_gpio_remove,
};
static int __init wm8994_gpio_init(void)
{
return platform_driver_register(&wm8994_gpio_driver);
}
subsys_initcall(wm8994_gpio_init);
static void __exit wm8994_gpio_exit(void)
{
platform_driver_unregister(&wm8994_gpio_driver);
}
module_exit(wm8994_gpio_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("GPIO interface for WM8994");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8994-gpio");
| ddilber/telegrauq7_linux | drivers/gpio/gpio-wm8994.c | C | gpl-2.0 | 7,586 |
/*
* drivers/mtd/nand/au1550nd.c
*
* Copyright (C) 2004 Embedded Edge, LLC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1550nd.h>
struct au1550nd_ctx {
struct mtd_info info;
struct nand_chip chip;
int cs;
void __iomem *base;
void (*write_byte)(struct mtd_info *, u_char);
};
/**
* au_read_byte - read one byte from the chip
* @mtd: MTD device structure
*
* read function for 8bit buswidth
*/
static u_char au_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u_char ret = readb(this->IO_ADDR_R);
au_sync();
return ret;
}
/**
* au_write_byte - write one byte to the chip
* @mtd: MTD device structure
* @byte: pointer to data byte to write
*
 * write function for 8bit buswidth
*/
static void au_write_byte(struct mtd_info *mtd, u_char byte)
{
struct nand_chip *this = mtd->priv;
writeb(byte, this->IO_ADDR_W);
au_sync();
}
/**
* au_read_byte16 - read one byte endianness aware from the chip
* @mtd: MTD device structure
*
* read function for 16bit buswidth with endianness conversion
*/
static u_char au_read_byte16(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
au_sync();
return ret;
}
/**
* au_write_byte16 - write one byte endianness aware to the chip
* @mtd: MTD device structure
* @byte: pointer to data byte to write
*
* write function for 16bit buswidth with endianness conversion
*/
static void au_write_byte16(struct mtd_info *mtd, u_char byte)
{
struct nand_chip *this = mtd->priv;
writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
au_sync();
}
/**
* au_read_word - read one word from the chip
* @mtd: MTD device structure
*
* read function for 16bit buswidth without endianness conversion
*/
static u16 au_read_word(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u16 ret = readw(this->IO_ADDR_R);
au_sync();
return ret;
}
/**
* au_write_buf - write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*
* write function for 8bit buswidth
*/
static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
for (i = 0; i < len; i++) {
writeb(buf[i], this->IO_ADDR_W);
au_sync();
}
}
/**
* au_read_buf - read chip data into buffer
* @mtd: MTD device structure
 * @buf:	buffer to store data
* @len: number of bytes to read
*
* read function for 8bit buswidth
*/
static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
for (i = 0; i < len; i++) {
buf[i] = readb(this->IO_ADDR_R);
au_sync();
}
}
/**
* au_write_buf16 - write buffer to chip
* @mtd: MTD device structure
* @buf: data buffer
* @len: number of bytes to write
*
* write function for 16bit buswidth
*/
static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
u16 *p = (u16 *) buf;
len >>= 1;
for (i = 0; i < len; i++) {
writew(p[i], this->IO_ADDR_W);
au_sync();
}
}
/**
* au_read_buf16 - read chip data into buffer
* @mtd: MTD device structure
 * @buf:	buffer to store data
* @len: number of bytes to read
*
* read function for 16bit buswidth
*/
static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
int i;
struct nand_chip *this = mtd->priv;
u16 *p = (u16 *) buf;
len >>= 1;
for (i = 0; i < len; i++) {
p[i] = readw(this->IO_ADDR_R);
au_sync();
}
}
/* Select the chip by setting nCE to low */
#define NAND_CTL_SETNCE 1
/* Deselect the chip by setting nCE to high */
#define NAND_CTL_CLRNCE 2
/* Select the command latch by setting CLE to high */
#define NAND_CTL_SETCLE 3
/* Deselect the command latch by setting CLE to low */
#define NAND_CTL_CLRCLE 4
/* Select the address latch by setting ALE to high */
#define NAND_CTL_SETALE 5
/* Deselect the address latch by setting ALE to low */
#define NAND_CTL_CLRALE 6
static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
{
struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
struct nand_chip *this = mtd->priv;
switch (cmd) {
case NAND_CTL_SETCLE:
this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
break;
case NAND_CTL_CLRCLE:
this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
break;
case NAND_CTL_SETALE:
this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
break;
case NAND_CTL_CLRALE:
this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
/* FIXME: Nobody knows why this is necessary,
* but it works only that way */
udelay(1);
break;
case NAND_CTL_SETNCE:
/* assert (force assert) chip enable */
au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL);
break;
case NAND_CTL_CLRNCE:
/* deassert chip enable */
au_writel(0, MEM_STNDCTL);
break;
}
this->IO_ADDR_R = this->IO_ADDR_W;
/* Drain the writebuffer */
au_sync();
}
int au1550_device_ready(struct mtd_info *mtd)
{
int ret = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0;
au_sync();
return ret;
}
/**
* au1550_select_chip - control -CE line
 *	Forbid driving -CE manually, permitting the NAND controller to do this.
* Keeping -CE asserted during the whole sector reads interferes with the
* NOR flash and PCMCIA drivers as it causes contention on the static bus.
* We only have to hold -CE low for the NAND read commands since the flash
* chip needs it to be asserted during chip not ready time but the NAND
* controller keeps it released.
*
* @mtd: MTD device structure
* @chip: chipnumber to select, -1 for deselect
*/
static void au1550_select_chip(struct mtd_info *mtd, int chip)
{
}
/**
* au1550_command - Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
* @column: the column address for this command, -1 if none
* @page_addr: the page address for this command, -1 if none
*/
static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
{
struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
struct nand_chip *this = mtd->priv;
int ce_override = 0, i;
unsigned long flags = 0;
/* Begin command latch cycle */
au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
/*
* Write out the command to the device.
*/
if (command == NAND_CMD_SEQIN) {
int readcmd;
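		/*
		 * Small-page devices select the 256-byte half page or the
		 * OOB area with a preceding read command, so pick the one
		 * matching the target column before issuing SEQIN.
		 */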
if (column >= mtd->writesize) {
/* OOB area */
column -= mtd->writesize;
readcmd = NAND_CMD_READOOB;
} else if (column < 256) {
/* First 256 bytes --> READ0 */
readcmd = NAND_CMD_READ0;
} else {
column -= 256;
readcmd = NAND_CMD_READ1;
}
ctx->write_byte(mtd, readcmd);
}
ctx->write_byte(mtd, command);
/* Set ALE and clear CLE to start address cycle */
au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
if (column != -1 || page_addr != -1) {
au1550_hwcontrol(mtd, NAND_CTL_SETALE);
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
if (this->options & NAND_BUSWIDTH_16)
column >>= 1;
ctx->write_byte(mtd, column);
}
if (page_addr != -1) {
ctx->write_byte(mtd, (u8)(page_addr & 0xff));
if (command == NAND_CMD_READ0 ||
command == NAND_CMD_READ1 ||
command == NAND_CMD_READOOB) {
/*
* NAND controller will release -CE after
* the last address byte is written, so we'll
* have to forcibly assert it. No interrupts
* are allowed while we do this as we don't
* want the NOR flash or PCMCIA drivers to
* steal our precious bytes of data...
*/
ce_override = 1;
local_irq_save(flags);
au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
}
ctx->write_byte(mtd, (u8)(page_addr >> 8));
/* One more address cycle for devices > 32MiB */
if (this->chipsize > (32 << 20))
ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f));
}
/* Latch in address */
au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
}
/*
* Program and erase have their own busy handlers.
* Status and sequential in need no delay.
*/
switch (command) {
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_SEQIN:
case NAND_CMD_STATUS:
return;
case NAND_CMD_RESET:
break;
case NAND_CMD_READ0:
case NAND_CMD_READ1:
case NAND_CMD_READOOB:
/* Check if we're really driving -CE low (just in case) */
if (unlikely(!ce_override))
break;
/* Apply a short delay always to ensure that we do wait tWB. */
ndelay(100);
/* Wait for a chip to become ready... */
for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
udelay(1);
/* Release -CE and re-enable interrupts. */
au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
local_irq_restore(flags);
return;
}
/* Apply this short delay always to ensure that we do wait tWB. */
ndelay(100);
	while (!this->dev_ready(mtd))
		;
}
static int find_nand_cs(unsigned long nand_base)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
unsigned long addr, staddr, start, mask, end;
int i;
for (i = 0; i < 4; i++) {
addr = 0x1000 + (i * 0x10); /* CSx */
staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
/* figure out the decoded range of this CS */
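		/*
		 * STADDRx packs the window base address in its upper bits and
		 * the address mask in its lower bits; 'end' is the last
		 * address this chip select decodes.
		 */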
start = (staddr << 4) & 0xfffc0000;
mask = (staddr << 18) & 0xfffc0000;
end = (start | (start - 1)) & ~(start ^ mask);
if ((nand_base >= start) && (nand_base < end))
return i;
}
return -ENODEV;
}
static int au1550nd_probe(struct platform_device *pdev)
{
struct au1550nd_platdata *pd;
struct au1550nd_ctx *ctx;
struct nand_chip *this;
struct resource *r;
int ret, cs;
pd = pdev->dev.platform_data;
if (!pd) {
dev_err(&pdev->dev, "missing platform data\n");
return -ENODEV;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
dev_err(&pdev->dev, "no memory for NAND context\n");
return -ENOMEM;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no NAND memory resource\n");
ret = -ENODEV;
goto out1;
}
	if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
dev_err(&pdev->dev, "cannot claim NAND memory area\n");
ret = -ENOMEM;
goto out1;
}
ctx->base = ioremap_nocache(r->start, 0x1000);
if (!ctx->base) {
dev_err(&pdev->dev, "cannot remap NAND memory area\n");
ret = -ENODEV;
goto out2;
}
this = &ctx->chip;
ctx->info.priv = this;
ctx->info.owner = THIS_MODULE;
/* figure out which CS# r->start belongs to */
cs = find_nand_cs(r->start);
if (cs < 0) {
dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
ret = -ENODEV;
goto out3;
}
ctx->cs = cs;
this->dev_ready = au1550_device_ready;
this->select_chip = au1550_select_chip;
this->cmdfunc = au1550_command;
/* 30 us command delay time */
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
this->read_word = au_read_word;
this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
ret = nand_scan(&ctx->info, 1);
if (ret) {
dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
goto out3;
}
mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
return 0;
out3:
iounmap(ctx->base);
out2:
release_mem_region(r->start, resource_size(r));
out1:
kfree(ctx);
return ret;
}
static int au1550nd_remove(struct platform_device *pdev)
{
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&ctx->info);
iounmap(ctx->base);
release_mem_region(r->start, 0x1000);
kfree(ctx);
return 0;
}
static struct platform_driver au1550nd_driver = {
.driver = {
.name = "au1550-nand",
.owner = THIS_MODULE,
},
.probe = au1550nd_probe,
.remove = au1550nd_remove,
};
module_platform_driver(au1550nd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Embedded Edge, LLC");
MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
| gobzateloon/Gobza_Sprout-LP | drivers/mtd/nand/au1550nd.c | C | gpl-2.0 | 12,518 |
/*
* SPARC64 Huge TLB page support.
*
* Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/* Slightly simplified from the non-hugepage variant because by
* definition we don't have to worry about any page coloring stuff
*/
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
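/* Exclude the sparc64 VA hole, widened by a 4GB guard on each side, when
 * searching for a free area.
 */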
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
unsigned long task_size = TASK_SIZE;
unsigned long start_addr;
if (test_thread_flag(TIF_32BIT))
task_size = STACK_TOP32;
if (unlikely(len >= VA_EXCLUDE_START))
return -ENOMEM;
if (len > mm->cached_hole_size) {
start_addr = addr = mm->free_area_cache;
} else {
start_addr = addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
}
task_size -= len;
full_search:
addr = ALIGN(addr, HPAGE_SIZE);
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (addr < VA_EXCLUDE_START &&
(addr + len) >= VA_EXCLUDE_START) {
addr = VA_EXCLUDE_END;
vma = find_vma(mm, VA_EXCLUDE_END);
}
if (unlikely(task_size < addr)) {
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
}
if (likely(!vma || addr + len <= vma->vm_start)) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
if (addr + mm->cached_hole_size < vma->vm_start)
mm->cached_hole_size = vma->vm_start - addr;
addr = ALIGN(vma->vm_end, HPAGE_SIZE);
}
}
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len,
const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
/* check if free_area_cache is useful for us */
if (len <= mm->cached_hole_size) {
mm->cached_hole_size = 0;
mm->free_area_cache = mm->mmap_base;
}
/* either no address requested or can't fit in requested address hole */
addr = mm->free_area_cache & HPAGE_MASK;
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr-len);
if (!vma || addr <= vma->vm_start) {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
}
if (unlikely(mm->mmap_base < len))
goto bottomup;
addr = (mm->mmap_base-len) & HPAGE_MASK;
do {
/*
* Lookup failure means no vma is above this address,
* else if new region fits below vma->vm_start,
* return with success:
*/
vma = find_vma(mm, addr);
if (likely(!vma || addr+len <= vma->vm_start)) {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
}
/* remember the largest hole we saw so far */
if (addr + mm->cached_hole_size < vma->vm_start)
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
addr = (vma->vm_start-len) & HPAGE_MASK;
} while (likely(len < vma->vm_start));
bottomup:
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
mm->cached_hole_size = ~0UL;
mm->free_area_cache = TASK_UNMAPPED_BASE;
addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
/*
* Restore the topdown base:
*/
mm->free_area_cache = mm->mmap_base;
mm->cached_hole_size = ~0UL;
return addr;
}
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long task_size = TASK_SIZE;
if (test_thread_flag(TIF_32BIT))
task_size = STACK_TOP32;
if (len & ~HPAGE_MASK)
return -EINVAL;
if (len > task_size)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
return addr;
}
if (addr) {
addr = ALIGN(addr, HPAGE_SIZE);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
/* We must align the address, because our caller will run
* set_huge_pte_at() on whatever we return, which writes out
* all of the sub-ptes for the hugepage range. So we have
* to give it the first such sub-pte.
*/
addr &= HPAGE_MASK;
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (pud) {
pmd = pmd_alloc(mm, pud, addr);
if (pmd)
pte = pte_alloc_map(mm, NULL, pmd, addr);
}
return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
addr &= HPAGE_MASK;
pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
pte = pte_offset_map(pmd, addr);
}
}
return pte;
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
int i;
if (!pte_present(*ptep) && pte_present(entry))
mm->context.huge_pte_count++;
addr &= HPAGE_MASK;
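	/* A huge page is backed by a run of normal PTEs here, so write one
	 * entry per base page, stepping the physical address as we go.
	 */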
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
set_pte_at(mm, addr, ptep, entry);
ptep++;
addr += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
}
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t entry;
int i;
entry = *ptep;
if (pte_present(entry))
mm->context.huge_pte_count--;
addr &= HPAGE_MASK;
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
pte_clear(mm, addr, ptep);
addr += PAGE_SIZE;
ptep++;
}
return entry;
}
struct page *follow_huge_addr(struct mm_struct *mm,
unsigned long address, int write)
{
return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
return 0;
}
int pud_huge(pud_t pud)
{
return 0;
}
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
return NULL;
}
static void context_reload(void *__data)
{
struct mm_struct *mm = __data;
if (mm == current->mm)
load_secondary_context(mm);
}
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
if (likely(tp->tsb != NULL))
return;
tsb_grow(mm, MM_TSB_HUGE, 0);
tsb_context_switch(mm);
smp_tsb_sync(mm);
/* On UltraSPARC-III+ and later, configure the second half of
* the Data-TLB for huge pages.
*/
if (tlb_type == cheetah_plus) {
unsigned long ctx;
spin_lock(&ctx_alloc_lock);
ctx = mm->context.sparc64_ctx_val;
ctx &= ~CTX_PGSZ_MASK;
ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
if (ctx != mm->context.sparc64_ctx_val) {
/* When changing the page size fields, we
* must perform a context flush so that no
* stale entries match. This flush must
* occur with the original context register
* settings.
*/
do_flush_tlb_mm(mm);
/* Reload the context register of all processors
* also executing in this address space.
*/
mm->context.sparc64_ctx_val = ctx;
on_each_cpu(context_reload, mm, 0);
}
spin_unlock(&ctx_alloc_lock);
}
}
| CyanogenMod/sony-kernel-msm8960 | arch/sparc/mm/hugetlbpage.c | C | gpl-2.0 | 8,502 |
/*********************************************************************
*
* Filename: discovery.c
* Version: 0.1
* Description: Routines for handling discoveries at the IrLMP layer
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Apr 6 15:33:50 1999
* Modified at: Sat Oct 9 17:11:31 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Modified at: Fri May 28 3:11 CST 1999
* Modified by: Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
*
* Copyright (c) 1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
********************************************************************/
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
#include <net/irda/discovery.h>
#include <asm/unaligned.h>
/*
* Function irlmp_add_discovery (cachelog, discovery)
*
* Add a new discovery to the cachelog, and remove any old discoveries
* from the same device
*
* Note : we try to preserve the time this device was *first* discovered
* (as opposed to the time of last discovery used for cleanup). This is
* used by clients waiting for discovery events to tell if the device
 * discovered is "new" or just the same old one. They can't rely on
 * a binary flag (new/old) here, because not all discovery events are
* propagated to them, and they might not always listen, so they would
* miss some new devices popping up...
* Jean II
*/
void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new)
{
discovery_t *discovery, *node;
unsigned long flags;
/* Set time of first discovery if node is new (see below) */
new->firststamp = new->timestamp;
spin_lock_irqsave(&cachelog->hb_spinlock, flags);
/*
* Remove all discoveries of devices that has previously been
* discovered on the same link with the same name (info), or the
* same daddr. We do this since some devices (mostly PDAs) change
* their device address between every discovery.
*/
discovery = (discovery_t *) hashbin_get_first(cachelog);
while (discovery != NULL ) {
node = discovery;
/* Be sure to stay one item ahead */
discovery = (discovery_t *) hashbin_get_next(cachelog);
if ((node->data.saddr == new->data.saddr) &&
((node->data.daddr == new->data.daddr) ||
(strcmp(node->data.info, new->data.info) == 0)))
{
/* This discovery is a previous discovery
* from the same device, so just remove it
*/
hashbin_remove_this(cachelog, (irda_queue_t *) node);
/* Check if hints bits are unchanged */
if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints))
/* Set time of first discovery for this node */
new->firststamp = node->firststamp;
kfree(node);
}
}
/* Insert the new and updated version */
hashbin_insert(cachelog, (irda_queue_t *) new, new->data.daddr, NULL);
spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
}
/*
* Function irlmp_add_discovery_log (cachelog, log)
*
 *    Merge a discovery log into the cachelog.
*
*/
void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
{
discovery_t *discovery;
IRDA_DEBUG(4, "%s()\n", __func__);
/*
* If log is missing this means that IrLAP was unable to perform the
	 * discovery, so restart discovery again with just half the timeout
* of the normal one.
*/
/* Well... It means that there was nobody out there - Jean II */
if (log == NULL) {
/* irlmp_start_discovery_timer(irlmp, 150); */
return;
}
/*
* Locking : we are the only owner of this discovery log, so
* no need to lock it.
* We just need to lock the global log in irlmp_add_discovery().
*/
discovery = (discovery_t *) hashbin_remove_first(log);
while (discovery != NULL) {
irlmp_add_discovery(cachelog, discovery);
discovery = (discovery_t *) hashbin_remove_first(log);
}
/* Delete the now empty log */
hashbin_delete(log, (FREE_FUNC) kfree);
}
/*
* Function irlmp_expire_discoveries (log, saddr, force)
*
 *    Go through all discoveries and expire all that have stayed too long
*
 * Note : this assumes that IrLAP won't change its saddr, which
* currently is a valid assumption...
*/
void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
{
discovery_t * discovery;
discovery_t * curr;
unsigned long flags;
discinfo_t * buffer = NULL;
int n; /* Size of the full log */
int i = 0; /* How many we expired */
IRDA_ASSERT(log != NULL, return;);
IRDA_DEBUG(4, "%s()\n", __func__);
spin_lock_irqsave(&log->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
/* Be sure to be one item ahead */
curr = discovery;
discovery = (discovery_t *) hashbin_get_next(log);
/* Test if it's time to expire this discovery */
if ((curr->data.saddr == saddr) &&
(force ||
((jiffies - curr->timestamp) > DISCOVERY_EXPIRE_TIMEOUT)))
{
/* Create buffer as needed.
			 * As this function gets called a lot and most of the time
* we don't have anything to put in the log (we are
* quite picky), we can save a lot of overhead
* by not calling kmalloc. Jean II */
if(buffer == NULL) {
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
if (buffer == NULL) {
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return;
}
}
/* Copy discovery information */
memcpy(&(buffer[i]), &(curr->data),
sizeof(discinfo_t));
i++;
/* Remove it from the log */
curr = hashbin_remove_this(log, (irda_queue_t *) curr);
kfree(curr);
}
}
/* Drop the spinlock before calling the higher layers, as
* we can't guarantee they won't call us back and create a
* deadlock. We will work on our own private data, so we
* don't care to be interrupted. - Jean II */
spin_unlock_irqrestore(&log->hb_spinlock, flags);
if(buffer == NULL)
return;
/* Tell IrLMP and registered clients about it */
irlmp_discovery_expiry(buffer, i);
/* Free up our buffer */
kfree(buffer);
}
#if 0
/*
* Function irlmp_dump_discoveries (log)
*
* Print out all discoveries in log
*
*/
void irlmp_dump_discoveries(hashbin_t *log)
{
discovery_t *discovery;
IRDA_ASSERT(log != NULL, return;);
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
IRDA_DEBUG(0, "Discovery:\n");
IRDA_DEBUG(0, " daddr=%08x\n", discovery->data.daddr);
IRDA_DEBUG(0, " saddr=%08x\n", discovery->data.saddr);
IRDA_DEBUG(0, " nickname=%s\n", discovery->data.info);
discovery = (discovery_t *) hashbin_get_next(log);
}
}
#endif
/*
* Function irlmp_copy_discoveries (log, pn, mask)
*
* Copy all discoveries in a buffer
*
 * This function implements a safe way for LMP clients to access the
 * discovery log. The basic problem is that we don't want the log
 * to change (add/remove) while the client is reading it. If the
 * LMP client manipulated the hashbin directly, it would be sure to get
 * into trouble...
 * The idea is that we copy the whole current discovery log into a buffer
 * which is specific to the client and pass this copy to it. As we
 * do this operation with the spinlock grabbed, we are safe...
 * Note : we don't want those clients to grab the spinlock, because
 * we have no control over how long they would hold it...
 * Note : we choose to copy the log into "struct irda_device_info" to
 * save space...
 * Note : the client must kfree() the log itself...
* Jean II
*/
struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
__u16 mask, int old_entries)
{
discovery_t * discovery;
unsigned long flags;
discinfo_t * buffer = NULL;
int j_timeout = (sysctl_discovery_timeout * HZ);
int n; /* Size of the full log */
int i = 0; /* How many we picked */
IRDA_ASSERT(pn != NULL, return NULL;);
IRDA_ASSERT(log != NULL, return NULL;);
/* Save spin lock */
spin_lock_irqsave(&log->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
/* Mask out the ones we don't want :
* We want to match the discovery mask, and to get only
* the most recent one (unless we want old ones) */
if ((get_unaligned((__u16 *)discovery->data.hints) & mask) &&
((old_entries) ||
((jiffies - discovery->firststamp) < j_timeout))) {
			/* Create the buffer as needed.
			 * As this function gets called a lot and most of the
			 * time we don't have anything to put in the log (we
			 * are quite picky), we can save a lot of overhead
			 * by not calling kmalloc. Jean II */
if(buffer == NULL) {
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
if (buffer == NULL) {
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return NULL;
}
}
/* Copy discovery information */
memcpy(&(buffer[i]), &(discovery->data),
sizeof(discinfo_t));
i++;
}
discovery = (discovery_t *) hashbin_get_next(log);
}
spin_unlock_irqrestore(&log->hb_spinlock, flags);
	/* Get the actual number of devices in the buffer and return */
*pn = i;
return buffer;
}
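/*
 * Illustrative client-side usage of irlmp_copy_discoveries(). This is
 * only a sketch: the function name, the 0xffff hint mask and the
 * pr_info() output below are made up for the example and are not part
 * of this file.
 */
#if 0
static void example_dump_discoveries(hashbin_t *log)
{
	discinfo_t *info;
	int i, n;

	/* Get a private snapshot; locking is handled inside the call. */
	info = irlmp_copy_discoveries(log, &n, 0xffff, 1);
	if (info == NULL)
		return;

	for (i = 0; i < n; i++)
		pr_info("nickname=%s, daddr=%08x\n",
			info[i].info, info[i].daddr);

	/* The caller owns the returned buffer and must free it. */
	kfree(info);
}
#endif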
#ifdef CONFIG_PROC_FS
static inline discovery_t *discovery_seq_idx(loff_t pos)
{
discovery_t *discovery;
for (discovery = (discovery_t *) hashbin_get_first(irlmp->cachelog);
discovery != NULL;
discovery = (discovery_t *) hashbin_get_next(irlmp->cachelog)) {
if (pos-- == 0)
break;
}
return discovery;
}
static void *discovery_seq_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_irq(&irlmp->cachelog->hb_spinlock);
return *pos ? discovery_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *discovery_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN)
? (void *) hashbin_get_first(irlmp->cachelog)
: (void *) hashbin_get_next(irlmp->cachelog);
}
static void discovery_seq_stop(struct seq_file *seq, void *v)
{
spin_unlock_irq(&irlmp->cachelog->hb_spinlock);
}
static int discovery_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "IrLMP: Discovery log:\n\n");
else {
const discovery_t *discovery = v;
seq_printf(seq, "nickname: %s, hint: 0x%02x%02x",
discovery->data.info,
discovery->data.hints[0],
discovery->data.hints[1]);
#if 0
if ( discovery->data.hints[0] & HINT_PNP)
seq_puts(seq, "PnP Compatible ");
if ( discovery->data.hints[0] & HINT_PDA)
seq_puts(seq, "PDA/Palmtop ");
if ( discovery->data.hints[0] & HINT_COMPUTER)
seq_puts(seq, "Computer ");
if ( discovery->data.hints[0] & HINT_PRINTER)
seq_puts(seq, "Printer ");
if ( discovery->data.hints[0] & HINT_MODEM)
seq_puts(seq, "Modem ");
if ( discovery->data.hints[0] & HINT_FAX)
seq_puts(seq, "Fax ");
if ( discovery->data.hints[0] & HINT_LAN)
seq_puts(seq, "LAN Access ");
if ( discovery->data.hints[1] & HINT_TELEPHONY)
seq_puts(seq, "Telephony ");
if ( discovery->data.hints[1] & HINT_FILE_SERVER)
seq_puts(seq, "File Server ");
if ( discovery->data.hints[1] & HINT_COMM)
seq_puts(seq, "IrCOMM ");
if ( discovery->data.hints[1] & HINT_OBEX)
seq_puts(seq, "IrOBEX ");
#endif
seq_printf(seq,", saddr: 0x%08x, daddr: 0x%08x\n\n",
discovery->data.saddr,
discovery->data.daddr);
seq_putc(seq, '\n');
}
return 0;
}
static const struct seq_operations discovery_seq_ops = {
.start = discovery_seq_start,
.next = discovery_seq_next,
.stop = discovery_seq_stop,
.show = discovery_seq_show,
};
static int discovery_seq_open(struct inode *inode, struct file *file)
{
IRDA_ASSERT(irlmp != NULL, return -EINVAL;);
return seq_open(file, &discovery_seq_ops);
}
const struct file_operations discovery_seq_fops = {
.owner = THIS_MODULE,
.open = discovery_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
| glewarne/Note2Core_v3_kernel_N710x | net/irda/discovery.c | C | gpl-2.0 | 12,808 |
/*
* Driver for the Analog Devices digital potentiometers (I2C bus)
*
* Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include "ad525x_dpot.h"
/* ------------------------------------------------------------------------- */
/* I2C bus functions */
static int write_d8(void *client, u8 val)
{
return i2c_smbus_write_byte(client, val);
}
static int write_r8d8(void *client, u8 reg, u8 val)
{
return i2c_smbus_write_byte_data(client, reg, val);
}
static int write_r8d16(void *client, u8 reg, u16 val)
{
return i2c_smbus_write_word_data(client, reg, val);
}
static int read_d8(void *client)
{
return i2c_smbus_read_byte(client);
}
static int read_r8d8(void *client, u8 reg)
{
return i2c_smbus_read_byte_data(client, reg);
}
static int read_r8d16(void *client, u8 reg)
{
return i2c_smbus_read_word_data(client, reg);
}
static const struct ad_dpot_bus_ops bops = {
.read_d8 = read_d8,
.read_r8d8 = read_r8d8,
.read_r8d16 = read_r8d16,
.write_d8 = write_d8,
.write_r8d8 = write_r8d8,
.write_r8d16 = write_r8d16,
};
static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad_dpot_bus_data bdata = {
.client = client,
.bops = &bops,
};
struct ad_dpot_id dpot_id = {
.name = (char *) &id->name,
.devid = id->driver_data,
};
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_err(&client->dev, "SMBUS Word Data not Supported\n");
return -EIO;
}
return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
}
static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
{
return ad_dpot_remove(&client->dev);
}
static const struct i2c_device_id ad_dpot_id[] = {
{"ad5258", AD5258_ID},
{"ad5259", AD5259_ID},
{"ad5251", AD5251_ID},
{"ad5252", AD5252_ID},
{"ad5253", AD5253_ID},
{"ad5254", AD5254_ID},
{"ad5255", AD5255_ID},
{"ad5241", AD5241_ID},
{"ad5242", AD5242_ID},
{"ad5243", AD5243_ID},
{"ad5245", AD5245_ID},
{"ad5246", AD5246_ID},
{"ad5247", AD5247_ID},
{"ad5248", AD5248_ID},
{"ad5280", AD5280_ID},
{"ad5282", AD5282_ID},
{"adn2860", ADN2860_ID},
{"ad5273", AD5273_ID},
{"ad5171", AD5171_ID},
{"ad5170", AD5170_ID},
{"ad5172", AD5172_ID},
{"ad5173", AD5173_ID},
{"ad5272", AD5272_ID},
{"ad5274", AD5274_ID},
{}
};
MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
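/*
 * Sketch of how a board file might declare one of these parts. The bus
 * number (1) and the 0x2c slave address below are invented for the
 * example; the real values come from the board schematic.
 */
#if 0
static struct i2c_board_info example_dpot_board_info __initdata = {
	I2C_BOARD_INFO("ad5258", 0x2c),
};

static int __init example_register_dpot(void)
{
	return i2c_register_board_info(1, &example_dpot_board_info, 1);
}
#endif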
static struct i2c_driver ad_dpot_i2c_driver = {
.driver = {
.name = "ad_dpot",
.owner = THIS_MODULE,
},
.probe = ad_dpot_i2c_probe,
.remove = __devexit_p(ad_dpot_i2c_remove),
.id_table = ad_dpot_id,
};
static int __init ad_dpot_i2c_init(void)
{
return i2c_add_driver(&ad_dpot_i2c_driver);
}
module_init(ad_dpot_i2c_init);
static void __exit ad_dpot_i2c_exit(void)
{
i2c_del_driver(&ad_dpot_i2c_driver);
}
module_exit(ad_dpot_i2c_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("i2c:ad_dpot");
| johnnyslt/android_kernel_shooter | drivers/misc/ad525x_dpot-i2c.c | C | gpl-2.0 | 3,059 |
/* $Id: diddfunc.c,v 1.14.6.2 2004/08/28 20:03:53 armin Exp $
*
* DIDD Interface module for Eicon active cards.
*
* Functions are in dadapter.c
*
* Copyright 2002-2003 by Armin Schindler (mac@melware.de)
* Copyright 2002-2003 Cytronics & Melware (info@melware.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include "platform.h"
#include "di_defs.h"
#include "dadapter.h"
#include "divasync.h"
#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
extern void DIVA_DIDD_Read(void *, int);
extern char *DRIVERRELEASE_DIDD;
static dword notify_handle;
static DESCRIPTOR _DAdapter;
/*
* didd callback function
*/
static void *didd_callback(void *context, DESCRIPTOR * adapter,
int removal)
{
if (adapter->type == IDI_DADAPTER) {
DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."))
return (NULL);
} else if (adapter->type == IDI_DIMAINT) {
if (removal) {
DbgDeregister();
} else {
DbgRegister("DIDD", DRIVERRELEASE_DIDD, DBG_DEFAULT);
}
}
return (NULL);
}
/*
* connect to didd
*/
static int DIVA_INIT_FUNCTION connect_didd(void)
{
int x = 0;
int dadapter = 0;
IDI_SYNC_REQ req;
DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
for (x = 0; x < MAX_DESCRIPTORS; x++) {
if (DIDD_Table[x].type == IDI_DADAPTER) { /* DADAPTER found */
dadapter = 1;
memcpy(&_DAdapter, &DIDD_Table[x], sizeof(_DAdapter));
req.didd_notify.e.Req = 0;
req.didd_notify.e.Rc =
IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
req.didd_notify.info.callback = (void *)didd_callback;
req.didd_notify.info.context = NULL;
_DAdapter.request((ENTITY *) & req);
if (req.didd_notify.e.Rc != 0xff)
return (0);
notify_handle = req.didd_notify.info.handle;
} else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */
DbgRegister("DIDD", DRIVERRELEASE_DIDD, DBG_DEFAULT);
}
}
return (dadapter);
}
/*
* disconnect from didd
*/
static void DIVA_EXIT_FUNCTION disconnect_didd(void)
{
IDI_SYNC_REQ req;
req.didd_notify.e.Req = 0;
req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
req.didd_notify.info.handle = notify_handle;
_DAdapter.request((ENTITY *) & req);
}
/*
* init
*/
int DIVA_INIT_FUNCTION diddfunc_init(void)
{
diva_didd_load_time_init();
if (!connect_didd()) {
DBG_ERR(("init: failed to connect to DIDD."))
diva_didd_load_time_finit();
return (0);
}
return (1);
}
/*
* finit
*/
void DIVA_EXIT_FUNCTION diddfunc_finit(void)
{
DbgDeregister();
disconnect_didd();
diva_didd_load_time_finit();
}
| tusisma/linux-2.6-mbt | drivers/isdn/hardware/eicon/diddfunc.c | C | gpl-2.0 | 2,703 |
/*
* Copyright (C) 2003 David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <asm/unaligned.h>
static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len)
{
int count = 0;
u8 c;
u16 uchar;
/* this insists on correct encodings, though not minimal ones.
* BUT it currently rejects legit 4-byte UTF-8 code points,
* which need surrogate pairs. (Unicode 3.1 can use them.)
*/
while (len != 0 && (c = (u8) *s++) != 0) {
if (unlikely(c & 0x80)) {
// 2-byte sequence:
// 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
if ((c & 0xe0) == 0xc0) {
uchar = (c & 0x1f) << 6;
c = (u8) *s++;
if ((c & 0xc0) != 0x80)
goto fail;
c &= 0x3f;
uchar |= c;
// 3-byte sequence (most CJKV characters):
// zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
} else if ((c & 0xf0) == 0xe0) {
uchar = (c & 0x0f) << 12;
c = (u8) *s++;
if ((c & 0xc0) != 0x80)
goto fail;
c &= 0x3f;
uchar |= c << 6;
c = (u8) *s++;
if ((c & 0xc0) != 0x80)
goto fail;
c &= 0x3f;
uchar |= c;
/* no bogus surrogates */
if (0xd800 <= uchar && uchar <= 0xdfff)
goto fail;
// 4-byte sequence (surrogate pairs, currently rare):
// 11101110wwwwzzzzyy + 110111yyyyxxxxxx
// = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
// (uuuuu = wwww + 1)
// FIXME accept the surrogate code points (only)
} else
goto fail;
} else
uchar = c;
put_unaligned_le16(uchar, cp++);
count++;
len--;
}
return count;
fail:
return -1;
}
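/*
 * Worked example of the 2-byte case above: U+00E9 ("e" with acute) is
 * encoded in UTF-8 as 0xc3 0xa9, so
 *   uchar  = (0xc3 & 0x1f) << 6 = 0x0c0
 *   uchar |=  0xa9 & 0x3f       = 0x029  ->  uchar = 0x00e9
 * and put_unaligned_le16() stores it as the bytes e9 00.
 */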
/**
* usb_gadget_get_string - fill out a string descriptor
* @table: of c strings encoded using UTF-8
* @id: string id, from low byte of wValue in get string descriptor
* @buf: at least 256 bytes
*
* Finds the UTF-8 string matching the ID, and converts it into a
* string descriptor in utf16-le.
* Returns length of descriptor (always even) or negative errno
*
 * If your driver needs strings in multiple languages, you'll probably
* "switch (wIndex) { ... }" in your ep0 string descriptor logic,
* using this routine after choosing which set of UTF-8 strings to use.
* Note that US-ASCII is a strict subset of UTF-8; any string bytes with
* the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1
* characters (which are also widely used in C strings).
*/
int
usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
{
struct usb_string *s;
int len;
/* descriptor 0 has the language id */
if (id == 0) {
buf [0] = 4;
buf [1] = USB_DT_STRING;
buf [2] = (u8) table->language;
buf [3] = (u8) (table->language >> 8);
return 4;
}
for (s = table->strings; s && s->s; s++)
if (s->id == id)
break;
/* unrecognized: stall. */
if (!s || !s->s)
return -EINVAL;
/* string descriptors have length, tag, then UTF16-LE text */
len = min ((size_t) 126, strlen (s->s));
memset (buf + 2, 0, 2 * len); /* zero all the bytes */
len = utf8_to_utf16le(s->s, (__le16 *)&buf[2], len);
if (len < 0)
return -EINVAL;
buf [0] = (len + 1) * 2;
buf [1] = USB_DT_STRING;
return buf [0];
}
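/*
 * Usage sketch (not part of this file): a gadget driver typically keeps
 * a table like the one below and calls usb_gadget_get_string() from its
 * GET_DESCRIPTOR(string) handling. The IDs, strings and 0x0409 (en-US)
 * language code are illustrative.
 */
#if 0
static struct usb_string example_strings[] = {
	{ 1, "Example Manufacturer" },
	{ 2, "Example Product" },
	{ }			/* end of list */
};

static struct usb_gadget_strings example_stringtab = {
	.language	= 0x0409,	/* en-US */
	.strings	= example_strings,
};

static int example_get_string(u8 id, u8 *buf)
{
	/* buf must have room for 256 bytes, as noted above */
	return usb_gadget_get_string(&example_stringtab, id, buf);
}
#endif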
| jdkernel/mecha_aosp_2.6.35 | drivers/usb/gadget/usbstring.c | C | gpl-2.0 | 3,505 |
/*
* Support for indirect PCI bridges.
*
* Copyright (C) 1998 Gabriel Paubert.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
static int
indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
volatile void __iomem *cfg_data;
u8 cfg_type = 0;
u32 bus_no, reg;
if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
if (bus->number != hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
if (bus->number != hose->first_busno)
cfg_type = 1;
bus_no = (bus->number == hose->first_busno) ?
hose->self_busno : bus->number;
if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
else
		reg = offset & 0xfc;	/* dword-aligned register offset */
if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
else
out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
	cfg_data = hose->cfg_data + (offset & 3);	/* select the byte lane */
switch (len) {
case 1:
*val = in_8(cfg_data);
break;
case 2:
*val = in_le16(cfg_data);
break;
default:
*val = in_le32(cfg_data);
break;
}
return PCIBIOS_SUCCESSFUL;
}
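/*
 * Worked example of the cfg_addr encoding used above (values invented):
 * bus 2, devfn 0x18 (device 3, function 0), offset 0x10 and no
 * extended-register support give
 *   reg  = 0x10 & 0xfc = 0x10
 *   addr = 0x80000000 | (2 << 16) | (0x18 << 8) | 0x10 = 0x80021810
 * The low two offset bits then pick the byte lane on cfg_data.
 */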
static int
indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
volatile void __iomem *cfg_data;
u8 cfg_type = 0;
u32 bus_no, reg;
if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
if (bus->number != hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
if (bus->number != hose->first_busno)
cfg_type = 1;
bus_no = (bus->number == hose->first_busno) ?
hose->self_busno : bus->number;
if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
else
reg = offset & 0xfc;
if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
else
out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
/* suppress setting of PCI_PRIMARY_BUS */
if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
if ((offset == PCI_PRIMARY_BUS) &&
(bus->number == hose->first_busno))
val &= 0xffffff00;
/* Workaround for PCI_28 Errata in 440EPx/GRx */
if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) &&
offset == PCI_CACHE_LINE_SIZE) {
val = 0;
}
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + (offset & 3);
switch (len) {
case 1:
out_8(cfg_data, val);
break;
case 2:
out_le16(cfg_data, val);
break;
default:
out_le32(cfg_data, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops indirect_pci_ops = {
.read = indirect_read_config,
.write = indirect_write_config,
};
void __init
setup_indirect_pci(struct pci_controller *hose,
resource_size_t cfg_addr,
resource_size_t cfg_data, u32 flags)
{
resource_size_t base = cfg_addr & PAGE_MASK;
void __iomem *mbase;
mbase = ioremap(base, PAGE_SIZE);
hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
if ((cfg_data & PAGE_MASK) != base)
mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
hose->ops = &indirect_pci_ops;
hose->indirect_type = flags;
}
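/*
 * Sketch of a caller, e.g. from platform PCI setup code. The two
 * register addresses below are invented for illustration; real values
 * come from the device tree or the board documentation.
 */
#if 0
static void __init example_map_cfg_space(struct pci_controller *hose)
{
	setup_indirect_pci(hose, 0xf0000cf8, 0xf0000cfc,
			   INDIRECT_TYPE_BIG_ENDIAN);
}
#endif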
| alexandrinno/android_device_sony_D2403 | arch/microblaze/pci/indirect_pci.c | C | gpl-2.0 | 4,263 |
/* sun3x_esp.c: ESP front-end for Sun3x systems.
*
* Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/sun3x.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/dvma.h>
/* DMA controller reg offsets */
#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
#include <scsi/scsi_host.h>
#include "esp_scsi.h"
#define DRV_MODULE_NAME "sun3x_esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "1.000"
#define DRV_MODULE_RELDATE "Nov 1, 2007"
/*
* m68k always assumes readl/writel operate on little endian
* mmio space; this is wrong at least for Sun3x, so we
 * need to work around this until a proper way is found
*/
#if 0
#define dma_read32(REG) \
readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
writel((VAL), esp->dma_regs + (REG))
#else
#define dma_read32(REG) \
*(volatile u32 *)(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0)
#endif
static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
writeb(val, esp->regs + (reg * 4UL));
}
static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
{
return readb(esp->regs + (reg * 4UL));
}
static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
size_t sz, int dir)
{
return dma_map_single(esp->dev, buf, sz, dir);
}
static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
return dma_map_sg(esp->dev, sg, num_sg, dir);
}
static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
size_t sz, int dir)
{
dma_unmap_single(esp->dev, addr, sz, dir);
}
static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
dma_unmap_sg(esp->dev, sg, num_sg, dir);
}
static int sun3x_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
return 1;
return 0;
}
static void sun3x_esp_reset_dma(struct esp *esp)
{
u32 val;
val = dma_read32(DMA_CSR);
dma_write32(val | DMA_RST_SCSI, DMA_CSR);
dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
/* Enable interrupts. */
val = dma_read32(DMA_CSR);
dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
static void sun3x_esp_dma_drain(struct esp *esp)
{
u32 csr;
int lim;
csr = dma_read32(DMA_CSR);
if (!(csr & DMA_FIFO_ISDRAIN))
return;
dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
lim = 1000;
while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
esp->host->unique_id);
break;
}
udelay(1);
}
}
static void sun3x_esp_dma_invalidate(struct esp *esp)
{
u32 val;
int lim;
lim = 1000;
while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: DMA will not "
"invalidate!\n", esp->host->unique_id);
break;
}
udelay(1);
}
val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
val |= DMA_FIFO_INV;
dma_write32(val, DMA_CSR);
val &= ~DMA_FIFO_INV;
dma_write32(val, DMA_CSR);
}
static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
{
u32 csr;
BUG_ON(!(cmd & ESP_CMD_DMA));
sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
csr = dma_read32(DMA_CSR);
csr |= DMA_ENABLE;
if (write)
csr |= DMA_ST_WRITE;
else
csr &= ~DMA_ST_WRITE;
dma_write32(csr, DMA_CSR);
dma_write32(addr, DMA_ADDR);
scsi_esp_cmd(esp, cmd);
}
static int sun3x_esp_dma_error(struct esp *esp)
{
u32 csr = dma_read32(DMA_CSR);
if (csr & DMA_HNDL_ERROR)
return 1;
return 0;
}
static const struct esp_driver_ops sun3x_esp_ops = {
.esp_write8 = sun3x_esp_write8,
.esp_read8 = sun3x_esp_read8,
.map_single = sun3x_esp_map_single,
.map_sg = sun3x_esp_map_sg,
.unmap_single = sun3x_esp_unmap_single,
.unmap_sg = sun3x_esp_unmap_sg,
.irq_pending = sun3x_esp_irq_pending,
.reset_dma = sun3x_esp_reset_dma,
.dma_drain = sun3x_esp_dma_drain,
.dma_invalidate = sun3x_esp_dma_invalidate,
.send_dma_cmd = sun3x_esp_send_dma_cmd,
.dma_error = sun3x_esp_dma_error,
};
static int __devinit esp_sun3x_probe(struct platform_device *dev)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
struct esp *esp;
struct resource *res;
int err = -ENOMEM;
host = scsi_host_alloc(tpnt, sizeof(struct esp));
if (!host)
goto fail;
host->max_id = 8;
esp = shost_priv(host);
esp->host = host;
esp->dev = dev;
esp->ops = &sun3x_esp_ops;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res || !res->start)
goto fail_unlink;
esp->regs = ioremap_nocache(res->start, 0x20);
if (!esp->regs)
goto fail_unmap_regs;
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
if (!res || !res->start)
goto fail_unmap_regs;
esp->dma_regs = ioremap_nocache(res->start, 0x10);
esp->command_block = dma_alloc_coherent(esp->dev, 16,
&esp->command_block_dma,
GFP_KERNEL);
if (!esp->command_block)
goto fail_unmap_regs_dma;
host->irq = platform_get_irq(dev, 0);
err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
"SUN3X ESP", esp);
if (err < 0)
goto fail_unmap_command_block;
esp->scsi_id = 7;
esp->host->this_id = esp->scsi_id;
esp->scsi_id_mask = (1 << esp->scsi_id);
esp->cfreq = 20000000;
dev_set_drvdata(&dev->dev, esp);
err = scsi_esp_register(esp, &dev->dev);
if (err)
goto fail_free_irq;
return 0;
fail_free_irq:
free_irq(host->irq, esp);
fail_unmap_command_block:
dma_free_coherent(esp->dev, 16,
esp->command_block,
esp->command_block_dma);
fail_unmap_regs_dma:
iounmap(esp->dma_regs);
fail_unmap_regs:
iounmap(esp->regs);
fail_unlink:
scsi_host_put(host);
fail:
return err;
}
static int __devexit esp_sun3x_remove(struct platform_device *dev)
{
struct esp *esp = dev_get_drvdata(&dev->dev);
unsigned int irq = esp->host->irq;
u32 val;
scsi_esp_unregister(esp);
/* Disable interrupts. */
val = dma_read32(DMA_CSR);
dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
free_irq(irq, esp);
dma_free_coherent(esp->dev, 16,
esp->command_block,
esp->command_block_dma);
scsi_host_put(esp->host);
return 0;
}
static struct platform_driver esp_sun3x_driver = {
.probe = esp_sun3x_probe,
.remove = __devexit_p(esp_sun3x_remove),
.driver = {
.name = "sun3x_esp",
.owner = THIS_MODULE,
},
};
static int __init sun3x_esp_init(void)
{
return platform_driver_register(&esp_sun3x_driver);
}
static void __exit sun3x_esp_exit(void)
{
platform_driver_unregister(&esp_sun3x_driver);
}
MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(sun3x_esp_init);
module_exit(sun3x_esp_exit);
MODULE_ALIAS("platform:sun3x_esp");
| Jackeagle/android_kernel_samsung_n7502 | drivers/scsi/sun3x_esp.c | C | gpl-2.0 | 7,400 |
/* cx25840 VBI functions
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-common.h>
#include <media/cx25840.h>
#include "cx25840-core.h"
static int odd_parity(u8 c)
{
c ^= (c >> 4);
c ^= (c >> 2);
c ^= (c >> 1);
return c & 1;
}
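/*
 * Worked example: c = 0x41 has two bits set, so the parity is even.
 *   c ^= c >> 4  ->  0x41 ^ 0x04 = 0x45
 *   c ^= c >> 2  ->  0x45 ^ 0x11 = 0x54
 *   c ^= c >> 1  ->  0x54 ^ 0x2a = 0x7e
 *   0x7e & 1 = 0, i.e. "not odd parity", as expected.
 */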
static int decode_vps(u8 * dst, u8 * p)
{
static const u8 biphase_tbl[] = {
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
0xc3, 0x4b, 0x43, 0xc3, 0x87, 0x0f, 0x07, 0x87,
0x83, 0x0b, 0x03, 0x83, 0xc3, 0x4b, 0x43, 0xc3,
0xc1, 0x49, 0x41, 0xc1, 0x85, 0x0d, 0x05, 0x85,
0x81, 0x09, 0x01, 0x81, 0xc1, 0x49, 0x41, 0xc1,
0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
0xc2, 0x4a, 0x42, 0xc2, 0x86, 0x0e, 0x06, 0x86,
0x82, 0x0a, 0x02, 0x82, 0xc2, 0x4a, 0x42, 0xc2,
0xc0, 0x48, 0x40, 0xc0, 0x84, 0x0c, 0x04, 0x84,
0x80, 0x08, 0x00, 0x80, 0xc0, 0x48, 0x40, 0xc0,
0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
};
u8 c, err = 0;
int i;
for (i = 0; i < 2 * 13; i += 2) {
err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
c = (biphase_tbl[p[i + 1]] & 0xf) |
((biphase_tbl[p[i]] & 0xf) << 4);
dst[i / 2] = c;
}
return err & 0xf0;
}
int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
V4L2_SLICED_CAPTION_525, /* 6 */
0, 0, V4L2_SLICED_VPS, 0, 0, /* 9 */
0, 0, 0, 0
};
int is_pal = !(state->std & V4L2_STD_525_60);
int i;
memset(svbi, 0, sizeof(*svbi));
/* we're done if raw VBI is active */
if ((cx25840_read(client, 0x404) & 0x10) == 0)
return 0;
if (is_pal) {
for (i = 7; i <= 23; i++) {
u8 v = cx25840_read(client, 0x424 + i - 7);
svbi->service_lines[0][i] = lcr2vbi[v >> 4];
svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
svbi->service_set |= svbi->service_lines[0][i] |
svbi->service_lines[1][i];
}
} else {
for (i = 10; i <= 21; i++) {
u8 v = cx25840_read(client, 0x424 + i - 10);
svbi->service_lines[0][i] = lcr2vbi[v >> 4];
svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
svbi->service_set |= svbi->service_lines[0][i] |
svbi->service_lines[1][i];
}
}
return 0;
}
int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
int is_pal = !(state->std & V4L2_STD_525_60);
int vbi_offset = is_pal ? 1 : 0;
/* Setup standard */
cx25840_std_setup(client);
/* VBI Offset */
cx25840_write(client, 0x47f, vbi_offset);
cx25840_write(client, 0x404, 0x2e);
return 0;
}
int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
int is_pal = !(state->std & V4L2_STD_525_60);
int vbi_offset = is_pal ? 1 : 0;
int i, x;
u8 lcr[24];
for (x = 0; x <= 23; x++)
lcr[x] = 0x00;
/* Setup standard */
cx25840_std_setup(client);
/* Sliced VBI */
cx25840_write(client, 0x404, 0x32); /* Ancillary data */
cx25840_write(client, 0x406, 0x13);
cx25840_write(client, 0x47f, vbi_offset);
if (is_pal) {
for (i = 0; i <= 6; i++)
svbi->service_lines[0][i] =
svbi->service_lines[1][i] = 0;
} else {
for (i = 0; i <= 9; i++)
svbi->service_lines[0][i] =
svbi->service_lines[1][i] = 0;
for (i = 22; i <= 23; i++)
svbi->service_lines[0][i] =
svbi->service_lines[1][i] = 0;
}
for (i = 7; i <= 23; i++) {
for (x = 0; x <= 1; x++) {
switch (svbi->service_lines[1-x][i]) {
case V4L2_SLICED_TELETEXT_B:
lcr[i] |= 1 << (4 * x);
break;
case V4L2_SLICED_WSS_625:
lcr[i] |= 4 << (4 * x);
break;
case V4L2_SLICED_CAPTION_525:
lcr[i] |= 6 << (4 * x);
break;
case V4L2_SLICED_VPS:
lcr[i] |= 9 << (4 * x);
break;
}
}
}
if (is_pal) {
for (x = 1, i = 0x424; i <= 0x434; i++, x++)
cx25840_write(client, i, lcr[6 + x]);
} else {
for (x = 1, i = 0x424; i <= 0x430; i++, x++)
cx25840_write(client, i, lcr[9 + x]);
for (i = 0x431; i <= 0x434; i++)
cx25840_write(client, i, 0);
}
cx25840_write(client, 0x43c, 0x16);
cx25840_write(client, 0x474, is_pal ? 0x2a : 0x22);
return 0;
}
int cx25840_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi)
{
struct cx25840_state *state = to_state(sd);
u8 *p = vbi->p;
int id1, id2, l, err = 0;
if (p[0] || p[1] != 0xff || p[2] != 0xff ||
(p[3] != 0x55 && p[3] != 0x91)) {
vbi->line = vbi->type = 0;
return 0;
}
p += 4;
id1 = p[-1];
id2 = p[0] & 0xf;
l = p[2] & 0x3f;
l += state->vbi_line_offset;
p += 4;
switch (id2) {
case 1:
id2 = V4L2_SLICED_TELETEXT_B;
break;
case 4:
id2 = V4L2_SLICED_WSS_625;
break;
case 6:
id2 = V4L2_SLICED_CAPTION_525;
err = !odd_parity(p[0]) || !odd_parity(p[1]);
break;
case 9:
id2 = V4L2_SLICED_VPS;
if (decode_vps(p, p) != 0)
err = 1;
break;
default:
id2 = 0;
err = 1;
break;
}
vbi->type = err ? 0 : id2;
vbi->line = err ? 0 : l;
vbi->is_second_field = err ? 0 : (id1 == 0x55);
vbi->p = p;
return 0;
}
| LiquidSmooth-Devices/kernel_samsung_jf | drivers/media/video/cx25840/cx25840-vbi.c | C | gpl-2.0 | 6,978 |
/*
* Linux driver attachment glue for aic7770 based controllers.
*
* Copyright (c) 2000-2003 Adaptec Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic7770_osm.c#14 $
*/
#include "aic7xxx_osm.h"
#include <linux/device.h>
#include <linux/eisa.h>
int
aic7770_map_registers(struct ahc_softc *ahc, u_int port)
{
/*
* Lock out other contenders for our i/o space.
*/
if (!request_region(port, AHC_EISA_IOSIZE, "aic7xxx"))
return (ENOMEM);
ahc->tag = BUS_SPACE_PIO;
ahc->bsh.ioport = port;
return (0);
}
int
aic7770_map_int(struct ahc_softc *ahc, u_int irq)
{
int error;
int shared;
shared = 0;
if ((ahc->flags & AHC_EDGE_INTERRUPT) == 0)
shared = IRQF_SHARED;
error = request_irq(irq, ahc_linux_isr, shared, "aic7xxx", ahc);
if (error == 0)
ahc->platform_data->irq = irq;
return (-error);
}
static int
aic7770_probe(struct device *dev)
{
struct eisa_device *edev = to_eisa_device(dev);
u_int eisaBase = edev->base_addr+AHC_EISA_SLOT_OFFSET;
struct ahc_softc *ahc;
char buf[80];
char *name;
int error;
sprintf(buf, "ahc_eisa:%d", eisaBase >> 12);
name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
if (name == NULL)
return (ENOMEM);
strcpy(name, buf);
ahc = ahc_alloc(&aic7xxx_driver_template, name);
if (ahc == NULL)
return (ENOMEM);
error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
eisaBase);
if (error != 0) {
ahc->bsh.ioport = 0;
ahc_free(ahc);
return (error);
}
dev_set_drvdata(dev, ahc);
error = ahc_linux_register_host(ahc, &aic7xxx_driver_template);
return (error);
}
static int
aic7770_remove(struct device *dev)
{
struct ahc_softc *ahc = dev_get_drvdata(dev);
u_long s;
if (ahc->platform_data && ahc->platform_data->host)
scsi_remove_host(ahc->platform_data->host);
ahc_lock(ahc, &s);
ahc_intr_enable(ahc, FALSE);
ahc_unlock(ahc, &s);
ahc_free(ahc);
return 0;
}
static struct eisa_device_id aic7770_ids[] = {
{ "ADP7771", 0 }, /* AHA 274x */
{ "ADP7756", 1 }, /* AHA 284x BIOS enabled */
{ "ADP7757", 2 }, /* AHA 284x BIOS disabled */
{ "ADP7782", 3 }, /* AHA 274x Olivetti OEM */
{ "ADP7783", 4 }, /* AHA 274x Olivetti OEM (Differential) */
{ "ADP7770", 5 }, /* AIC7770 generic */
{ "" }
};
MODULE_DEVICE_TABLE(eisa, aic7770_ids);
static struct eisa_driver aic7770_driver = {
.id_table = aic7770_ids,
.driver = {
.name = "aic7xxx",
.probe = aic7770_probe,
.remove = aic7770_remove,
}
};
int
ahc_linux_eisa_init(void)
{
return eisa_driver_register(&aic7770_driver);
}
void
ahc_linux_eisa_exit(void)
{
eisa_driver_unregister(&aic7770_driver);
}
| voidz777/android_kernel_samsung_tuna | drivers/scsi/aic7xxx/aic7770_osm.c | C | gpl-2.0 | 4,430 |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <elf.h>
int
main(int argc, char **argv)
{
unsigned char ei[EI_NIDENT];
union { short s; char c[2]; } endian_test;
if (fread(ei, 1, EI_NIDENT, stdin) != EI_NIDENT) {
fprintf(stderr, "Error: input truncated\n");
return 1;
}
if (memcmp(ei, ELFMAG, SELFMAG) != 0) {
fprintf(stderr, "Error: not ELF\n");
return 1;
}
switch (ei[EI_CLASS]) {
case ELFCLASS32:
printf("#define KERNEL_ELFCLASS ELFCLASS32\n");
break;
case ELFCLASS64:
printf("#define KERNEL_ELFCLASS ELFCLASS64\n");
break;
default:
exit(1);
}
switch (ei[EI_DATA]) {
case ELFDATA2LSB:
printf("#define KERNEL_ELFDATA ELFDATA2LSB\n");
break;
case ELFDATA2MSB:
printf("#define KERNEL_ELFDATA ELFDATA2MSB\n");
break;
default:
exit(1);
}
if (sizeof(unsigned long) == 4) {
printf("#define HOST_ELFCLASS ELFCLASS32\n");
} else if (sizeof(unsigned long) == 8) {
printf("#define HOST_ELFCLASS ELFCLASS64\n");
}
endian_test.s = 0x0102;
if (memcmp(endian_test.c, "\x01\x02", 2) == 0)
printf("#define HOST_ELFDATA ELFDATA2MSB\n");
else if (memcmp(endian_test.c, "\x02\x01", 2) == 0)
printf("#define HOST_ELFDATA ELFDATA2LSB\n");
else
exit(1);
return 0;
}
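/*
 * Typical use (illustrative): the build feeds this helper a small ELF
 * object built for the target and redirects stdout to a header, e.g.
 *
 *   scripts/mod/mk_elfconfig < scripts/mod/empty.o > elfconfig.h
 *
 * which on a 64-bit little-endian target and host would print:
 *
 *   #define KERNEL_ELFCLASS ELFCLASS64
 *   #define KERNEL_ELFDATA ELFDATA2LSB
 *   #define HOST_ELFCLASS ELFCLASS64
 *   #define HOST_ELFDATA ELFDATA2LSB
 */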
| caoxin1988/linux-3.10.33 | scripts/mod/mk_elfconfig.c | C | gpl-2.0 | 1,234 |
/*
* linux/drivers/input/serio/sa1111ps2.c
*
* Copyright (C) 2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/hardware/sa1111.h>
struct ps2if {
struct serio *io;
struct sa1111_dev *dev;
void __iomem *base;
unsigned int open;
spinlock_t lock;
unsigned int head;
unsigned int tail;
unsigned char buf[4];
};
/*
* Read all bytes waiting in the PS2 port. There should be
 * at most one, but we loop for safety.  If there was a
* framing error, we have to manually clear the status.
*/
static irqreturn_t ps2_rxint(int irq, void *dev_id, struct pt_regs *regs)
{
struct ps2if *ps2if = dev_id;
unsigned int scancode, flag, status;
status = sa1111_readl(ps2if->base + SA1111_PS2STAT);
while (status & PS2STAT_RXF) {
if (status & PS2STAT_STP)
sa1111_writel(PS2STAT_STP, ps2if->base + SA1111_PS2STAT);
flag = (status & PS2STAT_STP ? SERIO_FRAME : 0) |
(status & PS2STAT_RXP ? 0 : SERIO_PARITY);
scancode = sa1111_readl(ps2if->base + SA1111_PS2DATA) & 0xff;
if (hweight8(scancode) & 1)
flag ^= SERIO_PARITY;
serio_interrupt(ps2if->io, scancode, flag, regs);
status = sa1111_readl(ps2if->base + SA1111_PS2STAT);
}
return IRQ_HANDLED;
}
/*
* Completion of ps2 write
*/
static irqreturn_t ps2_txint(int irq, void *dev_id, struct pt_regs *regs)
{
struct ps2if *ps2if = dev_id;
unsigned int status;
spin_lock(&ps2if->lock);
status = sa1111_readl(ps2if->base + SA1111_PS2STAT);
if (ps2if->head == ps2if->tail) {
disable_irq(irq);
/* done */
} else if (status & PS2STAT_TXE) {
sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + SA1111_PS2DATA);
ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1);
}
spin_unlock(&ps2if->lock);
return IRQ_HANDLED;
}
/*
* Write a byte to the PS2 port. We have to wait for the
* port to indicate that the transmitter is empty.
*/
static int ps2_write(struct serio *io, unsigned char val)
{
struct ps2if *ps2if = io->port_data;
unsigned long flags;
unsigned int head;
spin_lock_irqsave(&ps2if->lock, flags);
/*
* If the TX register is empty, we can go straight out.
*/
if (sa1111_readl(ps2if->base + SA1111_PS2STAT) & PS2STAT_TXE) {
sa1111_writel(val, ps2if->base + SA1111_PS2DATA);
} else {
if (ps2if->head == ps2if->tail)
enable_irq(ps2if->dev->irq[1]);
head = (ps2if->head + 1) & (sizeof(ps2if->buf) - 1);
if (head != ps2if->tail) {
ps2if->buf[ps2if->head] = val;
ps2if->head = head;
}
}
spin_unlock_irqrestore(&ps2if->lock, flags);
return 0;
}
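/*
 * Note on the arithmetic above: buf[] has a power-of-two size (4), so
 * head and tail advance as (index + 1) & (sizeof(buf) - 1), i.e.
 * 0 -> 1 -> 2 -> 3 -> 0.  The queue is treated as full when the
 * incremented head would collide with tail, so at most three bytes are
 * ever queued; a byte written to a full queue is silently dropped.
 */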
static int ps2_open(struct serio *io)
{
struct ps2if *ps2if = io->port_data;
int ret;
sa1111_enable_device(ps2if->dev);
ret = request_irq(ps2if->dev->irq[0], ps2_rxint, 0,
SA1111_DRIVER_NAME(ps2if->dev), ps2if);
if (ret) {
printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
ps2if->dev->irq[0], ret);
return ret;
}
ret = request_irq(ps2if->dev->irq[1], ps2_txint, 0,
SA1111_DRIVER_NAME(ps2if->dev), ps2if);
if (ret) {
printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
ps2if->dev->irq[1], ret);
free_irq(ps2if->dev->irq[0], ps2if);
return ret;
}
ps2if->open = 1;
enable_irq_wake(ps2if->dev->irq[0]);
sa1111_writel(PS2CR_ENA, ps2if->base + SA1111_PS2CR);
return 0;
}
static void ps2_close(struct serio *io)
{
struct ps2if *ps2if = io->port_data;
sa1111_writel(0, ps2if->base + SA1111_PS2CR);
disable_irq_wake(ps2if->dev->irq[0]);
ps2if->open = 0;
free_irq(ps2if->dev->irq[1], ps2if);
free_irq(ps2if->dev->irq[0], ps2if);
sa1111_disable_device(ps2if->dev);
}
/*
* Clear the input buffer.
*/
static void __init ps2_clear_input(struct ps2if *ps2if)
{
int maxread = 100;
while (maxread--) {
if ((sa1111_readl(ps2if->base + SA1111_PS2DATA) & 0xff) == 0xff)
break;
}
}
static inline unsigned int
ps2_test_one(struct ps2if *ps2if, unsigned int mask)
{
unsigned int val;
sa1111_writel(PS2CR_ENA | mask, ps2if->base + SA1111_PS2CR);
udelay(2);
val = sa1111_readl(ps2if->base + SA1111_PS2STAT);
return val & (PS2STAT_KBC | PS2STAT_KBD);
}
/*
* Test the keyboard interface. We basically check to make sure that
 * we can drive each line to the keyboard independently of the others.
*/
static int __init ps2_test(struct ps2if *ps2if)
{
unsigned int stat;
int ret = 0;
stat = ps2_test_one(ps2if, PS2CR_FKC);
if (stat != PS2STAT_KBD) {
printk("PS/2 interface test failed[1]: %02x\n", stat);
ret = -ENODEV;
}
stat = ps2_test_one(ps2if, 0);
if (stat != (PS2STAT_KBC | PS2STAT_KBD)) {
printk("PS/2 interface test failed[2]: %02x\n", stat);
ret = -ENODEV;
}
stat = ps2_test_one(ps2if, PS2CR_FKD);
if (stat != PS2STAT_KBC) {
printk("PS/2 interface test failed[3]: %02x\n", stat);
ret = -ENODEV;
}
sa1111_writel(0, ps2if->base + SA1111_PS2CR);
return ret;
}
/*
* Add one device to this driver.
*/
static int ps2_probe(struct sa1111_dev *dev)
{
struct ps2if *ps2if;
struct serio *serio;
int ret;
ps2if = kmalloc(sizeof(struct ps2if), GFP_KERNEL);
serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
if (!ps2if || !serio) {
ret = -ENOMEM;
goto free;
}
memset(ps2if, 0, sizeof(struct ps2if));
memset(serio, 0, sizeof(struct serio));
serio->id.type = SERIO_8042;
serio->write = ps2_write;
serio->open = ps2_open;
serio->close = ps2_close;
strlcpy(serio->name, dev->dev.bus_id, sizeof(serio->name));
strlcpy(serio->phys, dev->dev.bus_id, sizeof(serio->phys));
serio->port_data = ps2if;
serio->dev.parent = &dev->dev;
ps2if->io = serio;
ps2if->dev = dev;
sa1111_set_drvdata(dev, ps2if);
spin_lock_init(&ps2if->lock);
/*
* Request the physical region for this PS2 port.
*/
if (!request_mem_region(dev->res.start,
dev->res.end - dev->res.start + 1,
SA1111_DRIVER_NAME(dev))) {
ret = -EBUSY;
goto free;
}
/*
* Our parent device has already mapped the region.
*/
ps2if->base = dev->mapbase;
sa1111_enable_device(ps2if->dev);
/* Incoming clock is 8MHz */
sa1111_writel(0, ps2if->base + SA1111_PS2CLKDIV);
sa1111_writel(127, ps2if->base + SA1111_PS2PRECNT);
/*
* Flush any pending input.
*/
ps2_clear_input(ps2if);
/*
* Test the keyboard interface.
*/
ret = ps2_test(ps2if);
if (ret)
goto out;
/*
* Flush any pending input.
*/
ps2_clear_input(ps2if);
sa1111_disable_device(ps2if->dev);
serio_register_port(ps2if->io);
return 0;
out:
sa1111_disable_device(ps2if->dev);
release_mem_region(dev->res.start,
dev->res.end - dev->res.start + 1);
free:
sa1111_set_drvdata(dev, NULL);
kfree(ps2if);
kfree(serio);
return ret;
}
/*
* Remove one device from this driver.
*/
static int ps2_remove(struct sa1111_dev *dev)
{
struct ps2if *ps2if = sa1111_get_drvdata(dev);
serio_unregister_port(ps2if->io);
release_mem_region(dev->res.start,
dev->res.end - dev->res.start + 1);
sa1111_set_drvdata(dev, NULL);
kfree(ps2if);
return 0;
}
/*
* Our device driver structure
*/
static struct sa1111_driver ps2_driver = {
.drv = {
.name = "sa1111-ps2",
},
.devid = SA1111_DEVID_PS2,
.probe = ps2_probe,
.remove = ps2_remove,
};
static int __init ps2_init(void)
{
return sa1111_driver_register(&ps2_driver);
}
static void __exit ps2_exit(void)
{
sa1111_driver_unregister(&ps2_driver);
}
module_init(ps2_init);
module_exit(ps2_exit);
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("SA1111 PS2 controller driver");
MODULE_LICENSE("GPL");
| lyn1337/LinuxDSc2 | linux-2.6.x/drivers/input/serio/sa1111ps2.c | C | gpl-2.0 | 7,952 |
/*
* Copyright (c) 1995, 1996, 1997, 1999 Kungliga Tekniska Högskolan
* (Royal Institute of Technology, Stockholm, Sweden).
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "config.h"
#define HC_DEPRECATED
#ifdef KRB5
#include <krb5-types.h>
#endif
#include <des.h>
#include <rand.h>
#include <stdlib.h>
#undef __attribute__
#define __attribute__(X)
void HC_DEPRECATED
DES_rand_data(void *outdata, int size)
{
RAND_bytes(outdata, size);
}
void HC_DEPRECATED
DES_generate_random_block(DES_cblock *block)
{
RAND_bytes(block, sizeof(*block));
}
#define DES_rand_data_key hc_DES_rand_data_key
void HC_DEPRECATED
DES_rand_data_key(DES_cblock *key);
/*
* Generate a random DES key.
*/
void HC_DEPRECATED
DES_rand_data_key(DES_cblock *key)
{
DES_new_random_key(key);
}
void HC_DEPRECATED
DES_set_sequence_number(void *ll)
{
}
void HC_DEPRECATED
DES_set_random_generator_seed(DES_cblock *seed)
{
RAND_seed(seed, sizeof(*seed));
}
/**
* Generate a random des key using a random block, fixup parity and
* skip weak keys.
*
* @param key is set to a random key.
*
* @return 0 on success, non zero on random number generator failure.
*
* @ingroup hcrypto_des
*/
int HC_DEPRECATED
DES_new_random_key(DES_cblock *key)
{
do {
if (RAND_bytes(key, sizeof(*key)) != 1)
return 1;
DES_set_odd_parity(key);
} while(DES_is_weak_key(key));
return(0);
}
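/*
 * Usage sketch (not part of this file): a caller should check the
 * return value, since the RNG may fail. The function name below is
 * made up for the example.
 */
#if 0
static int
example_session_key(DES_cblock *key)
{
    if (DES_new_random_key(key) != 0)
	return -1;	/* no entropy available */
    return 0;
}
#endif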
/**
* Seed the random number generator. Deprecated, use @ref page_rand
*
* @param seed a seed to seed that random number generate with.
*
* @ingroup hcrypto_des
*/
void HC_DEPRECATED
DES_init_random_number_generator(DES_cblock *seed)
{
RAND_seed(seed, sizeof(*seed));
}
/**
* Generate a random key, deprecated since it doesn't return an error
* code, use DES_new_random_key().
*
* @param key is set to a random key.
*
* @ingroup hcrypto_des
*/
void HC_DEPRECATED
DES_random_key(DES_cblock *key)
{
if (DES_new_random_key(key))
abort();
}
| smx-smx/dsl-n55u-bender | release/src/router/samba-3.5.8/source4/heimdal/lib/hcrypto/rnd_keys.c | C | gpl-2.0 | 3,447 |
/* Implementation of the MAXVAL intrinsic
Copyright (C) 2002-2013 Free Software Foundation, Inc.
Contributed by Paul Brook <paul@nowt.org>
This file is part of the GNU Fortran runtime library (libgfortran).
Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "libgfortran.h"
#include <stdlib.h>
#include <assert.h>
#if defined (HAVE_GFC_REAL_8) && defined (HAVE_GFC_REAL_8)
extern void maxval_r8 (gfc_array_r8 * const restrict,
gfc_array_r8 * const restrict, const index_type * const restrict);
export_proto(maxval_r8);
void
maxval_r8 (gfc_array_r8 * const restrict retarray,
gfc_array_r8 * const restrict array,
const index_type * const restrict pdim)
{
index_type count[GFC_MAX_DIMENSIONS];
index_type extent[GFC_MAX_DIMENSIONS];
index_type sstride[GFC_MAX_DIMENSIONS];
index_type dstride[GFC_MAX_DIMENSIONS];
const GFC_REAL_8 * restrict base;
GFC_REAL_8 * restrict dest;
index_type rank;
index_type n;
index_type len;
index_type delta;
index_type dim;
int continue_loop;
/* Make dim zero based to avoid confusion. */
dim = (*pdim) - 1;
rank = GFC_DESCRIPTOR_RANK (array) - 1;
len = GFC_DESCRIPTOR_EXTENT(array,dim);
if (len < 0)
len = 0;
delta = GFC_DESCRIPTOR_STRIDE(array,dim);
for (n = 0; n < dim; n++)
{
sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n);
extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] < 0)
extent[n] = 0;
}
for (n = dim; n < rank; n++)
{
sstride[n] = GFC_DESCRIPTOR_STRIDE(array, n + 1);
extent[n] = GFC_DESCRIPTOR_EXTENT(array, n + 1);
if (extent[n] < 0)
extent[n] = 0;
}
if (retarray->base_addr == NULL)
{
size_t alloc_size, str;
for (n = 0; n < rank; n++)
{
if (n == 0)
str = 1;
else
str = GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
}
retarray->offset = 0;
retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;
alloc_size = sizeof (GFC_REAL_8) * GFC_DESCRIPTOR_STRIDE(retarray,rank-1)
* extent[rank-1];
retarray->base_addr = xmalloc (alloc_size);
if (alloc_size == 0)
{
/* Make sure we have a zero-sized array. */
GFC_DIMENSION_SET(retarray->dim[0], 0, -1, 1);
return;
}
}
else
{
if (rank != GFC_DESCRIPTOR_RANK (retarray))
runtime_error ("rank of return array incorrect in"
" MAXVAL intrinsic: is %ld, should be %ld",
(long int) (GFC_DESCRIPTOR_RANK (retarray)),
(long int) rank);
if (unlikely (compile_options.bounds_check))
bounds_ifunction_return ((array_t *) retarray, extent,
"return value", "MAXVAL");
}
for (n = 0; n < rank; n++)
{
count[n] = 0;
dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
if (extent[n] <= 0)
return;
}
base = array->base_addr;
dest = retarray->base_addr;
continue_loop = 1;
while (continue_loop)
{
const GFC_REAL_8 * restrict src;
GFC_REAL_8 result;
src = base;
{
#if defined (GFC_REAL_8_INFINITY)
result = -GFC_REAL_8_INFINITY;
#else
result = -GFC_REAL_8_HUGE;
#endif
if (len <= 0)
*dest = -GFC_REAL_8_HUGE;
else
{
for (n = 0; n < len; n++, src += delta)
{
#if defined (GFC_REAL_8_QUIET_NAN)
if (*src >= result)
break;
}
if (unlikely (n >= len))
result = GFC_REAL_8_QUIET_NAN;
else for (; n < len; n++, src += delta)
{
#endif
if (*src > result)
result = *src;
}
*dest = result;
}
}
/* Advance to the next element. */
count[0]++;
base += sstride[0];
dest += dstride[0];
n = 0;
while (count[n] == extent[n])
{
/* When we get to the end of a dimension, reset it and increment
the next dimension. */
count[n] = 0;
/* We could precalculate these products, but this is a less
frequently used path so probably not worth it. */
base -= sstride[n] * extent[n];
dest -= dstride[n] * extent[n];
n++;
if (n == rank)
{
	      /* Break out of the loop.  */
continue_loop = 0;
break;
}
else
{
count[n]++;
base += sstride[n];
dest += dstride[n];
}
}
}
}
extern void mmaxval_r8 (gfc_array_r8 * const restrict,
gfc_array_r8 * const restrict, const index_type * const restrict,
gfc_array_l1 * const restrict);
export_proto(mmaxval_r8);
void
mmaxval_r8 (gfc_array_r8 * const restrict retarray,
gfc_array_r8 * const restrict array,
const index_type * const restrict pdim,
gfc_array_l1 * const restrict mask)
{
index_type count[GFC_MAX_DIMENSIONS];
index_type extent[GFC_MAX_DIMENSIONS];
index_type sstride[GFC_MAX_DIMENSIONS];
index_type dstride[GFC_MAX_DIMENSIONS];
index_type mstride[GFC_MAX_DIMENSIONS];
GFC_REAL_8 * restrict dest;
const GFC_REAL_8 * restrict base;
const GFC_LOGICAL_1 * restrict mbase;
int rank;
int dim;
index_type n;
index_type len;
index_type delta;
index_type mdelta;
int mask_kind;
dim = (*pdim) - 1;
rank = GFC_DESCRIPTOR_RANK (array) - 1;
len = GFC_DESCRIPTOR_EXTENT(array,dim);
if (len <= 0)
return;
mbase = mask->base_addr;
mask_kind = GFC_DESCRIPTOR_SIZE (mask);
if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
#ifdef HAVE_GFC_LOGICAL_16
|| mask_kind == 16
#endif
)
mbase = GFOR_POINTER_TO_L1 (mbase, mask_kind);
else
runtime_error ("Funny sized logical array");
delta = GFC_DESCRIPTOR_STRIDE(array,dim);
mdelta = GFC_DESCRIPTOR_STRIDE_BYTES(mask,dim);
for (n = 0; n < dim; n++)
{
sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n);
mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask,n);
extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] < 0)
extent[n] = 0;
}
for (n = dim; n < rank; n++)
{
sstride[n] = GFC_DESCRIPTOR_STRIDE(array,n + 1);
mstride[n] = GFC_DESCRIPTOR_STRIDE_BYTES(mask, n + 1);
extent[n] = GFC_DESCRIPTOR_EXTENT(array, n + 1);
if (extent[n] < 0)
extent[n] = 0;
}
if (retarray->base_addr == NULL)
{
size_t alloc_size, str;
for (n = 0; n < rank; n++)
{
if (n == 0)
str = 1;
else
str= GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
}
alloc_size = sizeof (GFC_REAL_8) * GFC_DESCRIPTOR_STRIDE(retarray,rank-1)
* extent[rank-1];
retarray->offset = 0;
retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;
if (alloc_size == 0)
{
/* Make sure we have a zero-sized array. */
GFC_DIMENSION_SET(retarray->dim[0], 0, -1, 1);
return;
}
else
retarray->base_addr = xmalloc (alloc_size);
}
else
{
if (rank != GFC_DESCRIPTOR_RANK (retarray))
runtime_error ("rank of return array incorrect in MAXVAL intrinsic");
if (unlikely (compile_options.bounds_check))
{
bounds_ifunction_return ((array_t *) retarray, extent,
"return value", "MAXVAL");
bounds_equal_extents ((array_t *) mask, (array_t *) array,
"MASK argument", "MAXVAL");
}
}
for (n = 0; n < rank; n++)
{
count[n] = 0;
dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
if (extent[n] <= 0)
return;
}
dest = retarray->base_addr;
base = array->base_addr;
while (base)
{
const GFC_REAL_8 * restrict src;
const GFC_LOGICAL_1 * restrict msrc;
GFC_REAL_8 result;
src = base;
msrc = mbase;
{
#if defined (GFC_REAL_8_INFINITY)
result = -GFC_REAL_8_INFINITY;
#else
result = -GFC_REAL_8_HUGE;
#endif
#if defined (GFC_REAL_8_QUIET_NAN)
int non_empty_p = 0;
#endif
for (n = 0; n < len; n++, src += delta, msrc += mdelta)
{
#if defined (GFC_REAL_8_INFINITY) || defined (GFC_REAL_8_QUIET_NAN)
if (*msrc)
{
#if defined (GFC_REAL_8_QUIET_NAN)
non_empty_p = 1;
if (*src >= result)
#endif
break;
}
}
if (unlikely (n >= len))
{
#if defined (GFC_REAL_8_QUIET_NAN)
result = non_empty_p ? GFC_REAL_8_QUIET_NAN : -GFC_REAL_8_HUGE;
#else
result = -GFC_REAL_8_HUGE;
#endif
}
else for (; n < len; n++, src += delta, msrc += mdelta)
{
#endif
if (*msrc && *src > result)
result = *src;
}
*dest = result;
}
/* Advance to the next element. */
count[0]++;
base += sstride[0];
mbase += mstride[0];
dest += dstride[0];
n = 0;
while (count[n] == extent[n])
{
/* When we get to the end of a dimension, reset it and increment
the next dimension. */
count[n] = 0;
/* We could precalculate these products, but this is a less
frequently used path so probably not worth it. */
base -= sstride[n] * extent[n];
mbase -= mstride[n] * extent[n];
dest -= dstride[n] * extent[n];
n++;
if (n == rank)
{
	      /* Break out of the loop.  */
base = NULL;
break;
}
else
{
count[n]++;
base += sstride[n];
mbase += mstride[n];
dest += dstride[n];
}
}
}
}
extern void smaxval_r8 (gfc_array_r8 * const restrict,
gfc_array_r8 * const restrict, const index_type * const restrict,
GFC_LOGICAL_4 *);
export_proto(smaxval_r8);
void
smaxval_r8 (gfc_array_r8 * const restrict retarray,
gfc_array_r8 * const restrict array,
const index_type * const restrict pdim,
GFC_LOGICAL_4 * mask)
{
index_type count[GFC_MAX_DIMENSIONS];
index_type extent[GFC_MAX_DIMENSIONS];
index_type dstride[GFC_MAX_DIMENSIONS];
GFC_REAL_8 * restrict dest;
index_type rank;
index_type n;
index_type dim;
if (*mask)
{
maxval_r8 (retarray, array, pdim);
return;
}
/* Make dim zero based to avoid confusion. */
dim = (*pdim) - 1;
rank = GFC_DESCRIPTOR_RANK (array) - 1;
for (n = 0; n < dim; n++)
{
extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
}
for (n = dim; n < rank; n++)
{
extent[n] =
GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
}
if (retarray->base_addr == NULL)
{
size_t alloc_size, str;
for (n = 0; n < rank; n++)
{
if (n == 0)
str = 1;
else
str = GFC_DESCRIPTOR_STRIDE(retarray,n-1) * extent[n-1];
GFC_DIMENSION_SET(retarray->dim[n], 0, extent[n] - 1, str);
}
retarray->offset = 0;
retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank;
alloc_size = sizeof (GFC_REAL_8) * GFC_DESCRIPTOR_STRIDE(retarray,rank-1)
* extent[rank-1];
if (alloc_size == 0)
{
/* Make sure we have a zero-sized array. */
GFC_DIMENSION_SET(retarray->dim[0], 0, -1, 1);
return;
}
else
retarray->base_addr = xmalloc (alloc_size);
}
else
{
if (rank != GFC_DESCRIPTOR_RANK (retarray))
runtime_error ("rank of return array incorrect in"
" MAXVAL intrinsic: is %ld, should be %ld",
(long int) (GFC_DESCRIPTOR_RANK (retarray)),
(long int) rank);
if (unlikely (compile_options.bounds_check))
{
for (n=0; n < rank; n++)
{
index_type ret_extent;
ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,n);
if (extent[n] != ret_extent)
runtime_error ("Incorrect extent in return value of"
" MAXVAL intrinsic in dimension %ld:"
" is %ld, should be %ld", (long int) n + 1,
(long int) ret_extent, (long int) extent[n]);
}
}
}
for (n = 0; n < rank; n++)
{
count[n] = 0;
dstride[n] = GFC_DESCRIPTOR_STRIDE(retarray,n);
}
dest = retarray->base_addr;
while(1)
{
*dest = -GFC_REAL_8_HUGE;
count[0]++;
dest += dstride[0];
n = 0;
while (count[n] == extent[n])
{
/* When we get to the end of a dimension, reset it and increment
the next dimension. */
count[n] = 0;
/* We could precalculate these products, but this is a less
frequently used path so probably not worth it. */
dest -= dstride[n] * extent[n];
n++;
if (n == rank)
return;
else
{
count[n]++;
dest += dstride[n];
}
}
}
}
#endif
| atgreen/gcc | libgfortran/generated/maxval_r8.c | C | gpl-2.0 | 13,065 |
/*
* SAMSUNG NFC Controller
*
* Copyright (C) 2013 Samsung Electronics Co.Ltd
* Author: Woonki Lee <woonki84.lee@samsung.com>
* Heejae Kim <heejae12.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Last update: 2014-07-15
*
*/
#ifdef CONFIG_SEC_NFC_IF_I2C_GPIO
#define CONFIG_SEC_NFC_IF_I2C
#endif
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <linux/nfc/sec_nfc.h>
#ifdef CONFIG_SEC_NFC_CLK_REQ
#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
#include <mach/exynos-fimc-is.h>
#include <linux/clk-provider.h>
#endif
#include <linux/interrupt.h>
#endif
#include <linux/wakelock.h>
#include <linux/of_gpio.h>
#include <linux/clk.h>
#ifdef CONFIG_SEC_NFC_LDO_CONTROL
#include <linux/regulator/consumer.h>
#endif
#ifdef CONFIG_SOC_EXYNOS5433
#include <mach/regs-clock-exynos5433.h>
#endif
#ifndef CONFIG_SEC_NFC_IF_I2C
struct sec_nfc_i2c_info {};
#define sec_nfc_read NULL
#define sec_nfc_write NULL
#define sec_nfc_poll NULL
#define sec_nfc_i2c_irq_clear(x)
#define SEC_NFC_GET_INFO(dev) platform_get_drvdata(to_platform_device(dev))
#else /* CONFIG_SEC_NFC_IF_I2C */
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#define SEC_NFC_GET_INFO(dev) i2c_get_clientdata(to_i2c_client(dev))
enum sec_nfc_irq {
SEC_NFC_NONE,
SEC_NFC_INT,
SEC_NFC_SKIP,
};
struct sec_nfc_i2c_info {
struct i2c_client *i2c_dev;
struct mutex read_mutex;
enum sec_nfc_irq read_irq;
wait_queue_head_t read_wait;
size_t buflen;
u8 *buf;
};
#endif
struct sec_nfc_info {
struct miscdevice miscdev;
struct mutex mutex;
enum sec_nfc_mode mode;
struct device *dev;
struct sec_nfc_platform_data *pdata;
struct sec_nfc_i2c_info i2c_info;
struct wake_lock nfc_wake_lock;
#ifdef CONFIG_SEC_NFC_CLK_REQ
bool clk_ctl;
bool clk_state;
#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
struct platform_device *pdev;
#endif
#endif
};
#ifdef CONFIG_SEC_NFC_IF_I2C
static irqreturn_t sec_nfc_irq_thread_fn(int irq, void *dev_id)
{
struct sec_nfc_info *info = dev_id;
struct sec_nfc_platform_data *pdata = info->pdata;
dev_dbg(info->dev, "[NFC] Read Interrupt is occurred!\n");
if(gpio_get_value(pdata->irq) == 0) {
dev_err(info->dev, "[NFC] Warning,irq-gpio state is low!\n");
return IRQ_HANDLED;
}
mutex_lock(&info->i2c_info.read_mutex);
/* Skip interrupt during power switching
* It is released after first write */
if (info->i2c_info.read_irq == SEC_NFC_SKIP) {
dev_dbg(info->dev, "%s: Now power swiching. Skip this IRQ\n", __func__);
mutex_unlock(&info->i2c_info.read_mutex);
return IRQ_HANDLED;
}
info->i2c_info.read_irq = SEC_NFC_INT;
mutex_unlock(&info->i2c_info.read_mutex);
wake_up_interruptible(&info->i2c_info.read_wait);
if(!wake_lock_active(&info->nfc_wake_lock))
{
dev_dbg(info->dev, "%s: Set wake_lock_timeout for 2 sec. !!!\n", __func__);
wake_lock_timeout(&info->nfc_wake_lock, 2*HZ);
}
return IRQ_HANDLED;
}
static ssize_t sec_nfc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct sec_nfc_info *info = container_of(file->private_data,
struct sec_nfc_info, miscdev);
enum sec_nfc_irq irq;
int ret = 0;
dev_dbg(info->dev, "%s: info: %p, count: %zu\n", __func__,
info, count);
mutex_lock(&info->mutex);
if (info->mode == SEC_NFC_MODE_OFF) {
dev_err(info->dev, "sec_nfc is not enabled\n");
ret = -ENODEV;
goto out;
}
mutex_lock(&info->i2c_info.read_mutex);
irq = info->i2c_info.read_irq;
mutex_unlock(&info->i2c_info.read_mutex);
if (irq == SEC_NFC_NONE) {
if (file->f_flags & O_NONBLOCK) {
dev_err(info->dev, "it is nonblock\n");
ret = -EAGAIN;
goto out;
}
}
/* i2c recv */
if (count > info->i2c_info.buflen)
count = info->i2c_info.buflen;
if (count > SEC_NFC_MSG_MAX_SIZE) {
dev_err(info->dev, "user required wrong size :%d\n", count);
ret = -EINVAL;
goto out;
}
mutex_lock(&info->i2c_info.read_mutex);
memset(info->i2c_info.buf, 0, count);
ret = i2c_master_recv(info->i2c_info.i2c_dev, info->i2c_info.buf, count);
dev_dbg(info->dev, "recv size : %d\n", ret);
if (ret == -EREMOTEIO) {
ret = -ERESTART;
goto read_error;
} else if (ret != count) {
dev_err(info->dev, "read failed: return: %d count: %d\n",
ret, count);
//ret = -EREMOTEIO;
goto read_error;
}
info->i2c_info.read_irq = SEC_NFC_NONE;
mutex_unlock(&info->i2c_info.read_mutex);
if (copy_to_user(buf, info->i2c_info.buf, ret)) {
dev_err(info->dev, "copy failed to user\n");
ret = -EFAULT;
}
goto out;
read_error:
info->i2c_info.read_irq = SEC_NFC_NONE;
mutex_unlock(&info->i2c_info.read_mutex);
out:
mutex_unlock(&info->mutex);
return ret;
}
static ssize_t sec_nfc_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct sec_nfc_info *info = container_of(file->private_data,
struct sec_nfc_info, miscdev);
int ret = 0;
dev_dbg(info->dev, "%s: info: %p, count %zu\n", __func__,
info, count);
mutex_lock(&info->mutex);
if (info->mode == SEC_NFC_MODE_OFF) {
dev_err(info->dev, "sec_nfc is not enabled\n");
ret = -ENODEV;
goto out;
}
if (count > info->i2c_info.buflen)
count = info->i2c_info.buflen;
if (count > SEC_NFC_MSG_MAX_SIZE) {
dev_err(info->dev, "user required wrong size :%d\n", count);
ret = -EINVAL;
goto out;
}
if (copy_from_user(info->i2c_info.buf, buf, count)) {
dev_err(info->dev, "copy failed from user\n");
ret = -EFAULT;
goto out;
}
/* Skip interrupt during power switching
* It is released after first write */
mutex_lock(&info->i2c_info.read_mutex);
ret = i2c_master_send(info->i2c_info.i2c_dev, info->i2c_info.buf, count);
if (info->i2c_info.read_irq == SEC_NFC_SKIP)
info->i2c_info.read_irq = SEC_NFC_NONE;
mutex_unlock(&info->i2c_info.read_mutex);
if (ret == -EREMOTEIO) {
dev_err(info->dev, "send failed: return: %d count: %d\n",
ret, count);
ret = -ERESTART;
goto out;
}
if (ret != count) {
dev_err(info->dev, "send failed: return: %d count: %d\n",
ret, count);
ret = -EREMOTEIO;
}
out:
mutex_unlock(&info->mutex);
return ret;
}
static unsigned int sec_nfc_poll(struct file *file, poll_table *wait)
{
struct sec_nfc_info *info = container_of(file->private_data,
struct sec_nfc_info, miscdev);
enum sec_nfc_irq irq;
int ret = 0;
dev_dbg(info->dev, "%s: info: %p\n", __func__, info);
mutex_lock(&info->mutex);
if (info->mode == SEC_NFC_MODE_OFF) {
dev_err(info->dev, "sec_nfc is not enabled\n");
ret = -ENODEV;
goto out;
}
poll_wait(file, &info->i2c_info.read_wait, wait);
mutex_lock(&info->i2c_info.read_mutex);
irq = info->i2c_info.read_irq;
if (irq == SEC_NFC_INT)
ret = (POLLIN | POLLRDNORM);
mutex_unlock(&info->i2c_info.read_mutex);
out:
mutex_unlock(&info->mutex);
return ret;
}
#ifdef CONFIG_SEC_NFC_LDO_CONTROL
static int sec_nfc_regulator_onoff(struct sec_nfc_platform_data *data, int onoff)
{
int rc = 0;
struct regulator *regulator_i2c_1p8;
regulator_i2c_1p8 = regulator_get(NULL, data->i2c_1p8);
if (IS_ERR(regulator_i2c_1p8) || regulator_i2c_1p8 == NULL) {
pr_err("%s - i2c_1p8 regulator_get fail\n", __func__);
return -ENODEV;
}
pr_info("%s - onoff = %d\n", __func__, onoff);
if (onoff == NFC_I2C_LDO_ON) {
rc = regulator_enable(regulator_i2c_1p8);
if (rc) {
pr_err("%s - enable i2c_1p8 failed, rc=%d\n",
__func__, rc);
}
} else {
rc = regulator_disable(regulator_i2c_1p8);
if (rc) {
pr_err("%s - disable i2c_1p8 failed, rc=%d\n",
__func__, rc);
}
}
regulator_put(regulator_i2c_1p8);
return rc;
}
#endif
void sec_nfc_i2c_irq_clear(struct sec_nfc_info *info)
{
	/* Clear interrupt. An interrupt will occur at power off. */
mutex_lock(&info->i2c_info.read_mutex);
info->i2c_info.read_irq = SEC_NFC_NONE;
mutex_unlock(&info->i2c_info.read_mutex);
}
int sec_nfc_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct sec_nfc_info *info = dev_get_drvdata(dev);
struct sec_nfc_platform_data *pdata = info->pdata;
int ret;
dev_dbg(info->dev, "%s: start: %p\n", __func__, info);
info->i2c_info.buflen = SEC_NFC_MAX_BUFFER_SIZE;
info->i2c_info.buf = kzalloc(SEC_NFC_MAX_BUFFER_SIZE, GFP_KERNEL);
if (!info->i2c_info.buf) {
dev_err(dev,
"failed to allocate memory for sec_nfc_info->buf\n");
return -ENOMEM;
}
info->i2c_info.i2c_dev = client;
info->i2c_info.read_irq = SEC_NFC_NONE;
mutex_init(&info->i2c_info.read_mutex);
init_waitqueue_head(&info->i2c_info.read_wait);
i2c_set_clientdata(client, info);
ret = gpio_request(pdata->irq, "nfc_int");
if (ret) {
dev_err(dev, "GPIO request is failed to register IRQ\n");
goto err_irq_req;
}
gpio_direction_input(pdata->irq);
#ifdef CONFIG_SEC_NFC_LDO_CONTROL
if (pdata->i2c_1p8 != NULL) {
if(!lpcharge) {
ret = sec_nfc_regulator_onoff(pdata, NFC_I2C_LDO_ON);
if (ret < 0)
pr_err("%s max86900_regulator_on fail err = %d\n",
__func__, ret);
usleep_range(1000, 1100);
}
}
#endif
ret = request_threaded_irq(client->irq, NULL, sec_nfc_irq_thread_fn,
IRQF_TRIGGER_RISING | IRQF_ONESHOT, SEC_NFC_DRIVER_NAME,
info);
if (ret < 0) {
dev_err(dev, "failed to register IRQ handler\n");
kfree(info->i2c_info.buf);
return ret;
}
dev_dbg(info->dev, "%s: success: %p\n", __func__, info);
return 0;
err_irq_req:
return ret;
}
void sec_nfc_i2c_remove(struct device *dev)
{
struct sec_nfc_info *info = dev_get_drvdata(dev);
struct i2c_client *client = info->i2c_info.i2c_dev;
struct sec_nfc_platform_data *pdata = info->pdata;
free_irq(client->irq, info);
gpio_free(pdata->irq);
}
#endif /* CONFIG_SEC_NFC_IF_I2C */
#ifdef CONFIG_SEC_NFC_CLK_REQ
static irqreturn_t sec_nfc_clk_irq_thread(int irq, void *dev_id)
{
struct sec_nfc_info *info = dev_id;
struct sec_nfc_platform_data *pdata = info->pdata;
bool value;
dev_dbg(info->dev, "[NFC]Clock Interrupt is occurred!\n");
value = gpio_get_value(pdata->clk_req) > 0 ? true : false;
#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
if (value == info->clk_state)
return IRQ_HANDLED;
if (value)
{
#ifdef CONFIG_SOC_EXYNOS5433
clk_prepare_enable(pdata->gate_top_cam1);
#endif
clk_prepare_enable(pdata->clk);
}
else
{
clk_disable_unprepare(pdata->clk);
#ifdef CONFIG_SOC_EXYNOS5433
clk_disable_unprepare(pdata->gate_top_cam1);
#endif
}
#else
value = gpio_get_value(pdata->clk_req) > 0 ? 1 : 0;
gpio_set_value(pdata->clk, value);
#endif
info->clk_state = value;
return IRQ_HANDLED;
}
void sec_nfc_clk_ctl_enable(struct sec_nfc_info *info)
{
struct sec_nfc_platform_data *pdata = info->pdata;
unsigned int irq = gpio_to_irq(pdata->clk_req);
int ret;
if (info->clk_ctl)
return;
info->clk_state = false;
ret = request_threaded_irq(irq, NULL, sec_nfc_clk_irq_thread,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
SEC_NFC_DRIVER_NAME, info);
if (ret < 0) {
dev_err(info->dev, "failed to register CLK REQ IRQ handler\n");
}
info->clk_ctl = true;
}
void sec_nfc_clk_ctl_disable(struct sec_nfc_info *info)
{
struct sec_nfc_platform_data *pdata = info->pdata;
unsigned int irq = gpio_to_irq(pdata->clk_req);
if (!info->clk_ctl)
return;
free_irq(irq, info);
if (info->clk_state)
{
clk_disable_unprepare(pdata->clk);
#ifdef CONFIG_SOC_EXYNOS5433
clk_disable_unprepare(pdata->gate_top_cam1);
#endif
}
info->clk_state = false;
info->clk_ctl = false;
}
#else
#define sec_nfc_clk_ctl_enable(x)
#define sec_nfc_clk_ctl_disable(x)
#endif /* CONFIG_SEC_NFC_CLK_REQ */
static void sec_nfc_set_mode(struct sec_nfc_info *info,
enum sec_nfc_mode mode)
{
struct sec_nfc_platform_data *pdata = info->pdata;
	/* info mutex is already held before calling this function */
if (info->mode == mode) {
dev_dbg(info->dev, "Power mode is already %d", mode);
return;
}
info->mode = mode;
#ifdef CONFIG_SEC_NFC_IF_I2C
/* Skip interrupt during power switching
* It is released after first write */
mutex_lock(&info->i2c_info.read_mutex);
info->i2c_info.read_irq = SEC_NFC_SKIP;
mutex_unlock(&info->i2c_info.read_mutex);
#endif
gpio_set_value(pdata->ven, SEC_NFC_PW_OFF);
if (pdata->firm) gpio_set_value(pdata->firm, SEC_NFC_FW_OFF);
if (mode == SEC_NFC_MODE_BOOTLOADER)
if (pdata->firm) gpio_set_value(pdata->firm, SEC_NFC_FW_ON);
if (mode != SEC_NFC_MODE_OFF)
{
msleep(SEC_NFC_VEN_WAIT_TIME);
gpio_set_value(pdata->ven, SEC_NFC_PW_ON);
sec_nfc_clk_ctl_enable(info);
#ifdef CONFIG_SEC_NFC_IF_I2C
enable_irq_wake(info->i2c_info.i2c_dev->irq);
#endif
msleep(SEC_NFC_VEN_WAIT_TIME/2);
} else {
sec_nfc_clk_ctl_disable(info);
#ifdef CONFIG_SEC_NFC_IF_I2C
disable_irq_wake(info->i2c_info.i2c_dev->irq);
#endif
}
if(wake_lock_active(&info->nfc_wake_lock))
wake_unlock(&info->nfc_wake_lock);
dev_dbg(info->dev, "Power mode is : %d\n", mode);
}
static long sec_nfc_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct sec_nfc_info *info = container_of(file->private_data,
struct sec_nfc_info, miscdev);
struct sec_nfc_platform_data *pdata = info->pdata;
unsigned int new = (unsigned int)arg;
int ret = 0;
dev_dbg(info->dev, "%s: info: %p, cmd: 0x%x\n",
__func__, info, cmd);
mutex_lock(&info->mutex);
switch (cmd) {
case SEC_NFC_SET_MODE:
dev_dbg(info->dev, "%s: SEC_NFC_SET_MODE\n", __func__);
if (info->mode == new)
break;
if (new >= SEC_NFC_MODE_COUNT) {
dev_err(info->dev, "wrong mode (%d)\n", new);
ret = -EFAULT;
break;
}
sec_nfc_set_mode(info, new);
break;
#if defined(CONFIG_SEC_NFC_PRODUCT_N3)
case SEC_NFC_SLEEP:
case SEC_NFC_WAKEUP:
break;
#elif defined(CONFIG_SEC_NFC_PRODUCT_N5)
case SEC_NFC_SLEEP:
if (info->mode != SEC_NFC_MODE_BOOTLOADER) {
if(wake_lock_active(&info->nfc_wake_lock))
wake_unlock(&info->nfc_wake_lock);
gpio_set_value(pdata->wake, SEC_NFC_WAKE_SLEEP);
}
break;
case SEC_NFC_WAKEUP:
if (info->mode != SEC_NFC_MODE_BOOTLOADER) {
gpio_set_value(pdata->wake, SEC_NFC_WAKE_UP);
if(!wake_lock_active(&info->nfc_wake_lock))
wake_lock(&info->nfc_wake_lock);
}
break;
#endif
default:
dev_err(info->dev, "Unknow ioctl 0x%x\n", cmd);
ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&info->mutex);
return ret;
}
static int sec_nfc_open(struct inode *inode, struct file *file)
{
struct sec_nfc_info *info = container_of(file->private_data,
struct sec_nfc_info, miscdev);
int ret = 0;
dev_dbg(info->dev, "%s: info : %p" , __func__, info);
mutex_lock(&info->mutex);
if (info->mode != SEC_NFC_MODE_OFF) {
dev_err(info->dev, "sec_nfc is busy\n");
ret = -EBUSY;
goto out;
}
sec_nfc_set_mode(info, SEC_NFC_MODE_OFF);
out:
mutex_unlock(&info->mutex);
return ret;
}
static int sec_nfc_close(struct inode *inode, struct file *file)
{
struct sec_nfc_info *info = container_of(file->private_data,
struct sec_nfc_info, miscdev);
dev_dbg(info->dev, "%s: info : %p" , __func__, info);
mutex_lock(&info->mutex);
sec_nfc_set_mode(info, SEC_NFC_MODE_OFF);
mutex_unlock(&info->mutex);
return 0;
}
static const struct file_operations sec_nfc_fops = {
.owner = THIS_MODULE,
.read = sec_nfc_read,
.write = sec_nfc_write,
.poll = sec_nfc_poll,
.open = sec_nfc_open,
.release = sec_nfc_close,
.unlocked_ioctl = sec_nfc_ioctl,
};
#ifdef CONFIG_PM
static int sec_nfc_suspend(struct device *dev)
{
struct sec_nfc_info *info = SEC_NFC_GET_INFO(dev);
int ret = 0;
mutex_lock(&info->mutex);
if (info->mode == SEC_NFC_MODE_BOOTLOADER)
ret = -EPERM;
mutex_unlock(&info->mutex);
return ret;
}
static int sec_nfc_resume(struct device *dev)
{
return 0;
}
static SIMPLE_DEV_PM_OPS(sec_nfc_pm_ops, sec_nfc_suspend, sec_nfc_resume);
#endif
#ifdef CONFIG_OF
/*device tree parsing*/
static int sec_nfc_parse_dt(struct device *dev,
struct sec_nfc_platform_data *pdata)
{
struct device_node *np = dev->of_node;
pdata->ven = of_get_named_gpio(np, "sec-nfc,ven-gpio", 0);
pdata->firm = of_get_named_gpio(np, "sec-nfc,firm-gpio", 0);
pdata->wake = pdata->firm;
#ifdef CONFIG_SEC_NFC_IF_I2C
pdata->irq = of_get_named_gpio(np, "sec-nfc,irq-gpio", 0);
#endif
#ifdef CONFIG_SEC_NFC_CLK_REQ
pdata->clk_req = of_get_named_gpio(np, "sec-nfc,clk_req-gpio", 0);
#endif
#ifdef CONFIG_SEC_NFC_LDO_CONTROL
if (of_property_read_string(np, "sec-nfc,i2c_1p8",
&pdata->i2c_1p8) < 0) {
pr_err("%s - get i2c_1p8 error\n", __func__);
pdata->i2c_1p8 = NULL;
}
#endif
pr_info("%s: irq : %d, ven : %d, firm : %d\n",
__func__, pdata->irq, pdata->ven, pdata->firm);
return 0;
}
#ifdef CONFIG_SEC_NFC_CLK_REQ
#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
static int sec_nfc_clk_parse_dt(struct sec_nfc_info *info)
{
struct sec_nfc_platform_data *pdata = info->pdata;
u32 frequency;
int ret;
ret = fimc_is_set_parent_dt(info->pdev, "mout_sclk_isp_sensor1", "oscclk");
if (ret) {
pr_err("%s, fimc_is_set_parent_dt:%d\n", __func__, ret);
return -EPERM;
}
ret = fimc_is_set_rate_dt(info->pdev, "dout_sclk_isp_sensor1_a", 24 * 1000000);
if (ret) {
pr_err("%s, fimc_is_set_rate_dt A:%d\n", __func__, ret);
return -EPERM;
}
ret = fimc_is_set_rate_dt(info->pdev, "dout_sclk_isp_sensor1_b", 24 * 1000000);
if (ret) {
pr_err("%s, fimc_is_set_rate_dt B:%d\n", __func__, ret);
return -EPERM;
}
frequency = fimc_is_get_rate_dt(info->pdev, "sclk_isp_sensor1");
pr_info("%s(mclk : %d)\n", __func__, frequency);
pdata->clk = clk_get(info->dev, "sclk_isp_sensor1");
if(IS_ERR(pdata->clk)){
pr_err("%s: clk not found\n",__func__);
return -EPERM;
}
#ifdef CONFIG_SOC_EXYNOS5433
pdata->gate_top_cam1 = samsung_clk_get_by_reg((unsigned long)EXYNOS5430_ENABLE_IP_TOP,6);
if(IS_ERR(pdata->gate_top_cam1)){
pr_err("%s : cam1 clk not found\n", __func__);
return -ENODEV;
}
#endif
return 0;
}
#endif
#endif
#else
static int sec_nfc_parse_dt(struct device *dev,
struct sec_nfc_platform_data *pdata)
{
return -ENODEV;
}
#endif
static int __devinit __sec_nfc_probe(struct device *dev)
{
struct sec_nfc_info *info;
struct sec_nfc_platform_data *pdata = NULL;
int ret = 0;
dev_dbg(dev, "[NFC]sec-nfc probe start \n");
if (dev->of_node) {
pdata = devm_kzalloc(dev,
sizeof(struct sec_nfc_platform_data), GFP_KERNEL);
if (!pdata) {
dev_err(dev, "Failed to allocate memory\n");
return -ENOMEM;
}
ret = sec_nfc_parse_dt(dev, pdata);
if (ret)
return ret;
} else {
pdata = dev->platform_data;
}
if (!pdata) {
dev_err(dev, "No platform data\n");
ret = -ENOMEM;
goto err_pdata;
}
info = kzalloc(sizeof(struct sec_nfc_info), GFP_KERNEL);
if (!info) {
dev_err(dev, "failed to allocate memory for sec_nfc_info\n");
ret = -ENOMEM;
goto err_info_alloc;
}
info->dev = dev;
info->pdata = pdata;
info->mode = SEC_NFC_MODE_OFF;
mutex_init(&info->mutex);
dev_set_drvdata(dev, info);
info->miscdev.minor = MISC_DYNAMIC_MINOR;
info->miscdev.name = SEC_NFC_DRIVER_NAME;
info->miscdev.fops = &sec_nfc_fops;
info->miscdev.parent = dev;
ret = misc_register(&info->miscdev);
if (ret < 0) {
dev_err(dev, "failed to register Device\n");
goto err_dev_reg;
}
#ifdef CONFIG_SEC_NFC_CLK_REQ
#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
info->pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
if (info->pdev == NULL) {
dev_err(dev, "failed to allocate memory for module data\n");
ret = -ENOMEM;
goto err_get_pdev;
} else {
info->pdev->dev = *dev;
}
if (sec_nfc_clk_parse_dt(info) < 0) {
dev_err(dev, "failed to get clock information\n");
ret = -ENOMEM;
goto err_gpio_clk_parse;
}
#endif
#endif
ret = gpio_request(pdata->ven, "nfc_ven");
if (ret) {
dev_err(dev, "failed to get gpio ven\n");
goto err_gpio_ven;
}
gpio_direction_output(pdata->ven, SEC_NFC_PW_OFF);
if (pdata->firm)
{
ret = gpio_request(pdata->firm, "nfc_firm");
if (ret) {
dev_err(dev, "failed to get gpio firm\n");
goto err_gpio_firm;
}
gpio_direction_output(pdata->firm, SEC_NFC_FW_OFF);
}
wake_lock_init(&info->nfc_wake_lock, WAKE_LOCK_SUSPEND, "nfc_wake_lock");
dev_dbg(dev, "%s: success info: %p, pdata %p\n", __func__, info, pdata);
return 0;
err_gpio_firm:
gpio_free(pdata->ven);
err_gpio_ven:
#ifdef CONFIG_SEC_NFC_CLK_REQ
#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
err_gpio_clk_parse:
kfree(info->pdev);
err_get_pdev:
#endif
#endif
err_dev_reg:
kfree(info);
err_info_alloc:
err_pdata:
return ret;
}
static int __devexit __sec_nfc_remove(struct device *dev)
{
struct sec_nfc_info *info = dev_get_drvdata(dev);
struct sec_nfc_platform_data *pdata = info->pdata;
dev_dbg(info->dev, "%s\n", __func__);
misc_deregister(&info->miscdev);
sec_nfc_set_mode(info, SEC_NFC_MODE_OFF);
gpio_set_value(pdata->firm, 0);
gpio_free(pdata->ven);
if (pdata->firm) gpio_free(pdata->firm);
wake_lock_destroy(&info->nfc_wake_lock);
#ifdef CONFIG_SEC_NFC_CLK_REQ
#if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433)
kfree(info->pdev);
#endif
#endif
kfree(info);
return 0;
}
#ifdef CONFIG_SEC_NFC_IF_I2C
MODULE_DEVICE_TABLE(i2c, sec_nfc_id_table);
typedef struct i2c_driver sec_nfc_driver_type;
#define SEC_NFC_INIT(driver) i2c_add_driver(driver);
#define SEC_NFC_EXIT(driver) i2c_del_driver(driver);
static int __devinit sec_nfc_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret = 0;
ret = __sec_nfc_probe(&client->dev);
if (ret)
return ret;
if (sec_nfc_i2c_probe(client))
__sec_nfc_remove(&client->dev);
return ret;
}
static int __devexit sec_nfc_remove(struct i2c_client *client)
{
sec_nfc_i2c_remove(&client->dev);
return __sec_nfc_remove(&client->dev);
}
static struct i2c_device_id sec_nfc_id_table[] = {
{ SEC_NFC_DRIVER_NAME, 0 },
{ }
};
#else /* CONFIG_SEC_NFC_IF_I2C */
MODULE_DEVICE_TABLE(platform, sec_nfc_id_table);
typedef struct platform_driver sec_nfc_driver_type;
#define SEC_NFC_INIT(driver) platform_driver_register(driver);
#define SEC_NFC_EXIT(driver) platform_driver_unregister(driver);
static int __devinit sec_nfc_probe(struct platform_device *pdev)
{
return __sec_nfc_probe(&pdev->dev);
}
static int __devexit sec_nfc_remove(struct platform_device *pdev)
{
return __sec_nfc_remove(&pdev->dev);
}
static struct platform_device_id sec_nfc_id_table[] = {
{ SEC_NFC_DRIVER_NAME, 0 },
{ }
};
#endif /* CONFIG_SEC_NFC_IF_I2C */
#ifdef CONFIG_OF
static struct of_device_id nfc_match_table[] = {
{ .compatible = SEC_NFC_DRIVER_NAME,},
{},
};
#else
#define nfc_match_table NULL
#endif
static sec_nfc_driver_type sec_nfc_driver = {
.probe = sec_nfc_probe,
.id_table = sec_nfc_id_table,
.remove = sec_nfc_remove,
.driver = {
.name = SEC_NFC_DRIVER_NAME,
#ifdef CONFIG_PM
.pm = &sec_nfc_pm_ops,
#endif
.of_match_table = nfc_match_table,
},
};
static int __init sec_nfc_init(void)
{
return SEC_NFC_INIT(&sec_nfc_driver);
}
static void __exit sec_nfc_exit(void)
{
SEC_NFC_EXIT(&sec_nfc_driver);
}
module_init(sec_nfc_init);
module_exit(sec_nfc_exit);
MODULE_DESCRIPTION("Samsung sec_nfc driver");
MODULE_LICENSE("GPL");
| engine95/exynos5433-BPB1-gts28-210wifi | drivers/nfc/sec_nfc.c | C | gpl-2.0 | 24,014 |
<?php
/*
V5.09 25 June 2009 (c) 2000-2009 John Lim (jlim#natsoft.com). All rights reserved.
Released under both BSD license and Lesser GPL library license.
Whenever there is any discrepancy between the two licenses,
the BSD license will take precedence.
Set tabs to 4 for best viewing.
Latest version is available at http://adodb.sourceforge.net
MSSQL support via ODBC. Requires ODBC. Works on Windows and Unix.
For Unix configuration, see http://phpbuilder.com/columns/alberto20000919.php3
*/
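/*
 Illustrative usage sketch (not part of the original file); the DSN, the
 credentials and the table name below are assumptions:

	$db = ADONewConnection('odbc_mssql');
	$db->Connect('Driver={SQL Server};Server=myhost;Database=mydb;', 'user', 'pwd');
	$rs = $db->SelectLimit('select * from mytable', 10);
*/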
// security - hide paths
if (!defined('ADODB_DIR')) die();
if (!defined('_ADODB_ODBC_LAYER')) {
include(ADODB_DIR."/drivers/adodb-odbc.inc.php");
}
class ADODB_odbc_mssql extends ADODB_odbc {
var $databaseType = 'odbc_mssql';
var $fmtDate = "'Y-m-d'";
var $fmtTimeStamp = "'Y-m-d H:i:s'";
var $_bindInputArray = true;
var $metaTablesSQL="select name,case when type='U' then 'T' else 'V' end from sysobjects where (type='U' or type='V') and (name not in ('sysallocations','syscolumns','syscomments','sysdepends','sysfilegroups','sysfiles','sysfiles1','sysforeignkeys','sysfulltextcatalogs','sysindexes','sysindexkeys','sysmembers','sysobjects','syspermissions','sysprotects','sysreferences','systypes','sysusers','sysalternates','sysconstraints','syssegments','REFERENTIAL_CONSTRAINTS','CHECK_CONSTRAINTS','CONSTRAINT_TABLE_USAGE','CONSTRAINT_COLUMN_USAGE','VIEWS','VIEW_TABLE_USAGE','VIEW_COLUMN_USAGE','SCHEMATA','TABLES','TABLE_CONSTRAINTS','TABLE_PRIVILEGES','COLUMNS','COLUMN_DOMAIN_USAGE','COLUMN_PRIVILEGES','DOMAINS','DOMAIN_CONSTRAINTS','KEY_COLUMN_USAGE'))";
var $metaColumnsSQL = "select c.name,t.name,c.length from syscolumns c join systypes t on t.xusertype=c.xusertype join sysobjects o on o.id=c.id where o.name='%s'";
var $hasTop = 'top'; // support mssql/interbase SELECT TOP 10 * FROM TABLE
var $sysDate = 'GetDate()';
var $sysTimeStamp = 'GetDate()';
var $leftOuter = '*=';
var $rightOuter = '=*';
var $substr = 'substring';
var $length = 'len';
var $ansiOuter = true; // for mssql7 or later
var $identitySQL = 'select SCOPE_IDENTITY()'; // 'select SCOPE_IDENTITY'; # for mssql 2000
var $hasInsertID = true;
var $connectStmt = 'SET CONCAT_NULL_YIELDS_NULL OFF'; # When SET CONCAT_NULL_YIELDS_NULL is ON,
# concatenating a null value with a string yields a NULL result
function ADODB_odbc_mssql()
{
$this->ADODB_odbc();
//$this->curmode = SQL_CUR_USE_ODBC;
}
// crashes php...
function ServerInfo()
{
global $ADODB_FETCH_MODE;
$save = $ADODB_FETCH_MODE;
$ADODB_FETCH_MODE = ADODB_FETCH_NUM;
$row = $this->GetRow("execute sp_server_info 2");
$ADODB_FETCH_MODE = $save;
if (!is_array($row)) return false;
$arr['description'] = $row[2];
$arr['version'] = ADOConnection::_findvers($arr['description']);
return $arr;
}
function IfNull( $field, $ifNull )
{
return " ISNULL($field, $ifNull) "; // if MS SQL Server
}
function _insertid()
{
// SCOPE_IDENTITY()
// Returns the last IDENTITY value inserted into an IDENTITY column in
// the same scope. A scope is a module -- a stored procedure, trigger,
// function, or batch. Thus, two statements are in the same scope if
// they are in the same stored procedure, function, or batch.
return $this->GetOne($this->identitySQL);
}
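	/*
	 Illustrative note (not part of the original file): because the driver
	 uses SCOPE_IDENTITY(), a pattern such as

		$db->Execute("insert into mytable (name) values ('x')");
		$id = $db->Insert_ID();

	 returns the identity produced by that insert in the current scope;
	 the table and column names above are assumptions.
	*/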
function MetaForeignKeys($table, $owner=false, $upper=false)
{
global $ADODB_FETCH_MODE;
$save = $ADODB_FETCH_MODE;
$ADODB_FETCH_MODE = ADODB_FETCH_NUM;
$table = $this->qstr(strtoupper($table));
$sql =
"select object_name(constid) as constraint_name,
col_name(fkeyid, fkey) as column_name,
object_name(rkeyid) as referenced_table_name,
col_name(rkeyid, rkey) as referenced_column_name
from sysforeignkeys
where upper(object_name(fkeyid)) = $table
order by constraint_name, referenced_table_name, keyno";
$constraints = $this->GetArray($sql);
$ADODB_FETCH_MODE = $save;
$arr = false;
foreach($constraints as $constr) {
//print_r($constr);
$arr[$constr[0]][$constr[2]][] = $constr[1].'='.$constr[3];
}
if (!$arr) return false;
$arr2 = false;
foreach($arr as $k => $v) {
foreach($v as $a => $b) {
if ($upper) $a = strtoupper($a);
$arr2[$a] = $b;
}
}
return $arr2;
}
function MetaTables($ttype=false,$showSchema=false,$mask=false)
{
if ($mask) {$this->debug=1;
$save = $this->metaTablesSQL;
$mask = $this->qstr($mask);
$this->metaTablesSQL .= " AND name like $mask";
}
$ret = ADOConnection::MetaTables($ttype,$showSchema);
if ($mask) {
$this->metaTablesSQL = $save;
}
return $ret;
}
function MetaColumns($table, $normalize=true)
{
$arr = ADOConnection::MetaColumns($table);
return $arr;
}
function MetaIndexes($table,$primary=false, $owner=false)
{
$table = $this->qstr($table);
$sql = "SELECT i.name AS ind_name, C.name AS col_name, USER_NAME(O.uid) AS Owner, c.colid, k.Keyno,
CASE WHEN I.indid BETWEEN 1 AND 254 AND (I.status & 2048 = 2048 OR I.Status = 16402 AND O.XType = 'V') THEN 1 ELSE 0 END AS IsPK,
CASE WHEN I.status & 2 = 2 THEN 1 ELSE 0 END AS IsUnique
FROM dbo.sysobjects o INNER JOIN dbo.sysindexes I ON o.id = i.id
INNER JOIN dbo.sysindexkeys K ON I.id = K.id AND I.Indid = K.Indid
INNER JOIN dbo.syscolumns c ON K.id = C.id AND K.colid = C.Colid
WHERE LEFT(i.name, 8) <> '_WA_Sys_' AND o.status >= 0 AND O.Name LIKE $table
ORDER BY O.name, I.Name, K.keyno";
global $ADODB_FETCH_MODE;
$save = $ADODB_FETCH_MODE;
$ADODB_FETCH_MODE = ADODB_FETCH_NUM;
if ($this->fetchMode !== FALSE) {
$savem = $this->SetFetchMode(FALSE);
}
$rs = $this->Execute($sql);
if (isset($savem)) {
$this->SetFetchMode($savem);
}
$ADODB_FETCH_MODE = $save;
if (!is_object($rs)) {
return FALSE;
}
$indexes = array();
while ($row = $rs->FetchRow()) {
if (!$primary && $row[5]) continue;
$indexes[$row[0]]['unique'] = $row[6];
$indexes[$row[0]]['columns'][] = $row[1];
}
return $indexes;
}
function _query($sql,$inputarr=false)
{
if (is_string($sql)) $sql = str_replace('||','+',$sql);
return ADODB_odbc::_query($sql,$inputarr);
}
function SetTransactionMode( $transaction_mode )
{
$this->_transmode = $transaction_mode;
if (empty($transaction_mode)) {
$this->Execute('SET TRANSACTION ISOLATION LEVEL READ COMMITTED');
return;
}
if (!stristr($transaction_mode,'isolation')) $transaction_mode = 'ISOLATION LEVEL '.$transaction_mode;
$this->Execute("SET TRANSACTION ".$transaction_mode);
}
// "Stein-Aksel Basma" <basma@accelero.no>
// tested with MSSQL 2000
function MetaPrimaryKeys($table)
{
global $ADODB_FETCH_MODE;
$schema = '';
$this->_findschema($table,$schema);
//if (!$schema) $schema = $this->database;
if ($schema) $schema = "and k.table_catalog like '$schema%'";
$sql = "select distinct k.column_name,ordinal_position from information_schema.key_column_usage k,
information_schema.table_constraints tc
where tc.constraint_name = k.constraint_name and tc.constraint_type =
'PRIMARY KEY' and k.table_name = '$table' $schema order by ordinal_position ";
$savem = $ADODB_FETCH_MODE;
$ADODB_FETCH_MODE = ADODB_FETCH_ASSOC;
$a = $this->GetCol($sql);
$ADODB_FETCH_MODE = $savem;
if ($a && sizeof($a)>0) return $a;
$false = false;
return $false;
}
function SelectLimit($sql,$nrows=-1,$offset=-1, $inputarr=false,$secs2cache=0)
{
if ($nrows > 0 && $offset <= 0) {
$sql = preg_replace(
'/(^\s*select\s+(distinctrow|distinct)?)/i','\\1 '.$this->hasTop." $nrows ",$sql);
$rs = $this->Execute($sql,$inputarr);
} else
$rs = ADOConnection::SelectLimit($sql,$nrows,$offset,$inputarr,$secs2cache);
return $rs;
}
// Format date column in sql string given an input format that understands Y M D
function SQLDate($fmt, $col=false)
{
if (!$col) $col = $this->sysTimeStamp;
$s = '';
$len = strlen($fmt);
for ($i=0; $i < $len; $i++) {
if ($s) $s .= '+';
$ch = $fmt[$i];
switch($ch) {
case 'Y':
case 'y':
$s .= "datename(yyyy,$col)";
break;
case 'M':
$s .= "convert(char(3),$col,0)";
break;
case 'm':
$s .= "replace(str(month($col),2),' ','0')";
break;
case 'Q':
case 'q':
$s .= "datename(quarter,$col)";
break;
case 'D':
case 'd':
$s .= "replace(str(day($col),2),' ','0')";
break;
case 'h':
$s .= "substring(convert(char(14),$col,0),13,2)";
break;
case 'H':
$s .= "replace(str(datepart(hh,$col),2),' ','0')";
break;
case 'i':
$s .= "replace(str(datepart(mi,$col),2),' ','0')";
break;
case 's':
$s .= "replace(str(datepart(ss,$col),2),' ','0')";
break;
case 'a':
case 'A':
$s .= "substring(convert(char(19),$col,0),18,2)";
break;
default:
if ($ch == '\\') {
$i++;
$ch = substr($fmt,$i,1);
}
$s .= $this->qstr($ch);
break;
}
}
return $s;
}
}
class ADORecordSet_odbc_mssql extends ADORecordSet_odbc {
var $databaseType = 'odbc_mssql';
function ADORecordSet_odbc_mssql($id,$mode=false)
{
return $this->ADORecordSet_odbc($id,$mode);
}
}
?> | joelbrock/PFC_CORE | pos/is4c-nf/lib/adodb5/drivers/adodb-odbc_mssql.inc.php | PHP | gpl-2.0 | 9,259 |
<?php
/**
* Job queue aggregator code that uses PhpRedis.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* http://www.gnu.org/copyleft/gpl.html
*
* @file
* @author Aaron Schulz
*/
/**
* Class to handle tracking information about all queues using PhpRedis
*
* @ingroup JobQueue
* @ingroup Redis
* @since 1.21
*/
class JobQueueAggregatorRedis extends JobQueueAggregator {
/** @var RedisConnectionPool */
protected $redisPool;
/** @var array List of Redis server addresses */
protected $servers;
/**
* @params include:
* - redisConfig : An array of parameters to RedisConnectionPool::__construct().
* - redisServers : Array of server entries, the first being the primary and the
* others being fallback servers. Each entry is either a hostname/port
* combination or the absolute path of a UNIX socket.
* If a hostname is specified but no port, the standard port number
* 6379 will be used. Required.
* @param array $params
*/
protected function __construct( array $params ) {
parent::__construct( $params );
$this->servers = isset( $params['redisServers'] )
? $params['redisServers']
: array( $params['redisServer'] ); // b/c
$this->redisPool = RedisConnectionPool::singleton( $params['redisConfig'] );
}
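	/*
	 * Illustrative configuration sketch (assumption, not part of this file):
	 * this aggregator is normally selected through the wiki's job queue
	 * aggregator settings, e.g.
	 *
	 *   $wgJobQueueAggregator = array(
	 *       'class'        => 'JobQueueAggregatorRedis',
	 *       'redisServers' => array( 'rdb1:6379', 'rdb2' ),
	 *       'redisConfig'  => array( 'password' => 'secret' ),
	 *   );
	 */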
protected function doNotifyQueueEmpty( $wiki, $type ) {
$conn = $this->getConnection();
if ( !$conn ) {
return false;
}
try {
$conn->hDel( $this->getReadyQueueKey(), $this->encQueueName( $type, $wiki ) );
return true;
} catch ( RedisException $e ) {
$this->handleException( $conn, $e );
return false;
}
}
protected function doNotifyQueueNonEmpty( $wiki, $type ) {
$conn = $this->getConnection();
if ( !$conn ) {
return false;
}
try {
$conn->hSet( $this->getReadyQueueKey(), $this->encQueueName( $type, $wiki ), time() );
return true;
} catch ( RedisException $e ) {
$this->handleException( $conn, $e );
return false;
}
}
protected function doGetAllReadyWikiQueues() {
$conn = $this->getConnection();
if ( !$conn ) {
return array();
}
try {
$conn->multi( Redis::PIPELINE );
$conn->exists( $this->getReadyQueueKey() );
$conn->hGetAll( $this->getReadyQueueKey() );
list( $exists, $map ) = $conn->exec();
if ( $exists ) { // cache hit
$pendingDBs = array(); // (type => list of wikis)
foreach ( $map as $key => $time ) {
list( $type, $wiki ) = $this->dencQueueName( $key );
$pendingDBs[$type][] = $wiki;
}
} else { // cache miss
// Avoid duplicated effort
$rand = wfRandomString( 32 );
$conn->multi( Redis::MULTI );
$conn->setex( "{$rand}:lock", 3600, 1 );
$conn->renamenx( "{$rand}:lock", $this->getReadyQueueKey() . ":lock" );
if ( $conn->exec() !== array( true, true ) ) { // lock
$conn->delete( "{$rand}:lock" );
return array(); // already in progress
}
$pendingDBs = $this->findPendingWikiQueues(); // (type => list of wikis)
$conn->delete( $this->getReadyQueueKey() . ":lock" ); // unlock
$now = time();
$map = array();
foreach ( $pendingDBs as $type => $wikis ) {
foreach ( $wikis as $wiki ) {
$map[$this->encQueueName( $type, $wiki )] = $now;
}
}
$conn->hMSet( $this->getReadyQueueKey(), $map );
}
return $pendingDBs;
} catch ( RedisException $e ) {
$this->handleException( $conn, $e );
return array();
}
}
protected function doPurge() {
$conn = $this->getConnection();
if ( !$conn ) {
return false;
}
try {
$conn->delete( $this->getReadyQueueKey() );
} catch ( RedisException $e ) {
$this->handleException( $conn, $e );
return false;
}
return true;
}
/**
* Get a connection to the server that handles all sub-queues for this queue
*
* @return RedisConnRef|bool Returns false on failure
* @throws MWException
*/
protected function getConnection() {
$conn = false;
foreach ( $this->servers as $server ) {
$conn = $this->redisPool->getConnection( $server );
if ( $conn ) {
break;
}
}
return $conn;
}
/**
* @param RedisConnRef $conn
* @param RedisException $e
* @return void
*/
protected function handleException( RedisConnRef $conn, $e ) {
$this->redisPool->handleError( $conn, $e );
}
/**
* @return string
*/
private function getReadyQueueKey() {
return "jobqueue:aggregator:h-ready-queues:v1"; // global
}
/**
* @param string $type
* @param string $wiki
* @return string
*/
private function encQueueName( $type, $wiki ) {
return rawurlencode( $type ) . '/' . rawurlencode( $wiki );
}
/**
* @param string $name
* @return string
*/
private function dencQueueName( $name ) {
list( $type, $wiki ) = explode( '/', $name, 2 );
return array( rawurldecode( $type ), rawurldecode( $wiki ) );
}
}
| mozfr/wiki | includes/jobqueue/aggregator/JobQueueAggregatorRedis.php | PHP | gpl-2.0 | 5,541 |
/* Copyright (C) 1991,1995,1996,1997,2001,2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <errno.h>
#include <sys/socket.h>
/* Read N bytes into BUF through socket FD from peer
at address ADDR (which is ADDR_LEN bytes long).
Returns the number read or -1 for errors. */
ssize_t
__recvfrom (fd, buf, n, flags, addr, addr_len)
int fd;
void *buf;
size_t n;
int flags;
__SOCKADDR_ARG addr;
socklen_t *addr_len;
{
__set_errno (ENOSYS);
return -1;
}
weak_alias (__recvfrom, recvfrom)
stub_warning (recvfrom)
#include <stub-tag.h>
| zerovm/glibc | socket/recvfrom.c | C | gpl-2.0 | 1,363 |
/*
Unix SMB/CIFS implementation.
POSIX NTVFS backend - NT ACLs in xattrs
Copyright (C) Andrew Tridgell 2006
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "vfs_posix.h"
#include "../lib/util/unix_privs.h"
#include "librpc/gen_ndr/ndr_xattr.h"
/*
load the current ACL from extended attributes
*/
static NTSTATUS pvfs_acl_load_xattr(struct pvfs_state *pvfs, struct pvfs_filename *name, int fd,
TALLOC_CTX *mem_ctx,
struct security_descriptor **sd)
{
NTSTATUS status;
struct xattr_NTACL *acl;
if (!(pvfs->flags & PVFS_FLAG_XATTR_ENABLE)) {
return NT_STATUS_NOT_FOUND;
}
acl = talloc_zero(mem_ctx, struct xattr_NTACL);
NT_STATUS_HAVE_NO_MEMORY(acl);
status = pvfs_xattr_ndr_load(pvfs, mem_ctx, name->full_name, fd,
XATTR_NTACL_NAME,
acl,
(ndr_pull_flags_fn_t)ndr_pull_xattr_NTACL);
if (!NT_STATUS_IS_OK(status)) {
talloc_free(acl);
return status;
}
if (acl->version != 1) {
talloc_free(acl);
return NT_STATUS_INVALID_ACL;
}
*sd = talloc_steal(mem_ctx, acl->info.sd);
return NT_STATUS_OK;
}
/*
save the acl for a file into filesystem xattr
*/
static NTSTATUS pvfs_acl_save_xattr(struct pvfs_state *pvfs, struct pvfs_filename *name, int fd,
struct security_descriptor *sd)
{
NTSTATUS status;
void *privs;
struct xattr_NTACL acl;
if (!(pvfs->flags & PVFS_FLAG_XATTR_ENABLE)) {
return NT_STATUS_OK;
}
acl.version = 1;
acl.info.sd = sd;
/* this xattr is in the "system" namespace, so we need
admin privileges to set it */
privs = root_privileges();
status = pvfs_xattr_ndr_save(pvfs, name->full_name, fd,
XATTR_NTACL_NAME,
&acl,
(ndr_push_flags_fn_t)ndr_push_xattr_NTACL);
talloc_free(privs);
return status;
}
/*
initialise pvfs acl xattr backend
*/
NTSTATUS pvfs_acl_xattr_init(void)
{
struct pvfs_acl_ops ops = {
.name = "xattr",
.acl_load = pvfs_acl_load_xattr,
.acl_save = pvfs_acl_save_xattr
};
return pvfs_acl_register(&ops);
}
| smarkwell/asuswrt-merlin | release/src/router/samba-3.5.8/source4/ntvfs/posix/pvfs_acl_xattr.c | C | gpl-2.0 | 2,628 |
/*
* DLCI Implementation of Frame Relay protocol for Linux, according to
* RFC 1490. This generic device provides en/decapsulation for an
* underlying hardware driver. Routes & IPs are assigned to these
* interfaces. Requires 'dlcicfg' program to create usable
* interfaces, the initial one, 'dlci' is for IOCTL use only.
*
* Version: @(#)dlci.c 0.35 4 Jan 1997
*
* Author: Mike McLagan <mike.mclagan@linux.org>
*
* Changes:
*
* 0.15 Mike Mclagan Packet freeing, bug in kmalloc call
* DLCI_RET handling
* 0.20 Mike McLagan More conservative on which packets
 *					are returned for retry and which
 *					are dropped.  If DLCI_RET_DROP is
* returned from the FRAD, the packet is
* sent back to Linux for re-transmission
* 0.25 Mike McLagan Converted to use SIOC IOCTL calls
* 0.30 Jim Freeman Fixed to allow IPX traffic
* 0.35 Michael Elizabeth Fixed incorrect memcpy_fromfs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
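/*
 * Illustrative user-space sketch (assumption, not part of this driver): a
 * DLCI interface is created on top of a FRAD device through the SIOCADDDLCI
 * ioctl handled below, roughly:
 *
 *	struct dlci_add add;
 *	strcpy(add.devname, "sdla0");	(name of the FRAD device, assumed)
 *	add.dlci = 16;
 *	ioctl(fd, SIOCADDDLCI, &add);	(add.devname now holds the new "dlciNN" name)
 */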
#include <linux/config.h> /* for CONFIG_DLCI_COUNT */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_frad.h>
#include <net/sock.h>
static const char devname[] = "dlci";
static const char version[] = "DLCI driver v0.35, 4 Jan 1997, mike.mclagan@linux.org";
static struct net_device *open_dev[CONFIG_DLCI_COUNT];
static char *basename[16];
int dlci_init(struct net_device *dev);
/* allow FRAD's to register their name as a valid FRAD */
int register_frad(const char *name)
{
int i;
if (!name)
return(-EINVAL);
for (i=0;i<sizeof(basename) / sizeof(char *);i++)
{
if (!basename[i])
break;
/* take care of multiple registrations */
if (strcmp(basename[i], name) == 0)
return(0);
}
if (i == sizeof(basename) / sizeof(char *))
return(-EMLINK);
basename[i] = kmalloc(strlen(name) + 1, GFP_KERNEL);
if (!basename[i])
return(-ENOMEM);
strcpy(basename[i], name);
return(0);
}
int unregister_frad(const char *name)
{
int i;
if (!name)
return(-EINVAL);
for (i=0;i<sizeof(basename) / sizeof(char *);i++)
if (basename[i] && (strcmp(basename[i], name) == 0))
break;
if (i == sizeof(basename) / sizeof(char *))
return(-EINVAL);
kfree(basename[i]);
basename[i] = NULL;
return(0);
}
/*
* these encapsulate the RFC 1490 requirements as well as
* deal with packet transmission and reception, working with
* the upper network layers
*/
static int dlci_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, void *daddr, void *saddr,
unsigned len)
{
struct frhdr hdr;
struct dlci_local *dlp;
unsigned int hlen;
char *dest;
dlp = dev->priv;
hdr.control = FRAD_I_UI;
switch(type)
{
case ETH_P_IP:
hdr.IP_NLPID = FRAD_P_IP;
hlen = sizeof(hdr.control) + sizeof(hdr.IP_NLPID);
break;
/* feel free to add other types, if necessary */
default:
hdr.pad = FRAD_P_PADDING;
hdr.NLPID = FRAD_P_SNAP;
memset(hdr.OUI, 0, sizeof(hdr.OUI));
hdr.PID = htons(type);
hlen = sizeof(hdr);
break;
}
dest = skb_push(skb, hlen);
if (!dest)
return(0);
memcpy(dest, &hdr, hlen);
return(hlen);
}
static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
{
struct dlci_local *dlp;
struct frhdr *hdr;
int process, header;
dlp = dev->priv;
hdr = (struct frhdr *) skb->data;
process = 0;
header = 0;
skb->dev = dev;
if (hdr->control != FRAD_I_UI)
{
printk(KERN_NOTICE "%s: Invalid header flag 0x%02X.\n", dev->name, hdr->control);
dlp->stats.rx_errors++;
}
else
switch(hdr->IP_NLPID)
{
case FRAD_P_PADDING:
if (hdr->NLPID != FRAD_P_SNAP)
{
printk(KERN_NOTICE "%s: Unsupported NLPID 0x%02X.\n", dev->name, hdr->NLPID);
dlp->stats.rx_errors++;
break;
}
if (hdr->OUI[0] + hdr->OUI[1] + hdr->OUI[2] != 0)
{
printk(KERN_NOTICE "%s: Unsupported organizationally unique identifier 0x%02X-%02X-%02X.\n", dev->name, hdr->OUI[0], hdr->OUI[1], hdr->OUI[2]);
dlp->stats.rx_errors++;
break;
}
/* at this point, it's an EtherType frame */
header = sizeof(struct frhdr);
/* Already in network order ! */
skb->protocol = hdr->PID;
process = 1;
break;
case FRAD_P_IP:
header = sizeof(hdr->control) + sizeof(hdr->IP_NLPID);
skb->protocol = htons(ETH_P_IP);
process = 1;
break;
case FRAD_P_SNAP:
case FRAD_P_Q933:
case FRAD_P_CLNP:
printk(KERN_NOTICE "%s: Unsupported NLPID 0x%02X.\n", dev->name, hdr->pad);
dlp->stats.rx_errors++;
break;
default:
printk(KERN_NOTICE "%s: Invalid pad byte 0x%02X.\n", dev->name, hdr->pad);
dlp->stats.rx_errors++;
break;
}
if (process)
{
/* we've set up the protocol, so discard the header */
skb->mac.raw = skb->data;
skb_pull(skb, header);
dlp->stats.rx_bytes += skb->len;
netif_rx(skb);
dlp->stats.rx_packets++;
dev->last_rx = jiffies;
}
else
dev_kfree_skb(skb);
}
static int dlci_transmit(struct sk_buff *skb, struct net_device *dev)
{
struct dlci_local *dlp;
int ret;
ret = 0;
if (!skb || !dev)
return(0);
dlp = dev->priv;
netif_stop_queue(dev);
ret = dlp->slave->hard_start_xmit(skb, dlp->slave);
switch (ret)
{
case DLCI_RET_OK:
dlp->stats.tx_packets++;
ret = 0;
break;
case DLCI_RET_ERR:
dlp->stats.tx_errors++;
ret = 0;
break;
case DLCI_RET_DROP:
dlp->stats.tx_dropped++;
ret = 1;
break;
}
/* Alan Cox recommends always returning 0, and always freeing the packet */
	/* experience suggests a slightly more conservative approach */
if (!ret)
{
dev_kfree_skb(skb);
netif_wake_queue(dev);
}
return(ret);
}
int dlci_config(struct net_device *dev, struct dlci_conf *conf, int get)
{
struct dlci_conf config;
struct dlci_local *dlp;
struct frad_local *flp;
int err;
dlp = dev->priv;
flp = dlp->slave->priv;
if (!get)
{
if(copy_from_user(&config, conf, sizeof(struct dlci_conf)))
return -EFAULT;
if (config.flags & ~DLCI_VALID_FLAGS)
return(-EINVAL);
memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
dlp->configured = 1;
}
err = (*flp->dlci_conf)(dlp->slave, dev, get);
if (err)
return(err);
if (get)
{
if(copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf)))
return -EFAULT;
}
return(0);
}
int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct dlci_local *dlp;
if (!capable(CAP_NET_ADMIN))
return(-EPERM);
dlp = dev->priv;
switch(cmd)
{
case DLCI_GET_SLAVE:
if (!*(short *)(dev->dev_addr))
return(-EINVAL);
strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
break;
case DLCI_GET_CONF:
case DLCI_SET_CONF:
if (!*(short *)(dev->dev_addr))
return(-EINVAL);
return(dlci_config(dev, (struct dlci_conf *) ifr->ifr_data, cmd == DLCI_GET_CONF));
break;
default:
return(-EOPNOTSUPP);
}
return(0);
}
static int dlci_change_mtu(struct net_device *dev, int new_mtu)
{
struct dlci_local *dlp;
dlp = dev->priv;
return((*dlp->slave->change_mtu)(dlp->slave, new_mtu));
}
static int dlci_open(struct net_device *dev)
{
struct dlci_local *dlp;
struct frad_local *flp;
int err;
dlp = dev->priv;
if (!*(short *)(dev->dev_addr))
return(-EINVAL);
if (!netif_running(dlp->slave))
return(-ENOTCONN);
flp = dlp->slave->priv;
err = (*flp->activate)(dlp->slave, dev);
if (err)
return(err);
netif_start_queue(dev);
return 0;
}
static int dlci_close(struct net_device *dev)
{
struct dlci_local *dlp;
struct frad_local *flp;
int err;
netif_stop_queue(dev);
dlp = dev->priv;
flp = dlp->slave->priv;
err = (*flp->deactivate)(dlp->slave, dev);
return 0;
}
static struct net_device_stats *dlci_get_stats(struct net_device *dev)
{
struct dlci_local *dlp;
dlp = dev->priv;
return(&dlp->stats);
}
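/*
 * Create a new DLCI interface on top of an existing FRAD slave device
 * and associate the two (SIOCADDDLCI).
 */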
int dlci_add(struct dlci_add *dlci)
{
struct net_device *master, *slave;
struct dlci_local *dlp;
struct frad_local *flp;
int err, i;
char buf[10];
/* validate slave device */
slave = __dev_get_by_name(dlci->devname);
if (!slave)
return(-ENODEV);
if (slave->type != ARPHRD_FRAD)
return(-EINVAL);
/* check for registration */
for (i=0;i<sizeof(basename) / sizeof(char *); i++)
if ((basename[i]) &&
(strncmp(dlci->devname, basename[i], strlen(basename[i])) == 0) &&
(strlen(dlci->devname) > strlen(basename[i])))
break;
if (i == sizeof(basename) / sizeof(char *))
return(-EINVAL);
/* check for too many open devices : should this be dynamic ? */
for(i=0;i<CONFIG_DLCI_COUNT;i++)
if (!open_dev[i])
break;
if (i == CONFIG_DLCI_COUNT)
return(-ENOSPC); /* #### Alan: Comments on this?? */
/* create device name */
sprintf(buf, "%s%02i", devname, i);
master = kmalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return(-ENOMEM);
memset(master, 0, sizeof(*master));
strcpy(master->name, buf);
master->init = dlci_init;
master->flags = 0;
err = register_netdev(master);
if (err < 0)
{
kfree(master);
return(err);
}
*(short *)(master->dev_addr) = dlci->dlci;
dlp = (struct dlci_local *) master->priv;
dlp->slave = slave;
flp = slave->priv;
err = flp ? (*flp->assoc)(slave, master) : -EINVAL;
if (err < 0)
{
unregister_netdev(master);
kfree(master->priv);
kfree(master);
return(err);
}
strcpy(dlci->devname, buf);
open_dev[i] = master;
MOD_INC_USE_COUNT;
return(0);
}
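/* Tear down a DLCI interface and detach it from its slave (SIOCDELDLCI). */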
int dlci_del(struct dlci_add *dlci)
{
struct dlci_local *dlp;
struct frad_local *flp;
struct net_device *master, *slave;
int i, err;
/* validate slave device */
master = __dev_get_by_name(dlci->devname);
if (!master)
return(-ENODEV);
if (netif_running(master))
return(-EBUSY);
dlp = master->priv;
slave = dlp->slave;
flp = slave->priv;
err = (*flp->deassoc)(slave, master);
if (err)
return(err);
unregister_netdev(master);
for(i=0;i<CONFIG_DLCI_COUNT;i++)
if (master == open_dev[i])
break;
if (i<CONFIG_DLCI_COUNT)
open_dev[i] = NULL;
kfree(master->priv);
kfree(master);
MOD_DEC_USE_COUNT;
return(0);
}
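/* ioctl entry point for SIOCADDDLCI/SIOCDELDLCI; installed as dlci_ioctl_hook when built as a module. */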
int dlci_ioctl(unsigned int cmd, void *arg)
{
struct dlci_add add;
int err;
if (!capable(CAP_NET_ADMIN))
return(-EPERM);
if(copy_from_user(&add, arg, sizeof(struct dlci_add)))
return -EFAULT;
switch (cmd)
{
case SIOCADDDLCI:
err = dlci_add(&add);
if (!err)
if(copy_to_user(arg, &add, sizeof(struct dlci_add)))
return -EFAULT;
break;
case SIOCDELDLCI:
err = dlci_del(&add);
break;
default:
err = -EINVAL;
}
return(err);
}
int dlci_init(struct net_device *dev)
{
struct dlci_local *dlp;
dev->priv = kmalloc(sizeof(struct dlci_local), GFP_KERNEL);
if (!dev->priv)
return(-ENOMEM);
memset(dev->priv, 0, sizeof(struct dlci_local));
dlp = dev->priv;
dev->flags = 0;
dev->open = dlci_open;
dev->stop = dlci_close;
dev->do_ioctl = dlci_dev_ioctl;
dev->hard_start_xmit = dlci_transmit;
dev->hard_header = dlci_header;
dev->get_stats = dlci_get_stats;
dev->change_mtu = dlci_change_mtu;
dlp->receive = dlci_receive;
dev->type = ARPHRD_DLCI;
dev->hard_header_len = sizeof(struct frhdr);
dev->addr_len = sizeof(short);
memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
return(0);
}
int __init dlci_setup(void)
{
int i;
printk("%s.\n", version);
for(i=0;i<CONFIG_DLCI_COUNT;i++)
open_dev[i] = NULL;
for(i=0;i<sizeof(basename) / sizeof(char *);i++)
basename[i] = NULL;
return(0);
}
#ifdef MODULE
extern int (*dlci_ioctl_hook)(unsigned int, void *);
int init_module(void)
{
dlci_ioctl_hook = dlci_ioctl;
return(dlci_setup());
}
void cleanup_module(void)
{
dlci_ioctl_hook = NULL;
}
#endif /* MODULE */
MODULE_AUTHOR("Mike McLagan");
MODULE_DESCRIPTION("Frame Relay DLCI layer");
MODULE_LICENSE("GPL");
| n-soda/linux | drivers/net/wan/dlci.c | C | gpl-2.0 | 12,413 |
/**
* collectd - src/utils_mount.c
* Copyright (C) 2005,2006 Niki W. Waibel
*
* This program is free software; you can redistribute it and/
* or modify it under the terms of the GNU General Public Li-
* cence as published by the Free Software Foundation; either
* version 2 of the Licence, or any later version.
*
* This program is distributed in the hope that it will be use-
* ful, but WITHOUT ANY WARRANTY; without even the implied war-
* ranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public Licence for more details.
*
* You should have received a copy of the GNU General Public
* Licence along with this program; if not, write to the Free
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
* Author:
* Niki W. Waibel <niki.waibel@gmx.net>
**/
#if HAVE_CONFIG_H
# include "config.h"
#endif
#include "common.h"
#if HAVE_XFS_XQM_H
# include <xfs/xqm.h>
#define XFS_SUPER_MAGIC_STR "XFSB"
#define XFS_SUPER_MAGIC2_STR "BSFX"
#endif
#include "plugin.h"
#include "utils_mount.h"
#if HAVE_GETVFSSTAT
# if HAVE_SYS_TYPES_H
# include <sys/types.h>
# endif
# if HAVE_SYS_STATVFS_H
# include <sys/statvfs.h>
# endif
/* #endif HAVE_GETVFSSTAT */
#elif HAVE_GETFSSTAT
# if HAVE_SYS_PARAM_H
# include <sys/param.h>
# endif
# if HAVE_SYS_UCRED_H
# include <sys/ucred.h>
# endif
# if HAVE_SYS_MOUNT_H
# include <sys/mount.h>
# endif
#endif /* HAVE_GETFSSTAT */
#if HAVE_MNTENT_H
# include <mntent.h>
#endif
#if HAVE_SYS_MNTTAB_H
# include <sys/mnttab.h>
#endif
#if HAVE_PATHS_H
# include <paths.h>
#endif
#ifdef COLLECTD_MNTTAB
# undef COLLECTD_MNTTAB
#endif
#if defined(_PATH_MOUNTED) /* glibc */
# define COLLECTD_MNTTAB _PATH_MOUNTED
#elif defined(MNTTAB) /* Solaris */
# define COLLECTD_MNTTAB MNTTAB
#elif defined(MNT_MNTTAB)
# define COLLECTD_MNTTAB MNT_MNTTAB
#elif defined(MNTTABNAME)
# define COLLECTD_MNTTAB MNTTABNAME
#elif defined(KMTAB)
# define COLLECTD_MNTTAB KMTAB
#else
# define COLLECTD_MNTTAB "/etc/mnttab"
#endif
/* *** *** *** ********************************************* *** *** *** */
/* *** *** *** *** *** *** private functions *** *** *** *** *** *** */
/* *** *** *** ********************************************* *** *** *** */
/* stolen from quota-3.13 (quota-tools) */
#define PROC_PARTITIONS "/proc/partitions"
#define DEVLABELDIR "/dev"
#define UUID 1
#define VOL 2
static struct uuidCache_s {
struct uuidCache_s *next;
char uuid[16];
char *label;
char *device;
} *uuidCache = NULL;
#define EXT2_SUPER_MAGIC 0xEF53
struct ext2_super_block {
unsigned char s_dummy1[56];
unsigned char s_magic[2];
unsigned char s_dummy2[46];
unsigned char s_uuid[16];
char s_volume_name[16];
};
#define ext2magic(s) ((unsigned int)s.s_magic[0] \
+ (((unsigned int)s.s_magic[1]) << 8))
#if HAVE_XFS_XQM_H
struct xfs_super_block {
unsigned char s_magic[4];
unsigned char s_dummy[28];
unsigned char s_uuid[16];
unsigned char s_dummy2[60];
char s_fsname[12];
};
#endif /* HAVE_XFS_XQM_H */
#define REISER_SUPER_MAGIC "ReIsEr2Fs"
struct reiserfs_super_block {
unsigned char s_dummy1[52];
unsigned char s_magic[10];
unsigned char s_dummy2[22];
unsigned char s_uuid[16];
char s_volume_name[16];
};
/* for now, only ext2 and xfs are supported */
static int
get_label_uuid(const char *device, char **label, char *uuid)
{
/* start with ext2 and xfs tests, taken from mount_guess_fstype */
/* should merge these later */
int fd, rv = 1;
size_t namesize;
struct ext2_super_block e2sb;
#if HAVE_XFS_XQM_H
struct xfs_super_block xfsb;
#endif
struct reiserfs_super_block reisersb;
fd = open(device, O_RDONLY);
if(fd == -1) {
return rv;
}
if(lseek(fd, 1024, SEEK_SET) == 1024
&& read(fd, (char *)&e2sb, sizeof(e2sb)) == sizeof(e2sb)
&& ext2magic(e2sb) == EXT2_SUPER_MAGIC) {
memcpy(uuid, e2sb.s_uuid, sizeof(e2sb.s_uuid));
namesize = sizeof(e2sb.s_volume_name);
*label = smalloc(namesize + 1);
sstrncpy(*label, e2sb.s_volume_name, namesize);
rv = 0;
#if HAVE_XFS_XQM_H
} else if(lseek(fd, 0, SEEK_SET) == 0
&& read(fd, (char *)&xfsb, sizeof(xfsb)) == sizeof(xfsb)
&& (strncmp((char *)&xfsb.s_magic, XFS_SUPER_MAGIC_STR, 4) == 0 ||
strncmp((char *)&xfsb.s_magic, XFS_SUPER_MAGIC2_STR, 4) == 0)) {
memcpy(uuid, xfsb.s_uuid, sizeof(xfsb.s_uuid));
namesize = sizeof(xfsb.s_fsname);
*label = smalloc(namesize + 1);
sstrncpy(*label, xfsb.s_fsname, namesize);
rv = 0;
#endif /* HAVE_XFS_XQM_H */
} else if(lseek(fd, 65536, SEEK_SET) == 65536
&& read(fd, (char *)&reisersb, sizeof(reisersb)) == sizeof(reisersb)
&& !strncmp((char *)&reisersb.s_magic, REISER_SUPER_MAGIC, 9)) {
memcpy(uuid, reisersb.s_uuid, sizeof(reisersb.s_uuid));
namesize = sizeof(reisersb.s_volume_name);
*label = smalloc(namesize + 1);
sstrncpy(*label, reisersb.s_volume_name, namesize);
rv = 0;
}
close(fd);
return rv;
}
static void
uuidcache_addentry(char *device, char *label, char *uuid)
{
struct uuidCache_s *last;
if(!uuidCache) {
last = uuidCache = smalloc(sizeof(*uuidCache));
} else {
for(last = uuidCache; last->next; last = last->next);
last->next = smalloc(sizeof(*uuidCache));
last = last->next;
}
last->next = NULL;
last->device = device;
last->label = label;
memcpy(last->uuid, uuid, sizeof(last->uuid));
}
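/*
 * Populate the UUID/label cache from /proc/partitions: md devices are
 * probed on the first pass, all remaining partitions on the second.
 */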
static void
uuidcache_init(void)
{
char line[100];
char *s;
int ma, mi, sz;
static char ptname[100];
FILE *procpt;
char uuid[16], *label = NULL;
char device[110];
int firstPass;
int handleOnFirst;
if(uuidCache) {
return;
}
procpt = fopen(PROC_PARTITIONS, "r");
if(procpt == NULL) {
return;
}
for(firstPass = 1; firstPass >= 0; firstPass--) {
fseek(procpt, 0, SEEK_SET);
while(fgets(line, sizeof(line), procpt)) {
if(sscanf(line, " %d %d %d %[^\n ]",
&ma, &mi, &sz, ptname) != 4)
{
continue;
}
/* skip extended partitions (heuristic: size 1) */
if(sz == 1) {
continue;
}
/* look only at md devices on first pass */
handleOnFirst = !strncmp(ptname, "md", 2);
if(firstPass != handleOnFirst) {
continue;
}
/* skip entire disk (minor 0, 64, ... on ide;
0, 16, ... on sd) */
/* heuristic: partition name ends in a digit */
for(s = ptname; *s; s++);
if(isdigit((int)s[-1])) {
/*
* Note: this is a heuristic only - there is no reason
* why these devices should live in /dev.
* Perhaps this directory should be specifiable by option.
* One might for example have /devlabel with links to /dev
* for the devices that may be accessed in this way.
* (This is useful, if the cdrom on /dev/hdc must not
* be accessed.)
*/
ssnprintf(device, sizeof(device), "%s/%s",
DEVLABELDIR, ptname);
if(!get_label_uuid(device, &label, uuid)) {
uuidcache_addentry(sstrdup(device),
label, uuid);
}
}
}
}
fclose(procpt);
}
static unsigned char
fromhex(char c)
{
if(isdigit((int)c)) {
return (c - '0');
} else if(islower((int)c)) {
return (c - 'a' + 10);
} else {
return (c - 'A' + 10);
}
}
static char *
get_spec_by_x(int n, const char *t)
{
struct uuidCache_s *uc;
uuidcache_init();
uc = uuidCache;
while(uc) {
switch(n) {
case UUID:
if(!memcmp(t, uc->uuid, sizeof(uc->uuid))) {
return sstrdup(uc->device);
}
break;
case VOL:
if(!strcmp(t, uc->label)) {
return sstrdup(uc->device);
}
break;
}
uc = uc->next;
}
return NULL;
}
static char *
get_spec_by_uuid(const char *s)
{
char uuid[16];
int i;
if(strlen(s) != 36
|| s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-') {
goto bad_uuid;
}
for(i=0; i<16; i++) {
if(*s == '-') {
s++;
}
if(!isxdigit((int)s[0]) || !isxdigit((int)s[1])) {
goto bad_uuid;
}
uuid[i] = ((fromhex(s[0]) << 4) | fromhex(s[1]));
s += 2;
}
return get_spec_by_x(UUID, uuid);
bad_uuid:
DEBUG("utils_mount: Found an invalid UUID: %s", s);
return NULL;
}
static char *get_spec_by_volume_label(const char *s)
{
return get_spec_by_x (VOL, s);
}
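/* Resolve "UUID=" and "LABEL=" specifications to a device path; other names are duplicated as-is. */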
static char *get_device_name(const char *optstr)
{
char *rc;
if (optstr == NULL)
{
return (NULL);
}
else if (strncmp (optstr, "UUID=", 5) == 0)
{
DEBUG ("utils_mount: TODO: check UUID= code!");
rc = get_spec_by_uuid (optstr + 5);
}
else if (strncmp (optstr, "LABEL=", 6) == 0)
{
DEBUG ("utils_mount: TODO: check LABEL= code!");
rc = get_spec_by_volume_label (optstr + 6);
}
else
{
rc = sstrdup (optstr);
}
if(!rc)
{
DEBUG ("utils_mount: Error checking device name: optstr = %s", optstr);
}
return rc;
}
/* What weird OS is this..? I can't find any info with google :/ -octo */
#if HAVE_LISTMNTENT && 0
static cu_mount_t *cu_mount_listmntent (void)
{
cu_mount_t *last = *list;
struct tabmntent *p;
struct mntent *mnt;
struct tabmntent *mntlist;
if(listmntent(&mntlist, COLLECTD_MNTTAB, NULL, NULL) < 0) {
#if COLLECT_DEBUG
char errbuf[1024];
DEBUG("utils_mount: calling listmntent() failed: %s",
sstrerror (errno, errbuf, sizeof (errbuf)));
#endif /* COLLECT_DEBUG */
}
for(p = mntlist; p; p = p->next) {
char *loop = NULL, *device = NULL;
mnt = p->ment;
loop = cu_mount_getoptionvalue(mnt->mnt_opts, "loop=");
if(loop == NULL) { /* no loop= mount */
device = get_device_name(mnt->mnt_fsname);
if(device == NULL) {
DEBUG("utils_mount: can't get devicename for fs (%s) %s (%s)"
": ignored", mnt->mnt_type,
mnt->mnt_dir, mnt->mnt_fsname);
continue;
}
} else {
device = loop;
}
if(*list == NULL) {
*list = (cu_mount_t *)smalloc(sizeof(cu_mount_t));
last = *list;
} else {
while(last->next != NULL) { /* is last really last? */
last = last->next;
}
last->next = (cu_mount_t *)smalloc(sizeof(cu_mount_t));
last = last->next;
}
last->dir = sstrdup(mnt->mnt_dir);
last->spec_device = sstrdup(mnt->mnt_fsname);
last->device = device;
last->type = sstrdup(mnt->mnt_type);
last->options = sstrdup(mnt->mnt_opts);
last->next = NULL;
} /* for(p = mntlist; p; p = p->next) */
return(last);
} /* cu_mount_t *cu_mount_listmntent(void) */
/* #endif HAVE_LISTMNTENT */
/* 4.4BSD and Mac OS X (getfsstat) or NetBSD (getvfsstat) */
#elif HAVE_GETVFSSTAT || HAVE_GETFSSTAT
static cu_mount_t *cu_mount_getfsstat (void)
{
#if HAVE_GETVFSSTAT
# define STRUCT_STATFS struct statvfs
# define CMD_STATFS getvfsstat
# define FLAGS_STATFS ST_NOWAIT
/* #endif HAVE_GETVFSSTAT */
#elif HAVE_GETFSSTAT
# define STRUCT_STATFS struct statfs
# define CMD_STATFS getfsstat
# define FLAGS_STATFS MNT_NOWAIT
#endif /* HAVE_GETFSSTAT */
int bufsize;
STRUCT_STATFS *buf;
int num;
int i;
cu_mount_t *first = NULL;
cu_mount_t *last = NULL;
cu_mount_t *new = NULL;
/* Get the number of mounted file systems */
if ((bufsize = CMD_STATFS (NULL, 0, FLAGS_STATFS)) < 1)
{
#if COLLECT_DEBUG
char errbuf[1024];
DEBUG ("utils_mount: getv?fsstat failed: %s",
sstrerror (errno, errbuf, sizeof (errbuf)));
#endif /* COLLECT_DEBUG */
return (NULL);
}
if ((buf = (STRUCT_STATFS *) malloc (bufsize * sizeof (STRUCT_STATFS)))
== NULL)
return (NULL);
memset (buf, '\0', bufsize * sizeof (STRUCT_STATFS));
/* The bufsize needs to be passed in bytes. Really. This is not in the
* manpage.. -octo */
if ((num = CMD_STATFS (buf, bufsize * sizeof (STRUCT_STATFS), FLAGS_STATFS)) < 1)
{
#if COLLECT_DEBUG
char errbuf[1024];
DEBUG ("utils_mount: getv?fsstat failed: %s",
sstrerror (errno, errbuf, sizeof (errbuf)));
#endif /* COLLECT_DEBUG */
free (buf);
return (NULL);
}
for (i = 0; i < num; i++)
{
if ((new = malloc (sizeof (cu_mount_t))) == NULL)
break;
memset (new, '\0', sizeof (cu_mount_t));
/* Copy values from `struct mnttab' */
new->dir = sstrdup (buf[i].f_mntonname);
new->spec_device = sstrdup (buf[i].f_mntfromname);
new->type = sstrdup (buf[i].f_fstypename);
new->options = NULL;
new->device = get_device_name (new->options);
new->next = NULL;
/* Append to list */
if (first == NULL)
{
first = new;
last = new;
}
else
{
last->next = new;
last = new;
}
}
free (buf);
return (first);
}
/* #endif HAVE_GETVFSSTAT || HAVE_GETFSSTAT */
/* Solaris (SunOS 10): int getmntent(FILE *fp, struct mnttab *mp); */
#elif HAVE_TWO_GETMNTENT || HAVE_GEN_GETMNTENT || HAVE_SUN_GETMNTENT
static cu_mount_t *cu_mount_gen_getmntent (void)
{
struct mnttab mt;
FILE *fp;
cu_mount_t *first = NULL;
cu_mount_t *last = NULL;
cu_mount_t *new = NULL;
DEBUG ("utils_mount: (void); COLLECTD_MNTTAB = %s", COLLECTD_MNTTAB);
if ((fp = fopen (COLLECTD_MNTTAB, "r")) == NULL)
{
char errbuf[1024];
ERROR ("fopen (%s): %s", COLLECTD_MNTTAB,
sstrerror (errno, errbuf, sizeof (errbuf)));
return (NULL);
}
while (getmntent (fp, &mt) == 0)
{
if ((new = malloc (sizeof (cu_mount_t))) == NULL)
break;
memset (new, '\0', sizeof (cu_mount_t));
/* Copy values from `struct mnttab' */
new->dir = sstrdup (mt.mnt_mountp);
new->spec_device = sstrdup (mt.mnt_special);
new->type = sstrdup (mt.mnt_fstype);
new->options = sstrdup (mt.mnt_mntopts);
new->device = get_device_name (new->options);
new->next = NULL;
/* Append to list */
if (first == NULL)
{
first = new;
last = new;
}
else
{
last->next = new;
last = new;
}
}
fclose (fp);
return (first);
} /* static cu_mount_t *cu_mount_gen_getmntent (void) */
/* #endif HAVE_TWO_GETMNTENT || HAVE_GEN_GETMNTENT || HAVE_SUN_GETMNTENT */
#elif HAVE_SEQ_GETMNTENT
#warn "This version of `getmntent' hat not yet been implemented!"
/* #endif HAVE_SEQ_GETMNTENT */
#elif HAVE_ONE_GETMNTENT
static cu_mount_t *cu_mount_getmntent (void)
{
FILE *fp;
struct mntent *me;
cu_mount_t *first = NULL;
cu_mount_t *last = NULL;
cu_mount_t *new = NULL;
DEBUG ("utils_mount: (void); COLLECTD_MNTTAB = %s", COLLECTD_MNTTAB);
if ((fp = setmntent (COLLECTD_MNTTAB, "r")) == NULL)
{
char errbuf[1024];
ERROR ("setmntent (%s): %s", COLLECTD_MNTTAB,
sstrerror (errno, errbuf, sizeof (errbuf)));
return (NULL);
}
while ((me = getmntent (fp)) != NULL)
{
if ((new = malloc (sizeof (cu_mount_t))) == NULL)
break;
memset (new, '\0', sizeof (cu_mount_t));
/* Copy values from `struct mntent *' */
new->dir = sstrdup (me->mnt_dir);
new->spec_device = sstrdup (me->mnt_fsname);
new->type = sstrdup (me->mnt_type);
new->options = sstrdup (me->mnt_opts);
new->device = get_device_name (new->options);
new->next = NULL;
DEBUG ("utils_mount: new = {dir = %s, spec_device = %s, type = %s, options = %s, device = %s}",
new->dir, new->spec_device, new->type, new->options, new->device);
/* Append to list */
if (first == NULL)
{
first = new;
last = new;
}
else
{
last->next = new;
last = new;
}
}
endmntent (fp);
DEBUG ("utils_mount: return (0x%p)", (void *) first);
return (first);
}
#endif /* HAVE_ONE_GETMNTENT */
/* *** *** *** ******************************************** *** *** *** */
/* *** *** *** *** *** *** public functions *** *** *** *** *** *** */
/* *** *** *** ******************************************** *** *** *** */
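/*
 * Append the currently mounted file systems to *list (creating the
 * list if necessary) and return a pointer to its last element.
 */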
cu_mount_t *cu_mount_getlist(cu_mount_t **list)
{
cu_mount_t *new;
cu_mount_t *first = NULL;
cu_mount_t *last = NULL;
if (list == NULL)
return (NULL);
if (*list != NULL)
{
first = *list;
last = first;
while (last->next != NULL)
last = last->next;
}
#if HAVE_LISTMNTENT && 0
new = cu_mount_listmntent ();
#elif HAVE_GETVFSSTAT || HAVE_GETFSSTAT
new = cu_mount_getfsstat ();
#elif HAVE_TWO_GETMNTENT || HAVE_GEN_GETMNTENT || HAVE_SUN_GETMNTENT
new = cu_mount_gen_getmntent ();
#elif HAVE_SEQ_GETMNTENT
# error "This version of `getmntent' hat not yet been implemented!"
#elif HAVE_ONE_GETMNTENT
new = cu_mount_getmntent ();
#else
# error "Could not determine how to find mountpoints."
#endif
if (first != NULL)
{
last->next = new;
}
else
{
first = new;
last = new;
*list = first;
}
while ((last != NULL) && (last->next != NULL))
last = last->next;
return (last);
} /* cu_mount_t *cu_mount_getlist(cu_mount_t **list) */
void cu_mount_freelist (cu_mount_t *list)
{
cu_mount_t *this;
cu_mount_t *next;
for (this = list; this != NULL; this = next)
{
next = this->next;
sfree (this->dir);
sfree (this->spec_device);
sfree (this->device);
sfree (this->type);
sfree (this->options);
sfree (this);
}
} /* void cu_mount_freelist(cu_mount_t *list) */
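/*
 * Check a comma-separated mount option string for `keyword' (prefix
 * match, or exact option match if `full' is set) and return a pointer
 * into `line', or NULL if it is not present.
 */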
char *
cu_mount_checkoption(char *line, char *keyword, int full)
{
char *line2, *l2;
int l = strlen(keyword);
char *p1, *p2;
if(line == NULL || keyword == NULL) {
return NULL;
}
if(full != 0) {
full = 1;
}
line2 = sstrdup(line);
l2 = line2;
while(*l2 != '\0') {
if(*l2 == ',') {
*l2 = '\0';
}
l2++;
}
p1 = line - 1;
p2 = strchr(line, ',');
do {
if(strncmp(line2+(p1-line)+1, keyword, l+full) == 0) {
free(line2);
return p1+1;
}
p1 = p2;
if(p1 != NULL) {
p2 = strchr(p1+1, ',');
}
} while(p1 != NULL);
free(line2);
return NULL;
} /* char *cu_mount_checkoption(char *line, char *keyword, int full) */
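/*
 * Return the value of a `keyword' mount option (e.g. "loop=") as a
 * newly allocated string, or NULL if no usable value is found.
 */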
char *
cu_mount_getoptionvalue(char *line, char *keyword)
{
char *r;
r = cu_mount_checkoption(line, keyword, 0);
if(r != NULL) {
char *p;
r += strlen(keyword);
p = strchr(r, ',');
if(p == NULL) {
if(strlen(r) == 0) {
return NULL;
}
return sstrdup(r);
} else {
char *m;
if((p-r) == 1) {
return NULL;
}
m = (char *)smalloc(p-r+1);
sstrncpy(m, r, p-r+1);
return m;
}
}
return r;
} /* char *cu_mount_getoptionvalue(char *line, char *keyword) */
int
cu_mount_type(const char *type)
{
if(strcmp(type, "ext3") == 0) return CUMT_EXT3;
if(strcmp(type, "ext2") == 0) return CUMT_EXT2;
if(strcmp(type, "ufs") == 0) return CUMT_UFS;
if(strcmp(type, "vxfs") == 0) return CUMT_VXFS;
if(strcmp(type, "zfs") == 0) return CUMT_ZFS;
return CUMT_UNKNOWN;
} /* int cu_mount_type(const char *type) */
| aub/collectd-source | src/utils_mount.c | C | gpl-2.0 | 18,019 |
/*
Unix SMB/CIFS implementation.
WINS Replication server
Copyright (C) Stefan Metzmacher 2005
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "lib/socket/socket.h"
#include "lib/stream/packet.h"
#include "smbd/service_task.h"
#include "smbd/service_stream.h"
#include "smbd/service.h"
#include "lib/messaging/irpc.h"
#include "librpc/gen_ndr/ndr_winsrepl.h"
#include "wrepl_server/wrepl_server.h"
#include "smbd/process_model.h"
#include "system/network.h"
#include "lib/socket/netif.h"
#include "param/param.h"
void wreplsrv_terminate_in_connection(struct wreplsrv_in_connection *wreplconn, const char *reason)
{
stream_terminate_connection(wreplconn->conn, reason);
}
static int terminate_after_send_destructor(struct wreplsrv_in_connection **tas)
{
wreplsrv_terminate_in_connection(*tas, "wreplsrv_in_connection: terminate_after_send");
return 0;
}
/*
receive some data on a WREPL connection
*/
static NTSTATUS wreplsrv_recv_request(void *private_data, DATA_BLOB blob)
{
struct wreplsrv_in_connection *wreplconn = talloc_get_type(private_data, struct wreplsrv_in_connection);
struct wreplsrv_in_call *call;
DATA_BLOB packet_in_blob;
DATA_BLOB packet_out_blob;
struct wrepl_wrap packet_out_wrap;
NTSTATUS status;
enum ndr_err_code ndr_err;
call = talloc_zero(wreplconn, struct wreplsrv_in_call);
NT_STATUS_HAVE_NO_MEMORY(call);
call->wreplconn = wreplconn;
talloc_steal(call, blob.data);
packet_in_blob.data = blob.data + 4;
packet_in_blob.length = blob.length - 4;
ndr_err = ndr_pull_struct_blob(&packet_in_blob, call,
lp_iconv_convenience(wreplconn->service->task->lp_ctx),
&call->req_packet,
(ndr_pull_flags_fn_t)ndr_pull_wrepl_packet);
if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
return ndr_map_error2ntstatus(ndr_err);
}
if (DEBUGLVL(10)) {
DEBUG(10,("Received WINS-Replication packet of length %u\n",
(unsigned)packet_in_blob.length + 4));
NDR_PRINT_DEBUG(wrepl_packet, &call->req_packet);
}
status = wreplsrv_in_call(call);
NT_STATUS_IS_ERR_RETURN(status);
if (!NT_STATUS_IS_OK(status)) {
/* w2k just ignores invalid packets, so we do */
DEBUG(10,("Received WINS-Replication packet was invalid, we just ignore it\n"));
talloc_free(call);
return NT_STATUS_OK;
}
/* and now encode the reply */
packet_out_wrap.packet = call->rep_packet;
ndr_err = ndr_push_struct_blob(&packet_out_blob, call,
lp_iconv_convenience(wreplconn->service->task->lp_ctx),
&packet_out_wrap,
(ndr_push_flags_fn_t)ndr_push_wrepl_wrap);
if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
return ndr_map_error2ntstatus(ndr_err);
}
if (DEBUGLVL(10)) {
DEBUG(10,("Sending WINS-Replication packet of length %d\n", (int)packet_out_blob.length));
NDR_PRINT_DEBUG(wrepl_packet, &call->rep_packet);
}
if (call->terminate_after_send) {
struct wreplsrv_in_connection **tas;
tas = talloc(packet_out_blob.data, struct wreplsrv_in_connection *);
NT_STATUS_HAVE_NO_MEMORY(tas);
*tas = wreplconn;
talloc_set_destructor(tas, terminate_after_send_destructor);
}
status = packet_send(wreplconn->packet, packet_out_blob);
NT_STATUS_NOT_OK_RETURN(status);
talloc_free(call);
return NT_STATUS_OK;
}
/*
called when the socket becomes readable
*/
static void wreplsrv_recv(struct stream_connection *conn, uint16_t flags)
{
struct wreplsrv_in_connection *wreplconn = talloc_get_type(conn->private_data,
struct wreplsrv_in_connection);
packet_recv(wreplconn->packet);
}
/*
called when the socket becomes writable
*/
static void wreplsrv_send(struct stream_connection *conn, uint16_t flags)
{
struct wreplsrv_in_connection *wreplconn = talloc_get_type(conn->private_data,
struct wreplsrv_in_connection);
packet_queue_run(wreplconn->packet);
}
/*
handle socket recv errors
*/
static void wreplsrv_recv_error(void *private_data, NTSTATUS status)
{
struct wreplsrv_in_connection *wreplconn = talloc_get_type(private_data,
struct wreplsrv_in_connection);
wreplsrv_terminate_in_connection(wreplconn, nt_errstr(status));
}
/*
called when we get a new connection
*/
static void wreplsrv_accept(struct stream_connection *conn)
{
struct wreplsrv_service *service = talloc_get_type(conn->private_data, struct wreplsrv_service);
struct wreplsrv_in_connection *wreplconn;
struct socket_address *peer_ip;
wreplconn = talloc_zero(conn, struct wreplsrv_in_connection);
if (!wreplconn) {
stream_terminate_connection(conn, "wreplsrv_accept: out of memory");
return;
}
wreplconn->packet = packet_init(wreplconn);
if (!wreplconn->packet) {
wreplsrv_terminate_in_connection(wreplconn, "wreplsrv_accept: out of memory");
return;
}
packet_set_private(wreplconn->packet, wreplconn);
packet_set_socket(wreplconn->packet, conn->socket);
packet_set_callback(wreplconn->packet, wreplsrv_recv_request);
packet_set_full_request(wreplconn->packet, packet_full_request_u32);
packet_set_error_handler(wreplconn->packet, wreplsrv_recv_error);
packet_set_event_context(wreplconn->packet, conn->event.ctx);
packet_set_fde(wreplconn->packet, conn->event.fde);
packet_set_serialise(wreplconn->packet);
wreplconn->conn = conn;
wreplconn->service = service;
peer_ip = socket_get_peer_addr(conn->socket, wreplconn);
if (!peer_ip) {
wreplsrv_terminate_in_connection(wreplconn, "wreplsrv_accept: could not obtain peer IP from kernel");
return;
}
wreplconn->partner = wreplsrv_find_partner(service, peer_ip->addr);
conn->private_data = wreplconn;
irpc_add_name(conn->msg_ctx, "wreplsrv_connection");
}
static const struct stream_server_ops wreplsrv_stream_ops = {
.name = "wreplsrv",
.accept_connection = wreplsrv_accept,
.recv_handler = wreplsrv_recv,
.send_handler = wreplsrv_send,
};
/*
called when we get a new connection
*/
NTSTATUS wreplsrv_in_connection_merge(struct wreplsrv_partner *partner,
struct socket_context *sock,
struct packet_context *packet,
struct wreplsrv_in_connection **_wrepl_in)
{
struct wreplsrv_service *service = partner->service;
struct wreplsrv_in_connection *wrepl_in;
const struct model_ops *model_ops;
struct stream_connection *conn;
NTSTATUS status;
/* within the wrepl task we want to be a single process, so
ask for the single process model ops and pass these to the
stream_setup_socket() call. */
model_ops = process_model_startup(service->task->event_ctx, "single");
if (!model_ops) {
DEBUG(0,("Can't find 'single' process model_ops"));
return NT_STATUS_INTERNAL_ERROR;
}
wrepl_in = talloc_zero(partner, struct wreplsrv_in_connection);
NT_STATUS_HAVE_NO_MEMORY(wrepl_in);
wrepl_in->service = service;
wrepl_in->partner = partner;
status = stream_new_connection_merge(service->task->event_ctx, service->task->lp_ctx, model_ops,
sock, &wreplsrv_stream_ops, service->task->msg_ctx,
wrepl_in, &conn);
NT_STATUS_NOT_OK_RETURN(status);
/*
* make the wreplsrv_in_connection structure a child of the
* stream_connection, to match the hierarchy of wreplsrv_accept
*/
wrepl_in->conn = conn;
talloc_steal(conn, wrepl_in);
/*
* now update the packet handling callback,...
*/
wrepl_in->packet = talloc_steal(wrepl_in, packet);
packet_set_private(wrepl_in->packet, wrepl_in);
packet_set_socket(wrepl_in->packet, conn->socket);
packet_set_callback(wrepl_in->packet, wreplsrv_recv_request);
packet_set_full_request(wrepl_in->packet, packet_full_request_u32);
packet_set_error_handler(wrepl_in->packet, wreplsrv_recv_error);
packet_set_event_context(wrepl_in->packet, conn->event.ctx);
packet_set_fde(wrepl_in->packet, conn->event.fde);
packet_set_serialise(wrepl_in->packet);
*_wrepl_in = wrepl_in;
return NT_STATUS_OK;
}
/*
startup the wrepl port 42 server sockets
*/
NTSTATUS wreplsrv_setup_sockets(struct wreplsrv_service *service, struct loadparm_context *lp_ctx)
{
NTSTATUS status;
struct task_server *task = service->task;
const struct model_ops *model_ops;
const char *address;
uint16_t port = WINS_REPLICATION_PORT;
/* within the wrepl task we want to be a single process, so
ask for the single process model ops and pass these to the
stream_setup_socket() call. */
model_ops = process_model_startup(task->event_ctx, "single");
if (!model_ops) {
DEBUG(0,("Can't find 'single' process model_ops"));
return NT_STATUS_INTERNAL_ERROR;
}
if (lp_interfaces(lp_ctx) && lp_bind_interfaces_only(lp_ctx)) {
int num_interfaces;
int i;
struct interface *ifaces;
load_interfaces(task, lp_interfaces(lp_ctx), &ifaces);
num_interfaces = iface_count(ifaces);
/* We have been given an interfaces line, and been
told to only bind to those interfaces. Create a
socket per interface and bind to only these.
*/
for(i = 0; i < num_interfaces; i++) {
address = iface_n_ip(ifaces, i);
status = stream_setup_socket(task->event_ctx,
task->lp_ctx, model_ops,
&wreplsrv_stream_ops,
"ipv4", address, &port,
lp_socket_options(task->lp_ctx),
service);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("stream_setup_socket(address=%s,port=%u) failed - %s\n",
address, port, nt_errstr(status)));
return status;
}
}
} else {
address = lp_socket_address(lp_ctx);
status = stream_setup_socket(task->event_ctx, task->lp_ctx,
model_ops, &wreplsrv_stream_ops,
"ipv4", address, &port, lp_socket_options(task->lp_ctx),
service);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(0,("stream_setup_socket(address=%s,port=%u) failed - %s\n",
address, port, nt_errstr(status)));
return status;
}
}
return NT_STATUS_OK;
}
| smarkwell/asuswrt-merlin | release/src/router/samba-3.5.8/source4/wrepl_server/wrepl_in_connection.c | C | gpl-2.0 | 10,327 |
/*
* linux/kernel/exit.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
static void exit_mm(struct task_struct * tsk);
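/*
 * Drop a task from the pid hash and the thread lists; with group_dead
 * set, the pgrp/session pids and the global tasks/sibling links go too.
 */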
static void __unhash_process(struct task_struct *p, bool group_dead)
{
nr_threads--;
detach_pid(p, PIDTYPE_PID);
if (group_dead) {
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
list_del_rcu(&p->tasks);
list_del_init(&p->sibling);
__this_cpu_dec(process_counts);
}
list_del_rcu(&p->thread_group);
list_del_rcu(&p->thread_node);
}
/*
* This function expects the tasklist_lock write-locked.
*/
static void __exit_signal(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
bool group_dead = thread_group_leader(tsk);
struct sighand_struct *sighand;
struct tty_struct *uninitialized_var(tty);
cputime_t utime, stime;
sighand = rcu_dereference_check(tsk->sighand,
lockdep_tasklist_lock_is_held());
spin_lock(&sighand->siglock);
posix_cpu_timers_exit(tsk);
if (group_dead) {
posix_cpu_timers_exit_group(tsk);
tty = sig->tty;
sig->tty = NULL;
} else {
/*
* This can only happen if the caller is de_thread().
* FIXME: this is the temporary hack, we should teach
* posix-cpu-timers to handle this case correctly.
*/
if (unlikely(has_group_leader_pid(tsk)))
posix_cpu_timers_exit_group(tsk);
/*
* If there is any task waiting for the group exit
* then notify it:
*/
if (sig->notify_count > 0 && !--sig->notify_count)
wake_up_process(sig->group_exit_task);
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
/*
* Accumulate here the counters for all threads but the
* group leader as they die, so they can be added into
* the process-wide totals when those are taken.
* The group leader stays around as a zombie as long
* as there are other threads. When it gets reaped,
* the exit.c code will add its counts into these totals.
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
task_cputime(tsk, &utime, &stime);
sig->utime += utime;
sig->stime += stime;
sig->gtime += task_gtime(tsk);
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
sig->nivcsw += tsk->nivcsw;
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
}
sig->nr_threads--;
__unhash_process(tsk, group_dead);
/*
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
flush_sigqueue(&tsk->pending);
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
__cleanup_sighand(sighand);
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
if (group_dead) {
flush_sigqueue(&sig->shared_pending);
tty_kref_put(tty);
}
}
static void delayed_put_task_struct(struct rcu_head *rhp)
{
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
perf_event_delayed_put(tsk);
trace_sched_process_free(tsk);
put_task_struct(tsk);
}
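/*
 * Final cleanup of a dead task: drop its tasklist/pid links via
 * __exit_signal(), possibly reap a zombie group leader, and free the
 * task_struct through an RCU callback.
 */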
void release_task(struct task_struct * p)
{
struct task_struct *leader;
int zap_leader;
repeat:
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
rcu_read_lock();
atomic_dec(&__task_cred(p)->user->processes);
rcu_read_unlock();
proc_flush_task(p);
write_lock_irq(&tasklist_lock);
ptrace_release_task(p);
__exit_signal(p);
/*
* If we are the last non-leader member of the thread
* group, and the leader is zombie, then notify the
* group leader's parent process. (if it wants notification.)
*/
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
* then we are the one who should release the leader.
*/
zap_leader = do_notify_parent(leader, leader->exit_signal);
if (zap_leader)
leader->exit_state = EXIT_DEAD;
}
write_unlock_irq(&tasklist_lock);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
p = leader;
if (unlikely(zap_leader))
goto repeat;
}
/*
* This checks not only the pgrp, but falls back on the pid if no
* satisfactory pgrp is found. I dunno - gdb doesn't work correctly
* without this...
*
* The caller must hold rcu lock or the tasklist lock.
*/
struct pid *session_of_pgrp(struct pid *pgrp)
{
struct task_struct *p;
struct pid *sid = NULL;
p = pid_task(pgrp, PIDTYPE_PGID);
if (p == NULL)
p = pid_task(pgrp, PIDTYPE_PID);
if (p != NULL)
sid = task_session(p);
return sid;
}
/*
* Determine if a process group is "orphaned", according to the POSIX
* definition in 2.2.2.52. Orphaned process groups are not to be affected
* by terminal-generated stop signals. Newly orphaned process groups are
* to receive a SIGHUP and a SIGCONT.
*
* "I ask you, have you ever known what it is to be an orphan?"
*/
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
if ((p == ignored_task) ||
(p->exit_state && thread_group_empty(p)) ||
is_global_init(p->real_parent))
continue;
if (task_pgrp(p->real_parent) != pgrp &&
task_session(p->real_parent) == task_session(p))
return 0;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
return 1;
}
int is_current_pgrp_orphaned(void)
{
int retval;
read_lock(&tasklist_lock);
retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
read_unlock(&tasklist_lock);
return retval;
}
static bool has_stopped_jobs(struct pid *pgrp)
{
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
if (p->signal->flags & SIGNAL_STOP_STOPPED)
return true;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
return false;
}
/*
* Check to see if any process groups have become orphaned as
* a result of our exiting, and if they have any stopped jobs,
* send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
struct pid *pgrp = task_pgrp(tsk);
struct task_struct *ignored_task = tsk;
if (!parent)
/* exit: our father is in a different pgrp than
* we are and we were the only connection outside.
*/
parent = tsk->real_parent;
else
/* reparent: our child is in a different pgrp than
* we are, and it was the only connection outside.
*/
ignored_task = NULL;
if (task_pgrp(parent) != pgrp &&
task_session(parent) == task_session(tsk) &&
will_become_orphaned_pgrp(pgrp, ignored_task) &&
has_stopped_jobs(pgrp)) {
__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
}
}
void __set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
if (task_session(curr) != pid)
change_pid(curr, PIDTYPE_SID, pid);
if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
}
/*
* Let kernel threads use this to say that they allow a certain signal.
* Must not be used if kthread was cloned with CLONE_SIGHAND.
*/
int allow_signal(int sig)
{
if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(¤t->sighand->siglock);
/* This is only needed for daemonize()'ed kthreads */
sigdelset(¤t->blocked, sig);
/*
* Kernel threads handle their own signals. Let the signal code
* know it'll be handled, so that they don't get converted to
* SIGKILL or just silently dropped.
*/
current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
recalc_sigpending();
spin_unlock_irq(¤t->sighand->siglock);
return 0;
}
EXPORT_SYMBOL(allow_signal);
int disallow_signal(int sig)
{
if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(¤t->sighand->siglock);
current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
recalc_sigpending();
spin_unlock_irq(¤t->sighand->siglock);
return 0;
}
EXPORT_SYMBOL(disallow_signal);
#ifdef CONFIG_MM_OWNER
/*
* A task is exiting. If it owned this mm, find a new owner for the mm.
*/
void mm_update_next_owner(struct mm_struct *mm)
{
struct task_struct *c, *g, *p = current;
retry:
/*
* If the exiting or execing task is not the owner, it's
* someone else's problem.
*/
if (mm->owner != p)
return;
/*
* The current owner is exiting/execing and there are no other
* candidates. Do not leave the mm pointing to a possibly
* freed task structure.
*/
if (atomic_read(&mm->mm_users) <= 1) {
mm->owner = NULL;
return;
}
read_lock(&tasklist_lock);
/*
* Search in the children
*/
list_for_each_entry(c, &p->children, sibling) {
if (c->mm == mm)
goto assign_new_owner;
}
/*
* Search in the siblings
*/
list_for_each_entry(c, &p->real_parent->children, sibling) {
if (c->mm == mm)
goto assign_new_owner;
}
/*
* Search through everything else. We should not get
* here often
*/
do_each_thread(g, c) {
if (c->mm == mm)
goto assign_new_owner;
} while_each_thread(g, c);
read_unlock(&tasklist_lock);
/*
* We found no owner yet mm_users > 1: this implies that we are
* most likely racing with swapoff (try_to_unuse()) or /proc or
* ptrace or page migration (get_task_mm()). Mark owner as NULL.
*/
mm->owner = NULL;
return;
assign_new_owner:
BUG_ON(c == p);
get_task_struct(c);
/*
* The task_lock protects c->mm from changing.
* We always want mm->owner->mm == mm
*/
task_lock(c);
/*
* Delay read_unlock() till we have the task_lock()
* to ensure that c does not slip away underneath us
*/
read_unlock(&tasklist_lock);
if (c->mm != mm) {
task_unlock(c);
put_task_struct(c);
goto retry;
}
mm->owner = c;
task_unlock(c);
put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
/*
* Turn us into a lazy TLB process if we
* aren't already..
*/
static void exit_mm(struct task_struct * tsk)
{
struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
int mm_released;
mm_release(tsk, mm);
if (!mm)
return;
sync_mm_rss(mm);
/*
* Serialize with any possible pending coredump.
* We must hold mmap_sem around checking core_state
* and clearing tsk->mm. The core-inducing thread
* will increment ->nr_threads for each thread in the
* group with ->mm != NULL.
*/
down_read(&mm->mmap_sem);
core_state = mm->core_state;
if (core_state) {
struct core_thread self;
up_read(&mm->mmap_sem);
self.task = tsk;
self.next = xchg(&core_state->dumper.next, &self);
/*
* Implies mb(), the result of xchg() must be visible
* to core_state->dumper.
*/
if (atomic_dec_and_test(&core_state->nr_threads))
complete(&core_state->startup);
for (;;) {
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (!self.task) /* see coredump_finish() */
break;
freezable_schedule();
}
__set_task_state(tsk, TASK_RUNNING);
down_read(&mm->mmap_sem);
}
atomic_inc(&mm->mm_count);
BUG_ON(mm != tsk->active_mm);
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
up_read(&mm->mmap_sem);
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mm_update_next_owner(mm);
mm_released = mmput(mm);
if (mm_released)
set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
}
/*
* When we die, we re-parent all our children, and try to:
* 1. give them to another thread in our thread group, if such a member exists
* 2. give it to the first ancestor process which prctl'd itself as a
* child_subreaper for its children (like a service manager)
* 3. give it to the init process (PID 1) in our pid namespace
*/
static struct task_struct *find_new_reaper(struct task_struct *father)
__releases(&tasklist_lock)
__acquires(&tasklist_lock)
{
struct pid_namespace *pid_ns = task_active_pid_ns(father);
struct task_struct *thread;
thread = father;
while_each_thread(father, thread) {
if (thread->flags & PF_EXITING)
continue;
if (unlikely(pid_ns->child_reaper == father))
pid_ns->child_reaper = thread;
return thread;
}
if (unlikely(pid_ns->child_reaper == father)) {
write_unlock_irq(&tasklist_lock);
if (unlikely(pid_ns == &init_pid_ns)) {
panic("Attempted to kill init! exitcode=0x%08x\n",
father->signal->group_exit_code ?:
father->exit_code);
}
zap_pid_ns_processes(pid_ns);
write_lock_irq(&tasklist_lock);
} else if (father->signal->has_child_subreaper) {
struct task_struct *reaper;
/*
* Find the first ancestor marked as child_subreaper.
* Note that the code below checks same_thread_group(reaper,
* pid_ns->child_reaper). This is what we need to DTRT in a
* PID namespace. However we still need the check above, see
* http://marc.info/?l=linux-kernel&m=131385460420380
*/
for (reaper = father->real_parent;
reaper != &init_task;
reaper = reaper->real_parent) {
if (same_thread_group(reaper, pid_ns->child_reaper))
break;
if (!reaper->signal->is_child_subreaper)
continue;
thread = reaper;
do {
if (!(thread->flags & PF_EXITING))
return reaper;
} while_each_thread(reaper, thread);
}
}
return pid_ns->child_reaper;
}
/*
* Any that need to be release_task'd are put on the @dead list.
*/
static void reparent_leader(struct task_struct *father, struct task_struct *p,
struct list_head *dead)
{
list_move_tail(&p->sibling, &p->real_parent->children);
/*
* If this is a threaded reparent there is no need to
* notify anyone anything has happened.
*/
if (same_thread_group(p->real_parent, father))
return;
/*
* We don't want people slaying init.
*
* Note: we do this even if it is EXIT_DEAD, wait_task_zombie()
* can change ->exit_state to EXIT_ZOMBIE. If this is the final
* state, do_notify_parent() was already called and ->exit_signal
* doesn't matter.
*/
p->exit_signal = SIGCHLD;
if (p->exit_state == EXIT_DEAD)
return;
/* If it has exited notify the new parent about this child's death. */
if (!p->ptrace &&
p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
if (do_notify_parent(p, p->exit_signal)) {
p->exit_state = EXIT_DEAD;
list_move_tail(&p->sibling, dead);
}
}
kill_orphaned_pgrp(p, father);
}
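/*
 * Reparent all children of the exiting task to the reaper chosen by
 * find_new_reaper(), then release any children that became EXIT_DEAD
 * along the way.
 */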
static void forget_original_parent(struct task_struct *father)
{
struct task_struct *p, *n, *reaper;
LIST_HEAD(dead_children);
write_lock_irq(&tasklist_lock);
/*
* Note that exit_ptrace() and find_new_reaper() might
* drop tasklist_lock and reacquire it.
*/
exit_ptrace(father);
reaper = find_new_reaper(father);
list_for_each_entry_safe(p, n, &father->children, sibling) {
struct task_struct *t = p;
do {
t->real_parent = reaper;
if (t->parent == father) {
BUG_ON(t->ptrace);
t->parent = t->real_parent;
}
if (t->pdeath_signal)
group_send_sig_info(t->pdeath_signal,
SEND_SIG_NOINFO, t);
} while_each_thread(p, t);
reparent_leader(father, p, &dead_children);
}
write_unlock_irq(&tasklist_lock);
BUG_ON(!list_empty(&father->children));
list_for_each_entry_safe(p, n, &dead_children, sibling) {
list_del_init(&p->sibling);
release_task(p);
}
}
/*
* Send signals to all our closest relatives so that they know
* to properly mourn us..
*/
static void exit_notify(struct task_struct *tsk, int group_dead)
{
bool autoreap;
/*
* This does two things:
*
* A. Make init inherit all the child processes
* B. Check to see if any process groups have become orphaned
* as a result of our exiting, and if they have any stopped
* jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
forget_original_parent(tsk);
write_lock_irq(&tasklist_lock);
if (group_dead)
kill_orphaned_pgrp(tsk->group_leader, NULL);
if (unlikely(tsk->ptrace)) {
int sig = thread_group_leader(tsk) &&
thread_group_empty(tsk) &&
!ptrace_reparented(tsk) ?
tsk->exit_signal : SIGCHLD;
autoreap = do_notify_parent(tsk, sig);
} else if (thread_group_leader(tsk)) {
autoreap = thread_group_empty(tsk) &&
do_notify_parent(tsk, tsk->exit_signal);
} else {
autoreap = true;
}
tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
/* mt-exec, de_thread() is waiting for group leader */
if (unlikely(tsk->signal->notify_count < 0))
wake_up_process(tsk->signal->group_exit_task);
write_unlock_irq(&tasklist_lock);
/* If the process is dead, release it - nobody will wait for it */
if (autoreap)
release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
static DEFINE_SPINLOCK(low_water_lock);
static int lowest_to_date = THREAD_SIZE;
unsigned long free;
int islower = false;
free = stack_not_used(current);
if (free >= lowest_to_date)
return;
spin_lock(&low_water_lock);
if (free < lowest_to_date) {
lowest_to_date = free;
islower = true;
}
spin_unlock(&low_water_lock);
if (islower) {
printk(KERN_WARNING "%s (%d) used greatest stack depth: "
"%lu bytes left\n",
current->comm, task_pid_nr(current), free);
}
}
#else
static inline void check_stack_usage(void) {}
#endif
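/*
 * do_exit - final exit path for a task: release its mm, files, fs,
 * namespaces and timers, notify the parent, and schedule away for the
 * last time with the task state set to TASK_DEAD.
 */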
void do_exit(long code)
{
struct task_struct *tsk = current;
int group_dead;
profile_task_exit(tsk);
WARN_ON(blk_needs_flush_plug(tsk));
if (unlikely(in_interrupt()))
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid) || unlikely(tsk->pid==1))
panic("Attempted to kill the idle task! or init task");
/*
* If do_exit is called because this processes oopsed, it's possible
* that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
* continuing. Amongst other possible reasons, this is to prevent
* mm_release()->clear_child_tid() from writing to a user-controlled
* kernel address.
*/
set_fs(USER_DS);
ptrace_event(PTRACE_EVENT_EXIT, code);
validate_creds_for_do_exit(tsk);
/*
* We're taking recursive faults here in do_exit. Safest is to just
* leave this task alone and wait for reboot.
*/
if (unlikely(tsk->flags & PF_EXITING)) {
printk(KERN_ALERT
"Fixing recursive fault but reboot is needed!\n");
/*
* We can do this unlocked here. The futex code uses
* this flag just to verify whether the pi state
* cleanup has been done or not. In the worst case it
* loops once more. We pretend that the cleanup was
* done as there is no way to return. Either the
* OWNER_DIED bit is set by now or we push the blocked
	 * task into the wait-forever nirvana as well.
*/
tsk->flags |= PF_EXITPIDONE;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
exit_signals(tsk); /* sets PF_EXITING */
/*
* tsk->flags are checked in the futex code to protect against
* an exiting task cleaning up the robust pi futexes.
*/
smp_mb();
raw_spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, task_pid_nr(current),
preempt_count());
acct_update_integrals(tsk);
/* sync mm's RSS info before statistics gathering */
if (tsk->mm)
sync_mm_rss(tsk->mm);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
if (tsk->mm)
setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
}
acct_collect(code, group_dead);
if (group_dead)
tty_audit_exit();
audit_free(tsk);
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
exit_mm(tsk);
if (group_dead)
acct_process();
trace_sched_process_exit(tsk);
exit_sem(tsk);
exit_shm(tsk);
exit_files(tsk);
exit_fs(tsk);
if (group_dead)
disassociate_ctty(1);
exit_task_namespaces(tsk);
exit_task_work(tsk);
check_stack_usage();
exit_thread();
/*
* Flush inherited counters to the parent - before the parent
* gets woken up by child-exit notifications.
*
* because of cgroup mode, must be called before cgroup_exit()
*/
perf_event_exit_task(tsk);
cgroup_exit(tsk, 1);
module_put(task_thread_info(tsk)->exec_domain->module);
proc_exit_connector(tsk);
/*
* FIXME: do that only when needed, using sched_exit tracepoint
*/
ptrace_put_breakpoints(tsk);
exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
task_lock(tsk);
mpol_put(tsk->mempolicy);
tsk->mempolicy = NULL;
task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
#endif
/*
* Make sure we are holding no locks:
*/
debug_check_no_locks_held();
/*
* We can do this unlocked here. The futex code uses this flag
* just to verify whether the pi state cleanup has been done
* or not. In the worst case it loops once more.
*/
tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
exit_io_context(tsk);
if (tsk->splice_pipe)
free_pipe_info(tsk->splice_pipe);
if (tsk->task_frag.page)
put_page(tsk->task_frag.page);
validate_creds_for_do_exit(tsk);
preempt_disable();
if (tsk->nr_dirtied)
__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
exit_rcu();
/*
* The setting of TASK_RUNNING by try_to_wake_up() may be delayed
* when the following two conditions become true.
	 * - There is a race condition on mmap_sem (it is acquired by
	 *   exit_mm()), and
	 * - an SMI occurs before setting TASK_RUNNING.
* (or hypervisor of virtual machine switches to other guest)
* As a result, we may become TASK_RUNNING after becoming TASK_DEAD
*
* To avoid it, we have to wait for releasing tsk->pi_lock which
* is held by try_to_wake_up()
*/
smp_mb();
raw_spin_unlock_wait(&tsk->pi_lock);
/* causes final put_task_struct in finish_task_switch(). */
tsk->state = TASK_DEAD;
tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
schedule();
BUG();
/* Avoid "noreturn function does return". */
for (;;)
cpu_relax(); /* For when BUG is null */
}
EXPORT_SYMBOL_GPL(do_exit);
void complete_and_exit(struct completion *comp, long code)
{
if (comp)
complete(comp);
do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);
SYSCALL_DEFINE1(exit, int, error_code)
{
do_exit((error_code&0xff)<<8);
}
/*
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
*/
void
do_group_exit(int exit_code)
{
struct signal_struct *sig = current->signal;
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
if (signal_group_exit(sig))
exit_code = sig->group_exit_code;
else if (!thread_group_empty(current)) {
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
if (signal_group_exit(sig))
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit_code = exit_code;
sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
}
do_exit(exit_code);
/* NOTREACHED */
}
/*
* this kills every thread in the thread group. Note that any externally
* wait4()-ing process will get the correct exit code - even if this
* thread is not the thread group leader.
*/
SYSCALL_DEFINE1(exit_group, int, error_code)
{
do_group_exit((error_code & 0xff) << 8);
/* NOTREACHED */
return 0;
}
struct wait_opts {
enum pid_type wo_type;
int wo_flags;
struct pid *wo_pid;
struct siginfo __user *wo_info;
int __user *wo_stat;
struct rusage __user *wo_rusage;
wait_queue_t child_wait;
int notask_error;
};
static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
if (type != PIDTYPE_PID)
task = task->group_leader;
return task->pids[type].pid;
}
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
return wo->wo_type == PIDTYPE_MAX ||
task_pid_type(p, wo->wo_type) == wo->wo_pid;
}
static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
if (!eligible_pid(wo, p))
return 0;
/* Wait for all children (clone and not) if __WALL is set;
* otherwise, wait for clone children *only* if __WCLONE is
* set; otherwise, wait for non-clone children *only*. (Note:
* A "clone" child here is one that reports to its parent
* using a signal other than SIGCHLD.) */
if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
&& !(wo->wo_flags & __WALL))
return 0;
return 1;
}
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
pid_t pid, uid_t uid, int why, int status)
{
struct siginfo __user *infop;
int retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
put_task_struct(p);
infop = wo->wo_info;
if (infop) {
if (!retval)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval)
retval = put_user(0, &infop->si_errno);
if (!retval)
retval = put_user((short)why, &infop->si_code);
if (!retval)
retval = put_user(pid, &infop->si_pid);
if (!retval)
retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = put_user(status, &infop->si_status);
}
if (!retval)
retval = pid;
return retval;
}
/*
* Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
unsigned long state;
int retval, status, traced;
pid_t pid = task_pid_vnr(p);
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
struct siginfo __user *infop;
if (!likely(wo->wo_flags & WEXITED))
return 0;
if (unlikely(wo->wo_flags & WNOWAIT)) {
int exit_code = p->exit_code;
int why;
get_task_struct(p);
read_unlock(&tasklist_lock);
if ((exit_code & 0x7f) == 0) {
why = CLD_EXITED;
status = exit_code >> 8;
} else {
why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
status = exit_code & 0x7f;
}
return wait_noreap_copyout(wo, p, pid, uid, why, status);
}
/*
* Try to move the task's state to DEAD
* only one thread is allowed to do this:
*/
state = xchg(&p->exit_state, EXIT_DEAD);
if (state != EXIT_ZOMBIE) {
BUG_ON(state != EXIT_DEAD);
return 0;
}
traced = ptrace_reparented(p);
/*
* It can be ptraced but not reparented, check
* thread_group_leader() to filter out sub-threads.
*/
if (likely(!traced) && thread_group_leader(p)) {
struct signal_struct *psig;
struct signal_struct *sig;
unsigned long maxrss;
cputime_t tgutime, tgstime;
/*
* The resource counters for the group leader are in its
* own task_struct. Those for dead threads in the group
* are in its signal_struct, as are those for the child
* processes it has previously reaped. All these
* accumulate in the parent's signal_struct c* fields.
*
* We don't bother to take a lock here to protect these
* p->signal fields, because they are only touched by
* __exit_signal, which runs with tasklist_lock
* write-locked anyway, and so is excluded here. We do
* need to protect the access to parent->signal fields,
* as other threads in the parent group can be right
* here reaping other children at the same time.
*
* We use thread_group_cputime_adjusted() to get times for the thread
* group, which consolidates times for all threads in the
* group including the group leader.
*/
thread_group_cputime_adjusted(p, &tgutime, &tgstime);
spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal;
sig = p->signal;
psig->cutime += tgutime + sig->cutime;
psig->cstime += tgstime + sig->cstime;
psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
psig->cmin_flt +=
p->min_flt + sig->min_flt + sig->cmin_flt;
psig->cmaj_flt +=
p->maj_flt + sig->maj_flt + sig->cmaj_flt;
psig->cnvcsw +=
p->nvcsw + sig->nvcsw + sig->cnvcsw;
psig->cnivcsw +=
p->nivcsw + sig->nivcsw + sig->cnivcsw;
psig->cinblock +=
task_io_get_inblock(p) +
sig->inblock + sig->cinblock;
psig->coublock +=
task_io_get_oublock(p) +
sig->oublock + sig->coublock;
maxrss = max(sig->maxrss, sig->cmaxrss);
if (psig->cmaxrss < maxrss)
psig->cmaxrss = maxrss;
task_io_accounting_add(&psig->ioac, &p->ioac);
task_io_accounting_add(&psig->ioac, &sig->ioac);
spin_unlock_irq(&p->real_parent->sighand->siglock);
}
/*
* Now we are sure this task is interesting, and no other
* thread can reap it because we set its state to EXIT_DEAD.
*/
read_unlock(&tasklist_lock);
retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
status = (p->signal->flags & SIGNAL_GROUP_EXIT)
? p->signal->group_exit_code : p->exit_code;
if (!retval && wo->wo_stat)
retval = put_user(status, wo->wo_stat);
infop = wo->wo_info;
if (!retval && infop)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval && infop)
retval = put_user(0, &infop->si_errno);
if (!retval && infop) {
int why;
if ((status & 0x7f) == 0) {
why = CLD_EXITED;
status >>= 8;
} else {
why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
status &= 0x7f;
}
retval = put_user((short)why, &infop->si_code);
if (!retval)
retval = put_user(status, &infop->si_status);
}
if (!retval && infop)
retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = pid;
if (traced) {
write_lock_irq(&tasklist_lock);
/* We dropped tasklist, ptracer could die and untrace */
ptrace_unlink(p);
/*
* If this is not a sub-thread, notify the parent.
* If parent wants a zombie, don't release it now.
*/
if (thread_group_leader(p) &&
!do_notify_parent(p, p->exit_signal)) {
p->exit_state = EXIT_ZOMBIE;
p = NULL;
}
write_unlock_irq(&tasklist_lock);
}
if (p != NULL)
release_task(p);
return retval;
}
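/*
 * Return a pointer to the relevant stop code for @p, or NULL if it is not
 * in a stopped state we care about.  Ptrace waits use the per-task
 * exit_code (unless the tracer is in PTRACE_LISTEN); ordinary waits use
 * the group stop code.
 */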
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
if (ptrace) {
if (task_is_stopped_or_traced(p) &&
!(p->jobctl & JOBCTL_LISTENING))
return &p->exit_code;
} else {
if (p->signal->flags & SIGNAL_STOP_STOPPED)
return &p->signal->group_exit_code;
}
return NULL;
}
/**
* wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
* @wo: wait options
* @ptrace: is the wait for ptrace
* @p: task to wait for
*
* Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
*
* CONTEXT:
* read_lock(&tasklist_lock), which is released if return value is
* non-zero. Also, grabs and releases @p->sighand->siglock.
*
* RETURNS:
* 0 if wait condition didn't exist and search for other wait conditions
* should continue. Non-zero return, -errno on failure and @p's pid on
* success, implies that tasklist_lock is released and wait condition
* search should terminate.
*/
static int wait_task_stopped(struct wait_opts *wo,
int ptrace, struct task_struct *p)
{
struct siginfo __user *infop;
int retval, exit_code, *p_code, why;
uid_t uid = 0; /* unneeded, required by compiler */
pid_t pid;
/*
* Traditionally we see ptrace'd stopped tasks regardless of options.
*/
if (!ptrace && !(wo->wo_flags & WUNTRACED))
return 0;
if (!task_stopped_code(p, ptrace))
return 0;
exit_code = 0;
spin_lock_irq(&p->sighand->siglock);
p_code = task_stopped_code(p, ptrace);
if (unlikely(!p_code))
goto unlock_sig;
exit_code = *p_code;
if (!exit_code)
goto unlock_sig;
if (!unlikely(wo->wo_flags & WNOWAIT))
*p_code = 0;
uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
spin_unlock_irq(&p->sighand->siglock);
if (!exit_code)
return 0;
/*
* Now we are pretty sure this task is interesting.
* Make sure it doesn't get reaped out from under us while we
* give up the lock and then examine it below. We don't want to
* keep holding onto the tasklist_lock while we call getrusage and
* possibly take page faults for user memory.
*/
get_task_struct(p);
pid = task_pid_vnr(p);
why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
read_unlock(&tasklist_lock);
if (unlikely(wo->wo_flags & WNOWAIT))
return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);
retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
if (!retval && wo->wo_stat)
retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);
infop = wo->wo_info;
if (!retval && infop)
retval = put_user(SIGCHLD, &infop->si_signo);
if (!retval && infop)
retval = put_user(0, &infop->si_errno);
if (!retval && infop)
retval = put_user((short)why, &infop->si_code);
if (!retval && infop)
retval = put_user(exit_code, &infop->si_status);
if (!retval && infop)
retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(uid, &infop->si_uid);
if (!retval)
retval = pid;
put_task_struct(p);
BUG_ON(!retval);
return retval;
}
/*
* Handle do_wait work for one task in a live, non-stopped state.
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
int retval;
pid_t pid;
uid_t uid;
if (!unlikely(wo->wo_flags & WCONTINUED))
return 0;
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
return 0;
spin_lock_irq(&p->sighand->siglock);
/* Re-check with the lock held. */
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
spin_unlock_irq(&p->sighand->siglock);
return 0;
}
if (!unlikely(wo->wo_flags & WNOWAIT))
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
uid = from_kuid_munged(current_user_ns(), task_uid(p));
spin_unlock_irq(&p->sighand->siglock);
pid = task_pid_vnr(p);
get_task_struct(p);
read_unlock(&tasklist_lock);
if (!wo->wo_info) {
retval = wo->wo_rusage
? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
put_task_struct(p);
if (!retval && wo->wo_stat)
retval = put_user(0xffff, wo->wo_stat);
if (!retval)
retval = pid;
} else {
retval = wait_noreap_copyout(wo, p, pid, uid,
CLD_CONTINUED, SIGCONT);
BUG_ON(retval == 0);
}
return retval;
}
/*
* Consider @p for a wait by @parent.
*
* -ECHILD should be in ->notask_error before the first call.
* Returns nonzero for a final return, when we have unlocked tasklist_lock.
* Returns zero if the search for a child should continue;
* then ->notask_error is 0 if @p is an eligible child,
* or another error from security_task_wait(), or still -ECHILD.
*/
static int wait_consider_task(struct wait_opts *wo, int ptrace,
struct task_struct *p)
{
int ret = eligible_child(wo, p);
if (!ret)
return ret;
ret = security_task_wait(p);
if (unlikely(ret < 0)) {
/*
* If we have not yet seen any eligible child,
* then let this error code replace -ECHILD.
* A permission error will give the user a clue
* to look for security policy problems, rather
* than for mysterious wait bugs.
*/
if (wo->notask_error)
wo->notask_error = ret;
return 0;
}
/* dead body doesn't have much to contribute */
if (unlikely(p->exit_state == EXIT_DEAD)) {
/*
* But do not ignore this task until the tracer does
* wait_task_zombie()->do_notify_parent().
*/
if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
wo->notask_error = 0;
return 0;
}
/* slay zombie? */
if (p->exit_state == EXIT_ZOMBIE) {
/*
* A zombie ptracee is only visible to its ptracer.
* Notification and reaping will be cascaded to the real
* parent when the ptracer detaches.
*/
if (likely(!ptrace) && unlikely(p->ptrace)) {
/* it will become visible, clear notask_error */
wo->notask_error = 0;
return 0;
}
/* we don't reap group leaders with subthreads */
if (!delay_group_leader(p))
return wait_task_zombie(wo, p);
/*
* Allow access to stopped/continued state via zombie by
* falling through. Clearing of notask_error is complex.
*
* When !@ptrace:
*
* If WEXITED is set, notask_error should naturally be
* cleared. If not, subset of WSTOPPED|WCONTINUED is set,
* so, if there are live subthreads, there are events to
* wait for. If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in a finite
		 * amount of time once all the subthreads are released and
* will then return without clearing.
*
* When @ptrace:
*
* Stopped state is per-task and thus can't change once the
* target task dies. Only continued and exited can happen.
* Clear notask_error if WCONTINUED | WEXITED.
*/
if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
wo->notask_error = 0;
} else {
/*
* If @p is ptraced by a task in its real parent's group,
* hide group stop/continued state when looking at @p as
* the real parent; otherwise, a single stop can be
* reported twice as group and ptrace stops.
*
* If a ptracer wants to distinguish the two events for its
* own children, it should create a separate process which
* takes the role of real parent.
*/
if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
return 0;
/*
		 * @p is alive and it's going to stop, continue or exit, so
* there always is something to wait for.
*/
wo->notask_error = 0;
}
/*
* Wait for stopped. Depending on @ptrace, different stopped state
* is used and the two don't interact with each other.
*/
ret = wait_task_stopped(wo, ptrace, p);
if (ret)
return ret;
/*
* Wait for continued. There's only one continued state and the
* ptracer can consume it which can confuse the real parent. Don't
* use WCONTINUED from ptracer. You don't need or want it.
*/
return wait_task_continued(wo, p);
}
/*
* Do the work of do_wait() for one thread in the group, @tsk.
*
* -ECHILD should be in ->notask_error before the first call.
* Returns nonzero for a final return, when we have unlocked tasklist_lock.
* Returns zero if the search for a child should continue; then
* ->notask_error is 0 if there were any eligible children,
* or another error from security_task_wait(), or still -ECHILD.
*/
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
struct task_struct *p;
list_for_each_entry(p, &tsk->children, sibling) {
int ret = wait_consider_task(wo, 0, p);
if (ret)
return ret;
}
return 0;
}
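/*
 * Like do_wait_thread(), but scan the tasks @tsk is ptracing rather than
 * its natural children.
 */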
static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
struct task_struct *p;
list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
int ret = wait_consider_task(wo, 1, p);
if (ret)
return ret;
}
return 0;
}
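/*
 * Wake-up callback for the child_wait queue entry.  Filters wake-ups so
 * the waiter is only woken for children it could actually be waiting for:
 * the pid must match, and with __WNOTHREAD the waiter must be the child's
 * parent.
 */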
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
int sync, void *key)
{
struct wait_opts *wo = container_of(wait, struct wait_opts,
child_wait);
struct task_struct *p = key;
if (!eligible_pid(wo, p))
return 0;
if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
return 0;
return default_wake_function(wait, mode, sync, key);
}
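/*
 * Wake up a parent sleeping in do_wait() because child @p has something
 * to report.  Called from the child side on exit, stop and continue.
 */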
void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
__wake_up_sync_key(&parent->signal->wait_chldexit,
TASK_INTERRUPTIBLE, 1, p);
}
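/*
 * The core of the wait*() syscalls: register on our signal struct's
 * wait_chldexit queue, scan the children and ptracees of every thread in
 * the group, and either return a result, return immediately for WNOHANG,
 * or sleep and retry when a child changes state.
 */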
static long do_wait(struct wait_opts *wo)
{
struct task_struct *tsk;
int retval;
trace_sched_process_wait(wo->wo_pid);
init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
wo->child_wait.private = current;
add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait);
repeat:
/*
	 * If there is nothing that can match our criteria just get out.
* We will clear ->notask_error to zero if we see any child that
* might later match our criteria, even if we are not able to reap
* it yet.
*/
wo->notask_error = -ECHILD;
if ((wo->wo_type < PIDTYPE_MAX) &&
(!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
goto notask;
set_current_state(TASK_INTERRUPTIBLE);
read_lock(&tasklist_lock);
tsk = current;
do {
retval = do_wait_thread(wo, tsk);
if (retval)
goto end;
retval = ptrace_do_wait(wo, tsk);
if (retval)
goto end;
if (wo->wo_flags & __WNOTHREAD)
break;
} while_each_thread(current, tsk);
read_unlock(&tasklist_lock);
notask:
retval = wo->notask_error;
if (!retval && !(wo->wo_flags & WNOHANG)) {
retval = -ERESTARTSYS;
if (!signal_pending(current)) {
schedule();
goto repeat;
}
}
end:
__set_current_state(TASK_RUNNING);
remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait);
return retval;
}
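/*
 * waitid(2): the siginfo-based wait interface.  Unlike wait4() it does
 * not imply WEXITED, so the caller must ask for at least one of WEXITED,
 * WSTOPPED or WCONTINUED.
 */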
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
infop, int, options, struct rusage __user *, ru)
{
struct wait_opts wo;
struct pid *pid = NULL;
enum pid_type type;
long ret;
if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
return -EINVAL;
if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
return -EINVAL;
switch (which) {
case P_ALL:
type = PIDTYPE_MAX;
break;
case P_PID:
type = PIDTYPE_PID;
if (upid <= 0)
return -EINVAL;
break;
case P_PGID:
type = PIDTYPE_PGID;
if (upid <= 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
if (type < PIDTYPE_MAX)
pid = find_get_pid(upid);
wo.wo_type = type;
wo.wo_pid = pid;
wo.wo_flags = options;
wo.wo_info = infop;
wo.wo_stat = NULL;
wo.wo_rusage = ru;
ret = do_wait(&wo);
if (ret > 0) {
ret = 0;
} else if (infop) {
/*
* For a WNOHANG return, clear out all the fields
* we would set so the user can easily tell the
* difference.
*/
if (!ret)
ret = put_user(0, &infop->si_signo);
if (!ret)
ret = put_user(0, &infop->si_errno);
if (!ret)
ret = put_user(0, &infop->si_code);
if (!ret)
ret = put_user(0, &infop->si_pid);
if (!ret)
ret = put_user(0, &infop->si_uid);
if (!ret)
ret = put_user(0, &infop->si_status);
}
put_pid(pid);
return ret;
}
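/*
 * wait4(2): the traditional wait interface.  The pid argument encodes
 * what to wait for (-1 = any child, < -1 = process group -pid, 0 = our
 * own process group, > 0 = that specific pid) and WEXITED is implied.
 */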
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
int, options, struct rusage __user *, ru)
{
struct wait_opts wo;
struct pid *pid = NULL;
enum pid_type type;
long ret;
if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
if (upid == -1)
type = PIDTYPE_MAX;
else if (upid < 0) {
type = PIDTYPE_PGID;
pid = find_get_pid(-upid);
} else if (upid == 0) {
type = PIDTYPE_PGID;
pid = get_task_pid(current, PIDTYPE_PGID);
} else /* upid > 0 */ {
type = PIDTYPE_PID;
pid = find_get_pid(upid);
}
wo.wo_type = type;
wo.wo_pid = pid;
wo.wo_flags = options | WEXITED;
wo.wo_info = NULL;
wo.wo_stat = stat_addr;
wo.wo_rusage = ru;
ret = do_wait(&wo);
put_pid(pid);
return ret;
}
#ifdef __ARCH_WANT_SYS_WAITPID
/*
* sys_waitpid() remains for compatibility. waitpid() should be
* implemented by calling sys_wait4() from libc.a.
*/
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
return sys_wait4(pid, stat_addr, options, NULL);
}
#endif
| Emotroid-Team/emotion_tw_caf_kernel | kernel/exit.c | C | gpl-2.0 | 44,291 |