repo_name (stringlengths 4..116) | path (stringlengths 3..942) | size (stringlengths 1..7) | content (stringlengths 3..1.05M) | license (stringclasses, 15 values)
---|---|---|---|---|
xdajog/samsung_sources_i927 | external/chromium/chrome/browser/sync/glue/data_type_manager_mock.h | 1583 | // Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_SYNC_GLUE_DATA_TYPE_MANAGER_MOCK_H__
#define CHROME_BROWSER_SYNC_GLUE_DATA_TYPE_MANAGER_MOCK_H__
#pragma once
#include "chrome/browser/sync/glue/data_type_manager.h"
#include "chrome/browser/sync/profile_sync_test_util.h"
#include "content/common/notification_details.h"
#include "content/common/notification_service.h"
#include "content/common/notification_type.h"
#include "testing/gmock/include/gmock/gmock.h"
ACTION_P3(NotifyFromDataTypeManagerWithResult, dtm, type, result) {
NotificationService::current()->Notify(
type,
Source<browser_sync::DataTypeManager>(dtm),
Details<browser_sync::DataTypeManager::ConfigureResultWithErrorLocation>(
result));
}
ACTION_P2(NotifyFromDataTypeManager, dtm, type) {
NotificationService::current()->Notify(type,
Source<browser_sync::DataTypeManager>(dtm),
NotificationService::NoDetails());
}
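// Usage sketch (hypothetical test code; the expectation and the notification
// type shown here are illustrative, not defined in this header):
//   EXPECT_CALL(*data_type_manager, Configure(testing::_))
//       .WillOnce(NotifyFromDataTypeManager(data_type_manager,
//           NotificationType::SYNC_CONFIGURE_START));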
namespace browser_sync {
class DataTypeManagerMock : public DataTypeManager {
public:
DataTypeManagerMock();
virtual ~DataTypeManagerMock();
MOCK_METHOD1(Configure, void(const TypeSet&));
MOCK_METHOD0(Stop, void());
MOCK_METHOD0(controllers, const DataTypeController::TypeMap&());
MOCK_METHOD0(state, State());
private:
browser_sync::DataTypeManager::ConfigureResultWithErrorLocation result_;
};
} // namespace browser_sync
#endif // CHROME_BROWSER_SYNC_GLUE_DATA_TYPE_MANAGER_MOCK_H__
| gpl-2.0 |
Jabqooo/chrome-app-samples | samples/camera-capture/background.js | 375 | /**
 * Listens for the app launching, then creates the window.
*
* @see http://developer.chrome.com/apps/app.runtime.html
* @see http://developer.chrome.com/apps/app.window.html
*/
chrome.app.runtime.onLaunched.addListener(function() {
chrome.app.window.create('index.html', {
id: "camCaptureID",
innerBounds: {
width: 700,
height: 600
}
});
});
| apache-2.0 |
sh-cho/cshSpark | util/collection/CompactBuffer.scala | 5436 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection
import scala.reflect.ClassTag
/**
* An append-only buffer similar to ArrayBuffer, but more memory-efficient for small buffers.
* ArrayBuffer always allocates an Object array to store the data, with 16 entries by default,
* so it has about 80-100 bytes of overhead. In contrast, CompactBuffer can keep up to two
* elements in fields of the main object, and only allocates an Array[AnyRef] if there are more
* entries than that. This makes it more efficient for operations like groupBy where we expect
* some keys to have very few elements.
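 *
 * A usage sketch (hypothetical; not taken from Spark's sources):
 * {{{
 *   val buf = CompactBuffer[String]()
 *   buf += "a"   // stored in element0; no array allocated yet
 *   buf += "b"   // stored in element1
 *   buf += "c"   // otherElements is allocated for the third element
 *   assert(buf.length == 3 && buf(2) == "c")
 * }}}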
*/
private[spark] class CompactBuffer[T: ClassTag] extends Seq[T] with Serializable {
// First two elements
private var element0: T = _
private var element1: T = _
// Number of elements, including our two in the main object
private var curSize = 0
// Array for extra elements
private var otherElements: Array[T] = null
def apply(position: Int): T = {
if (position < 0 || position >= curSize) {
throw new IndexOutOfBoundsException
}
if (position == 0) {
element0
} else if (position == 1) {
element1
} else {
otherElements(position - 2)
}
}
private def update(position: Int, value: T): Unit = {
if (position < 0 || position >= curSize) {
throw new IndexOutOfBoundsException
}
if (position == 0) {
element0 = value
} else if (position == 1) {
element1 = value
} else {
otherElements(position - 2) = value
}
}
def += (value: T): CompactBuffer[T] = {
val newIndex = curSize
if (newIndex == 0) {
element0 = value
curSize = 1
} else if (newIndex == 1) {
element1 = value
curSize = 2
} else {
growToSize(curSize + 1)
otherElements(newIndex - 2) = value
}
this
}
def ++= (values: TraversableOnce[T]): CompactBuffer[T] = {
values match {
// Optimize merging of CompactBuffers, used in cogroup and groupByKey
case compactBuf: CompactBuffer[T] =>
val oldSize = curSize
// Copy the other buffer's size and elements to local variables in case it is equal to us
val itsSize = compactBuf.curSize
val itsElements = compactBuf.otherElements
growToSize(curSize + itsSize)
if (itsSize == 1) {
this(oldSize) = compactBuf.element0
} else if (itsSize == 2) {
this(oldSize) = compactBuf.element0
this(oldSize + 1) = compactBuf.element1
} else if (itsSize > 2) {
this(oldSize) = compactBuf.element0
this(oldSize + 1) = compactBuf.element1
// At this point our size is also above 2, so just copy its array directly into ours.
// Note that since we added two elements above, the index in this.otherElements that we
// should copy to is oldSize.
System.arraycopy(itsElements, 0, otherElements, oldSize, itsSize - 2)
}
case _ =>
values.foreach(e => this += e)
}
this
}
override def length: Int = curSize
override def size: Int = curSize
override def iterator: Iterator[T] = new Iterator[T] {
private var pos = 0
override def hasNext: Boolean = pos < curSize
override def next(): T = {
if (!hasNext) {
throw new NoSuchElementException
}
pos += 1
apply(pos - 1)
}
}
/** Increase our size to newSize and grow the backing array if needed. */
private def growToSize(newSize: Int): Unit = {
if (newSize < 0) {
throw new UnsupportedOperationException("Can't grow buffer past Int.MaxValue elements")
}
val capacity = if (otherElements != null) otherElements.length + 2 else 2
if (newSize > capacity) {
var newArrayLen = 8
while (newSize - 2 > newArrayLen) {
newArrayLen *= 2
if (newArrayLen == Int.MinValue) {
// Prevent overflow if we double from 2^30 to 2^31, which will become Int.MinValue.
// Note that we set the new array length to Int.MaxValue - 2 so that our capacity
// calculation above still gives a positive integer.
newArrayLen = Int.MaxValue - 2
}
}
val newArray = new Array[T](newArrayLen)
if (otherElements != null) {
System.arraycopy(otherElements, 0, newArray, 0, otherElements.length)
}
otherElements = newArray
}
curSize = newSize
}
}
private[spark] object CompactBuffer {
def apply[T: ClassTag](): CompactBuffer[T] = new CompactBuffer[T]
def apply[T: ClassTag](value: T): CompactBuffer[T] = {
val buf = new CompactBuffer[T]
buf += value
}
}
| apache-2.0 |
urbanslug/ghc | testsuite/tests/cabal/cabal05/p/P2.hs | 16 | module P2 where
| bsd-3-clause |
axinging/chromium-crosswalk | third_party/WebKit/LayoutTests/css1/box_properties/padding_inline.html | 3599 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
<HTML>
<HEAD>
<TITLE>CSS1 Test Suite: 5.5.10 padding</TITLE>
<META http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<META http-equiv="Content-Style-Type" content="text/css">
<LINK rel="stylesheet" type="text/css" media="screen" href="../resources/base.css">
<STYLE type="text/css">
.zero {background-color: silver; padding: 0;}
.one {padding: 25px; background-color: aqua;}
.two {padding: -10px; background-color: aqua;}
</STYLE>
</HEAD>
<BODY><P>The style declarations which apply to the text below are:</P>
<PRE>.zero {background-color: silver; padding: 0;}
.one {padding: 25px; background-color: aqua;}
.two {padding: -10px; background-color: aqua;}
</PRE>
<HR>
<P class="zero">
This element has a class of zero.
</P>
<P style="background-color: gray;">
This element is unstyled save for a background color of gray. It contains an <SPAN class="one">inline element of class <TT>one</TT>, giving it an aqua background and a 25px padding</SPAN>. Padding on inline elements does not affect line-height calculations, so all lines in this element should have the same line-height. There may be implementation-specific limits on how much of the padding the user agent is able to display above and below each line. However, there should be at least 25px of padding to the left side of the inline box in the first line it appears, and 25px of padding to the right side of the inline element box in the last line where it appears.
</P>
<P class="zero">
This element has a class of zero.
</P>
<P style="background-color: gray;">
This element is unstyled save for a background color of gray. It contains an <SPAN class="two">inline element of class <TT>two</TT>, giving it an aqua background and no padding, since negative padding values are not allowed</SPAN>. Padding on inline elements does not affect line-height calculations, so all lines in this element should have the same line-height.
</P>
<P class="zero">
This element has a class of zero.
</P>
<TABLE border cellspacing="0" cellpadding="3" class="tabletest">
<TR>
<TD colspan="2" bgcolor="silver"><STRONG>TABLE Testing Section</STRONG></TD>
</TR>
<TR>
<TD bgcolor="silver"> </TD>
<TD><P class="zero">
This element has a class of zero.
</P>
<P style="background-color: gray;">
This element is unstyled save for a background color of gray. It contains an <SPAN class="one">inline element of class <TT>one</TT>, giving it an aqua background and a 25px padding</SPAN>. Padding on inline elements does not affect line-height calculations, so all lines in this element should have the same line-height. There may be implementation-specific limits on how much of the padding the user agent is able to display above and below each line. However, there should be at least 25px of padding to the left side of the inline box in the first line it appears, and 25px of padding to the right side of the inline element box in the last line where it appears.
</P>
<P class="zero">
This element has a class of zero.
</P>
<P style="background-color: gray;">
This element is unstyled save for a background color of gray. It contains an <SPAN class="two">inline element of class <TT>two</TT>, giving it an aqua background and no padding, since negative padding values are not allowed</SPAN>. Padding on inline elements does not affect line-height calculations, so all lines in this element should have the same line-height.
</P>
<P class="zero">
This element has a class of zero.
</P>
</TD></TR></TABLE></BODY>
</HTML>
| bsd-3-clause |
koct9i/linux | tools/arch/microblaze/include/uapi/asm/bitsperlong.h | 100 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/bitsperlong.h>
| gpl-2.0 |
xdajog/samsung_sources_i927 | external/chromium/chrome/browser/status_icons/status_icon.h | 2669 | // Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_STATUS_ICONS_STATUS_ICON_H_
#define CHROME_BROWSER_STATUS_ICONS_STATUS_ICON_H_
#pragma once
#include "base/memory/scoped_ptr.h"
#include "base/observer_list.h"
#include "base/string16.h"
class SkBitmap;
namespace ui {
class MenuModel;
}
class StatusIcon {
public:
StatusIcon();
virtual ~StatusIcon();
// Sets the image associated with this status icon.
virtual void SetImage(const SkBitmap& image) = 0;
// Sets the image associated with this status icon when pressed.
virtual void SetPressedImage(const SkBitmap& image) = 0;
// Sets the hover text for this status icon.
virtual void SetToolTip(const string16& tool_tip) = 0;
// Displays a notification balloon with the specified contents.
virtual void DisplayBalloon(const string16& title,
const string16& contents) = 0;
// Set the context menu for this icon. The icon takes ownership of the passed
// context menu. Passing NULL results in no menu at all.
void SetContextMenu(ui::MenuModel* menu);
class Observer {
public:
virtual ~Observer() {}
// Called when the user clicks on the system tray icon. Clicks that result
// in the context menu being displayed will not be passed to this observer
// (i.e. if there's a context menu set on this status icon, and the user
// right clicks on the icon to display the context menu, OnClicked will not
// be called).
virtual void OnClicked() = 0;
};
// Adds/Removes an observer for clicks on the status icon. If an observer is
// registered, then left clicks on the status icon will result in the observer
// being called, otherwise, both left and right clicks will display the
// context menu (if any).
void AddObserver(Observer* observer);
void RemoveObserver(Observer* observer);
// Returns true if there are registered click observers.
bool HasObservers();
// Dispatches a click event to the observers.
void DispatchClickEvent();
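// Usage sketch (hypothetical observer; names are illustrative):
//   class TrayClickHandler : public StatusIcon::Observer {
//    public:
//     virtual void OnClicked() { /* react to a left click */ }
//   };
//   TrayClickHandler handler;
//   icon->AddObserver(&handler);     // left clicks now reach OnClicked()
//   icon->RemoveObserver(&handler);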
protected:
// Invoked after a call to SetContextMenu() to let the platform-specific
// subclass update the native context menu based on the new model. If NULL is
// passed, subclass should destroy the native context menu.
virtual void UpdatePlatformContextMenu(ui::MenuModel* model) = 0;
private:
ObserverList<Observer> observers_;
// Context menu, if any.
scoped_ptr<ui::MenuModel> context_menu_contents_;
DISALLOW_COPY_AND_ASSIGN(StatusIcon);
};
#endif // CHROME_BROWSER_STATUS_ICONS_STATUS_ICON_H_
| gpl-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/scipy/optimize/tests/test_nonlin.py | 15160 | """ Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, dec, TestCase, run_module_suite
from scipy._lib.six import xrange
from scipy.optimize import nonlin, root
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
from test_minpack import pressure_network
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
'krylov': nonlin.newton_krylov}
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
x = np.asmatrix(x).T
d = matrix(diag([3,2,1.5,1,0.5]))
c = 0.01
f = -d*x - c*float(x.T*x)*x
return f
F.xin = [1,1,1,1,1]
F.KNOWN_BAD = {}
def F2(x):
return x
F2.xin = [1,2,3,4,5,6]
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing}
def F2_lucky(x):
return x
F2_lucky.xin = [0,0,0,0,0,0]
F2_lucky.KNOWN_BAD = {}
def F3(x):
A = np.mat('-2 1 0; 1 -2 1; 0 1 -2')
b = np.mat('1 2 3')
return np.dot(A, x) - b
F3.xin = [1,2,3]
F3.KNOWN_BAD = {}
def F4_powell(x):
A = 1e4
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing,
'diagbroyden': nonlin.diagbroyden}
def F5(x):
return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
def F6(x):
x1, x2 = x
J0 = np.array([[-4.256, 14.7],
[0.8394989, 0.59964207]])
v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
np.sin(x2 * np.exp(x1) - 1)])
return -np.linalg.solve(J0, v)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
"""
Check the Broyden methods for a few test problems.
broyden1, broyden2, and newton_krylov must succeed for
all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
"""
def _check_nonlin_func(self, f, func, f_tol=1e-2):
x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
assert_(np.absolute(f(x)).max() < f_tol)
def _check_root(self, f, method, f_tol=1e-2):
res = root(f, f.xin, method=method,
options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
assert_(np.absolute(res.fun).max() < f_tol)
@dec.knownfailureif(True)
def _check_func_fail(self, *a, **kw):
pass
def test_problem_nonlin(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for func in SOLVERS.values():
if func in f.KNOWN_BAD.values():
if func in MUST_WORK.values():
yield self._check_func_fail, f, func
continue
yield self._check_nonlin_func, f, func
def test_tol_norm_called(self):
# Check that supplying tol_norm keyword to nonlin_solve works
self._tol_norm_used = False
def local_norm_func(x):
self._tol_norm_used = True
return np.absolute(x).max()
nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0,
tol_norm=local_norm_func)
assert_(self._tol_norm_used)
def test_problem_root(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for meth in SOLVERS:
if meth in f.KNOWN_BAD:
if meth in MUST_WORK:
yield self._check_func_fail, f, meth
continue
yield self._check_root, f, meth
class TestSecant(TestCase):
"""Check that some Jacobian approximations satisfy the secant condition"""
xs = [np.array([1,2,3,4,5], float),
np.array([2,3,4,5,1], float),
np.array([3,4,5,1,2], float),
np.array([4,5,1,2,3], float),
np.array([9,1,9,1,3], float),
np.array([0,1,9,1,3], float),
np.array([5,5,7,1,1], float),
np.array([1,2,7,5,1], float),]
fs = [x**2 - 1 for x in xs]
def _check_secant(self, jac_cls, npoints=1, **kw):
"""
Check that the given Jacobian approximation satisfies secant
conditions for last `npoints` points.
"""
jac = jac_cls(**kw)
jac.setup(self.xs[0], self.fs[0], None)
for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
jac.update(x, f)
for k in xrange(min(npoints, j+1)):
dx = self.xs[j-k+1] - self.xs[j-k]
df = self.fs[j-k+1] - self.fs[j-k]
assert_(np.allclose(dx, jac.solve(df)))
# Check that the `npoints` secant bound is strict
if j >= npoints:
dx = self.xs[j-npoints+1] - self.xs[j-npoints]
df = self.fs[j-npoints+1] - self.fs[j-npoints]
assert_(not np.allclose(dx, jac.solve(df)))
def test_broyden1(self):
self._check_secant(nonlin.BroydenFirst)
def test_broyden2(self):
self._check_secant(nonlin.BroydenSecond)
def test_broyden1_update(self):
# Check that BroydenFirst update works as for a dense matrix
jac = nonlin.BroydenFirst(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
B = np.identity(5) * (-1/0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
jac.update(x, f)
assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
def test_broyden2_update(self):
# Check that BroydenSecond update works as for a dense matrix
jac = nonlin.BroydenSecond(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
H = np.identity(5) * (-0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
jac.update(x, f)
assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
def test_anderson(self):
# Anderson mixing (with w0=0) satisfies secant conditions
# for the last M iterates, see [Ey]_
#
# .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(TestCase):
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
np.random.seed(123)
A = np.random.randn(N, N)
if complex:
A = A + 1j*np.random.randn(N, N)
b = np.random.randn(N)
if complex:
b = b + 1j*np.random.randn(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
"""Check that solve/dot methods in Jacobian approximations are consistent"""
def _func(self, x):
return x**2 - 1 + np.dot(self.A, x)
def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
np.random.seed(123)
N = 7
def rand(*a):
q = np.random.rand(*a)
if complex:
q = q + 1j*np.random.rand(*a)
return q
def assert_close(a, b, msg):
d = abs(a - b).max()
f = tol + abs(b).max()*tol
if d > f:
raise AssertionError('%s: err %g' % (msg, d))
self.A = rand(N, N)
# initialize
x0 = np.random.rand(N)
jac = jac_cls(**kw)
jac.setup(x0, self._func(x0), self._func)
# check consistency
for k in xrange(2*N):
v = rand(N)
if hasattr(jac, '__array__'):
Jd = np.array(jac)
if hasattr(jac, 'solve'):
Gv = jac.solve(v)
Gv2 = np.linalg.solve(Jd, v)
assert_close(Gv, Gv2, 'solve vs array')
if hasattr(jac, 'rsolve'):
Gv = jac.rsolve(v)
Gv2 = np.linalg.solve(Jd.T.conj(), v)
assert_close(Gv, Gv2, 'rsolve vs array')
if hasattr(jac, 'matvec'):
Jv = jac.matvec(v)
Jv2 = np.dot(Jd, v)
assert_close(Jv, Jv2, 'dot vs array')
if hasattr(jac, 'rmatvec'):
Jv = jac.rmatvec(v)
Jv2 = np.dot(Jd.T.conj(), v)
assert_close(Jv, Jv2, 'rmatvec vs array')
if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
Jv = jac.matvec(v)
Jv2 = jac.solve(jac.matvec(Jv))
assert_close(Jv, Jv2, 'dot vs solve')
if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
Jv = jac.rmatvec(v)
Jv2 = jac.rmatvec(jac.rsolve(Jv))
assert_close(Jv, Jv2, 'rmatvec vs rsolve')
x = rand(N)
jac.update(x, self._func(x))
def test_broyden1(self):
self._check_dot(nonlin.BroydenFirst, complex=False)
self._check_dot(nonlin.BroydenFirst, complex=True)
def test_broyden2(self):
self._check_dot(nonlin.BroydenSecond, complex=False)
self._check_dot(nonlin.BroydenSecond, complex=True)
def test_anderson(self):
self._check_dot(nonlin.Anderson, complex=False)
self._check_dot(nonlin.Anderson, complex=True)
def test_diagbroyden(self):
self._check_dot(nonlin.DiagBroyden, complex=False)
self._check_dot(nonlin.DiagBroyden, complex=True)
def test_linearmixing(self):
self._check_dot(nonlin.LinearMixing, complex=False)
self._check_dot(nonlin.LinearMixing, complex=True)
def test_excitingmixing(self):
self._check_dot(nonlin.ExcitingMixing, complex=False)
self._check_dot(nonlin.ExcitingMixing, complex=True)
def test_krylov(self):
self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-4)
self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-4)
class TestNonlinOldTests(TestCase):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def test_broyden1(self):
x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_broyden2(self):
x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_anderson(self):
x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
assert_(nonlin.norm(x) < 0.33)
def test_linearmixing(self):
x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
assert_(nonlin.norm(x) < 1e-7)
assert_(nonlin.norm(F(x)) < 1e-7)
def test_exciting(self):
x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
assert_(nonlin.norm(x) < 1e-5)
assert_(nonlin.norm(F(x)) < 1e-5)
def test_diagbroyden(self):
x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
assert_(nonlin.norm(x) < 1e-8)
assert_(nonlin.norm(F(x)) < 1e-8)
def test_root_broyden1(self):
res = root(F, F.xin, method='broyden1',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_broyden2(self):
res = root(F, F.xin, method='broyden2',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_anderson(self):
res = root(F, F.xin, method='anderson',
options={'nit': 12,
'jac_options': {'alpha': 0.03, 'M': 5}})
assert_(nonlin.norm(res.x) < 0.33)
def test_root_linearmixing(self):
res = root(F, F.xin, method='linearmixing',
options={'nit': 60,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-7)
assert_(nonlin.norm(res.fun) < 1e-7)
def test_root_excitingmixing(self):
res = root(F, F.xin, method='excitingmixing',
options={'nit': 20,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-5)
assert_(nonlin.norm(res.fun) < 1e-5)
def test_root_diagbroyden(self):
res = root(F, F.xin, method='diagbroyden',
options={'nit': 11,
'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-8)
assert_(nonlin.norm(res.fun) < 1e-8)
if __name__ == "__main__":
run_module_suite()
| gpl-2.0 |
caot/intellij-community | java/java-psi-api/src/com/intellij/psi/JavaDirectoryService.java | 5506 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* @author max
*/
package com.intellij.psi;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.pom.java.LanguageLevel;
import com.intellij.util.IncorrectOperationException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.Map;
public abstract class JavaDirectoryService {
public static JavaDirectoryService getInstance() {
return ServiceManager.getService(JavaDirectoryService.class);
}
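// Usage sketch (hypothetical caller; 'dir' stands for any PsiDirectory):
//   JavaDirectoryService svc = JavaDirectoryService.getInstance();
//   PsiPackage pkg = svc.getPackage(dir); // may be null
//   PsiClass cls = svc.createClass(dir, "MyClass"); // throws IncorrectOperationException on failure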
/**
* Returns the package corresponding to the directory.
*
* @return the package instance, or null if the directory does not correspond to any package.
*/
@Nullable
public abstract PsiPackage getPackage(@NotNull PsiDirectory dir);
/**
* Returns the list of Java classes contained in the directory.
*
* @return the array of classes.
*/
@NotNull
public abstract PsiClass[] getClasses(@NotNull PsiDirectory dir);
/**
* Creates a class with the specified name in the directory.
*
* @param name the name of the class to create (not including the file extension).
* @return the created class instance.
* @throws IncorrectOperationException if the operation failed for some reason.
*/
@NotNull
public abstract PsiClass createClass(@NotNull PsiDirectory dir, @NotNull String name) throws IncorrectOperationException;
/**
* Creates a class with the specified name in the directory.
*
* @param name the name of the class to create (not including the file extension).
* @param templateName custom file template to create class text based on.
* @return the created class instance.
* @throws IncorrectOperationException if the operation failed for some reason.
* @since 5.1
*/
@NotNull
public abstract PsiClass createClass(@NotNull PsiDirectory dir, @NotNull String name, @NotNull String templateName) throws IncorrectOperationException;
/**
* @param askForUndefinedVariables if true, shows a dialog asking for values of
* undefined template variables; if false, leaves them blank
*/
public abstract PsiClass createClass(@NotNull PsiDirectory dir, @NotNull String name, @NotNull String templateName, boolean askForUndefinedVariables) throws IncorrectOperationException;
/**
* @param additionalProperties additional properties to be substituted in the template
*/
public abstract PsiClass createClass(@NotNull PsiDirectory dir,
@NotNull String name,
@NotNull String templateName,
boolean askForUndefinedVariables,
@NotNull final Map<String, String> additionalProperties) throws IncorrectOperationException;
/**
* Checks if it's possible to create a class with the specified name in the directory,
* and throws an exception if the creation is not possible. Does not actually modify
* anything.
*
* @param name the name of the class to check creation possibility (not including the file extension).
* @throws IncorrectOperationException if the creation is not possible.
*/
public abstract void checkCreateClass(@NotNull PsiDirectory dir, @NotNull String name) throws IncorrectOperationException;
/**
* Creates an interface class with the specified name in the directory.
*
* @param name the name of the interface to create (not including the file extension).
* @return the created interface instance.
* @throws IncorrectOperationException if the operation failed for some reason.
*/
@NotNull
public abstract PsiClass createInterface(@NotNull PsiDirectory dir, @NotNull String name) throws IncorrectOperationException;
/**
* Creates an enumeration class with the specified name in the directory.
*
* @param name the name of the enumeration class to create (not including the file extension).
* @return the created class instance.
* @throws IncorrectOperationException if the operation failed for some reason.
*/
@NotNull
public abstract PsiClass createEnum(@NotNull PsiDirectory dir, @NotNull String name) throws IncorrectOperationException;
/**
* Creates an annotation class with the specified name in the directory.
*
* @param name the name of the annotation class to create (not including the file extension).
* @return the created class instance.
* @throws IncorrectOperationException if the operation failed for some reason.
*/
@NotNull
public abstract PsiClass createAnnotationType(@NotNull PsiDirectory dir, @NotNull String name) throws IncorrectOperationException;
/**
* Checks if the directory is a source root for the project to which it belongs.
*
* @return true if the directory is a source root, false otherwise
*/
public abstract boolean isSourceRoot(@NotNull PsiDirectory dir);
public abstract LanguageLevel getLanguageLevel(@NotNull PsiDirectory dir);
} | apache-2.0 |
medicayun/medicayundicom | dcm4jboss-all/trunk/dcm4jboss-ejb/src/java/org/dcm4chex/archive/common/SPSStatus.java | 3087 | /* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is part of dcm4che, an implementation of DICOM(TM) in
* Java(TM), available at http://sourceforge.net/projects/dcm4che.
*
* The Initial Developer of the Original Code is
* TIANI Medgraph AG.
* Portions created by the Initial Developer are Copyright (C) 2003-2005
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Gunter Zeilinger <gunter.zeilinger@tiani.com>
* Franz Willer <franz.willer@gwi-ag.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
package org.dcm4chex.archive.common;
import java.util.Arrays;
/**
* @author gunter.zeilinger@tiani.com
* @version $Revision: 2772 $ $Date: 2006-09-20 16:58:51 +0800 (Wed, 20 Sep 2006) $
* @since 06.10.2004
*
*/
public class SPSStatus {
private static final String[] ENUM = { "SCHEDULED", "ARRIVED", "READY",
"STARTED", "COMPLETED", "DISCONTINUED" };
public static final int SCHEDULED = 0;
public static final int ARRIVED = 1;
public static final int READY = 2;
public static final int STARTED = 3;
public static final int COMPLETED = 4;
public static final int DISCONTINUED = 5;
public static final String toString(int value) {
return ENUM[value];
}
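// Round-trip sketch: toInt(toString(STARTED)) == STARTED, while an unknown
// name passed to toInt() raises IllegalArgumentException (see below).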
public static final int toInt(String s) {
final int index = Arrays.asList(ENUM).indexOf(s);
if (index == -1)
throw new IllegalArgumentException(s);
return index;
}
public static int[] toInts(String[] ss) {
if (ss == null) {
return null;
}
int[] ret = new int[ss.length];
for (int i = 0; i < ret.length; i++) {
ret[i] = toInt(ss[i]);
}
return ret;
}
} | apache-2.0 |
parvez3019/bitcoin | src/primitives/block.h | 4262 | // Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2013 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_PRIMITIVES_BLOCK_H
#define BITCOIN_PRIMITIVES_BLOCK_H
#include "primitives/transaction.h"
#include "serialize.h"
#include "uint256.h"
/** Nodes collect new transactions into a block, hash them into a hash tree,
* and scan through nonce values to make the block's hash satisfy proof-of-work
* requirements. When they solve the proof-of-work, they broadcast the block
* to everyone and the block is added to the block chain. The first transaction
* in the block is a special one that creates a new coin owned by the creator
* of the block.
*/
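// Wire-layout sketch (informational): a serialized block header is 80 bytes,
// nVersion(4) | hashPrevBlock(32) | hashMerkleRoot(32) | nTime(4) | nBits(4) | nNonce(4),
// matching the field order in SerializationOp below.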
class CBlockHeader
{
public:
// header
static const int32_t CURRENT_VERSION=3;
int32_t nVersion;
uint256 hashPrevBlock;
uint256 hashMerkleRoot;
uint32_t nTime;
uint32_t nBits;
uint32_t nNonce;
CBlockHeader()
{
SetNull();
}
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
READWRITE(this->nVersion);
nVersion = this->nVersion;
READWRITE(hashPrevBlock);
READWRITE(hashMerkleRoot);
READWRITE(nTime);
READWRITE(nBits);
READWRITE(nNonce);
}
void SetNull()
{
nVersion = CBlockHeader::CURRENT_VERSION;
hashPrevBlock.SetNull();
hashMerkleRoot.SetNull();
nTime = 0;
nBits = 0;
nNonce = 0;
}
bool IsNull() const
{
return (nBits == 0);
}
uint256 GetHash() const;
int64_t GetBlockTime() const
{
return (int64_t)nTime;
}
};
class CBlock : public CBlockHeader
{
public:
// network and disk
std::vector<CTransaction> vtx;
// memory only
mutable std::vector<uint256> vMerkleTree;
CBlock()
{
SetNull();
}
CBlock(const CBlockHeader &header)
{
SetNull();
*((CBlockHeader*)this) = header;
}
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
READWRITE(*(CBlockHeader*)this);
READWRITE(vtx);
}
void SetNull()
{
CBlockHeader::SetNull();
vtx.clear();
vMerkleTree.clear();
}
CBlockHeader GetBlockHeader() const
{
CBlockHeader block;
block.nVersion = nVersion;
block.hashPrevBlock = hashPrevBlock;
block.hashMerkleRoot = hashMerkleRoot;
block.nTime = nTime;
block.nBits = nBits;
block.nNonce = nNonce;
return block;
}
// Build the in-memory merkle tree for this block and return the merkle root.
// If non-NULL, *mutated is set to whether mutation was detected in the merkle
// tree (a duplication of transactions in the block leading to an identical
// merkle root).
uint256 BuildMerkleTree(bool* mutated = NULL) const;
std::vector<uint256> GetMerkleBranch(int nIndex) const;
static uint256 CheckMerkleBranch(uint256 hash, const std::vector<uint256>& vMerkleBranch, int nIndex);
std::string ToString() const;
};
/** Describes a place in the block chain to another node such that if the
* other node doesn't have the same branch, it can find a recent common trunk.
* The further back it is, the further before the fork it may be.
*/
struct CBlockLocator
{
std::vector<uint256> vHave;
CBlockLocator() {}
CBlockLocator(const std::vector<uint256>& vHaveIn)
{
vHave = vHaveIn;
}
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
if (!(nType & SER_GETHASH))
READWRITE(nVersion);
READWRITE(vHave);
}
void SetNull()
{
vHave.clear();
}
bool IsNull() const
{
return vHave.empty();
}
};
#endif // BITCOIN_PRIMITIVES_BLOCK_H
| mit |
chisimba/modules | zend/resources/Zend/View/Helper/FormTextarea.php | 2995 | <?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@zend.com so we can send you a copy immediately.
*
* @category Zend
* @package Zend_View
* @subpackage Helper
* @copyright Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id: FormTextarea.php 20096 2010-01-06 02:05:09Z bkarwin $
*/
/**
* Abstract class for extension
*/
require_once 'Zend/View/Helper/FormElement.php';
/**
* Helper to generate a "textarea" element
*
* @category Zend
* @package Zend_View
* @subpackage Helper
* @copyright Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_View_Helper_FormTextarea extends Zend_View_Helper_FormElement
{
/**
* The default number of rows for a textarea.
*
* @access public
*
* @var int
*/
public $rows = 24;
/**
* The default number of columns for a textarea.
*
* @access public
*
* @var int
*/
public $cols = 80;
/**
* Generates a 'textarea' element.
*
* @access public
*
* @param string|array $name If a string, the element name. If an
* array, all other parameters are ignored, and the array elements
* are extracted in place of added parameters.
*
* @param mixed $value The element value.
*
* @param array $attribs Attributes for the element tag.
*
* @return string The element XHTML.
*/
public function formTextarea($name, $value = null, $attribs = null)
{
$info = $this->_getInfo($name, $value, $attribs);
extract($info); // name, id, value, attribs, options, listsep, disable
// is it disabled?
$disabled = '';
if ($disable) {
// disabled.
$disabled = ' disabled="disabled"';
}
// Make sure that there are 'rows' and 'cols' values,
// as required by the spec (noted by Orjan Persson).
if (empty($attribs['rows'])) {
$attribs['rows'] = (int) $this->rows;
}
if (empty($attribs['cols'])) {
$attribs['cols'] = (int) $this->cols;
}
// build the element
$xhtml = '<textarea name="' . $this->view->escape($name) . '"'
. ' id="' . $this->view->escape($id) . '"'
. $disabled
. $this->_htmlAttribs($attribs) . '>'
. $this->view->escape($value) . '</textarea>';
return $xhtml;
}
}
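// Usage sketch (inside a view script; values are hypothetical):
//   echo $this->formTextarea('comments', 'Default text',
//       array('rows' => 10, 'cols' => 40));
// renders roughly:
//   <textarea name="comments" id="comments" rows="10" cols="40">Default text</textarea>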
| gpl-2.0 |
rperier/linux-rockchip | arch/powerpc/platforms/ps3/system-bus.c | 19381 | // SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 system bus driver.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>
#include "platform.h"
static struct device ps3_system_bus = {
.init_name = "ps3_system",
};
/* FIXME: need device usage counters! */
static struct {
struct mutex mutex;
int sb_11; /* usb 0 */
int sb_12; /* usb 0 */
int gpu;
} usage_hack;
static int ps3_is_device(struct ps3_system_bus_device *dev, u64 bus_id,
u64 dev_id)
{
return dev->bus_id == bus_id && dev->dev_id == dev_id;
}
static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev)
{
int result;
BUG_ON(!dev->bus_id);
mutex_lock(&usage_hack.mutex);
if (ps3_is_device(dev, 1, 1)) {
usage_hack.sb_11++;
if (usage_hack.sb_11 > 1) {
result = 0;
goto done;
}
}
if (ps3_is_device(dev, 1, 2)) {
usage_hack.sb_12++;
if (usage_hack.sb_12 > 1) {
result = 0;
goto done;
}
}
result = lv1_open_device(dev->bus_id, dev->dev_id, 0);
if (result) {
pr_debug("%s:%d: lv1_open_device failed: %s\n", __func__,
__LINE__, ps3_result(result));
result = -EPERM;
}
done:
mutex_unlock(&usage_hack.mutex);
return result;
}
static int ps3_close_hv_device_sb(struct ps3_system_bus_device *dev)
{
int result;
BUG_ON(!dev->bus_id);
mutex_lock(&usage_hack.mutex);
if (ps3_is_device(dev, 1, 1)) {
usage_hack.sb_11--;
if (usage_hack.sb_11) {
result = 0;
goto done;
}
}
if (ps3_is_device(dev, 1, 2)) {
usage_hack.sb_12--;
if (usage_hack.sb_12) {
result = 0;
goto done;
}
}
result = lv1_close_device(dev->bus_id, dev->dev_id);
BUG_ON(result);
done:
mutex_unlock(&usage_hack.mutex);
return result;
}
static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev)
{
int result;
mutex_lock(&usage_hack.mutex);
usage_hack.gpu++;
if (usage_hack.gpu > 1) {
result = 0;
goto done;
}
result = lv1_gpu_open(0);
if (result) {
pr_debug("%s:%d: lv1_gpu_open failed: %s\n", __func__,
__LINE__, ps3_result(result));
result = -EPERM;
}
done:
mutex_unlock(&usage_hack.mutex);
return result;
}
static int ps3_close_hv_device_gpu(struct ps3_system_bus_device *dev)
{
int result;
mutex_lock(&usage_hack.mutex);
usage_hack.gpu--;
if (usage_hack.gpu) {
result = 0;
goto done;
}
result = lv1_gpu_close();
BUG_ON(result);
done:
mutex_unlock(&usage_hack.mutex);
return result;
}
int ps3_open_hv_device(struct ps3_system_bus_device *dev)
{
BUG_ON(!dev);
pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id);
switch (dev->match_id) {
case PS3_MATCH_ID_EHCI:
case PS3_MATCH_ID_OHCI:
case PS3_MATCH_ID_GELIC:
case PS3_MATCH_ID_STOR_DISK:
case PS3_MATCH_ID_STOR_ROM:
case PS3_MATCH_ID_STOR_FLASH:
return ps3_open_hv_device_sb(dev);
case PS3_MATCH_ID_SOUND:
case PS3_MATCH_ID_GPU:
return ps3_open_hv_device_gpu(dev);
case PS3_MATCH_ID_AV_SETTINGS:
case PS3_MATCH_ID_SYSTEM_MANAGER:
pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
__LINE__, dev->match_id);
pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__,
dev->bus_id);
BUG();
return -EINVAL;
default:
break;
}
pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__,
dev->match_id);
BUG();
return -ENODEV;
}
EXPORT_SYMBOL_GPL(ps3_open_hv_device);
int ps3_close_hv_device(struct ps3_system_bus_device *dev)
{
BUG_ON(!dev);
pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id);
switch (dev->match_id) {
case PS3_MATCH_ID_EHCI:
case PS3_MATCH_ID_OHCI:
case PS3_MATCH_ID_GELIC:
case PS3_MATCH_ID_STOR_DISK:
case PS3_MATCH_ID_STOR_ROM:
case PS3_MATCH_ID_STOR_FLASH:
return ps3_close_hv_device_sb(dev);
case PS3_MATCH_ID_SOUND:
case PS3_MATCH_ID_GPU:
return ps3_close_hv_device_gpu(dev);
case PS3_MATCH_ID_AV_SETTINGS:
case PS3_MATCH_ID_SYSTEM_MANAGER:
pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
__LINE__, dev->match_id);
pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__,
dev->bus_id);
BUG();
return -EINVAL;
default:
break;
}
pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__,
dev->match_id);
BUG();
return -ENODEV;
}
EXPORT_SYMBOL_GPL(ps3_close_hv_device);
#define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__)
static void _dump_mmio_region(const struct ps3_mmio_region* r,
const char* func, int line)
{
pr_debug("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
r->dev->dev_id);
pr_debug("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
pr_debug("%s:%d: len %lxh\n", func, line, r->len);
pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr);
}
static int ps3_sb_mmio_region_create(struct ps3_mmio_region *r)
{
int result;
u64 lpar_addr;
result = lv1_map_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
r->bus_addr, r->len, r->page_size, &lpar_addr);
r->lpar_addr = lpar_addr;
if (result) {
pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n",
__func__, __LINE__, ps3_result(result));
r->lpar_addr = 0;
}
dump_mmio_region(r);
return result;
}
static int ps3_ioc0_mmio_region_create(struct ps3_mmio_region *r)
{
/* device specific; do nothing currently */
return 0;
}
int ps3_mmio_region_create(struct ps3_mmio_region *r)
{
return r->mmio_ops->create(r);
}
EXPORT_SYMBOL_GPL(ps3_mmio_region_create);
static int ps3_sb_free_mmio_region(struct ps3_mmio_region *r)
{
int result;
dump_mmio_region(r);
result = lv1_unmap_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
r->lpar_addr);
if (result)
pr_debug("%s:%d: lv1_unmap_device_mmio_region failed: %s\n",
__func__, __LINE__, ps3_result(result));
r->lpar_addr = 0;
return result;
}
static int ps3_ioc0_free_mmio_region(struct ps3_mmio_region *r)
{
/* device specific; do nothing currently */
return 0;
}
int ps3_free_mmio_region(struct ps3_mmio_region *r)
{
return r->mmio_ops->free(r);
}
EXPORT_SYMBOL_GPL(ps3_free_mmio_region);
static const struct ps3_mmio_region_ops ps3_mmio_sb_region_ops = {
.create = ps3_sb_mmio_region_create,
.free = ps3_sb_free_mmio_region
};
static const struct ps3_mmio_region_ops ps3_mmio_ioc0_region_ops = {
.create = ps3_ioc0_mmio_region_create,
.free = ps3_ioc0_free_mmio_region
};
int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len,
enum ps3_mmio_page_size page_size)
{
r->dev = dev;
r->bus_addr = bus_addr;
r->len = len;
r->page_size = page_size;
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_SB:
r->mmio_ops = &ps3_mmio_sb_region_ops;
break;
case PS3_DEVICE_TYPE_IOC0:
r->mmio_ops = &ps3_mmio_ioc0_region_ops;
break;
default:
BUG();
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(ps3_mmio_region_init);
static int ps3_system_bus_match(struct device *_dev,
struct device_driver *_drv)
{
int result;
struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv);
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
if (!dev->match_sub_id)
result = dev->match_id == drv->match_id;
else
result = dev->match_sub_id == drv->match_sub_id &&
dev->match_id == drv->match_id;
if (result)
pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n",
__func__, __LINE__,
dev->match_id, dev->match_sub_id, dev_name(&dev->core),
drv->match_id, drv->match_sub_id, drv->core.name);
else
pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n",
__func__, __LINE__,
dev->match_id, dev->match_sub_id, dev_name(&dev->core),
drv->match_id, drv->match_sub_id, drv->core.name);
return result;
}
static int ps3_system_bus_probe(struct device *_dev)
{
int result = 0;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct ps3_system_bus_driver *drv;
BUG_ON(!dev);
dev_dbg(_dev, "%s:%d\n", __func__, __LINE__);
drv = ps3_system_bus_dev_to_system_bus_drv(dev);
BUG_ON(!drv);
if (drv->probe)
result = drv->probe(dev);
else
pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__,
dev_name(&dev->core));
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
return result;
}
static int ps3_system_bus_remove(struct device *_dev)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct ps3_system_bus_driver *drv;
BUG_ON(!dev);
dev_dbg(_dev, "%s:%d\n", __func__, __LINE__);
drv = ps3_system_bus_dev_to_system_bus_drv(dev);
BUG_ON(!drv);
if (drv->remove)
drv->remove(dev);
else
dev_dbg(&dev->core, "%s:%d %s: no remove method\n",
__func__, __LINE__, drv->core.name);
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
return 0;
}
static void ps3_system_bus_shutdown(struct device *_dev)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct ps3_system_bus_driver *drv;
BUG_ON(!dev);
dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
dev->match_id);
if (!dev->core.driver) {
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
return;
}
drv = ps3_system_bus_dev_to_system_bus_drv(dev);
BUG_ON(!drv);
dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__,
dev_name(&dev->core), drv->core.name);
if (drv->shutdown)
drv->shutdown(dev);
else if (drv->remove) {
dev_dbg(&dev->core, "%s:%d %s: no shutdown, calling remove\n",
__func__, __LINE__, drv->core.name);
drv->remove(dev);
} else {
dev_dbg(&dev->core, "%s:%d %s: no shutdown method\n",
__func__, __LINE__, drv->core.name);
BUG();
}
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
}
static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *env)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
if (add_uevent_var(env, "MODALIAS=ps3:%d:%d", dev->match_id,
dev->match_sub_id))
return -ENOMEM;
return 0;
}
static ssize_t modalias_show(struct device *_dev, struct device_attribute *a,
char *buf)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id,
dev->match_sub_id);
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *ps3_system_bus_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(ps3_system_bus_dev);
struct bus_type ps3_system_bus_type = {
.name = "ps3_system_bus",
.match = ps3_system_bus_match,
.uevent = ps3_system_bus_uevent,
.probe = ps3_system_bus_probe,
.remove = ps3_system_bus_remove,
.shutdown = ps3_system_bus_shutdown,
.dev_groups = ps3_system_bus_dev_groups,
};
static int __init ps3_system_bus_init(void)
{
int result;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
mutex_init(&usage_hack.mutex);
result = device_register(&ps3_system_bus);
BUG_ON(result);
result = bus_register(&ps3_system_bus_type);
BUG_ON(result);
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return result;
}
core_initcall(ps3_system_bus_init);
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
static void * ps3_alloc_coherent(struct device *_dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
int result;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
unsigned long virt_addr;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
flag |= __GFP_ZERO;
virt_addr = __get_free_pages(flag, get_order(size));
if (!virt_addr) {
pr_debug("%s:%d: get_free_pages failed\n", __func__, __LINE__);
goto clean_none;
}
result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle,
CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
CBE_IOPTE_SO_RW | CBE_IOPTE_M);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
__func__, __LINE__, result);
BUG_ON("check region type");
goto clean_alloc;
}
return (void*)virt_addr;
clean_alloc:
free_pages(virt_addr, get_order(size));
clean_none:
*dma_handle = 0;
return NULL;
}
static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
ps3_dma_unmap(dev->d_region, dma_handle, size);
free_pages((unsigned long)vaddr, get_order(size));
}
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
* returned will point to the same byte within the page as was passed in.
*/
static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
dma_addr_t bus_addr;
void *ptr = page_address(page) + offset;
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
&bus_addr,
CBE_IOPTE_PP_R | CBE_IOPTE_PP_W |
CBE_IOPTE_SO_RW | CBE_IOPTE_M);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
__func__, __LINE__, result);
}
return bus_addr;
}
static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
dma_addr_t bus_addr;
u64 iopte_flag;
void *ptr = page_address(page) + offset;
iopte_flag = CBE_IOPTE_M;
switch (direction) {
case DMA_BIDIRECTIONAL:
iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
case DMA_TO_DEVICE:
iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_SO_R;
break;
case DMA_FROM_DEVICE:
iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
default:
/* should never happen */
BUG();
}
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
&bus_addr, iopte_flag);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
__func__, __LINE__, result);
}
return bus_addr;
}
static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction, unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
result = ps3_dma_unmap(dev->d_region, dma_addr, size);
if (result) {
pr_debug("%s:%d: ps3_dma_unmap failed (%d)\n",
__func__, __LINE__, result);
}
}
static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction, unsigned long attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
return -EPERM;
#else
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i) {
int result = ps3_dma_map(dev->d_region, sg_phys(sg),
sg->length, &sg->dma_address, 0);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
__func__, __LINE__, result);
return -EINVAL;
}
sg->dma_length = sg->length;
}
return nents;
#endif
}
static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
int nents,
enum dma_data_direction direction,
unsigned long attrs)
{
BUG();
return 0;
}
static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction, unsigned long attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
#endif
}
static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
BUG();
}
static int ps3_dma_supported(struct device *_dev, u64 mask)
{
return mask >= DMA_BIT_MASK(32);
}
static const struct dma_map_ops ps3_sb_dma_ops = {
.alloc = ps3_alloc_coherent,
.free = ps3_free_coherent,
.map_sg = ps3_sb_map_sg,
.unmap_sg = ps3_sb_unmap_sg,
.dma_supported = ps3_dma_supported,
.map_page = ps3_sb_map_page,
.unmap_page = ps3_unmap_page,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
static const struct dma_map_ops ps3_ioc0_dma_ops = {
.alloc = ps3_alloc_coherent,
.free = ps3_free_coherent,
.map_sg = ps3_ioc0_map_sg,
.unmap_sg = ps3_ioc0_unmap_sg,
.dma_supported = ps3_dma_supported,
.map_page = ps3_ioc0_map_page,
.unmap_page = ps3_unmap_page,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
/**
* ps3_system_bus_release_device - remove a device from the system bus
*/
static void ps3_system_bus_release_device(struct device *_dev)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
kfree(dev);
}
/**
* ps3_system_bus_device_register - add a device to the system bus
*
* ps3_system_bus_device_register() expects the dev object to be allocated
* dynamically by the caller. The system bus takes ownership of the dev
* object and frees the object in ps3_system_bus_release_device().
*/
int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
{
int result;
static unsigned int dev_ioc0_count;
static unsigned int dev_sb_count;
static unsigned int dev_vuart_count;
static unsigned int dev_lpm_count;
if (!dev->core.parent)
dev->core.parent = &ps3_system_bus;
dev->core.bus = &ps3_system_bus_type;
dev->core.release = ps3_system_bus_release_device;
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_IOC0:
dev->core.dma_ops = &ps3_ioc0_dma_ops;
dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
break;
case PS3_DEVICE_TYPE_SB:
dev->core.dma_ops = &ps3_sb_dma_ops;
dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
break;
case PS3_DEVICE_TYPE_VUART:
dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count);
break;
case PS3_DEVICE_TYPE_LPM:
dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count);
break;
default:
BUG();
	}
dev->core.of_node = NULL;
set_dev_node(&dev->core, 0);
pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
result = device_register(&dev->core);
return result;
}
EXPORT_SYMBOL_GPL(ps3_system_bus_device_register);
int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv)
{
int result;
pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name);
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
drv->core.bus = &ps3_system_bus_type;
result = driver_register(&drv->core);
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name);
return result;
}
EXPORT_SYMBOL_GPL(ps3_system_bus_driver_register);
void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv)
{
pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name);
driver_unregister(&drv->core);
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name);
}
EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister);
| gpl-2.0 |
akosyakov/intellij-community | plugins/cvs/cvs-plugin/src/com/intellij/cvsSupport2/actions/update/UpdateByBranchUpdateSettings.java | 1930 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.cvsSupport2.actions.update;
import com.intellij.cvsSupport2.cvsoperations.dateOrRevision.RevisionOrDate;
import com.intellij.cvsSupport2.cvsoperations.dateOrRevision.SimpleRevision;
import org.netbeans.lib.cvsclient.command.KeywordSubstitution;
/**
 * @author lesya
*/
public class UpdateByBranchUpdateSettings implements UpdateSettings{
private final String myBranchName;
private final boolean myMakeNewFilesReadOnly;
public UpdateByBranchUpdateSettings(String branchName, boolean makeNewFilesReadOnly) {
myBranchName = branchName;
myMakeNewFilesReadOnly = makeNewFilesReadOnly;
}
public boolean getPruneEmptyDirectories() {
return true;
}
public String getBranch1ToMergeWith() {
return null;
}
public String getBranch2ToMergeWith() {
return null;
}
public boolean getResetAllSticky() {
return false;
}
public boolean getDontMakeAnyChanges() {
return false;
}
public boolean getCreateDirectories() {
return true;
}
public boolean getCleanCopy() {
return false;
}
public KeywordSubstitution getKeywordSubstitution() {
return null;
}
public RevisionOrDate getRevisionOrDate() {
return new SimpleRevision(myBranchName);
}
public boolean getMakeNewFilesReadOnly() {
return myMakeNewFilesReadOnly;
}
}
| apache-2.0 |
Dilts/GruntworkflowWordpress | wp-content/themes/testGRUNT/src/node_modules/grunt-css/node_modules/grunt/docs/api_config.md | 3955 | [Grunt homepage](https://github.com/gruntjs/grunt) | [Documentation table of contents](toc.md)
# [The grunt API](api.md) / grunt.config
Access project-specific configuration data defined in the [grunt.js gruntfile](getting_started.md).
See the [config lib source](../lib/grunt/config.js) for more information.
## The config API
Note that any method marked with a ☃ (unicode snowman) is also available directly on the `grunt` object, and any method marked with a ☆ (white star) is also available inside tasks on the `this` object. Just so you know. See the [API main page](api.md) for more usage information.
## Initializing Config Data
_Note that the method listed below is also available on the `grunt` object as [grunt.initConfig](api.md)._
### grunt.config.init ☃
Initialize a configuration object for the current project. The specified `configObject` is used by tasks and helpers and can also be accessed using the `grunt.config` method. Nearly every project's [grunt.js gruntfile](getting_started.md) will call this method.
```javascript
grunt.config.init(configObject)
```
Note that any specified `<config>` and `<json>` [directives](api_task.md) will be automatically processed when the config object is initialized.
This example contains sample config data for the [lint task](task_lint.md):
```javascript
grunt.config.init({
lint: {
all: ['lib/*.js', 'test/*.js', 'grunt.js']
}
});
```
See the [configuring grunt](getting_started.md) page for more configuration examples.
_This method is also available as [grunt.initConfig](api.md)._
## Accessing Config Data
The following methods allow grunt configuration data to be accessed either via dot-delimited string like `'pkg.author.name'` or via array of property name parts like `['pkg', 'author', 'name']`.
Note that if a specified property name contains a `.` dot, it must be escaped with a literal backslash, eg. `'concat.dist/built\\.js'`. If an array of parts is specified, grunt will handle the escaping internally with the `grunt.config.escape` method.
### grunt.config
Get or set a value from the project's grunt configuration. This method serves as an alias to other methods; if two arguments are passed, `grunt.config.set` is called, otherwise `grunt.config.get` is called.
```javascript
grunt.config([prop [, value]])
```
### grunt.config.get
Get a value from the project's grunt configuration. If `prop` is specified, that property's value is returned, or `null` if that property is not defined. If `prop` isn't specified, a copy of the entire config object is returned.
```javascript
grunt.config.get([prop])
```
Any `<% %>` templates in returned values will not be automatically processed, but can be processed afterwards using the [grunt.template.process](api_template.md) method. If you want to do both at once, the `grunt.config.process` method can be used.
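For example, given the `lint` config shown earlier (the second lookup assumes no such property was defined):
```javascript
grunt.config.get('lint.all'); // ['lib/*.js', 'test/*.js', 'grunt.js']
grunt.config.get('lint.missing'); // null
```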
### grunt.config.set
Set a value into the project's grunt configuration.
```javascript
grunt.config.set(prop, value)
```
Note that any specified `<config>` and `<json>` [directives](api_task.md) will be automatically processed when the config data is set.
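For example, overwriting the `lint` config shown earlier:
```javascript
grunt.config.set('lint.all', ['lib/*.js']);
```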
### grunt.config.escape
Escape `.` dots in the given `propString`. This should be used for property names that contain dots.
```javascript
grunt.config.escape(propString)
```
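For example, to look up a target whose name contains a dot (the `concat` config here is hypothetical):
```javascript
var prop = 'concat.' + grunt.config.escape('dist/built.js');
grunt.config.get(prop); // reads the "dist/built.js" target of the concat config
```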
### grunt.config.process
Behaves like `grunt.config.get`, but additionally recursively processes all `<% %>` templates in the returned data.
```javascript
grunt.config.process([prop])
```
## Requiring Config Data
_Note that the method listed below is also available inside tasks on the `this` object as [this.requiresConfig](api.md)._
### grunt.config.requires ☆
Fail the current task if one or more required config properties is missing. One or more string or array config properties may be specified.
```javascript
grunt.config.requires(prop [, prop [, ...]])
```
_This method is also available inside tasks as [this.requiresConfig](api.md)._
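For example, to fail fast when the `lint` config shown earlier is missing:
```javascript
grunt.config.requires('lint.all');
```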
| gpl-2.0 |
eabatalov/au-linux-kernel-autumn-2017 | linux/arch/frv/mm/fault.c | 8194 | /*
* linux/arch/frv/mm/fault.c
*
* Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
* - Written by David Howells (dhowells@redhat.com)
* - Derived from arch/m68knommu/mm/fault.c
* - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
* - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
*
* Based on:
*
* linux/arch/m68k/mm/fault.c
*
* Copyright (C) 1995 Hamish Macdonald
*/
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/gdb-stub.h>
/*****************************************************************************/
/*
* This routine handles page faults. It determines the problem, and
* then passes it off to one of the appropriate routines.
*/
asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0)
{
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long _pme, lrai, lrad;
unsigned long flags = 0;
siginfo_t info;
pgd_t *pge;
pud_t *pue;
pte_t *pte;
int fault;
#if 0
const char *atxc[16] = {
[0x0] = "mmu-miss", [0x8] = "multi-dat", [0x9] = "multi-sat",
[0xa] = "tlb-miss", [0xc] = "privilege", [0xd] = "write-prot",
};
printk("do_page_fault(%d,%lx [%s],%lx)\n",
datammu, esr0, atxc[esr0 >> 20 & 0xf], ear0);
#endif
mm = current->mm;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* This verifies that the fault happens in kernel space
* and that the fault was a page not present (invalid) error
*/
if (!user_mode(__frame) && (esr0 & ESR0_ATXC) == ESR0_ATXC_AMRTLB_MISS) {
if (ear0 >= VMALLOC_START && ear0 < VMALLOC_END)
goto kernel_pte_fault;
if (ear0 >= PKMAP_BASE && ear0 < PKMAP_END)
goto kernel_pte_fault;
}
info.si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (faulthandler_disabled() || !mm)
goto no_context;
if (user_mode(__frame))
flags |= FAULT_FLAG_USER;
down_read(&mm->mmap_sem);
vma = find_vma(mm, ear0);
if (!vma)
goto bad_area;
if (vma->vm_start <= ear0)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (user_mode(__frame)) {
/*
* accessing the stack below %esp is always a bug.
* The "+ 32" is there due to some instructions (like
* pusha) doing post-decrement on the stack and that
* doesn't show up until later..
*/
if ((ear0 & PAGE_MASK) + 2 * PAGE_SIZE < __frame->sp) {
#if 0
printk("[%d] ### Access below stack @%lx (sp=%lx)\n",
current->pid, ear0, __frame->sp);
show_registers(__frame);
printk("[%d] ### Code: [%08lx] %02x %02x %02x %02x %02x %02x %02x %02x\n",
current->pid,
__frame->pc,
((u8*)__frame->pc)[0],
((u8*)__frame->pc)[1],
((u8*)__frame->pc)[2],
((u8*)__frame->pc)[3],
((u8*)__frame->pc)[4],
((u8*)__frame->pc)[5],
((u8*)__frame->pc)[6],
((u8*)__frame->pc)[7]
);
#endif
goto bad_area;
}
}
if (expand_stack(vma, ear0))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
switch (esr0 & ESR0_ATXC) {
default:
/* handle write to write protected page */
case ESR0_ATXC_WP_EXCEP:
#ifdef TEST_VERIFY_AREA
if (!(user_mode(__frame)))
printk("WP fault at %08lx\n", __frame->pc);
#endif
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
flags |= FAULT_FLAG_WRITE;
break;
/* handle read from protected page */
case ESR0_ATXC_PRIV_EXCEP:
goto bad_area;
/* handle read, write or exec on absent page
* - can't support write without permitting read
* - don't support execute without permitting read and vice-versa
*/
case ESR0_ATXC_AMRTLB_MISS:
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
break;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(vma, ear0, flags);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(__frame)) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void *) ear0;
force_sig_info(SIGSEGV, &info, current);
return;
}
no_context:
/* are we prepared to handle this kernel fault? */
if (fixup_exception(__frame))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
if (ear0 < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
else
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at virtual addr %08lx\n", ear0);
printk(" PC : %08lx\n", __frame->pc);
printk(" EXC : esr0=%08lx ear0=%08lx\n", esr0, ear0);
asm("lrai %1,%0,#1,#0,#0" : "=&r"(lrai) : "r"(ear0));
asm("lrad %1,%0,#1,#0,#0" : "=&r"(lrad) : "r"(ear0));
printk(KERN_ALERT " LRAI: %08lx\n", lrai);
printk(KERN_ALERT " LRAD: %08lx\n", lrad);
__break_hijack_kernel_event();
pge = pgd_offset(current->mm, ear0);
pue = pud_offset(pge, ear0);
_pme = pue->pue[0].ste[0];
printk(KERN_ALERT " PGE : %8p { PME %08lx }\n", pge, _pme);
if (_pme & xAMPRx_V) {
unsigned long dampr, damlr, val;
asm volatile("movsg dampr2,%0 ! movgs %2,dampr2 ! movsg damlr2,%1"
: "=&r"(dampr), "=r"(damlr)
: "r" (_pme | xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V)
);
pte = (pte_t *) damlr + __pte_index(ear0);
val = pte_val(*pte);
asm volatile("movgs %0,dampr2" :: "r" (dampr));
printk(KERN_ALERT " PTE : %8p { %08lx }\n", pte, val);
}
die_if_kernel("Oops\n");
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(__frame))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *) ear0;
force_sig_info(SIGBUS, &info, current);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(__frame))
goto no_context;
return;
/*
* The fault was caused by a kernel PTE (such as installed by vmalloc or kmap)
*/
kernel_pte_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int index = pgd_index(ear0);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *) __get_TTBR();
pgd = (pgd_t *)__va(pgd) + index;
pgd_k = ((pgd_t *)(init_mm.pgd)) + index;
if (!pgd_present(*pgd_k))
goto no_context;
//set_pgd(pgd, *pgd_k); /////// gcc ICE's on this line
pud_k = pud_offset(pgd_k, ear0);
if (!pud_present(*pud_k))
goto no_context;
pmd_k = pmd_offset(pud_k, ear0);
if (!pmd_present(*pmd_k))
goto no_context;
pud = pud_offset(pgd, ear0);
pmd = pmd_offset(pud, ear0);
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, ear0);
if (!pte_present(*pte_k))
goto no_context;
return;
}
} /* end do_page_fault() */
| gpl-3.0 |
linzhaoming/origin | vendor/k8s.io/kubernetes/staging/src/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go | 3846 | // +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
{
in := &in
*out = make(ExtraValue, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
func (in ExtraValue) DeepCopy() ExtraValue {
if in == nil {
return nil
}
out := new(ExtraValue)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReview) DeepCopyInto(out *TokenReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview.
func (in *TokenReview) DeepCopy() *TokenReview {
if in == nil {
return nil
}
out := new(TokenReview)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TokenReview) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec.
func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec {
if in == nil {
return nil
}
out := new(TokenReviewSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) {
*out = *in
in.User.DeepCopyInto(&out.User)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus.
func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus {
if in == nil {
return nil
}
out := new(TokenReviewStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserInfo) DeepCopyInto(out *UserInfo) {
*out = *in
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Extra != nil {
in, out := &in.Extra, &out.Extra
*out = make(map[string]ExtraValue, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(ExtraValue, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo.
func (in *UserInfo) DeepCopy() *UserInfo {
if in == nil {
return nil
}
out := new(UserInfo)
in.DeepCopyInto(out)
return out
}
| apache-2.0 |
caot/intellij-community | plugins/groovy/groovy-psi/resources/fileTemplates/code/Groovy JUnit SetUp Method.groovy.html | 1073 | <html>
<body>
<table width="100%" border="0" cellpadding="5" cellspacing="0" style="border-collapse: collapse" bordercolor="#111111">
<tr>
<td colspan="3"><font face="verdana" size="-1">
This is a template used to create a setUp method in a Groovy JUnit test class.
</font>
</td>
</tr>
</table>
<table width="100%" border="0" cellpadding="5" cellspacing="0" style="border-collapse: collapse" bordercolor="#111111">
<tr>
<td colspan="3"><font face="verdana" size="-1">Predefined variables will take the following values:</font></td>
</tr>
<tr>
<td valign="top"><nobr><font face="verdana" size="-2"><b>${NAME}</b></font></nobr></td>
<td width="10"> </td>
<td width="100%" valign="top"><font face="verdana" size="-1">name of the created method.</font></td>
</tr>
<tr>
<td valign="top"><nobr><font face="verdana" size="-2"><b>${BODY}</b></font></nobr></td>
<td width="10"> </td>
<td width="100%" valign="top"><font face="verdana" size="-1">generated method body.</font></td>
</tr>
</table>
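<table width="100%" border="0" cellpadding="5" cellspacing="0" style="border-collapse: collapse" bordercolor="#111111">
<tr>
<td colspan="3"><font face="verdana" size="-1">
For example, with <b>${NAME}</b> expanding to <code>setUp</code> and <b>${BODY}</b> to a call to the superclass, the generated method might look like this (illustrative only):<br/>
<code>void setUp() { super.setUp() }</code>
</font></td>
</tr>
</table>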
</body>
</html> | apache-2.0 |
cnfire/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java | 4051 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.DataInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
/**
* An implementation of the abstract class {@link EditLogInputStream},
 * which is used to update HDFS meta-data state on a backup node.
*
* @see org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol#journal
* (org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration,
* int, int, byte[])
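 *
 * <p>A rough usage sketch (illustrative only, not taken from real callers):
 * <pre>{@code
 * EditLogBackupInputStream in = new EditLogBackupInputStream("sender address");
 * in.setBytes(receivedJournalRecords, layoutVersion);
 * FSEditLogOp op;
 * while ((op = in.readOp()) != null) {
 *   // apply op to the backup node's namespace
 * }
 * }</pre>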
*/
class EditLogBackupInputStream extends EditLogInputStream {
final String address; // sender address
private final ByteBufferInputStream inner;
private DataInputStream in;
private FSEditLogOp.Reader reader = null;
private FSEditLogLoader.PositionTrackingInputStream tracker = null;
private int version = 0;
/**
   * A ByteArrayInputStream that lets the caller modify the underlying byte array.
*/
private static class ByteBufferInputStream extends ByteArrayInputStream {
ByteBufferInputStream() {
super(new byte[0]);
}
void setData(byte[] newBytes) {
super.buf = newBytes;
super.count = newBytes == null ? 0 : newBytes.length;
super.mark = 0;
reset();
}
/**
     * Number of valid bytes in the underlying buffer.
*/
int length() {
return count;
}
}
EditLogBackupInputStream(String name) throws IOException {
address = name;
inner = new ByteBufferInputStream();
in = null;
reader = null;
}
@Override
public String getName() {
return address;
}
@Override
protected FSEditLogOp nextOp() throws IOException {
Preconditions.checkState(reader != null,
"Must call setBytes() before readOp()");
return reader.readOp(false);
}
@Override
protected FSEditLogOp nextValidOp() {
try {
return reader.readOp(true);
} catch (IOException e) {
throw new RuntimeException("got unexpected IOException " + e, e);
}
}
@Override
public int getVersion(boolean verifyVersion) throws IOException {
return this.version;
}
@Override
public long getPosition() {
return tracker.getPos();
}
@Override
public void close() throws IOException {
in.close();
}
@Override
public long length() throws IOException {
    // size of the data currently held in the backing buffer
return inner.length();
}
void setBytes(byte[] newBytes, int version) throws IOException {
inner.setData(newBytes);
tracker = new FSEditLogLoader.PositionTrackingInputStream(inner);
in = new DataInputStream(tracker);
this.version = version;
reader = FSEditLogOp.Reader.create(in, tracker, version);
}
void clear() throws IOException {
setBytes(null, 0);
reader = null;
this.version = 0;
}
@Override
public long getFirstTxId() {
return HdfsServerConstants.INVALID_TXID;
}
@Override
public long getLastTxId() {
return HdfsServerConstants.INVALID_TXID;
}
@Override
public boolean isInProgress() {
return true;
}
@Override
public void setMaxOpSize(int maxOpSize) {
reader.setMaxOpSize(maxOpSize);
}
@Override
public boolean isLocalLog() {
return true;
}
}
| apache-2.0 |
janrinze/loox7xxport.loox2624 | net/atm/common.h | 1501 | /* net/atm/common.h - ATM sockets (common part for PVC and SVC) */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#ifndef NET_ATM_COMMON_H
#define NET_ATM_COMMON_H
#include <linux/net.h>
#include <linux/poll.h> /* for poll_table */
int vcc_create(struct net *net, struct socket *sock, int protocol, int family);
int vcc_release(struct socket *sock);
int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags);
int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
size_t total_len);
unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, int optlen);
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
int atmpvc_init(void);
void atmpvc_exit(void);
int atmsvc_init(void);
void atmsvc_exit(void);
int atm_sysfs_init(void);
void atm_sysfs_exit(void);
#ifdef CONFIG_PROC_FS
int atm_proc_init(void);
void atm_proc_exit(void);
#else
static inline int atm_proc_init(void)
{
return 0;
}
static inline void atm_proc_exit(void)
{
/* nothing */
}
#endif /* CONFIG_PROC_FS */
/* SVC */
int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos);
void atm_dev_release_vccs(struct atm_dev *dev);
#endif
| gpl-2.0 |
shankarnakai/gbdesign | wp-content/mu-plugins/vsigestao/vendor/phpunit/phpunit/tests/Regression/GitHub/1351.phpt | 1097 | --TEST--
GH-1351: Test result does not serialize test class in process isolation
--SKIPIF--
<?php
if (!extension_loaded('pdo') || !in_array('sqlite', PDO::getAvailableDrivers())) {
print 'skip: PDO_SQLITE is required';
}
?>
--FILE--
<?php
$_SERVER['argv'][1] = '--no-configuration';
$_SERVER['argv'][2] = '--process-isolation';
$_SERVER['argv'][3] = 'Issue1351Test';
$_SERVER['argv'][4] = __DIR__ . '/1351/Issue1351Test.php';
require __DIR__ . '/../../bootstrap.php';
PHPUnit_TextUI_Command::main();
?>
--EXPECTF--
PHPUnit %s by Sebastian Bergmann and contributors.
F.E.E 5 / 5 (100%)
Time: %s, Memory: %sMb
There were 2 errors:
1) Issue1351Test::testExceptionPre
RuntimeException: Expected rethrown exception.
%A
Caused by
LogicException: Expected exception.
%A
2) Issue1351Test::testPhpCoreLanguageException
PDOException: SQLSTATE[HY000]: General error: 1 no such table: php_wtf
%A
--
There was 1 failure:
1) Issue1351Test::testFailurePre
Expected failure.
%A
FAILURES!
Tests: 5, Assertions: 5, Errors: 2, Failures: 1.
| gpl-2.0 |
liveqmock/platform-tools-idea | plugins/groovy/src/org/jetbrains/plugins/groovy/refactoring/convertToJava/invocators/CustomMethodInvocator.java | 3095 | /*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.groovy.refactoring.convertToJava.invocators;
import com.intellij.openapi.extensions.ExtensionPointName;
import com.intellij.psi.PsiMethod;
import com.intellij.psi.PsiSubstitutor;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.plugins.groovy.lang.psi.GroovyPsiElement;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.arguments.GrNamedArgument;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.blocks.GrClosableBlock;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.expressions.GrExpression;
import org.jetbrains.plugins.groovy.lang.psi.api.statements.typedef.members.GrGdkMethod;
import org.jetbrains.plugins.groovy.refactoring.convertToJava.ExpressionGenerator;
/**
* @author Max Medvedev
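 *
 * Implementations are registered under the
 * {@code org.intellij.groovy.convertToJava.customMethodInvocator} extension point;
 * a hypothetical plugin.xml sketch:
 * <pre>{@code
 * <customMethodInvocator implementation="com.example.MyMethodInvocator"/>
 * }</pre>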
*/
public abstract class CustomMethodInvocator {
private static final ExtensionPointName<CustomMethodInvocator> EP_NAME =
ExtensionPointName.create("org.intellij.groovy.convertToJava.customMethodInvocator");
protected abstract boolean invoke(@NotNull ExpressionGenerator generator,
@NotNull PsiMethod method,
@Nullable GrExpression caller,
@NotNull GrExpression[] exprs,
@NotNull GrNamedArgument[] namedArgs,
@NotNull GrClosableBlock[] closures,
@NotNull PsiSubstitutor substitutor,
@NotNull GroovyPsiElement context);
public static boolean invokeMethodOn(@NotNull ExpressionGenerator generator,
@NotNull GrGdkMethod method,
@Nullable GrExpression caller,
@NotNull GrExpression[] exprs,
@NotNull GrNamedArgument[] namedArgs,
@NotNull GrClosableBlock[] closures,
@NotNull PsiSubstitutor substitutor,
@NotNull GroovyPsiElement context) {
final PsiMethod staticMethod = method.getStaticMethod();
for (CustomMethodInvocator invocator : EP_NAME.getExtensions()) {
if (invocator.invoke(generator, staticMethod, caller, exprs, namedArgs, closures, substitutor, context)) {
return true;
}
}
return false;
}
}
| apache-2.0 |
vfalico/hydra-rpmsg | drivers/gpio/gpio-mm-lantiq.c | 4127 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2012 John Crispin <blogic@openwrt.org>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <lantiq_soc.h>
/*
* By attaching hardware latches to the EBU it is possible to create output
* only gpios. This driver configures a special memory address, which when
* written to outputs 16 bit to the latches.
*/
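/*
 * A sketch of a matching devicetree node for this driver (the address,
 * size and shadow value are illustrative, not taken from real hardware):
 *
 *	gpiomm: gpio@4000000 {
 *		compatible = "lantiq,gpio-mm";
 *		reg = <0x4000000 0x10>;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		lantiq,shadow = <0x77f>;
 *	};
 */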
#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
#define LTQ_EBU_WP 0x80000000 /* write protect bit */
struct ltq_mm {
struct of_mm_gpio_chip mmchip;
u16 shadow; /* shadow the latches state */
};
/**
* ltq_mm_apply() - write the shadow value to the ebu address.
* @chip: Pointer to our private data structure.
*
* Write the shadow value to the EBU to set the gpios. We need to set the
* global EBU lock to make sure that PCI/MTD dont break.
*/
static void ltq_mm_apply(struct ltq_mm *chip)
{
unsigned long flags;
spin_lock_irqsave(&ebu_lock, flags);
ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
__raw_writew(chip->shadow, chip->mmchip.regs);
ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
spin_unlock_irqrestore(&ebu_lock, flags);
}
/**
* ltq_mm_set() - gpio_chip->set - set gpios.
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
* @val: Value to be written to specified signal.
*
* Set the shadow value and call ltq_mm_apply.
*/
static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ltq_mm *chip =
container_of(mm_gc, struct ltq_mm, mmchip);
if (value)
chip->shadow |= (1 << offset);
else
chip->shadow &= ~(1 << offset);
ltq_mm_apply(chip);
}
/**
* ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
* @val: Value to be written to specified signal.
*
* Same as ltq_mm_set, always returns 0.
*/
static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
{
ltq_mm_set(gc, offset, value);
return 0;
}
/**
* ltq_mm_save_regs() - Set initial values of GPIO pins
* @mm_gc: pointer to memory mapped GPIO chip structure
*/
static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
{
struct ltq_mm *chip =
container_of(mm_gc, struct ltq_mm, mmchip);
/* tell the ebu controller which memory address we will be using */
ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
ltq_mm_apply(chip);
}
static int ltq_mm_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct ltq_mm *chip;
const __be32 *shadow;
int ret = 0;
if (!res) {
dev_err(&pdev->dev, "failed to get memory resource\n");
return -ENOENT;
}
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->mmchip.gc.ngpio = 16;
chip->mmchip.gc.label = "gpio-mm-ltq";
chip->mmchip.gc.direction_output = ltq_mm_dir_out;
chip->mmchip.gc.set = ltq_mm_set;
chip->mmchip.save_regs = ltq_mm_save_regs;
/* store the shadow value if one was passed by the devicetree */
shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
if (shadow)
chip->shadow = be32_to_cpu(*shadow);
ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
if (ret)
kfree(chip);
return ret;
}
static const struct of_device_id ltq_mm_match[] = {
{ .compatible = "lantiq,gpio-mm" },
{},
};
MODULE_DEVICE_TABLE(of, ltq_mm_match);
static struct platform_driver ltq_mm_driver = {
.probe = ltq_mm_probe,
.driver = {
.name = "gpio-mm-ltq",
.of_match_table = ltq_mm_match,
},
};
static int __init ltq_mm_init(void)
{
return platform_driver_register(<q_mm_driver);
}
subsys_initcall(ltq_mm_init);
| gpl-2.0 |
chisimba/modules | zend/resources/Zend/Translate/Exception.php | 1099 | <?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@zend.com so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Translate
* @copyright Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
* @version $Id: Exception.php 20096 2010-01-06 02:05:09Z bkarwin $
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
/**
* Zend_Exception
*/
require_once 'Zend/Exception.php';
/**
* @category Zend
* @package Zend_Translate
* @copyright Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
class Zend_Translate_Exception extends Zend_Exception
{
}
| gpl-2.0 |
coppercu/iot-neuron | src/osl/rt-thread/rt-thread_1.2.0/components/dfs/filesystems/romfs/dfs_romfs.h | 1336 | /*
* File : dfs_romfs.h
* This file is part of Device File System in RT-Thread RTOS
* COPYRIGHT (C) 2004-2011, RT-Thread Development Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Change Logs:
* Date Author Notes
*/
#ifndef __DFS_ROMFS_H__
#define __DFS_ROMFS_H__
#include <rtthread.h>
#define ROMFS_DIRENT_FILE 0x00
#define ROMFS_DIRENT_DIR 0x01
struct romfs_dirent
{
rt_uint32_t type; /* dirent type */
const char *name; /* dirent name */
    const rt_uint8_t *data; /* file data ptr */
rt_size_t size; /* file size */
};
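/*
 * A minimal sketch of a romfs tree built from these dirents (file name
 * and payload are illustrative). For a directory entry, data points to
 * its child dirent array and size is the number of children:
 *
 *	static const rt_uint8_t _dummy_txt[] = "hello\n";
 *	static const struct romfs_dirent _root_dirent[] = {
 *		{ROMFS_DIRENT_FILE, "dummy.txt", _dummy_txt, sizeof(_dummy_txt) - 1},
 *	};
 *	const struct romfs_dirent romfs_root = {
 *		ROMFS_DIRENT_DIR, "/", (rt_uint8_t *)_root_dirent, 1
 *	};
 */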
int dfs_romfs_init(void);
extern const struct romfs_dirent romfs_root;
#endif
| gpl-2.0 |
ajpi222/canvas-lms-hdi | public/javascripts/jquery.fancyplaceholder.js | 2861 | /**
* Copyright (C) 2011 Instructure, Inc.
*
* This file is part of Canvas.
*
* Canvas is free software: you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License as published by the Free
* Software Foundation, version 3 of the License.
*
* Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
* details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// markup required:
// <span class=" field-with-fancyplaceholder"><label for="email">Email Address</span></label><input type="text" id="login_apple_id"></span>
//
// css required:
// span.field-with-fancyplaceholder{display:block;display:inline-block;position:relative;vertical-align:top;}
// span.field-with-fancyplaceholder label.placeholder{color:#999;cursor:text;pointer-events:none;}
// span.field-with-fancyplaceholder label.placeholder span{position:absolute;z-index:2;-webkit-user-select:none;padding:3px 6px;}
// span.field-with-fancyplaceholder label.focus{color:#ccc;}
// span.field-with-fancyplaceholder label.hidden{color:#fff;}
// span.field-with-fancyplaceholder input.invalid{background:#ffffc5;color:#F30;}
// span.field-with-fancyplaceholder input.editing{color:#000;background:none repeat scroll 0 0 transparent;overflow:hidden;}
//
// then: $(".field-with-fancyplaceholder input").fancyPlaceholder();
define(['jquery'], function($) {
$.fn.fancyPlaceholder = function() {
var pollingInterval,
        foundInputsAndLabels = [];
function hideOrShowLabels(){
      $.each(foundInputsAndLabels, function(i, inputAndLabel){
        inputAndLabel[1][inputAndLabel[0].val() ? 'hide' : 'show']();
});
}
return this.each(function() {
var $input = $(this),
$label = $("label[for="+$input.attr('id')+"]");
$label.addClass('placeholder').wrapInner("<span/>").css({
'font-family' : $input.css('font-family'),
'font-size' : $input.css('font-size')
});
$input
.focus(function(){
$label.addClass('focus', 300);
})
.blur(function(){
$label.removeClass('focus', 300);
})
.bind('keyup', hideOrShowLabels);
      // if this was already focused before we got here, make it light gray now. sorry, ie7 can't do the :focus selector, so it doesn't get this.
try {
if ($("input:focus").get(0) == this) {
$input.triggerHandler('focus');
}
} catch(e) {}
      foundInputsAndLabels.push([$input, $label]);
if (!pollingInterval) {
window.setInterval(hideOrShowLabels, 100);
}
});
};
});
| agpl-3.0 |
caot/intellij-community | platform/lang-api/src/com/intellij/navigation/ChooseByNameContributor.java | 2515 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.navigation;
import com.intellij.openapi.extensions.ExtensionPointName;
import com.intellij.openapi.project.Project;
import org.jetbrains.annotations.NotNull;
/**
* Allows a plugin to add items to "Goto Class" and "Goto Symbol" lists.
*
* @see ChooseByNameRegistry
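 *
 * <p>A minimal sketch of an implementation (all names are illustrative only):
 * <pre>{@code
 * public class SampleContributor implements ChooseByNameContributor {
 *   public String[] getNames(Project project, boolean includeNonProjectItems) {
 *     return new String[]{"sampleSymbol"};
 *   }
 *   public NavigationItem[] getItemsByName(String name, String pattern,
 *                                          Project project, boolean includeNonProjectItems) {
 *     return new NavigationItem[0]; // look up and return the matching items here
 *   }
 * }
 * }</pre>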
*/
public interface ChooseByNameContributor {
ExtensionPointName<ChooseByNameContributor> CLASS_EP_NAME = ExtensionPointName.create("com.intellij.gotoClassContributor");
ExtensionPointName<ChooseByNameContributor> SYMBOL_EP_NAME = ExtensionPointName.create("com.intellij.gotoSymbolContributor");
ExtensionPointName<ChooseByNameContributor> FILE_EP_NAME = ExtensionPointName.create("com.intellij.gotoFileContributor");
/**
* Returns the list of names for the specified project to which it is possible to navigate
* by name.
*
* @param project the project in which the navigation is performed.
* @param includeNonProjectItems if true, the names of non-project items (for example,
* library classes) should be included in the returned array.
* @return the array of names.
*/
@NotNull
String[] getNames(Project project, boolean includeNonProjectItems);
/**
* Returns the list of navigation items matching the specified name.
*
* @param name the name selected from the list.
   * @param pattern the original pattern entered in the dialog.
* @param project the project in which the navigation is performed.
* @param includeNonProjectItems if true, the navigation items for non-project items (for example,
* library classes) should be included in the returned array.
* @return the array of navigation items.
*/
@NotNull
NavigationItem[] getItemsByName(String name, final String pattern, Project project, boolean includeNonProjectItems);
} | apache-2.0 |
andrewjylee/omniplay | linux-lts-quantal-3.5.0/drivers/gpu/drm/nouveau/nouveau_acpi.c | 11244 | #include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
#include <acpi/video.h>
#include <acpi/acpi.h>
#include <linux/mxm-wmi.h>
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nv50_display.h"
#include "nouveau_connector.h"
#include <linux/vga_switcheroo.h>
#define NOUVEAU_DSM_LED 0x02
#define NOUVEAU_DSM_LED_STATE 0x00
#define NOUVEAU_DSM_LED_OFF 0x10
#define NOUVEAU_DSM_LED_STAMINA 0x11
#define NOUVEAU_DSM_LED_SPEED 0x12
#define NOUVEAU_DSM_POWER 0x03
#define NOUVEAU_DSM_POWER_STATE 0x00
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
acpi_handle dhandle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
#define NOUVEAU_DSM_HAS_MUX 0x1
#define NOUVEAU_DSM_HAS_OPT 0x2
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
};
static const char nouveau_op_dsm_muid[] = {
0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
};
static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
union acpi_object params[4];
union acpi_object *obj;
int i, err;
char args_buff[4];
input.count = 4;
input.pointer = params;
params[0].type = ACPI_TYPE_BUFFER;
params[0].buffer.length = sizeof(nouveau_op_dsm_muid);
params[0].buffer.pointer = (char *)nouveau_op_dsm_muid;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = 0x00000100;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
params[3].type = ACPI_TYPE_BUFFER;
params[3].buffer.length = 4;
/* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
for (i = 0; i < 4; i++)
args_buff[i] = (arg >> i * 8) & 0xFF;
params[3].buffer.pointer = args_buff;
err = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (err) {
printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
return err;
}
obj = (union acpi_object *)output.pointer;
if (obj->type == ACPI_TYPE_INTEGER)
if (obj->integer.value == 0x80000002) {
return -ENODEV;
}
if (obj->type == ACPI_TYPE_BUFFER) {
if (obj->buffer.length == 4 && result) {
*result = 0;
*result |= obj->buffer.pointer[0];
*result |= (obj->buffer.pointer[1] << 8);
*result |= (obj->buffer.pointer[2] << 16);
*result |= (obj->buffer.pointer[3] << 24);
}
}
kfree(output.pointer);
return 0;
}
static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
union acpi_object params[4];
union acpi_object *obj;
int err;
input.count = 4;
input.pointer = params;
params[0].type = ACPI_TYPE_BUFFER;
params[0].buffer.length = sizeof(nouveau_dsm_muid);
params[0].buffer.pointer = (char *)nouveau_dsm_muid;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = 0x00000102;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
params[3].type = ACPI_TYPE_INTEGER;
params[3].integer.value = arg;
err = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (err) {
printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
return err;
}
obj = (union acpi_object *)output.pointer;
if (obj->type == ACPI_TYPE_INTEGER)
if (obj->integer.value == 0x80000002)
return -ENODEV;
if (obj->type == ACPI_TYPE_BUFFER) {
if (obj->buffer.length == 4 && result) {
*result = 0;
*result |= obj->buffer.pointer[0];
*result |= (obj->buffer.pointer[1] << 8);
*result |= (obj->buffer.pointer[2] << 16);
*result |= (obj->buffer.pointer[3] << 24);
}
}
kfree(output.pointer);
return 0;
}
/* Returns 1 if a DSM function is usable and 0 otherwise */
static int nouveau_test_dsm(acpi_handle test_handle,
int (*dsm_func)(acpi_handle, int, int, uint32_t *),
int sfnc)
{
u32 result = 0;
/* Function 0 returns a Buffer containing available functions. The args
* parameter is ignored for function 0, so just put 0 in it */
if (dsm_func(test_handle, 0, 0, &result))
return 0;
/* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
* the n-th bit is enabled, function n is supported */
return result & 1 && result & (1 << sfnc);
}
static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
}
static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
{
int arg;
if (state == VGA_SWITCHEROO_ON)
arg = NOUVEAU_DSM_POWER_SPEED;
else
arg = NOUVEAU_DSM_POWER_STAMINA;
nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
return 0;
}
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
/* perhaps the _DSM functions are mutually exclusive, but prepare for
* the future */
if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
else
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
}
static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state)
{
if (id == VGA_SWITCHEROO_IGD)
return 0;
/* Optimus laptops have the card already disabled in
* nouveau_switcheroo_set_state */
if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
return 0;
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
{
/* easy option one - intel vendor ID means Integrated */
if (pdev->vendor == PCI_VENDOR_ID_INTEL)
return VGA_SWITCHEROO_IGD;
/* is this device on Bus 0? - this may need improving */
if (pdev->bus->number == 0)
return VGA_SWITCHEROO_IGD;
return VGA_SWITCHEROO_DIS;
}
static struct vga_switcheroo_handler nouveau_dsm_handler = {
.switchto = nouveau_dsm_switchto,
.power_state = nouveau_dsm_power_state,
.get_client_id = nouveau_dsm_get_client_id,
};
static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle, nvidia_handle;
acpi_status status;
int retval = 0;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
if (ACPI_FAILURE(status)) {
return false;
}
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
NOUVEAU_DSM_OPTIMUS_FN))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval)
nouveau_dsm_priv.dhandle = dhandle;
return retval;
}
static bool nouveau_dsm_detect(void)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
int has_dsm = 0;
int has_optimus = 0;
int vga_count = 0;
bool guid_valid;
int retval;
bool ret = false;
/* lookup the MXM GUID */
guid_valid = mxm_wmi_supported();
if (guid_valid)
printk("MXM: GUID detected in BIOS\n");
/* now do DSM detection */
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
retval = nouveau_dsm_pci_probe(pdev);
if (retval & NOUVEAU_DSM_HAS_MUX)
has_dsm |= 1;
if (retval & NOUVEAU_DSM_HAS_OPT)
has_optimus = 1;
}
if (vga_count == 2 && has_dsm && guid_valid) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
if (has_optimus == 1) {
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.optimus_detected = true;
ret = true;
}
return ret;
}
void nouveau_register_dsm_handler(void)
{
bool r;
r = nouveau_dsm_detect();
if (!r)
return;
vga_switcheroo_register_handler(&nouveau_dsm_handler);
}
/* Must be called for Optimus models before the card can be turned off */
void nouveau_switcheroo_optimus_dsm(void)
{
u32 result = 0;
if (!nouveau_dsm_priv.optimus_detected)
return;
nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
NOUVEAU_DSM_OPTIMUS_ARGS, &result);
}
void nouveau_unregister_dsm_handler(void)
{
if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
vga_switcheroo_unregister_handler();
}
/* retrieve the ROM in 4k blocks */
static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
int offset, int len)
{
acpi_status status;
union acpi_object rom_arg_elements[2], *obj;
struct acpi_object_list rom_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
rom_arg.count = 2;
rom_arg.pointer = &rom_arg_elements[0];
rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
rom_arg_elements[0].integer.value = offset;
rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
rom_arg_elements[1].integer.value = len;
status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
obj = (union acpi_object *)buffer.pointer;
memcpy(bios+offset, obj->buffer.pointer, len);
kfree(buffer.pointer);
return len;
}
bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
{
acpi_status status;
acpi_handle dhandle, rom_handle;
if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
return false;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
if (ACPI_FAILURE(status))
return false;
nouveau_dsm_priv.rom_handle = rom_handle;
return true;
}
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
{
return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
}
int
nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct acpi_device *acpidev;
acpi_handle handle;
int type, ret;
void *edid;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
type = ACPI_VIDEO_DISPLAY_LCD;
break;
default:
return -EINVAL;
}
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
if (!handle)
return -ENODEV;
ret = acpi_bus_get_device(handle, &acpidev);
if (ret)
return -ENODEV;
ret = acpi_video_get_edid(acpidev, type, -1, &edid);
if (ret < 0)
return ret;
nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
return 0;
}
| bsd-2-clause |
Rich-Harris/cdnjs | ajax/libs/webicons/2.0.0/webicons.css | 23858 | /* Webicons by Adam Fairhead at Fairhead Creative http://fairheadcreative.com */
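/* Usage sketch (the icon, "small" and "large" classes below all exist in this file;
   the anchor markup itself is illustrative):
   <a class="webicon dribbble" href="#">Dribbble</a>
   <a class="webicon github large" href="#">GitHub</a> */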
.webicon {
display: inline-block;
background-size: 100%;
width: 30px;
height: 30px;
text-indent: -999em;
text-align: left;
-moz-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.5);
-webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.5);
box-shadow: 0 1px 0 rgba(0, 0, 0, 0.5);
-moz-border-radius: 5px;
-webkit-border-radius: 5px;
border-radius: 5px;
margin-bottom: 5px; }
.webicon:hover {
margin: -1px 0 1px;
-moz-box-shadow: 0 2px 0 rgba(0, 0, 0, 0.25);
-webkit-box-shadow: 0 2px 0 rgba(0, 0, 0, 0.25);
box-shadow: 0 2px 0 rgba(0, 0, 0, 0.25); }
.webicon.small {
width: 20px;
height: 20px;
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
border-radius: 3px; }
.webicon.large {
width: 48px;
height: 48px;
-moz-border-radius: 6px;
-webkit-border-radius: 6px;
border-radius: 6px; }
.no-svg .webicon.f500px {
background: url("webicons/webicon-f500px-m.png"); }
.no-svg .webicon.f500px.large {
background: url("webicons/webicon-f500px.png"); }
.no-svg .webicon.f500px.small {
background: url("webicons/webicon-f500px-s.png"); }
.svg .webicon.f500px {
background: url("webicons/webicon-f500px.svg"); }
.no-svg .webicon.aboutme {
background: url("webicons/webicon-aboutme-m.png"); }
.no-svg .webicon.aboutme.large {
background: url("webicons/webicon-aboutme.png"); }
.no-svg .webicon.aboutme.small {
background: url("webicons/webicon-aboutme-s.png"); }
.svg .webicon.aboutme {
background: url("webicons/webicon-aboutme.svg"); }
.no-svg .webicon.adn {
background: url("webicons/webicon-adn-m.png"); }
.no-svg .webicon.adn.large {
background: url("webicons/webicon-adn.png"); }
.no-svg .webicon.adn.small {
background: url("webicons/webicon-adn-s.png"); }
.svg .webicon.adn {
background: url("webicons/webicon-adn.svg"); }
.no-svg .webicon.android {
background: url("webicons/webicon-android-m.png"); }
.no-svg .webicon.android.large {
background: url("webicons/webicon-android.png"); }
.no-svg .webicon.android.small {
background: url("webicons/webicon-android-s.png"); }
.svg .webicon.android {
background: url("webicons/webicon-android.svg"); }
.no-svg .webicon.apple {
background: url("webicons/webicon-apple-m.png"); }
.no-svg .webicon.apple.large {
background: url("webicons/webicon-apple.png"); }
.no-svg .webicon.apple.small {
background: url("webicons/webicon-apple-s.png"); }
.svg .webicon.apple {
background: url("webicons/webicon-apple.svg"); }
.no-svg .webicon.behance {
background: url("webicons/webicon-behance-m.png"); }
.no-svg .webicon.behance.large {
background: url("webicons/webicon-behance.png"); }
.no-svg .webicon.behance.small {
background: url("webicons/webicon-behance-s.png"); }
.svg .webicon.behance {
background: url("webicons/webicon-behance.svg"); }
.no-svg .webicon.bitbucket {
background: url("webicons/webicon-bitbucket-m.png"); }
.no-svg .webicon.bitbucket.large {
background: url("webicons/webicon-bitbucket.png"); }
.no-svg .webicon.bitbucket.small {
background: url("webicons/webicon-bitbucket-s.png"); }
.svg .webicon.bitbucket {
background: url("webicons/webicon-bitbucket.svg"); }
.no-svg .webicon.blogger {
background: url("webicons/webicon-blogger-m.png"); }
.no-svg .webicon.blogger.large {
background: url("webicons/webicon-blogger.png"); }
.no-svg .webicon.blogger.small {
background: url("webicons/webicon-blogger-s.png"); }
.svg .webicon.blogger {
background: url("webicons/webicon-blogger.svg"); }
.no-svg .webicon.coderwall {
background: url("webicons/webicon-coderwall-m.png"); }
.no-svg .webicon.coderwall.large {
background: url("webicons/webicon-coderwall.png"); }
.no-svg .webicon.coderwall.small {
background: url("webicons/webicon-coderwall-s.png"); }
.svg .webicon.coderwall {
background: url("webicons/webicon-coderwall.svg"); }
.no-svg .webicon.creativecloud {
background: url("webicons/webicon-creativecloud-m.png"); }
.no-svg .webicon.creativecloud.large {
background: url("webicons/webicon-creativecloud.png"); }
.no-svg .webicon.creativecloud.small {
background: url("webicons/webicon-creativecloud-s.png"); }
.svg .webicon.creativecloud {
background: url("webicons/webicon-creativecloud.svg"); }
.no-svg .webicon.dribbble {
background: url("webicons/webicon-dribbble-m.png"); }
.no-svg .webicon.dribbble.large {
background: url("webicons/webicon-dribbble.png"); }
.no-svg .webicon.dribbble.small {
background: url("webicons/webicon-dribbble-s.png"); }
.svg .webicon.dribbble {
background: url("webicons/webicon-dribbble.svg"); }
.no-svg .webicon.dropbox {
background: url("webicons/webicon-dropbox-m.png"); }
.no-svg .webicon.dropbox.large {
background: url("webicons/webicon-dropbox.png"); }
.no-svg .webicon.dropbox.small {
background: url("webicons/webicon-dropbox-s.png"); }
.svg .webicon.dropbox {
background: url("webicons/webicon-dropbox.svg"); }
.no-svg .webicon.evernote {
background: url("webicons/webicon-evernote-m.png"); }
.no-svg .webicon.evernote.large {
background: url("webicons/webicon-evernote.png"); }
.no-svg .webicon.evernote.small {
background: url("webicons/webicon-evernote-s.png"); }
.svg .webicon.evernote {
background: url("webicons/webicon-evernote.svg"); }
.no-svg .webicon.fairheadcreative {
background: url("webicons/webicon-fairheadcreative-m.png"); }
.no-svg .webicon.fairheadcreative.large {
background: url("webicons/webicon-fairheadcreative.png"); }
.no-svg .webicon.fairheadcreative.small {
background: url("webicons/webicon-fairheadcreative-s.png"); }
.svg .webicon.fairheadcreative {
background: url("webicons/webicon-fairheadcreative.svg"); }
.no-svg .webicon.facebook {
background: url("webicons/webicon-facebook-m.png"); }
.no-svg .webicon.facebook.large {
background: url("webicons/webicon-facebook.png"); }
.no-svg .webicon.facebook.small {
background: url("webicons/webicon-facebook-s.png"); }
.svg .webicon.facebook {
background: url("webicons/webicon-facebook.svg"); }
.no-svg .webicon.flickr {
background: url("webicons/webicon-flickr-m.png"); }
.no-svg .webicon.flickr.large {
background: url("webicons/webicon-flickr.png"); }
.no-svg .webicon.flickr.small {
background: url("webicons/webicon-flickr-s.png"); }
.svg .webicon.flickr {
background: url("webicons/webicon-flickr.svg"); }
.no-svg .webicon.foursquare {
background: url("webicons/webicon-foursquare-m.png"); }
.no-svg .webicon.foursquare.large {
background: url("webicons/webicon-foursquare.png"); }
.no-svg .webicon.foursquare.small {
background: url("webicons/webicon-foursquare-s.png"); }
.svg .webicon.foursquare {
background: url("webicons/webicon-foursquare.svg"); }
.no-svg .webicon.git {
background: url("webicons/webicon-git-m.png"); }
.no-svg .webicon.git.large {
background: url("webicons/webicon-git.png"); }
.no-svg .webicon.git.small {
background: url("webicons/webicon-git-s.png"); }
.svg .webicon.git {
background: url("webicons/webicon-git.svg"); }
.no-svg .webicon.github {
background: url("webicons/webicon-github-m.png"); }
.no-svg .webicon.github.large {
background: url("webicons/webicon-github.png"); }
.no-svg .webicon.github.small {
background: url("webicons/webicon-github-s.png"); }
.svg .webicon.github {
background: url("webicons/webicon-github.svg"); }
.no-svg .webicon.goodreads {
background: url("webicons/webicon-goodreads-m.png"); }
.no-svg .webicon.goodreads.large {
background: url("webicons/webicon-goodreads.png"); }
.no-svg .webicon.goodreads.small {
background: url("webicons/webicon-goodreads-s.png"); }
.svg .webicon.goodreads {
background: url("webicons/webicon-goodreads.svg"); }
.no-svg .webicon.google {
background: url("webicons/webicon-google-m.png"); }
.no-svg .webicon.google.large {
background: url("webicons/webicon-google.png"); }
.no-svg .webicon.google.small {
background: url("webicons/webicon-google-s.png"); }
.svg .webicon.google {
background: url("webicons/webicon-google.svg"); }
.no-svg .webicon.googleplay {
background: url("webicons/webicon-googleplay-m.png"); }
.no-svg .webicon.googleplay.large {
background: url("webicons/webicon-googleplay.png"); }
.no-svg .webicon.googleplay.small {
background: url("webicons/webicon-googleplay-s.png"); }
.svg .webicon.googleplay {
background: url("webicons/webicon-googleplay.svg"); }
.no-svg .webicon.googleplus {
background: url("webicons/webicon-googleplus-m.png"); }
.no-svg .webicon.googleplus.large {
background: url("webicons/webicon-googleplus.png"); }
.no-svg .webicon.googleplus.small {
background: url("webicons/webicon-googleplus-s.png"); }
.svg .webicon.googleplus {
background: url("webicons/webicon-googleplus.svg"); }
.no-svg .webicon.hangouts {
background: url("webicons/webicon-hangouts-m.png"); }
.no-svg .webicon.hangouts.large {
background: url("webicons/webicon-hangouts.png"); }
.no-svg .webicon.hangouts.small {
background: url("webicons/webicon-hangouts-s.png"); }
.svg .webicon.hangouts {
background: url("webicons/webicon-hangouts.svg"); }
.no-svg .webicon.html5 {
background: url("webicons/webicon-html5-m.png"); }
.no-svg .webicon.html5.large {
background: url("webicons/webicon-html5.png"); }
.no-svg .webicon.html5.small {
background: url("webicons/webicon-html5-s.png"); }
.svg .webicon.html5 {
background: url("webicons/webicon-html5.svg"); }
.no-svg .webicon.icloud {
background: url("webicons/webicon-icloud-m.png"); }
.no-svg .webicon.icloud.large {
background: url("webicons/webicon-icloud.png"); }
.no-svg .webicon.icloud.small {
background: url("webicons/webicon-icloud-s.png"); }
.svg .webicon.icloud {
background: url("webicons/webicon-icloud.svg"); }
.no-svg .webicon.indiegogo {
background: url("webicons/webicon-indiegogo-m.png"); }
.no-svg .webicon.indiegogo.large {
background: url("webicons/webicon-indiegogo.png"); }
.no-svg .webicon.indiegogo.small {
background: url("webicons/webicon-indiegogo-s.png"); }
.svg .webicon.indiegogo {
background: url("webicons/webicon-indiegogo.svg"); }
.no-svg .webicon.instagram {
background: url("webicons/webicon-instagram-m.png"); }
.no-svg .webicon.instagram.large {
background: url("webicons/webicon-instagram.png"); }
.no-svg .webicon.instagram.small {
background: url("webicons/webicon-instagram-s.png"); }
.svg .webicon.instagram {
background: url("webicons/webicon-instagram.svg"); }
.no-svg .webicon.instapaper {
background: url("webicons/webicon-instapaper-m.png"); }
.no-svg .webicon.instapaper.large {
background: url("webicons/webicon-instapaper.png"); }
.no-svg .webicon.instapaper.small {
background: url("webicons/webicon-instapaper-s.png"); }
.svg .webicon.instapaper {
background: url("webicons/webicon-instapaper.svg"); }
.no-svg .webicon.kickstarter {
background: url("webicons/webicon-kickstarter-m.png"); }
.no-svg .webicon.kickstarter.large {
background: url("webicons/webicon-kickstarter.png"); }
.no-svg .webicon.kickstarter.small {
background: url("webicons/webicon-kickstarter-s.png"); }
.svg .webicon.kickstarter {
background: url("webicons/webicon-kickstarter.svg"); }
.no-svg .webicon.klout {
background: url("webicons/webicon-klout-m.png"); }
.no-svg .webicon.klout.large {
background: url("webicons/webicon-klout.png"); }
.no-svg .webicon.klout.small {
background: url("webicons/webicon-klout-s.png"); }
.svg .webicon.klout {
background: url("webicons/webicon-klout.svg"); }
.no-svg .webicon.lastfm {
background: url("webicons/webicon-lastfm-m.png"); }
.no-svg .webicon.lastfm.large {
background: url("webicons/webicon-lastfm.png"); }
.no-svg .webicon.lastfm.small {
background: url("webicons/webicon-lastfm-s.png"); }
.svg .webicon.lastfm {
background: url("webicons/webicon-lastfm.svg"); }
.no-svg .webicon.linkedin {
background: url("webicons/webicon-linkedin-m.png"); }
.no-svg .webicon.linkedin.large {
background: url("webicons/webicon-linkedin.png"); }
.no-svg .webicon.linkedin.small {
background: url("webicons/webicon-linkedin-s.png"); }
.svg .webicon.linkedin {
background: url("webicons/webicon-linkedin.svg"); }
.no-svg .webicon.mail {
background: url("webicons/webicon-mail-m.png"); }
.no-svg .webicon.mail.large {
background: url("webicons/webicon-mail.png"); }
.no-svg .webicon.mail.small {
background: url("webicons/webicon-mail-s.png"); }
.svg .webicon.mail {
background: url("webicons/webicon-mail.svg"); }
.no-svg .webicon.mixi {
background: url("webicons/webicon-mixi-m.png"); }
.no-svg .webicon.mixi.large {
background: url("webicons/webicon-mixi.png"); }
.no-svg .webicon.mixi.small {
background: url("webicons/webicon-mixi-s.png"); }
.svg .webicon.mixi {
background: url("webicons/webicon-mixi.svg"); }
.no-svg .webicon.msn {
background: url("webicons/webicon-msn-m.png"); }
.no-svg .webicon.msn.large {
background: url("webicons/webicon-msn.png"); }
.no-svg .webicon.msn.small {
background: url("webicons/webicon-msn-s.png"); }
.svg .webicon.msn {
background: url("webicons/webicon-msn.svg"); }
.no-svg .webicon.openid {
background: url("webicons/webicon-openid-m.png"); }
.no-svg .webicon.openid.large {
background: url("webicons/webicon-openid.png"); }
.no-svg .webicon.openid.small {
background: url("webicons/webicon-openid-s.png"); }
.svg .webicon.openid {
background: url("webicons/webicon-openid.svg"); }
.no-svg .webicon.picasa {
background: url("webicons/webicon-picasa-m.png"); }
.no-svg .webicon.picasa.large {
background: url("webicons/webicon-picasa.png"); }
.no-svg .webicon.picasa.small {
background: url("webicons/webicon-picasa-s.png"); }
.svg .webicon.picasa {
background: url("webicons/webicon-picasa.svg"); }
.no-svg .webicon.pinterest {
background: url("webicons/webicon-pinterest-m.png"); }
.no-svg .webicon.pinterest.large {
background: url("webicons/webicon-pinterest.png"); }
.no-svg .webicon.pinterest.small {
background: url("webicons/webicon-pinterest-s.png"); }
.svg .webicon.pinterest {
background: url("webicons/webicon-pinterest.svg"); }
.no-svg .webicon.pocketapp {
background: url("webicons/webicon-pocketapp-m.png"); }
.no-svg .webicon.pocketapp.large {
background: url("webicons/webicon-pocketapp.png"); }
.no-svg .webicon.pocketapp.small {
background: url("webicons/webicon-pocketapp-s.png"); }
.svg .webicon.pocketapp {
background: url("webicons/webicon-pocketapp.svg"); }
.no-svg .webicon.quora {
background: url("webicons/webicon-quora-m.png"); }
.no-svg .webicon.quora.large {
background: url("webicons/webicon-quora.png"); }
.no-svg .webicon.quora.small {
background: url("webicons/webicon-quora-s.png"); }
.svg .webicon.quora {
background: url("webicons/webicon-quora.svg"); }
.no-svg .webicon.orkut {
background: url("webicons/webicon-orkut-m.png"); }
.no-svg .webicon.orkut.large {
background: url("webicons/webicon-orkut.png"); }
.no-svg .webicon.orkut.small {
background: url("webicons/webicon-orkut-s.png"); }
.svg .webicon.orkut {
background: url("webicons/webicon-orkut.svg"); }
.no-svg .webicon.mercurial {
background: url("webicons/webicon-mercurial-m.png"); }
.no-svg .webicon.mercurial.large {
background: url("webicons/webicon-mercurial.png"); }
.no-svg .webicon.mercurial.small {
background: url("webicons/webicon-mercurial-s.png"); }
.svg .webicon.mercurial {
background: url("webicons/webicon-mercurial.svg"); }
.no-svg .webicon.rdio {
background: url("webicons/webicon-rdio-m.png"); }
.no-svg .webicon.rdio.large {
background: url("webicons/webicon-rdio.png"); }
.no-svg .webicon.rdio.small {
background: url("webicons/webicon-rdio-s.png"); }
.svg .webicon.rdio {
background: url("webicons/webicon-rdio.svg"); }
.no-svg .webicon.reddit {
background: url("webicons/webicon-reddit-m.png"); }
.no-svg .webicon.reddit.large {
background: url("webicons/webicon-reddit.png"); }
.no-svg .webicon.reddit.small {
background: url("webicons/webicon-reddit-s.png"); }
.svg .webicon.reddit {
background: url("webicons/webicon-reddit.svg"); }
.no-svg .webicon.renren {
background: url("webicons/webicon-renren-m.png"); }
.no-svg .webicon.renren.large {
background: url("webicons/webicon-renren.png"); }
.no-svg .webicon.renren.small {
background: url("webicons/webicon-renren-s.png"); }
.svg .webicon.renren {
background: url("webicons/webicon-renren.svg"); }
.no-svg .webicon.rss {
background: url("webicons/webicon-rss-m.png"); }
.no-svg .webicon.rss.large {
background: url("webicons/webicon-rss.png"); }
.no-svg .webicon.rss.small {
background: url("webicons/webicon-rss-s.png"); }
.svg .webicon.rss {
background: url("webicons/webicon-rss.svg"); }
.no-svg .webicon.skitch {
background: url("webicons/webicon-skitch-m.png"); }
.no-svg .webicon.skitch.large {
background: url("webicons/webicon-skitch.png"); }
.no-svg .webicon.skitch.small {
background: url("webicons/webicon-skitch-s.png"); }
.svg .webicon.skitch {
background: url("webicons/webicon-skitch.svg"); }
.no-svg .webicon.skype {
background: url("webicons/webicon-skype-m.png"); }
.no-svg .webicon.skype.large {
background: url("webicons/webicon-skype.png"); }
.no-svg .webicon.skype.small {
background: url("webicons/webicon-skype-s.png"); }
.svg .webicon.skype {
background: url("webicons/webicon-skype.svg"); }
.no-svg .webicon.soundcloud {
background: url("webicons/webicon-soundcloud-m.png"); }
.no-svg .webicon.soundcloud.large {
background: url("webicons/webicon-soundcloud.png"); }
.no-svg .webicon.soundcloud.small {
background: url("webicons/webicon-soundcloud-s.png"); }
.svg .webicon.soundcloud {
background: url("webicons/webicon-soundcloud.svg"); }
.no-svg .webicon.spotify {
background: url("webicons/webicon-spotify-m.png"); }
.no-svg .webicon.spotify.large {
background: url("webicons/webicon-spotify.png"); }
.no-svg .webicon.spotify.small {
background: url("webicons/webicon-spotify-s.png"); }
.svg .webicon.spotify {
background: url("webicons/webicon-spotify.svg"); }
.no-svg .webicon.stackoverflow {
background: url("webicons/webicon-stackoverflow-m.png"); }
.no-svg .webicon.stackoverflow.large {
background: url("webicons/webicon-stackoverflow.png"); }
.no-svg .webicon.stackoverflow.small {
background: url("webicons/webicon-stackoverflow-s.png"); }
.svg .webicon.stackoverflow {
background: url("webicons/webicon-stackoverflow.svg"); }
.no-svg .webicon.stumbleupon {
background: url("webicons/webicon-stumbleupon-m.png"); }
.no-svg .webicon.stumbleupon.large {
background: url("webicons/webicon-stumbleupon.png"); }
.no-svg .webicon.stumbleupon.small {
background: url("webicons/webicon-stumbleupon-s.png"); }
.svg .webicon.stumbleupon {
background: url("webicons/webicon-stumbleupon.svg"); }
.no-svg .webicon.svn {
background: url("webicons/webicon-svn-m.png"); }
.no-svg .webicon.svn.large {
background: url("webicons/webicon-svn.png"); }
.no-svg .webicon.svn.small {
background: url("webicons/webicon-svn-s.png"); }
.svg .webicon.svn {
background: url("webicons/webicon-svn.svg"); }
.no-svg .webicon.tent {
background: url("webicons/webicon-tent-m.png"); }
.no-svg .webicon.tent.large {
background: url("webicons/webicon-tent.png"); }
.no-svg .webicon.tent.small {
background: url("webicons/webicon-tent-s.png"); }
.svg .webicon.tent {
background: url("webicons/webicon-tent.svg"); }
.no-svg .webicon.tripadvisor {
background: url("webicons/webicon-tripadvisor-m.png"); }
.no-svg .webicon.tripadvisor.large {
background: url("webicons/webicon-tripadvisor.png"); }
.no-svg .webicon.tripadvisor.small {
background: url("webicons/webicon-tripadvisor-s.png"); }
.svg .webicon.tripadvisor {
background: url("webicons/webicon-tripadvisor.svg"); }
.no-svg .webicon.tumblr {
background: url("webicons/webicon-tumblr-m.png"); }
.no-svg .webicon.tumblr.large {
background: url("webicons/webicon-tumblr.png"); }
.no-svg .webicon.tumblr.small {
background: url("webicons/webicon-tumblr-s.png"); }
.svg .webicon.tumblr {
background: url("webicons/webicon-tumblr.svg"); }
.no-svg .webicon.twitter {
background: url("webicons/webicon-twitter-m.png"); }
.no-svg .webicon.twitter.large {
background: url("webicons/webicon-twitter.png"); }
.no-svg .webicon.twitter.small {
background: url("webicons/webicon-twitter-s.png"); }
.svg .webicon.twitter {
background: url("webicons/webicon-twitter.svg"); }
.no-svg .webicon.vimeo {
background: url("webicons/webicon-vimeo-m.png"); }
.no-svg .webicon.vimeo.large {
background: url("webicons/webicon-vimeo.png"); }
.no-svg .webicon.vimeo.small {
background: url("webicons/webicon-vimeo-s.png"); }
.svg .webicon.vimeo {
background: url("webicons/webicon-vimeo.svg"); }
.no-svg .webicon.weibo {
background: url("webicons/webicon-weibo-m.png"); }
.no-svg .webicon.weibo.large {
background: url("webicons/webicon-weibo.png"); }
.no-svg .webicon.weibo.small {
background: url("webicons/webicon-weibo-s.png"); }
.svg .webicon.weibo {
background: url("webicons/webicon-weibo.svg"); }
.no-svg .webicon.windows {
background: url("webicons/webicon-windows-m.png"); }
.no-svg .webicon.windows.large {
background: url("webicons/webicon-windows.png"); }
.no-svg .webicon.windows.small {
background: url("webicons/webicon-windows-s.png"); }
.svg .webicon.windows {
background: url("webicons/webicon-windows.svg"); }
.no-svg .webicon.wordpress {
background: url("webicons/webicon-wordpress-m.png"); }
.no-svg .webicon.wordpress.large {
background: url("webicons/webicon-wordpress.png"); }
.no-svg .webicon.wordpress.small {
background: url("webicons/webicon-wordpress-s.png"); }
.svg .webicon.wordpress {
background: url("webicons/webicon-wordpress.svg"); }
.no-svg .webicon.xing {
background: url("webicons/webicon-xing-m.png"); }
.no-svg .webicon.xing.large {
background: url("webicons/webicon-xing.png"); }
.no-svg .webicon.xing.small {
background: url("webicons/webicon-xing-s.png"); }
.svg .webicon.xing {
background: url("webicons/webicon-xing.svg"); }
.no-svg .webicon.yahoo {
background: url("webicons/webicon-yahoo-m.png"); }
.no-svg .webicon.yahoo.large {
background: url("webicons/webicon-yahoo.png"); }
.no-svg .webicon.yahoo.small {
background: url("webicons/webicon-yahoo-s.png"); }
.svg .webicon.yahoo {
background: url("webicons/webicon-yahoo.svg"); }
.no-svg .webicon.yelp {
background: url("webicons/webicon-yelp-m.png"); }
.no-svg .webicon.yelp.large {
background: url("webicons/webicon-yelp.png"); }
.no-svg .webicon.yelp.small {
background: url("webicons/webicon-yelp-s.png"); }
.svg .webicon.yelp {
background: url("webicons/webicon-yelp.svg"); }
.no-svg .webicon.youtube {
background: url("webicons/webicon-youtube-m.png"); }
.no-svg .webicon.youtube.large {
background: url("webicons/webicon-youtube.png"); }
.no-svg .webicon.youtube.small {
background: url("webicons/webicon-youtube-s.png"); }
.svg .webicon.youtube {
background: url("webicons/webicon-youtube.svg"); }
.no-svg .webicon.youversion {
background: url("webicons/webicon-youversion-m.png"); }
.no-svg .webicon.youversion.large {
background: url("webicons/webicon-youversion.png"); }
.no-svg .webicon.youversion.small {
background: url("webicons/webicon-youversion-s.png"); }
.svg .webicon.youversion {
background: url("webicons/webicon-youversion.svg"); }
.no-svg .webicon.zerply {
background: url("webicons/webicon-zerply-m.png"); }
.no-svg .webicon.zerply.large {
background: url("webicons/webicon-zerply.png"); }
.no-svg .webicon.zerply.small {
background: url("webicons/webicon-zerply-s.png"); }
.svg .webicon.zerply {
background: url("webicons/webicon-zerply.svg"); }
| mit |
veyesys/opencvr | velib/3rdparty/SDL2/src/core/linux/SDL_fcitx.c | 14320 | /*
Simple DirectMedia Layer
Copyright (C) 1997-2016 Sam Lantinga <slouken@libsdl.org>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "../../SDL_internal.h"
#ifdef HAVE_FCITX_FRONTEND_H
#include <fcitx/frontend.h>
#include <unistd.h>
#include "SDL_fcitx.h"
#include "SDL_keycode.h"
#include "SDL_keyboard.h"
#include "../../events/SDL_keyboard_c.h"
#include "SDL_dbus.h"
#include "SDL_syswm.h"
#if SDL_VIDEO_DRIVER_X11
# include "../../video/x11/SDL_x11video.h"
#endif
#include "SDL_hints.h"
#define FCITX_DBUS_SERVICE "org.fcitx.Fcitx"
#define FCITX_IM_DBUS_PATH "/inputmethod"
#define FCITX_IC_DBUS_PATH "/inputcontext_%d"
#define FCITX_IM_DBUS_INTERFACE "org.fcitx.Fcitx.InputMethod"
#define FCITX_IC_DBUS_INTERFACE "org.fcitx.Fcitx.InputContext"
#define IC_NAME_MAX 64
#define DBUS_TIMEOUT 500
typedef struct _FcitxClient
{
SDL_DBusContext *dbus;
char servicename[IC_NAME_MAX];
char icname[IC_NAME_MAX];
int id;
SDL_Rect cursor_rect;
} FcitxClient;
static FcitxClient fcitx_client;
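/*
 * GetDisplayNumber() extracts the X display number from $DISPLAY, e.g.
 * (illustrative values) ":0" -> 0, ":1.0" -> 1, "host:2.1" -> 2. The
 * fcitx D-Bus service name is registered per display (see SDL_Fcitx_Init).
 */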
static int
GetDisplayNumber()
{
const char *display = SDL_getenv("DISPLAY");
const char *p = NULL;
int number = 0;
if (display == NULL)
return 0;
display = SDL_strchr(display, ':');
if (display == NULL)
return 0;
display++;
p = SDL_strchr(display, '.');
    if (p == NULL) { /* display is already known to be non-NULL here */
number = SDL_strtod(display, NULL);
} else {
char *buffer = SDL_strdup(display);
buffer[p - display] = '\0';
number = SDL_strtod(buffer, NULL);
SDL_free(buffer);
}
return number;
}
static char*
GetAppName()
{
#if defined(__LINUX__) || defined(__FREEBSD__)
char *spot;
char procfile[1024];
char linkfile[1024];
int linksize;
#if defined(__LINUX__)
SDL_snprintf(procfile, sizeof(procfile), "/proc/%d/exe", getpid());
#elif defined(__FREEBSD__)
SDL_snprintf(procfile, sizeof(procfile), "/proc/%d/file", getpid());
#endif
linksize = readlink(procfile, linkfile, sizeof(linkfile) - 1);
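    /* readlink() does not NUL-terminate the buffer, hence the - 1 bound
     * above and the explicit terminator below. */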
if (linksize > 0) {
linkfile[linksize] = '\0';
spot = SDL_strrchr(linkfile, '/');
if (spot) {
return SDL_strdup(spot + 1);
} else {
return SDL_strdup(linkfile);
}
}
#endif /* __LINUX__ || __FREEBSD__ */
return SDL_strdup("SDL_App");
}
/*
* Copied from fcitx source
*/
#define CONT(i) ISUTF8_CB(in[i])
#define VAL(i, s) ((in[i]&0x3f) << s)
static char *
_fcitx_utf8_get_char(const char *i, uint32_t *chr)
{
const unsigned char* in = (const unsigned char *)i;
if (!(in[0] & 0x80)) {
*(chr) = *(in);
return (char *)in + 1;
}
/* 2-byte, 0x80-0x7ff */
if ((in[0] & 0xe0) == 0xc0 && CONT(1)) {
*chr = ((in[0] & 0x1f) << 6) | VAL(1, 0);
return (char *)in + 2;
}
/* 3-byte, 0x800-0xffff */
if ((in[0] & 0xf0) == 0xe0 && CONT(1) && CONT(2)) {
*chr = ((in[0] & 0xf) << 12) | VAL(1, 6) | VAL(2, 0);
return (char *)in + 3;
}
/* 4-byte, 0x10000-0x1FFFFF */
if ((in[0] & 0xf8) == 0xf0 && CONT(1) && CONT(2) && CONT(3)) {
*chr = ((in[0] & 0x7) << 18) | VAL(1, 12) | VAL(2, 6) | VAL(3, 0);
return (char *)in + 4;
}
/* 5-byte, 0x200000-0x3FFFFFF */
if ((in[0] & 0xfc) == 0xf8 && CONT(1) && CONT(2) && CONT(3) && CONT(4)) {
*chr = ((in[0] & 0x3) << 24) | VAL(1, 18) | VAL(2, 12) | VAL(3, 6) | VAL(4, 0);
return (char *)in + 5;
}
/* 6-byte, 0x400000-0x7FFFFFF */
if ((in[0] & 0xfe) == 0xfc && CONT(1) && CONT(2) && CONT(3) && CONT(4) && CONT(5)) {
*chr = ((in[0] & 0x1) << 30) | VAL(1, 24) | VAL(2, 18) | VAL(3, 12) | VAL(4, 6) | VAL(5, 0);
return (char *)in + 6;
}
*chr = *in;
return (char *)in + 1;
}
static size_t
_fcitx_utf8_strlen(const char *s)
{
unsigned int l = 0;
while (*s) {
uint32_t chr;
s = _fcitx_utf8_get_char(s, &chr);
l++;
}
return l;
}
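#if 0
/* Illustrative sketch, not part of the original driver: how the two UTF-8
 * helpers above cooperate. "SDL\xe4\xb8\xad\xe6\x96\x87" is "SDL" followed
 * by two 3-byte CJK sequences: 9 bytes, 5 characters. */
static void
_fcitx_utf8_demo(void)
{
    const char *s = "SDL\xe4\xb8\xad\xe6\x96\x87";
    const char *p = s;
    uint32_t chr;
    size_t n;
    while (*p) {
        p = _fcitx_utf8_get_char(p, &chr); /* steps 1..6 bytes forward */
    }
    n = _fcitx_utf8_strlen(s); /* n == 5 */
    (void)n;
}
#endif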
static DBusHandlerResult
DBus_MessageFilter(DBusConnection *conn, DBusMessage *msg, void *data)
{
SDL_DBusContext *dbus = (SDL_DBusContext *)data;
if (dbus->message_is_signal(msg, FCITX_IC_DBUS_INTERFACE, "CommitString")) {
DBusMessageIter iter;
const char *text = NULL;
dbus->message_iter_init(msg, &iter);
dbus->message_iter_get_basic(&iter, &text);
if (text)
SDL_SendKeyboardText(text);
return DBUS_HANDLER_RESULT_HANDLED;
}
if (dbus->message_is_signal(msg, FCITX_IC_DBUS_INTERFACE, "UpdatePreedit")) {
DBusMessageIter iter;
const char *text;
dbus->message_iter_init(msg, &iter);
dbus->message_iter_get_basic(&iter, &text);
if (text && *text) {
char buf[SDL_TEXTEDITINGEVENT_TEXT_SIZE];
size_t text_bytes = SDL_strlen(text), i = 0;
size_t cursor = 0;
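            /* Preedit text longer than one SDL event can carry is split
             * into SDL_TEXTEDITINGEVENT_TEXT_SIZE-sized chunks; cursor
             * counts the characters already delivered. */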
while (i < text_bytes) {
size_t sz = SDL_utf8strlcpy(buf, text + i, sizeof(buf));
size_t chars = _fcitx_utf8_strlen(buf);
SDL_SendEditingText(buf, cursor, chars);
i += sz;
cursor += chars;
}
}
SDL_Fcitx_UpdateTextRect(NULL);
return DBUS_HANDLER_RESULT_HANDLED;
}
return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
}
static DBusMessage*
FcitxClientICNewMethod(FcitxClient *client,
const char *method)
{
SDL_DBusContext *dbus = client->dbus;
return dbus->message_new_method_call(
client->servicename,
client->icname,
FCITX_IC_DBUS_INTERFACE,
method);
}
static void
FcitxClientICCallMethod(FcitxClient *client,
const char *method)
{
SDL_DBusContext *dbus = client->dbus;
DBusMessage *msg = FcitxClientICNewMethod(client, method);
if (msg == NULL)
return ;
if (dbus->connection_send(dbus->session_conn, msg, NULL)) {
dbus->connection_flush(dbus->session_conn);
}
dbus->message_unref(msg);
}
static void
Fcitx_SetCapabilities(void *data,
const char *name,
const char *old_val,
const char *internal_editing)
{
FcitxClient *client = (FcitxClient *)data;
SDL_DBusContext *dbus = client->dbus;
Uint32 caps = CAPACITY_NONE;
DBusMessage *msg = FcitxClientICNewMethod(client, "SetCapacity");
if (msg == NULL)
return ;
if (!(internal_editing && *internal_editing == '1')) {
caps |= CAPACITY_PREEDIT;
}
dbus->message_append_args(msg,
DBUS_TYPE_UINT32, &caps,
DBUS_TYPE_INVALID);
if (dbus->connection_send(dbus->session_conn, msg, NULL)) {
dbus->connection_flush(dbus->session_conn);
}
dbus->message_unref(msg);
}
static void
FcitxClientCreateIC(FcitxClient *client)
{
char *appname = NULL;
pid_t pid = 0;
int id = 0;
SDL_bool enable;
Uint32 arg1, arg2, arg3, arg4;
SDL_DBusContext *dbus = client->dbus;
DBusMessage *reply = NULL;
DBusMessage *msg = dbus->message_new_method_call(
client->servicename,
FCITX_IM_DBUS_PATH,
FCITX_IM_DBUS_INTERFACE,
"CreateICv3"
);
if (msg == NULL)
return ;
appname = GetAppName();
pid = getpid();
dbus->message_append_args(msg,
DBUS_TYPE_STRING, &appname,
DBUS_TYPE_INT32, &pid,
DBUS_TYPE_INVALID);
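    /* The do { ... } while (0) below acts as a structured "goto cleanup":
     * any failing step just breaks out to the unref/free calls at the end. */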
do {
reply = dbus->connection_send_with_reply_and_block(
dbus->session_conn,
msg,
DBUS_TIMEOUT,
NULL);
if (!reply)
break;
if (!dbus->message_get_args(reply, NULL,
DBUS_TYPE_INT32, &id,
DBUS_TYPE_BOOLEAN, &enable,
DBUS_TYPE_UINT32, &arg1,
DBUS_TYPE_UINT32, &arg2,
DBUS_TYPE_UINT32, &arg3,
DBUS_TYPE_UINT32, &arg4,
DBUS_TYPE_INVALID))
break;
if (id < 0)
break;
client->id = id;
SDL_snprintf(client->icname, IC_NAME_MAX,
FCITX_IC_DBUS_PATH, client->id);
dbus->bus_add_match(dbus->session_conn,
"type='signal', interface='org.fcitx.Fcitx.InputContext'",
NULL);
dbus->connection_add_filter(dbus->session_conn,
&DBus_MessageFilter, dbus,
NULL);
dbus->connection_flush(dbus->session_conn);
SDL_AddHintCallback(SDL_HINT_IME_INTERNAL_EDITING, &Fcitx_SetCapabilities, client);
}
while (0);
if (reply)
dbus->message_unref(reply);
dbus->message_unref(msg);
SDL_free(appname);
}
static Uint32
Fcitx_ModState(void)
{
Uint32 fcitx_mods = 0;
SDL_Keymod sdl_mods = SDL_GetModState();
if (sdl_mods & KMOD_SHIFT) fcitx_mods |= FcitxKeyState_Shift;
if (sdl_mods & KMOD_CAPS) fcitx_mods |= FcitxKeyState_CapsLock;
if (sdl_mods & KMOD_CTRL) fcitx_mods |= FcitxKeyState_Ctrl;
if (sdl_mods & KMOD_ALT) fcitx_mods |= FcitxKeyState_Alt;
if (sdl_mods & KMOD_NUM) fcitx_mods |= FcitxKeyState_NumLock;
if (sdl_mods & KMOD_LGUI) fcitx_mods |= FcitxKeyState_Super;
if (sdl_mods & KMOD_RGUI) fcitx_mods |= FcitxKeyState_Meta;
return fcitx_mods;
}
SDL_bool
SDL_Fcitx_Init()
{
fcitx_client.dbus = SDL_DBus_GetContext();
fcitx_client.cursor_rect.x = -1;
fcitx_client.cursor_rect.y = -1;
fcitx_client.cursor_rect.w = 0;
fcitx_client.cursor_rect.h = 0;
SDL_snprintf(fcitx_client.servicename, IC_NAME_MAX,
"%s-%d",
FCITX_DBUS_SERVICE, GetDisplayNumber());
FcitxClientCreateIC(&fcitx_client);
return SDL_TRUE;
}
void
SDL_Fcitx_Quit()
{
FcitxClientICCallMethod(&fcitx_client, "DestroyIC");
}
void
SDL_Fcitx_SetFocus(SDL_bool focused)
{
if (focused) {
FcitxClientICCallMethod(&fcitx_client, "FocusIn");
} else {
FcitxClientICCallMethod(&fcitx_client, "FocusOut");
}
}
void
SDL_Fcitx_Reset(void)
{
FcitxClientICCallMethod(&fcitx_client, "Reset");
FcitxClientICCallMethod(&fcitx_client, "CloseIC");
}
SDL_bool
SDL_Fcitx_ProcessKeyEvent(Uint32 keysym, Uint32 keycode)
{
DBusMessage *msg = NULL;
DBusMessage *reply = NULL;
SDL_DBusContext *dbus = fcitx_client.dbus;
Uint32 state = 0;
SDL_bool handled = SDL_FALSE;
int type = FCITX_PRESS_KEY;
Uint32 event_time = 0;
msg = FcitxClientICNewMethod(&fcitx_client, "ProcessKeyEvent");
if (msg == NULL)
return SDL_FALSE;
state = Fcitx_ModState();
dbus->message_append_args(msg,
DBUS_TYPE_UINT32, &keysym,
DBUS_TYPE_UINT32, &keycode,
DBUS_TYPE_UINT32, &state,
DBUS_TYPE_INT32, &type,
DBUS_TYPE_UINT32, &event_time,
DBUS_TYPE_INVALID);
reply = dbus->connection_send_with_reply_and_block(dbus->session_conn,
msg,
-1,
NULL);
if (reply) {
dbus->message_get_args(reply,
NULL,
DBUS_TYPE_INT32, &handled,
DBUS_TYPE_INVALID);
dbus->message_unref(reply);
}
if (handled) {
SDL_Fcitx_UpdateTextRect(NULL);
}
return handled;
}
void
SDL_Fcitx_UpdateTextRect(SDL_Rect *rect)
{
SDL_Window *focused_win = NULL;
SDL_SysWMinfo info;
int x = 0, y = 0;
SDL_Rect *cursor = &fcitx_client.cursor_rect;
SDL_DBusContext *dbus = fcitx_client.dbus;
DBusMessage *msg = NULL;
DBusConnection *conn;
if (rect) {
SDL_memcpy(cursor, rect, sizeof(SDL_Rect));
}
focused_win = SDL_GetKeyboardFocus();
if (!focused_win) {
return ;
}
SDL_VERSION(&info.version);
if (!SDL_GetWindowWMInfo(focused_win, &info)) {
return;
}
SDL_GetWindowPosition(focused_win, &x, &y);
#if SDL_VIDEO_DRIVER_X11
if (info.subsystem == SDL_SYSWM_X11) {
SDL_DisplayData *displaydata = (SDL_DisplayData *) SDL_GetDisplayForWindow(focused_win)->driverdata;
Display *x_disp = info.info.x11.display;
Window x_win = info.info.x11.window;
int x_screen = displaydata->screen;
Window unused;
X11_XTranslateCoordinates(x_disp, x_win, RootWindow(x_disp, x_screen), 0, 0, &x, &y, &unused);
}
#endif
if (cursor->x == -1 && cursor->y == -1 && cursor->w == 0 && cursor->h == 0) {
        /* no caret rectangle reported yet: fall back to the window's bottom-left corner */
int w = 0, h = 0;
SDL_GetWindowSize(focused_win, &w, &h);
cursor->x = 0;
cursor->y = h;
}
x += cursor->x;
y += cursor->y;
msg = FcitxClientICNewMethod(&fcitx_client, "SetCursorRect");
if (msg == NULL)
return ;
dbus->message_append_args(msg,
DBUS_TYPE_INT32, &x,
DBUS_TYPE_INT32, &y,
DBUS_TYPE_INT32, &cursor->w,
DBUS_TYPE_INT32, &cursor->h,
DBUS_TYPE_INVALID);
conn = dbus->session_conn;
if (dbus->connection_send(conn, msg, NULL))
dbus->connection_flush(conn);
dbus->message_unref(msg);
}
void
SDL_Fcitx_PumpEvents()
{
SDL_DBusContext *dbus = fcitx_client.dbus;
DBusConnection *conn = dbus->session_conn;
dbus->connection_read_write(conn, 0);
while (dbus->connection_dispatch(conn) == DBUS_DISPATCH_DATA_REMAINS) {
/* Do nothing, actual work happens in DBus_MessageFilter */
usleep(10);
}
}
#endif /* HAVE_FCITX_FRONTEND_H */
/* vi: set ts=4 sw=4 expandtab: */
| mit |
veyesys/opencvr | velib/3rdparty/SDL2/src/joystick/emscripten/SDL_sysjoystick.c | 11309 | /*
Simple DirectMedia Layer
Copyright (C) 1997-2016 Sam Lantinga <slouken@libsdl.org>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "../../SDL_internal.h"
#ifdef SDL_JOYSTICK_EMSCRIPTEN
#include <stdio.h> /* For the definition of NULL */
#include "SDL_error.h"
#include "SDL_events.h"
#include "SDL_joystick.h"
#include "SDL_hints.h"
#include "SDL_assert.h"
#include "SDL_timer.h"
#include "SDL_log.h"
#include "SDL_sysjoystick_c.h"
#include "../SDL_joystick_c.h"
static SDL_joylist_item * JoystickByIndex(int index);
static SDL_joylist_item *SDL_joylist = NULL;
static SDL_joylist_item *SDL_joylist_tail = NULL;
static int numjoysticks = 0;
static int instance_counter = 0;
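/* SDL_joylist is a singly linked list of connected pads: item->index is
 * the browser-side gamepad slot, item->device_instance the SDL id. */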
EM_BOOL
Emscripten_JoyStickConnected(int eventType, const EmscriptenGamepadEvent *gamepadEvent, void *userData)
{
int i;
SDL_joylist_item *item;
if (JoystickByIndex(gamepadEvent->index) != NULL) {
return 1;
}
item = (SDL_joylist_item *) SDL_malloc(sizeof (SDL_joylist_item));
if (item == NULL) {
return 1;
}
SDL_zerop(item);
item->index = gamepadEvent->index;
item->name = SDL_strdup(gamepadEvent->id);
if ( item->name == NULL ) {
SDL_free(item);
return 1;
}
item->mapping = SDL_strdup(gamepadEvent->mapping);
if ( item->mapping == NULL ) {
SDL_free(item->name);
SDL_free(item);
return 1;
}
item->naxes = gamepadEvent->numAxes;
item->nbuttons = gamepadEvent->numButtons;
item->device_instance = instance_counter++;
item->timestamp = gamepadEvent->timestamp;
for( i = 0; i < item->naxes; i++) {
item->axis[i] = gamepadEvent->axis[i];
}
for( i = 0; i < item->nbuttons; i++) {
item->analogButton[i] = gamepadEvent->analogButton[i];
item->digitalButton[i] = gamepadEvent->digitalButton[i];
}
if (SDL_joylist_tail == NULL) {
SDL_joylist = SDL_joylist_tail = item;
} else {
SDL_joylist_tail->next = item;
SDL_joylist_tail = item;
}
++numjoysticks;
SDL_PrivateJoystickAdded(numjoysticks - 1);
#ifdef DEBUG_JOYSTICK
    SDL_Log("Number of joysticks is %d", numjoysticks);
    SDL_Log("Added joystick with index %d", item->index);
#endif
return 1;
}
EM_BOOL
Emscripten_JoyStickDisconnected(int eventType, const EmscriptenGamepadEvent *gamepadEvent, void *userData)
{
SDL_joylist_item *item = SDL_joylist;
SDL_joylist_item *prev = NULL;
while (item != NULL) {
if (item->index == gamepadEvent->index) {
break;
}
prev = item;
item = item->next;
}
if (item == NULL) {
return 1;
}
if (item->joystick) {
item->joystick->hwdata = NULL;
}
if (prev != NULL) {
prev->next = item->next;
} else {
SDL_assert(SDL_joylist == item);
SDL_joylist = item->next;
}
if (item == SDL_joylist_tail) {
SDL_joylist_tail = prev;
}
/* Need to decrement the joystick count before we post the event */
--numjoysticks;
SDL_PrivateJoystickRemoved(item->device_instance);
#ifdef DEBUG_JOYSTICK
SDL_Log("Removed joystick with id %d", item->device_instance);
#endif
SDL_free(item->name);
SDL_free(item->mapping);
SDL_free(item);
return 1;
}
/* Function to scan the system for joysticks.
* It should return 0, or -1 on an unrecoverable fatal error.
*/
int
SDL_SYS_JoystickInit(void)
{
int retval, i, numjs;
EmscriptenGamepadEvent gamepadState;
numjoysticks = 0;
numjs = emscripten_get_num_gamepads();
/* Check if gamepad is supported by browser */
if (numjs == EMSCRIPTEN_RESULT_NOT_SUPPORTED) {
return SDL_SetError("Gamepads not supported");
}
/* handle already connected gamepads */
if (numjs > 0) {
for(i = 0; i < numjs; i++) {
retval = emscripten_get_gamepad_status(i, &gamepadState);
if (retval == EMSCRIPTEN_RESULT_SUCCESS) {
Emscripten_JoyStickConnected(EMSCRIPTEN_EVENT_GAMEPADCONNECTED,
&gamepadState,
NULL);
}
}
}
retval = emscripten_set_gamepadconnected_callback(NULL,
0,
Emscripten_JoyStickConnected);
if(retval != EMSCRIPTEN_RESULT_SUCCESS) {
SDL_SYS_JoystickQuit();
return SDL_SetError("Could not set gamepad connect callback");
}
retval = emscripten_set_gamepaddisconnected_callback(NULL,
0,
Emscripten_JoyStickDisconnected);
if(retval != EMSCRIPTEN_RESULT_SUCCESS) {
SDL_SYS_JoystickQuit();
return SDL_SetError("Could not set gamepad disconnect callback");
}
return 0;
}
/* Returns item matching given SDL device index. */
static SDL_joylist_item *
JoystickByDeviceIndex(int device_index)
{
SDL_joylist_item *item = SDL_joylist;
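    /* No bounds checking here: callers must pass device_index <
     * numjoysticks, otherwise item->next would dereference NULL. */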
while (0 < device_index) {
--device_index;
item = item->next;
}
return item;
}
/* Returns item matching given HTML gamepad index. */
static SDL_joylist_item *
JoystickByIndex(int index)
{
SDL_joylist_item *item = SDL_joylist;
if (index < 0) {
return NULL;
}
while (item != NULL) {
if (item->index == index) {
break;
}
item = item->next;
}
return item;
}
int SDL_SYS_NumJoysticks()
{
return numjoysticks;
}
void SDL_SYS_JoystickDetect()
{
}
/* Function to get the device-dependent name of a joystick */
const char *
SDL_SYS_JoystickNameForDeviceIndex(int device_index)
{
return JoystickByDeviceIndex(device_index)->name;
}
/* Function to perform the mapping from device index to the instance id for this index */
SDL_JoystickID SDL_SYS_GetInstanceIdOfDeviceIndex(int device_index)
{
return JoystickByDeviceIndex(device_index)->device_instance;
}
/* Function to open a joystick for use.
The joystick to open is specified by the device index.
This should fill the nbuttons and naxes fields of the joystick structure.
It returns 0, or -1 if there is an error.
*/
int
SDL_SYS_JoystickOpen(SDL_Joystick * joystick, int device_index)
{
SDL_joylist_item *item = JoystickByDeviceIndex(device_index);
if (item == NULL ) {
return SDL_SetError("No such device");
}
if (item->joystick != NULL) {
return SDL_SetError("Joystick already opened");
}
joystick->instance_id = item->device_instance;
joystick->hwdata = (struct joystick_hwdata *) item;
item->joystick = joystick;
/* HTML5 Gamepad API doesn't say anything about these */
joystick->nhats = 0;
joystick->nballs = 0;
joystick->nbuttons = item->nbuttons;
joystick->naxes = item->naxes;
return (0);
}
/* Function to determine if this joystick is attached to the system right now */
SDL_bool SDL_SYS_JoystickAttached(SDL_Joystick *joystick)
{
return joystick->hwdata != NULL;
}
/* Function to update the state of a joystick - called as a device poll.
* This function shouldn't update the joystick structure directly,
* but instead should call SDL_PrivateJoystick*() to deliver events
* and update joystick device state.
*/
void
SDL_SYS_JoystickUpdate(SDL_Joystick * joystick)
{
EmscriptenGamepadEvent gamepadState;
SDL_joylist_item *item = (SDL_joylist_item *) joystick->hwdata;
int i, result, buttonState;
if (item) {
result = emscripten_get_gamepad_status(item->index, &gamepadState);
if( result == EMSCRIPTEN_RESULT_SUCCESS) {
if(gamepadState.timestamp == 0 || gamepadState.timestamp != item->timestamp) {
for(i = 0; i < item->nbuttons; i++) {
if(item->digitalButton[i] != gamepadState.digitalButton[i]) {
buttonState = gamepadState.digitalButton[i]? SDL_PRESSED: SDL_RELEASED;
SDL_PrivateJoystickButton(item->joystick, i, buttonState);
}
/* store values to compare them in the next update */
item->analogButton[i] = gamepadState.analogButton[i];
item->digitalButton[i] = gamepadState.digitalButton[i];
}
for(i = 0; i < item->naxes; i++) {
if(item->axis[i] != gamepadState.axis[i]) {
                        /* HTML5 reports axis values as floats in [-1, 1];
                           scale them to the Sint16 range here. */
SDL_PrivateJoystickAxis(item->joystick, i,
(Sint16) (32767.*gamepadState.axis[i]));
}
/* store to compare in next update */
item->axis[i] = gamepadState.axis[i];
}
item->timestamp = gamepadState.timestamp;
}
}
}
}
/* Function to close a joystick after use */
void
SDL_SYS_JoystickClose(SDL_Joystick * joystick)
{
SDL_joylist_item *item = (SDL_joylist_item *) joystick->hwdata;
if (item) {
item->joystick = NULL;
}
}
/* Function to perform any system-specific joystick related cleanup */
void
SDL_SYS_JoystickQuit(void)
{
SDL_joylist_item *item = NULL;
SDL_joylist_item *next = NULL;
for (item = SDL_joylist; item; item = next) {
next = item->next;
SDL_free(item->mapping);
SDL_free(item->name);
SDL_free(item);
}
SDL_joylist = SDL_joylist_tail = NULL;
numjoysticks = 0;
instance_counter = 0;
emscripten_set_gamepadconnected_callback(NULL, 0, NULL);
emscripten_set_gamepaddisconnected_callback(NULL, 0, NULL);
}
SDL_JoystickGUID
SDL_SYS_JoystickGetDeviceGUID(int device_index)
{
SDL_JoystickGUID guid;
/* the GUID is just the first 16 chars of the name for now */
const char *name = SDL_SYS_JoystickNameForDeviceIndex(device_index);
SDL_zero(guid);
SDL_memcpy(&guid, name, SDL_min(sizeof(guid), SDL_strlen(name)));
return guid;
}
SDL_JoystickGUID
SDL_SYS_JoystickGetGUID(SDL_Joystick * joystick)
{
SDL_JoystickGUID guid;
/* the GUID is just the first 16 chars of the name for now */
const char *name = joystick->name;
SDL_zero(guid);
SDL_memcpy(&guid, name, SDL_min(sizeof(guid), SDL_strlen(name)));
return guid;
}
#endif /* SDL_JOYSTICK_EMSCRIPTEN */
| mit |
marcoR80/simple-app-mobile | www/vendor/angular-1.3.9/docs/examples/example-example74/index.html | 331 | <!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Example - example-example74</title>
<script src="../../../angular.min.js"></script>
</head>
<body ng-app="">
<button ng-mousemove="count = count + 1" ng-init="count=0">
Increment (when mouse moves)
</button>
count: {{count}}
</body>
</html> | mit |
visi0nary/mediatek | mt6732/mediatek/custom/common/kernel/hdmi/Sii8348/platform.h | 5735 | /*
SiI8348 Linux Driver
Copyright (C) 2013 Silicon Image, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation version 2.
This program is distributed AS-IS WITHOUT ANY WARRANTY of any
kind, whether express or implied; INCLUDING without the implied warranty
of MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE or NON-INFRINGEMENT. See
the GNU General Public License for more details at http://www.gnu.org/licenses/gpl-2.0.html.
*/
#if !defined(PLATFORM_H)
#define PLATFORM_H
#define DEVICE_ID_8348 0x8348
#define DEVICE_ID_8346 0x8346
#define DEBUG_I2C_WRITE 1
#define DEBUG_I2C_READ 0
#define MAX_DEBUG_TRANSFER_SIZE 16
#define LCD_TEXT_LENGTH_MAX 16
enum dbg_msg_level {
DBG_MSG_LEVEL_ERR = 0
,DBG_MSG_LEVEL_WARN
,DBG_MSG_LEVEL_INFO
,DBG_MSG_LEVEL_GPIO
,DBG_MSG_LEVEL_EDID_READ
,DBG_MSG_LEVEL_EDID_INFO
};
typedef enum
{
HAL_RET_SUCCESS,
HAL_RET_FAILURE,
HAL_RET_PARAMETER_ERROR,
HAL_RET_NO_DEVICE,
HAL_RET_DEVICE_NOT_OPEN,
HAL_RET_NOT_INITIALIZED,
HAL_RET_OUT_OF_RESOURCES,
HAL_RET_TIMEOUT,
HAL_RET_ALREADY_INITIALIZED
} halReturn_t;
#define DEBUG
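/* Note: DEBUG is unconditionally defined just above, so the verbose
 * branch of the #if below is always taken in this vendor drop. */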
#if defined(DEBUG)
void print_formatted_debug_msg(int level,
char *file_spec, const char *func_name,
int line_num,
char *fmt, ...);
void dump_i2c_transfer(void *context, u8 page, u8 offset,
u16 count, u8 *values, bool write);
/*
#define MHL_TX_DBG_GPIO(driver_context, fmt, arg...) \
print_formatted_debug_msg(DBG_MSG_LEVEL_GPIO, \
NULL, __func__, __LINE__, \
fmt, ## arg);
*/
#define MHL_TX_EDID_READ(context,fmt,arg...) \
print_formatted_debug_msg(DBG_MSG_LEVEL_EDID_READ, \
NULL, __func__, __LINE__, \
fmt, ## arg);
#define MHL_TX_EDID_INFO(context,fmt,arg...) \
print_formatted_debug_msg(DBG_MSG_LEVEL_EDID_INFO, \
NULL, __func__, __LINE__, \
fmt, ## arg);
#define MHL_TX_DBG_INFO(driver_context, fmt, arg...) \
	print_formatted_debug_msg(DBG_MSG_LEVEL_INFO, \
	NULL, __func__, __LINE__, \
	fmt, ## arg);
#define MHL_TX_DBG_WARN(driver_context, fmt, arg...) \
print_formatted_debug_msg(DBG_MSG_LEVEL_WARN, \
NULL, __func__, __LINE__, \
fmt, ## arg);
#define MHL_TX_DBG_ERR(driver_context, fmt, arg...) \
print_formatted_debug_msg(DBG_MSG_LEVEL_ERR, \
NULL, __func__, __LINE__, \
fmt, ## arg);
#define DUMP_I2C_TRANSFER(context, page, offset, count, values, write_flag)
	/* dump_i2c_transfer(context, page, offset, count, values, write_flag); */
#define DEBUG_PRINT_WRAPPER(...) \
print_formatted_debug_msg(DBG_MSG_LEVEL_ERR, \
__FILE__, __func__, \
__LINE__, __VA_ARGS__);
#define APP_DEBUG_PRINT(x) DEBUG_PRINT_WRAPPER x
#define PP_DEBUG_PRINT(x) DEBUG_PRINT_WRAPPER x
#define PIXCLK_DEBUG_PRINT(x) DEBUG_PRINT_WRAPPER x
#define ERROR_DEBUG_PRINT(x) DEBUG_PRINT_WRAPPER x
#define TX_EDID_PRINT(x) DEBUG_PRINT_WRAPPER x
#define TX_DEBUG_PRINT(x) DEBUG_PRINT_WRAPPER x
#define TX_PRUNE_PRINT(x) DEBUG_PRINT_WRAPPER x
#define EDID_DEBUG_PRINT(x) DEBUG_PRINT_WRAPPER x
#define TX_EDID_PRINT_SIMPLE(x) DEBUG_PRINT_WRAPPER x
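/* These wrappers take one parenthesized argument list, e.g. the
 * illustrative call TX_DEBUG_PRINT(("val %d\n", v)) (not from this file);
 * the double parentheses let a variadic printf-style call travel through
 * a single macro parameter. */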
#else
#define MHL_TX_DBG_INFO(driver_context, fmt, ...)
#define MHL_TX_DBG_WARN(driver_context, fmt, ...)
#define MHL_TX_DBG_ERR(driver_context, fmt, ...)
#define DUMP_I2C_TRANSFER(context, page, offset, count, values, write_flag)
#define APP_DEBUG_PRINT(x)
#define PP_DEBUG_PRINT(x)
#define PIXCLK_DEBUG_PRINT(x)
#define ERROR_DEBUG_PRINT(x)
#define TX_EDID_PRINT(x)
#define TX_DEBUG_PRINT(x)
#define TX_PRUNE_PRINT(x)
#define EDID_DEBUG_PRINT(x)
#define TX_EDID_PRINT_SIMPLE(x)
#endif
enum vbus_power_state {
VBUS_OFF,
VBUS_ON
};
void mhl_tx_vbus_control(enum vbus_power_state power_state);
/*
struct platform_reg_pair{
uint8_t slave_addr;
uint8_t offset;
};
struct platform_signals_list {
char *name;
int16_t gpio_number;
struct platform_reg_pair gpio_reg_PCA950x;
uint8_t gpio_mask_PCA950x;
bool *param;
};
*/
//int gpio_expander_init(struct mhl_dev_context *dev_context);
//void gpio_expander_reset(void);
bool is_reset_on_exit_requested(void);
int mhl_tx_write_reg(void *drv_context, u8 page, u8 offset, u8 value);
int mhl_tx_read_reg(void *drv_context, u8 page, u8 offset);
int mhl_tx_write_reg_block(void *drv_context, u8 page, u8 offset, u16 count, u8 *values);
int mhl_tx_read_reg_block(void *drv_context, u8 page, u8 offset, u8 count, u8 *values);
int mhl_tx_modify_reg(void *drv_context, u8 page, u8 offset, u8 mask, u8 value);
//void lcd_set_video(char *video);
//void lcd_set_audio(char *audio);
//void lcd_set_video_audio(char *video, char *audio);
int is_interrupt_asserted(void);
//int get_config(void *dev_context, int config_idx);
//#define GPIO_LED_ON 0
//#define GPIO_LED_OFF 1
//void set_pin_impl(void *dev_context, int pin_idx, int value,const char *function_name,int line_num);
//#define set_pin(dev_context,pin_idx,value) set_pin_impl(dev_context, pin_idx, value,__FUNCTION__,__LINE__)
/*
typedef enum {
LED_MHL = 0,
LED_USB,
LED_SRC_VBUS_ON,
LED_SINK_VBUS_ON,
LED_3D,
LED_PKD_PXL,
LED_HDCP_ON,
LED_D0,
LED_D2 = 8,
LED_D3,
LED_SPR_0,
LED_SPR_1,
LED_SPR_2,
AUD_CTRL_0,
AUD_CTRL_1,
VID_CTRL_RSVD,
VID_CTRL_0 = 16,
VID_CTRL_1,
VID_CTRL_2,
VID_CTRL_3,
VID_CTRL_4,
VID_CTRL_5,
VID_CTRL_3D_0,
VID_CTRL_3D_1,
VBUS_EN = 24,
DS_PWR_1A_EN,
DBG_MSGS,
RSVD,
I2C_SCL,
I2C_SDA,
SIIMON_REQ_N,
VID_CTRL_INT,
TX2MHLRX_PWR_M = 32,
M2U_VBUS_CTRL_M,
B_RST_M_N,
SIIMON_GNT_N,
LCD_PWM0_STB,
LCD_RS,
LCD_CSB,
LCD_RESET,
AUD_CTRL,
VID_CTRL_ALL,
TX_HW_RESET
}GPIO_INDICES;
*/
#endif /* if !defined(PLATFORM_H) */
| gpl-2.0 |
c0d3z3r0/linux-rockchip | sound/pci/au88x0/au88x0_core.c | 78826 | // SPDX-License-Identifier: GPL-2.0-or-later
/*
*/
/*
Vortex core low level functions.
Author: Manuel Jander (mjander@users.sourceforge.cl)
These functions are mainly the result of translations made
from the original disassembly of the au88x0 binary drivers,
written by Aureal before they went down.
 Many thanks to Jeff Muizelaar, Kester Maddock, and whoever
contributed to the OpenVortex project.
 The author of this file put the few available pieces together
 and translated the rest of the riddle (Mix, Src and connection stuff).
Some things are still to be discovered, and their meanings are unclear.
Some of these functions aren't intended to be really used, rather
to help to understand how does the AU88X0 chips work. Keep them in, because
they could be used somewhere in the future.
This code hasn't been tested or proof read thoroughly. If you wanna help,
take a look at the AU88X0 assembly and check if this matches.
Functions tested ok so far are (they show the desired effect
at least):
vortex_routes(); (1 bug fixed).
vortex_adb_addroute();
vortex_adb_addroutes();
vortex_connect_codecplay();
vortex_src_flushbuffers();
vortex_adbdma_setmode(); note: still some unknown arguments!
vortex_adbdma_startfifo();
vortex_adbdma_stopfifo();
vortex_fifo_setadbctrl(); note: still some unknown arguments!
vortex_mix_setinputvolumebyte();
vortex_mix_enableinput();
vortex_mixer_addWTD(); (fixed)
vortex_connection_adbdma_src_src();
vortex_connection_adbdma_src();
vortex_src_change_convratio();
vortex_src_addWTD(); (fixed)
History:
01-03-2003 First revision.
01-21-2003 Some bug fixes.
 17-02-2003 Many bugfixes after a big versioning mess.
 18-02-2003 JAAAAAHHHUUUUUU!!!! The mixer works!! I'm just so happy!
	    (2 hours later...) I can't believe it! I'm really lucky today.
	    Now the SRC is working too! Yeah! XMMS works!
20-02-2003 First steps into the ALSA world.
 28-02-2003 As my birthday present, I discovered how the DMA buffer pages really
work :-). It was all wrong.
12-03-2003 ALSA driver starts working (2 channels).
16-03-2003 More srcblock_setupchannel discoveries.
12-04-2003 AU8830 playback support. Recording in the works.
 17-04-2003 vortex_route() and vortex_routes() bug fixes. AU8830 recording
	    works now, but the Chip 'n' Dale (pitch-shift) effect is still there.
16-05-2003 SrcSetupChannel cleanup. Moved the Src setup stuff entirely
into au88x0_pcm.c .
06-06-2003 Buffer shifter bugfix. Mixer volume fix.
07-12-2003 A3D routing finally fixed. Believed to be OK.
 25-03-2004 Many thanks to Claudia for such valuable bug reports.
*/
#include "au88x0.h"
#include "au88x0_a3d.h"
#include <linux/delay.h>
/* MIXER (CAsp4Mix.s and CAsp4Mixer.s) */
// FIXME: get rid of this.
static int mchannels[NR_MIXIN];
static int rampchs[NR_MIXIN];
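/* Per-output-mixer bookkeeping: bit N of mchannels[mix] means mixer input
 * N is currently enabled; rampchs[mix] marks inputs being ramped down. */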
static void vortex_mixer_en_sr(vortex_t * vortex, int channel)
{
hwwrite(vortex->mmio, VORTEX_MIXER_SR,
hwread(vortex->mmio, VORTEX_MIXER_SR) | (0x1 << channel));
}
static void vortex_mixer_dis_sr(vortex_t * vortex, int channel)
{
hwwrite(vortex->mmio, VORTEX_MIXER_SR,
hwread(vortex->mmio, VORTEX_MIXER_SR) & ~(0x1 << channel));
}
#if 0
static void
vortex_mix_muteinputgain(vortex_t * vortex, unsigned char mix,
unsigned char channel)
{
hwwrite(vortex->mmio, VORTEX_MIX_INVOL_A + ((mix << 5) + channel),
0x80);
hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + ((mix << 5) + channel),
0x80);
}
static int vortex_mix_getvolume(vortex_t * vortex, unsigned char mix)
{
int a;
a = hwread(vortex->mmio, VORTEX_MIX_VOL_A + (mix << 2)) & 0xff;
//FP2LinearFrac(a);
return (a);
}
static int
vortex_mix_getinputvolume(vortex_t * vortex, unsigned char mix,
int channel, int *vol)
{
int a;
if (!(mchannels[mix] & (1 << channel)))
return 0;
a = hwread(vortex->mmio,
VORTEX_MIX_INVOL_A + (((mix << 5) + channel) << 2));
/*
if (rampchs[mix] == 0)
a = FP2LinearFrac(a);
else
a = FP2LinearFracWT(a);
*/
*vol = a;
return (0);
}
static unsigned int vortex_mix_boost6db(unsigned char vol)
{
return (vol + 8); /* WOW! what a complex function! */
}
static void vortex_mix_rampvolume(vortex_t * vortex, int mix)
{
int ch;
char a;
// This function is intended for ramping down only (see vortex_disableinput()).
for (ch = 0; ch < 0x20; ch++) {
if (((1 << ch) & rampchs[mix]) == 0)
continue;
a = hwread(vortex->mmio,
VORTEX_MIX_INVOL_B + (((mix << 5) + ch) << 2));
if (a > -126) {
a -= 2;
hwwrite(vortex->mmio,
VORTEX_MIX_INVOL_A +
(((mix << 5) + ch) << 2), a);
hwwrite(vortex->mmio,
VORTEX_MIX_INVOL_B +
(((mix << 5) + ch) << 2), a);
} else
vortex_mix_killinput(vortex, mix, ch);
}
}
static int
vortex_mix_getenablebit(vortex_t * vortex, unsigned char mix, int mixin)
{
int addr, temp;
if (mixin >= 0)
addr = mixin;
else
addr = mixin + 3;
addr = ((mix << 3) + (addr >> 2)) << 2;
temp = hwread(vortex->mmio, VORTEX_MIX_ENIN + addr);
return ((temp >> (mixin & 3)) & 1);
}
#endif
static void
vortex_mix_setvolumebyte(vortex_t * vortex, unsigned char mix,
unsigned char vol)
{
int temp;
hwwrite(vortex->mmio, VORTEX_MIX_VOL_A + (mix << 2), vol);
if (1) { /*if (this_10) */
temp = hwread(vortex->mmio, VORTEX_MIX_VOL_B + (mix << 2));
if ((temp != 0x80) || (vol == 0x80))
return;
}
hwwrite(vortex->mmio, VORTEX_MIX_VOL_B + (mix << 2), vol);
}
static void
vortex_mix_setinputvolumebyte(vortex_t * vortex, unsigned char mix,
int mixin, unsigned char vol)
{
int temp;
hwwrite(vortex->mmio,
VORTEX_MIX_INVOL_A + (((mix << 5) + mixin) << 2), vol);
if (1) { /* this_10, initialized to 1. */
temp =
hwread(vortex->mmio,
VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2));
if ((temp != 0x80) || (vol == 0x80))
return;
}
hwwrite(vortex->mmio,
VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2), vol);
}
static void
vortex_mix_setenablebit(vortex_t * vortex, unsigned char mix, int mixin, int en)
{
int temp, addr;
if (mixin < 0)
addr = (mixin + 3);
else
addr = mixin;
addr = ((mix << 3) + (addr >> 2)) << 2;
temp = hwread(vortex->mmio, VORTEX_MIX_ENIN + addr);
if (en)
temp |= (1 << (mixin & 3));
else
temp &= ~(1 << (mixin & 3));
	/* Mute input. Avoid crackling? */
hwwrite(vortex->mmio,
VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2), 0x80);
/* Looks like clear buffer. */
hwwrite(vortex->mmio, VORTEX_MIX_SMP + (mixin << 2), 0x0);
hwwrite(vortex->mmio, VORTEX_MIX_SMP + 4 + (mixin << 2), 0x0);
/* Write enable bit. */
hwwrite(vortex->mmio, VORTEX_MIX_ENIN + addr, temp);
}
static void
vortex_mix_killinput(vortex_t * vortex, unsigned char mix, int mixin)
{
rampchs[mix] &= ~(1 << mixin);
vortex_mix_setinputvolumebyte(vortex, mix, mixin, 0x80);
mchannels[mix] &= ~(1 << mixin);
vortex_mix_setenablebit(vortex, mix, mixin, 0);
}
static void
vortex_mix_enableinput(vortex_t * vortex, unsigned char mix, int mixin)
{
vortex_mix_killinput(vortex, mix, mixin);
if ((mchannels[mix] & (1 << mixin)) == 0) {
vortex_mix_setinputvolumebyte(vortex, mix, mixin, 0x80); /*0x80 : mute */
mchannels[mix] |= (1 << mixin);
}
vortex_mix_setenablebit(vortex, mix, mixin, 1);
}
static void
vortex_mix_disableinput(vortex_t * vortex, unsigned char mix, int channel,
int ramp)
{
if (ramp) {
rampchs[mix] |= (1 << channel);
// Register callback.
//vortex_mix_startrampvolume(vortex);
vortex_mix_killinput(vortex, mix, channel);
} else
vortex_mix_killinput(vortex, mix, channel);
}
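/*
 * Mixer-to-channel routing ("WTD") is kept as a linked list in hardware:
 * the head entry for channel ch lives at VORTEX_MIXER_CHNBASE + (ch << 2),
 * bit 0x10 of an entry marks a valid next pointer, and the low nibble
 * indexes the next entry inside VORTEX_MIXER_RTBASE.
 */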
static int
vortex_mixer_addWTD(vortex_t * vortex, unsigned char mix, unsigned char ch)
{
int temp, lifeboat = 0, prev;
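	/* "lifeboat" counters like this one bound the hardware list walks
	 * below, so a corrupted register chain cannot loop forever. */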
temp = hwread(vortex->mmio, VORTEX_MIXER_SR);
if ((temp & (1 << ch)) == 0) {
hwwrite(vortex->mmio, VORTEX_MIXER_CHNBASE + (ch << 2), mix);
vortex_mixer_en_sr(vortex, ch);
return 1;
}
prev = VORTEX_MIXER_CHNBASE + (ch << 2);
temp = hwread(vortex->mmio, prev);
while (temp & 0x10) {
prev = VORTEX_MIXER_RTBASE + ((temp & 0xf) << 2);
temp = hwread(vortex->mmio, prev);
//printk(KERN_INFO "vortex: mixAddWTD: while addr=%x, val=%x\n", prev, temp);
if ((++lifeboat) > 0xf) {
dev_err(vortex->card->dev,
"vortex_mixer_addWTD: lifeboat overflow\n");
return 0;
}
}
hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + ((temp & 0xf) << 2), mix);
hwwrite(vortex->mmio, prev, (temp & 0xf) | 0x10);
return 1;
}
static int
vortex_mixer_delWTD(vortex_t * vortex, unsigned char mix, unsigned char ch)
{
int esp14 = -1, esp18, eax, ebx, edx, ebp, esi = 0;
//int esp1f=edi(while)=src, esp10=ch;
eax = hwread(vortex->mmio, VORTEX_MIXER_SR);
if (((1 << ch) & eax) == 0) {
dev_err(vortex->card->dev, "mix ALARM %x\n", eax);
return 0;
}
ebp = VORTEX_MIXER_CHNBASE + (ch << 2);
esp18 = hwread(vortex->mmio, ebp);
if (esp18 & 0x10) {
ebx = (esp18 & 0xf);
if (mix == ebx) {
ebx = VORTEX_MIXER_RTBASE + (mix << 2);
edx = hwread(vortex->mmio, ebx);
//7b60
hwwrite(vortex->mmio, ebp, edx);
hwwrite(vortex->mmio, ebx, 0);
} else {
//7ad3
edx =
hwread(vortex->mmio,
VORTEX_MIXER_RTBASE + (ebx << 2));
//printk(KERN_INFO "vortex: mixdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src);
while ((edx & 0xf) != mix) {
if ((esi) > 0xf) {
dev_err(vortex->card->dev,
"mixdelWTD: error lifeboat overflow\n");
return 0;
}
esp14 = ebx;
ebx = edx & 0xf;
ebp = ebx << 2;
edx =
hwread(vortex->mmio,
VORTEX_MIXER_RTBASE + ebp);
//printk(KERN_INFO "vortex: mixdelWTD: while addr=%x, val=%x\n", ebp, edx);
esi++;
}
//7b30
ebp = ebx << 2;
if (edx & 0x10) { /* Delete entry in between others */
ebx = VORTEX_MIXER_RTBASE + ((edx & 0xf) << 2);
edx = hwread(vortex->mmio, ebx);
//7b60
hwwrite(vortex->mmio,
VORTEX_MIXER_RTBASE + ebp, edx);
hwwrite(vortex->mmio, ebx, 0);
//printk(KERN_INFO "vortex mixdelWTD between addr= 0x%x, val= 0x%x\n", ebp, edx);
} else { /* Delete last entry */
//7b83
if (esp14 == -1)
hwwrite(vortex->mmio,
VORTEX_MIXER_CHNBASE +
(ch << 2), esp18 & 0xef);
else {
ebx = (0xffffffe0 & edx) | (0xf & ebx);
hwwrite(vortex->mmio,
VORTEX_MIXER_RTBASE +
(esp14 << 2), ebx);
//printk(KERN_INFO "vortex mixdelWTD last addr= 0x%x, val= 0x%x\n", esp14, ebx);
}
hwwrite(vortex->mmio,
VORTEX_MIXER_RTBASE + ebp, 0);
return 1;
}
}
} else {
//printk(KERN_INFO "removed last mix\n");
//7be0
vortex_mixer_dis_sr(vortex, ch);
hwwrite(vortex->mmio, ebp, 0);
}
return 1;
}
static void vortex_mixer_init(vortex_t * vortex)
{
u32 addr;
int x;
// FIXME: get rid of this crap.
	memset(mchannels, 0, sizeof(mchannels));
	memset(rampchs, 0, sizeof(rampchs));
addr = VORTEX_MIX_SMP + 0x17c;
for (x = 0x5f; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0);
addr -= 4;
}
addr = VORTEX_MIX_ENIN + 0x1fc;
for (x = 0x7f; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0);
addr -= 4;
}
addr = VORTEX_MIX_SMP + 0x17c;
for (x = 0x5f; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0);
addr -= 4;
}
addr = VORTEX_MIX_INVOL_A + 0x7fc;
for (x = 0x1ff; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0x80);
addr -= 4;
}
addr = VORTEX_MIX_VOL_A + 0x3c;
for (x = 0xf; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0x80);
addr -= 4;
}
addr = VORTEX_MIX_INVOL_B + 0x7fc;
for (x = 0x1ff; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0x80);
addr -= 4;
}
addr = VORTEX_MIX_VOL_B + 0x3c;
for (x = 0xf; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0x80);
addr -= 4;
}
addr = VORTEX_MIXER_RTBASE + (MIXER_RTBASE_SIZE - 1) * 4;
for (x = (MIXER_RTBASE_SIZE - 1); x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0x0);
addr -= 4;
}
hwwrite(vortex->mmio, VORTEX_MIXER_SR, 0);
/* Set clipping ceiling (this may be all wrong). */
/*
for (x = 0; x < 0x80; x++) {
hwwrite(vortex->mmio, VORTEX_MIXER_CLIP + (x << 2), 0x3ffff);
}
*/
/*
call CAsp4Mix__Initialize_CAsp4HwIO____CAsp4Mixer____
Register ISR callback for volume smooth fade out.
Maybe this avoids clicks when press "stop" ?
*/
}
/* SRC (CAsp4Src.s and CAsp4SrcBlock) */
static void vortex_src_en_sr(vortex_t * vortex, int channel)
{
hwwrite(vortex->mmio, VORTEX_SRCBLOCK_SR,
hwread(vortex->mmio, VORTEX_SRCBLOCK_SR) | (0x1 << channel));
}
static void vortex_src_dis_sr(vortex_t * vortex, int channel)
{
hwwrite(vortex->mmio, VORTEX_SRCBLOCK_SR,
hwread(vortex->mmio, VORTEX_SRCBLOCK_SR) & ~(0x1 << channel));
}
static void vortex_src_flushbuffers(vortex_t * vortex, unsigned char src)
{
int i;
for (i = 0x1f; i >= 0; i--)
hwwrite(vortex->mmio,
VORTEX_SRC_DATA0 + (src << 7) + (i << 2), 0);
hwwrite(vortex->mmio, VORTEX_SRC_DATA + (src << 3), 0);
hwwrite(vortex->mmio, VORTEX_SRC_DATA + (src << 3) + 4, 0);
}
static void vortex_src_cleardrift(vortex_t * vortex, unsigned char src)
{
hwwrite(vortex->mmio, VORTEX_SRC_DRIFT0 + (src << 2), 0);
hwwrite(vortex->mmio, VORTEX_SRC_DRIFT1 + (src << 2), 0);
hwwrite(vortex->mmio, VORTEX_SRC_DRIFT2 + (src << 2), 1);
}
static void
vortex_src_set_throttlesource(vortex_t * vortex, unsigned char src, int en)
{
int temp;
temp = hwread(vortex->mmio, VORTEX_SRC_SOURCE);
if (en)
temp |= 1 << src;
else
temp &= ~(1 << src);
hwwrite(vortex->mmio, VORTEX_SRC_SOURCE, temp);
}
static int
vortex_src_persist_convratio(vortex_t * vortex, unsigned char src, int ratio)
{
int temp, lifeboat = 0;
do {
hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), ratio);
temp = hwread(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2));
if ((++lifeboat) > 0x9) {
dev_err(vortex->card->dev, "Src cvr fail\n");
break;
}
}
while (temp != ratio);
return temp;
}
#if 0
static void vortex_src_slowlock(vortex_t * vortex, unsigned char src)
{
int temp;
hwwrite(vortex->mmio, VORTEX_SRC_DRIFT2 + (src << 2), 1);
hwwrite(vortex->mmio, VORTEX_SRC_DRIFT0 + (src << 2), 0);
temp = hwread(vortex->mmio, VORTEX_SRC_U0 + (src << 2));
if (temp & 0x200)
hwwrite(vortex->mmio, VORTEX_SRC_U0 + (src << 2),
temp & ~0x200L);
}
static void
vortex_src_change_convratio(vortex_t * vortex, unsigned char src, int ratio)
{
int temp, a;
if ((ratio & 0x10000) && (ratio != 0x10000)) {
if (ratio & 0x3fff)
a = (0x11 - ((ratio >> 0xe) & 0x3)) - 1;
else
a = (0x11 - ((ratio >> 0xe) & 0x3)) - 2;
} else
a = 0xc;
temp = hwread(vortex->mmio, VORTEX_SRC_U0 + (src << 2));
if (((temp >> 4) & 0xf) != a)
hwwrite(vortex->mmio, VORTEX_SRC_U0 + (src << 2),
(temp & 0xf) | ((a & 0xf) << 4));
vortex_src_persist_convratio(vortex, src, ratio);
}
static int
vortex_src_checkratio(vortex_t * vortex, unsigned char src,
unsigned int desired_ratio)
{
int hw_ratio, lifeboat = 0;
hw_ratio = hwread(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2));
while (hw_ratio != desired_ratio) {
hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), desired_ratio);
if ((lifeboat++) > 15) {
pr_err( "Vortex: could not set src-%d from %d to %d\n",
src, hw_ratio, desired_ratio);
break;
}
}
return hw_ratio;
}
#endif
/*
 Objective: Set samplerate for the given SRC module.
 Arguments:
 card: pointer to vortex_t struct.
 src: integer index of the SRC module.
 cr: current sample rate conversion factor.
 b: unknown 16 bit value.
 sweep: enable samplerate fade from cr toward tr.
 d: SRC hardware index (see the notes in the function body).
 dirplay: 1: playback, 0: recording.
 sl: Slow Lock flag.
 tr: target samplerate conversion factor.
 thsource: Throttle source flag (no idea what that means).
*/
static void vortex_src_setupchannel(vortex_t * card, unsigned char src,
unsigned int cr, unsigned int b, int sweep, int d,
int dirplay, int sl, unsigned int tr, int thsource)
{
// noplayback: d=2,4,7,0xa,0xb when using first 2 src's.
// c: enables pitch sweep.
// looks like g is c related. Maybe g is a sweep parameter ?
// g = cvr
// dirplay: 0 = recording, 1 = playback
// d = src hw index.
int esi, ebp = 0, esp10;
vortex_src_flushbuffers(card, src);
if (sweep) {
if ((tr & 0x10000) && (tr != 0x10000)) {
tr = 0;
esi = 0x7;
} else {
if ((((short)tr) < 0) && (tr != 0x8000)) {
tr = 0;
esi = 0x8;
} else {
tr = 1;
esi = 0xc;
}
}
} else {
if ((cr & 0x10000) && (cr != 0x10000)) {
tr = 0; /*ebx = 0 */
esi = 0x11 - ((cr >> 0xe) & 7);
if (cr & 0x3fff)
esi -= 1;
else
esi -= 2;
} else {
tr = 1;
esi = 0xc;
}
}
vortex_src_cleardrift(card, src);
vortex_src_set_throttlesource(card, src, thsource);
if ((dirplay == 0) && (sweep == 0)) {
if (tr)
esp10 = 0xf;
else
esp10 = 0xc;
ebp = 0;
} else {
if (tr)
ebp = 0xf;
else
ebp = 0xc;
esp10 = 0;
}
hwwrite(card->mmio, VORTEX_SRC_U0 + (src << 2),
(sl << 0x9) | (sweep << 0x8) | ((esi & 0xf) << 4) | d);
/* 0xc0 esi=0xc c=f=0 d=0 */
vortex_src_persist_convratio(card, src, cr);
hwwrite(card->mmio, VORTEX_SRC_U1 + (src << 2), b & 0xffff);
/* 0 b=0 */
hwwrite(card->mmio, VORTEX_SRC_U2 + (src << 2),
(tr << 0x11) | (dirplay << 0x10) | (ebp << 0x8) | esp10);
/* 0x30f00 e=g=1 esp10=0 ebp=f */
//printk(KERN_INFO "vortex: SRC %d, d=0x%x, esi=0x%x, esp10=0x%x, ebp=0x%x\n", src, d, esi, esp10, ebp);
}
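/*
 * Hedged usage sketch (not compiled): roughly how vortex_adb_setsrc()
 * further below drives this function for a 44.1 kHz playback stream,
 * with sweep disabled and the SRC index doubling as d:
 */
#if 0
	cvrt = SRC_RATIO(44100, 48000);	/* playback: stream rate -> 48 kHz */
	vortex_src_setupchannel(vortex, src, cvrt, 0 /* b */, 0 /* sweep */,
				src /* d */, 1 /* dirplay */, 1 /* sl */,
				cvrt /* tr */, 1 /* thsource */);
#endif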
static void vortex_srcblock_init(vortex_t * vortex)
{
u32 addr;
int x;
hwwrite(vortex->mmio, VORTEX_SRC_SOURCESIZE, 0x1ff);
/*
for (x=0; x<0x10; x++) {
vortex_src_init(&vortex_src[x], x);
}
*/
//addr = 0xcc3c;
//addr = 0x26c3c;
addr = VORTEX_SRC_RTBASE + 0x3c;
for (x = 0xf; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0);
addr -= 4;
}
//addr = 0xcc94;
//addr = 0x26c94;
addr = VORTEX_SRC_CHNBASE + 0x54;
for (x = 0x15; x >= 0; x--) {
hwwrite(vortex->mmio, addr, 0);
addr -= 4;
}
}
static int
vortex_src_addWTD(vortex_t * vortex, unsigned char src, unsigned char ch)
{
int temp, lifeboat = 0, prev;
// esp13 = src
temp = hwread(vortex->mmio, VORTEX_SRCBLOCK_SR);
if ((temp & (1 << ch)) == 0) {
hwwrite(vortex->mmio, VORTEX_SRC_CHNBASE + (ch << 2), src);
vortex_src_en_sr(vortex, ch);
return 1;
}
prev = VORTEX_SRC_CHNBASE + (ch << 2); /*ebp */
temp = hwread(vortex->mmio, prev);
//while (temp & NR_SRC) {
while (temp & 0x10) {
prev = VORTEX_SRC_RTBASE + ((temp & 0xf) << 2); /*esp12 */
//prev = VORTEX_SRC_RTBASE + ((temp & (NR_SRC-1)) << 2); /*esp12*/
temp = hwread(vortex->mmio, prev);
//printk(KERN_INFO "vortex: srcAddWTD: while addr=%x, val=%x\n", prev, temp);
if ((++lifeboat) > 0xf) {
dev_err(vortex->card->dev,
"vortex_src_addWTD: lifeboat overflow\n");
return 0;
}
}
hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + ((temp & 0xf) << 2), src);
//hwwrite(vortex->mmio, prev, (temp & (NR_SRC-1)) | NR_SRC);
hwwrite(vortex->mmio, prev, (temp & 0xf) | 0x10);
return 1;
}
static int
vortex_src_delWTD(vortex_t * vortex, unsigned char src, unsigned char ch)
{
int esp14 = -1, esp18, eax, ebx, edx, ebp, esi = 0;
//int esp1f=edi(while)=src, esp10=ch;
eax = hwread(vortex->mmio, VORTEX_SRCBLOCK_SR);
if (((1 << ch) & eax) == 0) {
dev_err(vortex->card->dev, "src alarm\n");
return 0;
}
ebp = VORTEX_SRC_CHNBASE + (ch << 2);
esp18 = hwread(vortex->mmio, ebp);
if (esp18 & 0x10) {
ebx = (esp18 & 0xf);
if (src == ebx) {
ebx = VORTEX_SRC_RTBASE + (src << 2);
edx = hwread(vortex->mmio, ebx);
//7b60
hwwrite(vortex->mmio, ebp, edx);
hwwrite(vortex->mmio, ebx, 0);
} else {
//7ad3
edx =
hwread(vortex->mmio,
VORTEX_SRC_RTBASE + (ebx << 2));
//printk(KERN_INFO "vortex: srcdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src);
while ((edx & 0xf) != src) {
if ((esi) > 0xf) {
dev_warn(vortex->card->dev,
"srcdelWTD: error, lifeboat overflow\n");
return 0;
}
esp14 = ebx;
ebx = edx & 0xf;
ebp = ebx << 2;
edx =
hwread(vortex->mmio,
VORTEX_SRC_RTBASE + ebp);
//printk(KERN_INFO "vortex: srcdelWTD: while addr=%x, val=%x\n", ebp, edx);
esi++;
}
//7b30
ebp = ebx << 2;
if (edx & 0x10) { /* Delete entry in between others */
ebx = VORTEX_SRC_RTBASE + ((edx & 0xf) << 2);
edx = hwread(vortex->mmio, ebx);
//7b60
hwwrite(vortex->mmio,
VORTEX_SRC_RTBASE + ebp, edx);
hwwrite(vortex->mmio, ebx, 0);
//printk(KERN_INFO "vortex srcdelWTD between addr= 0x%x, val= 0x%x\n", ebp, edx);
} else { /* Delete last entry */
//7b83
if (esp14 == -1)
hwwrite(vortex->mmio,
VORTEX_SRC_CHNBASE +
(ch << 2), esp18 & 0xef);
else {
ebx = (0xffffffe0 & edx) | (0xf & ebx);
hwwrite(vortex->mmio,
VORTEX_SRC_RTBASE +
(esp14 << 2), ebx);
//printk(KERN_INFO"vortex srcdelWTD last addr= 0x%x, val= 0x%x\n", esp14, ebx);
}
hwwrite(vortex->mmio,
VORTEX_SRC_RTBASE + ebp, 0);
return 1;
}
}
} else {
//7be0
vortex_src_dis_sr(vortex, ch);
hwwrite(vortex->mmio, ebp, 0);
}
return 1;
}
/*FIFO*/
static void
vortex_fifo_clearadbdata(vortex_t * vortex, int fifo, int x)
{
for (x--; x >= 0; x--)
hwwrite(vortex->mmio,
VORTEX_FIFO_ADBDATA +
(((fifo << FIFO_SIZE_BITS) + x) << 2), 0);
}
#if 0
static void vortex_fifo_adbinitialize(vortex_t * vortex, int fifo, int j)
{
vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE);
#ifdef CHIP_AU8820
hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2),
(FIFO_U1 | ((j & FIFO_MASK) << 0xb)));
#else
hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2),
(FIFO_U1 | ((j & FIFO_MASK) << 0xc)));
#endif
}
#endif
static void vortex_fifo_setadbvalid(vortex_t * vortex, int fifo, int en)
{
hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2),
(hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2)) &
0xffffffef) | ((1 & en) << 4) | FIFO_U1);
}
static void
vortex_fifo_setadbctrl(vortex_t * vortex, int fifo, int stereo, int priority,
int empty, int valid, int f)
{
int temp, lifeboat = 0;
//int this_8[NR_ADB] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; /* position */
int this_4 = 0x2;
	/* f seems priority related.
	 * CAsp4AdbDma::SetPriority is the only place that calls SetAdbCtrl with f set to 1;
	 * everywhere else it is set to 0. It seems, however, that CAsp4AdbDma::SetPriority
	 * is never called, thus the f-related bits remain a mystery for now.
	 */
do {
temp = hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2));
if (lifeboat++ > 0xbb8) {
dev_err(vortex->card->dev,
"vortex_fifo_setadbctrl fail\n");
break;
}
}
while (temp & FIFO_RDONLY);
	// The AU8830 seems to take some special care about fifo content (data),
	// but I'm just too lazy to translate that :)
if (valid) {
if ((temp & FIFO_VALID) == 0) {
//this_8[fifo] = 0;
vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE); // this_4
#ifdef CHIP_AU8820
temp = (this_4 & 0x1f) << 0xb;
#else
temp = (this_4 & 0x3f) << 0xc;
#endif
temp = (temp & 0xfffffffd) | ((stereo & 1) << 1);
temp = (temp & 0xfffffff3) | ((priority & 3) << 2);
temp = (temp & 0xffffffef) | ((valid & 1) << 4);
temp |= FIFO_U1;
temp = (temp & 0xffffffdf) | ((empty & 1) << 5);
#ifdef CHIP_AU8820
temp = (temp & 0xfffbffff) | ((f & 1) << 0x12);
#endif
#ifdef CHIP_AU8830
temp = (temp & 0xf7ffffff) | ((f & 1) << 0x1b);
temp = (temp & 0xefffffff) | ((f & 1) << 0x1c);
#endif
#ifdef CHIP_AU8810
temp = (temp & 0xfeffffff) | ((f & 1) << 0x18);
temp = (temp & 0xfdffffff) | ((f & 1) << 0x19);
#endif
}
} else {
if (temp & FIFO_VALID) {
#ifdef CHIP_AU8820
temp = ((f & 1) << 0x12) | (temp & 0xfffbffef);
#endif
#ifdef CHIP_AU8830
temp =
((f & 1) << 0x1b) | (temp & 0xe7ffffef) | FIFO_BITS;
#endif
#ifdef CHIP_AU8810
temp =
((f & 1) << 0x18) | (temp & 0xfcffffef) | FIFO_BITS;
#endif
} else
/*if (this_8[fifo]) */
vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE);
}
hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), temp);
hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2));
}
#ifndef CHIP_AU8810
static void vortex_fifo_clearwtdata(vortex_t * vortex, int fifo, int x)
{
if (x < 1)
return;
for (x--; x >= 0; x--)
hwwrite(vortex->mmio,
VORTEX_FIFO_WTDATA +
(((fifo << FIFO_SIZE_BITS) + x) << 2), 0);
}
static void vortex_fifo_wtinitialize(vortex_t * vortex, int fifo, int j)
{
vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE);
#ifdef CHIP_AU8820
hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2),
(FIFO_U1 | ((j & FIFO_MASK) << 0xb)));
#else
hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2),
(FIFO_U1 | ((j & FIFO_MASK) << 0xc)));
#endif
}
static void vortex_fifo_setwtvalid(vortex_t * vortex, int fifo, int en)
{
hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2),
(hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)) &
0xffffffef) | ((en & 1) << 4) | FIFO_U1);
}
static void
vortex_fifo_setwtctrl(vortex_t * vortex, int fifo, int ctrl, int priority,
int empty, int valid, int f)
{
int temp = 0, lifeboat = 0;
int this_4 = 2;
do {
temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2));
if (lifeboat++ > 0xbb8) {
dev_err(vortex->card->dev,
"vortex_fifo_setwtctrl fail\n");
break;
}
}
while (temp & FIFO_RDONLY);
if (valid) {
if ((temp & FIFO_VALID) == 0) {
vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); // this_4
#ifdef CHIP_AU8820
temp = (this_4 & 0x1f) << 0xb;
#else
temp = (this_4 & 0x3f) << 0xc;
#endif
temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1);
temp = (temp & 0xfffffff3) | ((priority & 3) << 2);
temp = (temp & 0xffffffef) | ((valid & 1) << 4);
temp |= FIFO_U1;
temp = (temp & 0xffffffdf) | ((empty & 1) << 5);
#ifdef CHIP_AU8820
temp = (temp & 0xfffbffff) | ((f & 1) << 0x12);
#endif
#ifdef CHIP_AU8830
temp = (temp & 0xf7ffffff) | ((f & 1) << 0x1b);
temp = (temp & 0xefffffff) | ((f & 1) << 0x1c);
#endif
#ifdef CHIP_AU8810
temp = (temp & 0xfeffffff) | ((f & 1) << 0x18);
temp = (temp & 0xfdffffff) | ((f & 1) << 0x19);
#endif
}
} else {
if (temp & FIFO_VALID) {
#ifdef CHIP_AU8820
temp = ((f & 1) << 0x12) | (temp & 0xfffbffef);
#endif
#ifdef CHIP_AU8830
temp =
((f & 1) << 0x1b) | (temp & 0xe7ffffef) | FIFO_BITS;
#endif
#ifdef CHIP_AU8810
temp =
((f & 1) << 0x18) | (temp & 0xfcffffef) | FIFO_BITS;
#endif
} else
/*if (this_8[fifo]) */
vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE);
}
hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp);
hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2));
/*
do {
temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2));
if (lifeboat++ > 0xbb8) {
pr_err( "Vortex: vortex_fifo_setwtctrl fail (hanging)\n");
break;
}
} while ((temp & FIFO_RDONLY)&&(temp & FIFO_VALID)&&(temp != 0xFFFFFFFF));
if (valid) {
if (temp & FIFO_VALID) {
temp = 0x40000;
//temp |= 0x08000000;
//temp |= 0x10000000;
//temp |= 0x04000000;
//temp |= 0x00400000;
temp |= 0x1c400000;
temp &= 0xFFFFFFF3;
temp &= 0xFFFFFFEF;
temp |= (valid & 1) << 4;
hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp);
return;
} else {
vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE);
return;
}
} else {
temp &= 0xffffffef;
temp |= 0x08000000;
temp |= 0x10000000;
temp |= 0x04000000;
temp |= 0x00400000;
hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp);
temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2));
//((temp >> 6) & 0x3f)
priority = 0;
	if (((temp & 0x0fc0) ^ ((temp >> 6) & 0x0fc0)) & 0xFFFFFFC0)
vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE);
valid = 0xfb;
temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1);
temp = (temp & 0xfffdffff) | ((f & 1) << 0x11);
temp = (temp & 0xfffffff3) | ((priority & 3) << 2);
temp = (temp & 0xffffffef) | ((valid & 1) << 4);
temp = (temp & 0xffffffdf) | ((empty & 1) << 5);
hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp);
}
*/
/*
temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1);
temp = (temp & 0xfffdffff) | ((f & 1) << 0x11);
temp = (temp & 0xfffffff3) | ((priority & 3) << 2);
temp = (temp & 0xffffffef) | ((valid & 1) << 4);
temp = (temp & 0xffffffdf) | ((empty & 1) << 5);
#ifdef FIFO_BITS
	temp = temp | FIFO_BITS | 0x40000;
#endif
// 0x1c440010, 0x1c400000
hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp);
*/
}
#endif
static void vortex_fifo_init(vortex_t * vortex)
{
int x;
u32 addr;
/* ADB DMA channels fifos. */
addr = VORTEX_FIFO_ADBCTRL + ((NR_ADB - 1) * 4);
for (x = NR_ADB - 1; x >= 0; x--) {
hwwrite(vortex->mmio, addr, (FIFO_U0 | FIFO_U1));
if (hwread(vortex->mmio, addr) != (FIFO_U0 | FIFO_U1))
dev_err(vortex->card->dev, "bad adb fifo reset!\n");
vortex_fifo_clearadbdata(vortex, x, FIFO_SIZE);
addr -= 4;
}
#ifndef CHIP_AU8810
/* WT DMA channels fifos. */
addr = VORTEX_FIFO_WTCTRL + ((NR_WT - 1) * 4);
for (x = NR_WT - 1; x >= 0; x--) {
hwwrite(vortex->mmio, addr, FIFO_U0);
if (hwread(vortex->mmio, addr) != FIFO_U0)
dev_err(vortex->card->dev,
"bad wt fifo reset (0x%08x, 0x%08x)!\n",
addr, hwread(vortex->mmio, addr));
vortex_fifo_clearwtdata(vortex, x, FIFO_SIZE);
addr -= 4;
}
#endif
/* trigger... */
#ifdef CHIP_AU8820
hwwrite(vortex->mmio, 0xf8c0, 0xd03); //0x0843 0xd6b
#else
#ifdef CHIP_AU8830
hwwrite(vortex->mmio, 0x17000, 0x61); /* wt a */
hwwrite(vortex->mmio, 0x17004, 0x61); /* wt b */
#endif
hwwrite(vortex->mmio, 0x17008, 0x61); /* adb */
#endif
}
/* ADBDMA */
static void vortex_adbdma_init(vortex_t * vortex)
{
}
static void vortex_adbdma_setfirstbuffer(vortex_t * vortex, int adbdma)
{
stream_t *dma = &vortex->dma_adb[adbdma];
hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2),
dma->dma_ctrl);
}
static void vortex_adbdma_setstartbuffer(vortex_t * vortex, int adbdma, int sb)
{
stream_t *dma = &vortex->dma_adb[adbdma];
//hwwrite(vortex->mmio, VORTEX_ADBDMA_START + (adbdma << 2), sb << (((NR_ADB-1)-((adbdma&0xf)*2))));
hwwrite(vortex->mmio, VORTEX_ADBDMA_START + (adbdma << 2),
sb << ((0xf - (adbdma & 0xf)) * 2));
dma->period_real = dma->period_virt = sb;
}
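/*
 * Illustrative note: VORTEX_ADBDMA_START packs one 2-bit start-buffer
 * field per DMA channel, channel 0 occupying the top bit pair, so e.g.
 * adbdma = 1 shifts sb left by (0xf - 1) * 2 = 28 bits.
 */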
static void
vortex_adbdma_setbuffers(vortex_t * vortex, int adbdma,
int psize, int count)
{
stream_t *dma = &vortex->dma_adb[adbdma];
dma->period_bytes = psize;
dma->nr_periods = count;
dma->cfg0 = 0;
dma->cfg1 = 0;
switch (count) {
/* Four or more pages */
default:
case 4:
dma->cfg1 |= 0x88000000 | 0x44000000 | 0x30000000 | (psize - 1);
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0xc,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 3));
/* fall through */
/* 3 pages */
case 3:
dma->cfg0 |= 0x12000000;
dma->cfg1 |= 0x80000000 | 0x40000000 | ((psize - 1) << 0xc);
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x8,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 2));
/* fall through */
/* 2 pages */
case 2:
dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize - 1);
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x4,
snd_pcm_sgbuf_get_addr(dma->substream, psize));
/* fall through */
/* 1 page */
case 1:
dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize - 1) << 0xc);
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4),
snd_pcm_sgbuf_get_addr(dma->substream, 0));
break;
}
/*
pr_debug( "vortex: cfg0 = 0x%x\nvortex: cfg1=0x%x\n",
dma->cfg0, dma->cfg1);
*/
hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFCFG0 + (adbdma << 3), dma->cfg0);
hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFCFG1 + (adbdma << 3), dma->cfg1);
vortex_adbdma_setfirstbuffer(vortex, adbdma);
vortex_adbdma_setstartbuffer(vortex, adbdma, 0);
}
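/*
 * Example (illustrative): for count = 2 the switch above falls through
 * from case 2 into case 1, so cfg0 ends up as
 * 0xdc000000 | (psize - 1) | ((psize - 1) << 0xc), cfg1 stays 0, and
 * only BUFBASE + 0 and BUFBASE + 4 are programmed with page addresses.
 */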
static void
vortex_adbdma_setmode(vortex_t * vortex, int adbdma, int ie, int dir,
int fmt, int stereo, u32 offset)
{
stream_t *dma = &vortex->dma_adb[adbdma];
dma->dma_unknown = stereo;
dma->dma_ctrl =
((offset & OFFSET_MASK) | (dma->dma_ctrl & ~OFFSET_MASK));
/* Enable PCMOUT interrupts. */
dma->dma_ctrl =
(dma->dma_ctrl & ~IE_MASK) | ((ie << IE_SHIFT) & IE_MASK);
dma->dma_ctrl =
(dma->dma_ctrl & ~DIR_MASK) | ((dir << DIR_SHIFT) & DIR_MASK);
dma->dma_ctrl =
(dma->dma_ctrl & ~FMT_MASK) | ((fmt << FMT_SHIFT) & FMT_MASK);
hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2),
dma->dma_ctrl);
hwread(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2));
}
static int vortex_adbdma_bufshift(vortex_t * vortex, int adbdma)
{
stream_t *dma = &vortex->dma_adb[adbdma];
int page, p, pp, delta, i;
page =
(hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)) &
ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT;
if (dma->nr_periods >= 4)
delta = (page - dma->period_real) & 3;
else {
delta = (page - dma->period_real);
if (delta < 0)
delta += dma->nr_periods;
}
if (delta == 0)
return 0;
/* refresh hw page table */
if (dma->nr_periods > 4) {
for (i = 0; i < delta; i++) {
/* p: audio buffer page index */
p = dma->period_virt + i + 4;
if (p >= dma->nr_periods)
p -= dma->nr_periods;
/* pp: hardware DMA page index. */
pp = dma->period_real + i;
if (pp >= 4)
pp -= 4;
//hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE+(((adbdma << 2)+pp) << 2), dma->table[p].addr);
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2),
snd_pcm_sgbuf_get_addr(dma->substream,
dma->period_bytes * p));
/* Force write thru cache. */
hwread(vortex->mmio, VORTEX_ADBDMA_BUFBASE +
(((adbdma << 2) + pp) << 2));
}
}
dma->period_virt += delta;
dma->period_real = page;
if (dma->period_virt >= dma->nr_periods)
dma->period_virt -= dma->nr_periods;
if (delta != 1)
dev_info(vortex->card->dev,
"%d virt=%d, real=%d, delta=%d\n",
adbdma, dma->period_virt, dma->period_real, delta);
return delta;
}
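/*
 * Worked example (illustrative): with nr_periods = 8, period_virt = 2,
 * period_real = 1 and the hardware page counter now reading 3, we get
 * delta = (3 - 1) & 3 = 2, so hardware slots 1 and 2 are refilled with
 * periods 6 and 7 (virt + 4 and virt + 5), after which period_virt = 4
 * and period_real = 3.
 */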
static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) {
stream_t *dma = &vortex->dma_adb[adbdma];
int p, pp, i;
/* refresh hw page table */
for (i=0 ; i < 4 && i < dma->nr_periods; i++) {
/* p: audio buffer page index */
p = dma->period_virt + i;
if (p >= dma->nr_periods)
p -= dma->nr_periods;
/* pp: hardware DMA page index. */
pp = dma->period_real + i;
if (dma->nr_periods < 4) {
if (pp >= dma->nr_periods)
pp -= dma->nr_periods;
}
else {
if (pp >= 4)
pp -= 4;
}
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2),
snd_pcm_sgbuf_get_addr(dma->substream,
dma->period_bytes * p));
/* Force write thru cache. */
hwread(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2)+pp) << 2));
}
}
static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
{
stream_t *dma = &vortex->dma_adb[adbdma];
int temp, page, delta;
temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2));
page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT;
if (dma->nr_periods >= 4)
delta = (page - dma->period_real) & 3;
else {
delta = (page - dma->period_real);
if (delta < 0)
delta += dma->nr_periods;
}
return (dma->period_virt + delta) * dma->period_bytes
+ (temp & (dma->period_bytes - 1));
}
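/*
 * Example (illustrative): with period_bytes = 4096, period_virt = 2,
 * delta = 0 and an in-page offset of 0x200 reported in the STAT
 * register, the linear position is (2 + 0) * 4096 + 0x200 = 0x2200
 * bytes into the ring buffer.
 */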
static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma)
{
	int this_8 = 0 /* empty */, this_4 = 0 /* priority */;
stream_t *dma = &vortex->dma_adb[adbdma];
switch (dma->fifo_status) {
case FIFO_START:
vortex_fifo_setadbvalid(vortex, adbdma,
dma->fifo_enabled ? 1 : 0);
break;
case FIFO_STOP:
this_8 = 1;
hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2),
dma->dma_ctrl);
vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown,
this_4, this_8,
dma->fifo_enabled ? 1 : 0, 0);
break;
case FIFO_PAUSE:
vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown,
this_4, this_8,
dma->fifo_enabled ? 1 : 0, 0);
break;
}
dma->fifo_status = FIFO_START;
}
static void vortex_adbdma_resumefifo(vortex_t * vortex, int adbdma)
{
stream_t *dma = &vortex->dma_adb[adbdma];
int this_8 = 1, this_4 = 0;
switch (dma->fifo_status) {
case FIFO_STOP:
hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2),
dma->dma_ctrl);
vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown,
this_4, this_8,
dma->fifo_enabled ? 1 : 0, 0);
break;
case FIFO_PAUSE:
vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown,
this_4, this_8,
dma->fifo_enabled ? 1 : 0, 0);
break;
}
dma->fifo_status = FIFO_START;
}
static void vortex_adbdma_pausefifo(vortex_t * vortex, int adbdma)
{
stream_t *dma = &vortex->dma_adb[adbdma];
int this_8 = 0, this_4 = 0;
switch (dma->fifo_status) {
case FIFO_START:
vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown,
this_4, this_8, 0, 0);
break;
case FIFO_STOP:
hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2),
dma->dma_ctrl);
vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown,
this_4, this_8, 0, 0);
break;
}
dma->fifo_status = FIFO_PAUSE;
}
static void vortex_adbdma_stopfifo(vortex_t * vortex, int adbdma)
{
stream_t *dma = &vortex->dma_adb[adbdma];
int this_4 = 0, this_8 = 0;
if (dma->fifo_status == FIFO_START)
vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown,
this_4, this_8, 0, 0);
else if (dma->fifo_status == FIFO_STOP)
return;
dma->fifo_status = FIFO_STOP;
dma->fifo_enabled = 0;
}
/* WTDMA */
#ifndef CHIP_AU8810
static void vortex_wtdma_setfirstbuffer(vortex_t * vortex, int wtdma)
{
//int this_7c=dma_ctrl;
stream_t *dma = &vortex->dma_wt[wtdma];
hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl);
}
static void vortex_wtdma_setstartbuffer(vortex_t * vortex, int wtdma, int sb)
{
stream_t *dma = &vortex->dma_wt[wtdma];
//hwwrite(vortex->mmio, VORTEX_WTDMA_START + (wtdma << 2), sb << ((0x1f-(wtdma&0xf)*2)));
hwwrite(vortex->mmio, VORTEX_WTDMA_START + (wtdma << 2),
sb << ((0xf - (wtdma & 0xf)) * 2));
dma->period_real = dma->period_virt = sb;
}
static void
vortex_wtdma_setbuffers(vortex_t * vortex, int wtdma,
int psize, int count)
{
stream_t *dma = &vortex->dma_wt[wtdma];
dma->period_bytes = psize;
dma->nr_periods = count;
dma->cfg0 = 0;
dma->cfg1 = 0;
switch (count) {
/* Four or more pages */
default:
case 4:
dma->cfg1 |= 0x88000000 | 0x44000000 | 0x30000000 | (psize-1);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0xc,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 3));
/* fall through */
/* 3 pages */
case 3:
dma->cfg0 |= 0x12000000;
dma->cfg1 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x8,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 2));
/* fall through */
/* 2 pages */
case 2:
dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize-1);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x4,
snd_pcm_sgbuf_get_addr(dma->substream, psize));
/* fall through */
/* 1 page */
case 1:
dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4),
snd_pcm_sgbuf_get_addr(dma->substream, 0));
break;
}
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFCFG0 + (wtdma << 3), dma->cfg0);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFCFG1 + (wtdma << 3), dma->cfg1);
vortex_wtdma_setfirstbuffer(vortex, wtdma);
vortex_wtdma_setstartbuffer(vortex, wtdma, 0);
}
static void
vortex_wtdma_setmode(vortex_t * vortex, int wtdma, int ie, int fmt, int d,
/*int e, */ u32 offset)
{
stream_t *dma = &vortex->dma_wt[wtdma];
//dma->this_08 = e;
dma->dma_unknown = d;
dma->dma_ctrl = 0;
dma->dma_ctrl =
((offset & OFFSET_MASK) | (dma->dma_ctrl & ~OFFSET_MASK));
/* PCMOUT interrupt */
dma->dma_ctrl =
(dma->dma_ctrl & ~IE_MASK) | ((ie << IE_SHIFT) & IE_MASK);
/* Always playback. */
dma->dma_ctrl |= (1 << DIR_SHIFT);
/* Audio Format */
	dma->dma_ctrl =
	    (dma->dma_ctrl & ~FMT_MASK) | ((fmt << FMT_SHIFT) & FMT_MASK);
/* Write into hardware */
hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl);
}
static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
{
stream_t *dma = &vortex->dma_wt[wtdma];
int page, p, pp, delta, i;
page =
(hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
>> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
if (dma->nr_periods >= 4)
delta = (page - dma->period_real) & 3;
else {
delta = (page - dma->period_real);
if (delta < 0)
delta += dma->nr_periods;
}
if (delta == 0)
return 0;
/* refresh hw page table */
if (dma->nr_periods > 4) {
for (i = 0; i < delta; i++) {
/* p: audio buffer page index */
p = dma->period_virt + i + 4;
if (p >= dma->nr_periods)
p -= dma->nr_periods;
/* pp: hardware DMA page index. */
pp = dma->period_real + i;
if (pp >= 4)
pp -= 4;
hwwrite(vortex->mmio,
VORTEX_WTDMA_BUFBASE +
(((wtdma << 2) + pp) << 2),
snd_pcm_sgbuf_get_addr(dma->substream,
dma->period_bytes * p));
/* Force write thru cache. */
hwread(vortex->mmio, VORTEX_WTDMA_BUFBASE +
(((wtdma << 2) + pp) << 2));
}
}
dma->period_virt += delta;
if (dma->period_virt >= dma->nr_periods)
dma->period_virt -= dma->nr_periods;
dma->period_real = page;
if (delta != 1)
dev_warn(vortex->card->dev, "wt virt = %d, delta = %d\n",
dma->period_virt, delta);
return delta;
}
#if 0
static void
vortex_wtdma_getposition(vortex_t * vortex, int wtdma, int *subbuf, int *pos)
{
int temp;
temp = hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2));
*subbuf = (temp >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
*pos = temp & POS_MASK;
}
static int vortex_wtdma_getcursubuffer(vortex_t * vortex, int wtdma)
{
return ((hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) >>
POS_SHIFT) & POS_MASK);
}
#endif
static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma)
{
stream_t *dma = &vortex->dma_wt[wtdma];
int temp;
temp = hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2));
temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1));
return temp;
}
static void vortex_wtdma_startfifo(vortex_t * vortex, int wtdma)
{
stream_t *dma = &vortex->dma_wt[wtdma];
int this_8 = 0, this_4 = 0;
switch (dma->fifo_status) {
case FIFO_START:
vortex_fifo_setwtvalid(vortex, wtdma,
dma->fifo_enabled ? 1 : 0);
break;
case FIFO_STOP:
this_8 = 1;
hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2),
dma->dma_ctrl);
vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
this_4, this_8,
dma->fifo_enabled ? 1 : 0, 0);
break;
case FIFO_PAUSE:
vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
this_4, this_8,
dma->fifo_enabled ? 1 : 0, 0);
break;
}
dma->fifo_status = FIFO_START;
}
static void vortex_wtdma_resumefifo(vortex_t * vortex, int wtdma)
{
stream_t *dma = &vortex->dma_wt[wtdma];
int this_8 = 0, this_4 = 0;
switch (dma->fifo_status) {
case FIFO_STOP:
hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2),
dma->dma_ctrl);
vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
this_4, this_8,
dma->fifo_enabled ? 1 : 0, 0);
break;
case FIFO_PAUSE:
vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
this_4, this_8,
dma->fifo_enabled ? 1 : 0, 0);
break;
}
dma->fifo_status = FIFO_START;
}
static void vortex_wtdma_pausefifo(vortex_t * vortex, int wtdma)
{
stream_t *dma = &vortex->dma_wt[wtdma];
int this_8 = 0, this_4 = 0;
switch (dma->fifo_status) {
case FIFO_START:
vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
this_4, this_8, 0, 0);
break;
case FIFO_STOP:
hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2),
dma->dma_ctrl);
vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
this_4, this_8, 0, 0);
break;
}
dma->fifo_status = FIFO_PAUSE;
}
static void vortex_wtdma_stopfifo(vortex_t * vortex, int wtdma)
{
stream_t *dma = &vortex->dma_wt[wtdma];
int this_4 = 0, this_8 = 0;
if (dma->fifo_status == FIFO_START)
vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown,
this_4, this_8, 0, 0);
else if (dma->fifo_status == FIFO_STOP)
return;
dma->fifo_status = FIFO_STOP;
dma->fifo_enabled = 0;
}
#endif
/* ADB Routes */
typedef int ADBRamLink;
static void vortex_adb_init(vortex_t * vortex)
{
int i;
/* it looks like we are writing more than we need to...
* if we write what we are supposed to it breaks things... */
hwwrite(vortex->mmio, VORTEX_ADB_SR, 0);
for (i = 0; i < VORTEX_ADB_RTBASE_COUNT; i++)
hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (i << 2),
hwread(vortex->mmio,
VORTEX_ADB_RTBASE + (i << 2)) | ROUTE_MASK);
for (i = 0; i < VORTEX_ADB_CHNBASE_COUNT; i++) {
hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (i << 2),
hwread(vortex->mmio,
VORTEX_ADB_CHNBASE + (i << 2)) | ROUTE_MASK);
}
}
static void vortex_adb_en_sr(vortex_t * vortex, int channel)
{
hwwrite(vortex->mmio, VORTEX_ADB_SR,
hwread(vortex->mmio, VORTEX_ADB_SR) | (0x1 << channel));
}
static void vortex_adb_dis_sr(vortex_t * vortex, int channel)
{
hwwrite(vortex->mmio, VORTEX_ADB_SR,
hwread(vortex->mmio, VORTEX_ADB_SR) & ~(0x1 << channel));
}
static void
vortex_adb_addroutes(vortex_t * vortex, unsigned char channel,
ADBRamLink * route, int rnum)
{
int temp, prev, lifeboat = 0;
if ((rnum <= 0) || (route == NULL))
return;
/* Write last routes. */
rnum--;
hwwrite(vortex->mmio,
VORTEX_ADB_RTBASE + ((route[rnum] & ADB_MASK) << 2),
ROUTE_MASK);
while (rnum > 0) {
hwwrite(vortex->mmio,
VORTEX_ADB_RTBASE +
((route[rnum - 1] & ADB_MASK) << 2), route[rnum]);
rnum--;
}
/* Write first route. */
temp =
hwread(vortex->mmio,
VORTEX_ADB_CHNBASE + (channel << 2)) & ADB_MASK;
if (temp == ADB_MASK) {
/* First entry on this channel. */
hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2),
route[0]);
vortex_adb_en_sr(vortex, channel);
return;
}
/* Not first entry on this channel. Need to link. */
do {
prev = temp;
temp =
hwread(vortex->mmio,
VORTEX_ADB_RTBASE + (temp << 2)) & ADB_MASK;
if ((lifeboat++) > ADB_MASK) {
dev_err(vortex->card->dev,
"vortex_adb_addroutes: unending route! 0x%x\n",
*route);
return;
}
}
while (temp != ADB_MASK);
hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2), route[0]);
}
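/*
 * Note on the write order in vortex_adb_addroutes() above: the chain is
 * written back to front, terminator first, so the hardware never walks
 * a half-built route list; the head (CHNBASE or the tail of an already
 * existing chain) is linked in last.
 */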
static void
vortex_adb_delroutes(vortex_t * vortex, unsigned char channel,
ADBRamLink route0, ADBRamLink route1)
{
int temp, lifeboat = 0, prev;
/* Find route. */
temp =
hwread(vortex->mmio,
VORTEX_ADB_CHNBASE + (channel << 2)) & ADB_MASK;
if (temp == (route0 & ADB_MASK)) {
temp =
hwread(vortex->mmio,
VORTEX_ADB_RTBASE + ((route1 & ADB_MASK) << 2));
if ((temp & ADB_MASK) == ADB_MASK)
vortex_adb_dis_sr(vortex, channel);
hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2),
temp);
return;
}
do {
prev = temp;
temp =
hwread(vortex->mmio,
VORTEX_ADB_RTBASE + (prev << 2)) & ADB_MASK;
if (((lifeboat++) > ADB_MASK) || (temp == ADB_MASK)) {
dev_err(vortex->card->dev,
"vortex_adb_delroutes: route not found! 0x%x\n",
route0);
return;
}
}
while (temp != (route0 & ADB_MASK));
temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (temp << 2));
if ((temp & ADB_MASK) == route1)
temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (temp << 2));
/* Make bridge over deleted route. */
hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2), temp);
}
static void
vortex_route(vortex_t * vortex, int en, unsigned char channel,
unsigned char source, unsigned char dest)
{
ADBRamLink route;
route = ((source & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK);
if (en) {
vortex_adb_addroutes(vortex, channel, &route, 1);
if ((source < (OFFSET_SRCOUT + NR_SRC))
&& (source >= OFFSET_SRCOUT))
vortex_src_addWTD(vortex, (source - OFFSET_SRCOUT),
channel);
else if ((source < (OFFSET_MIXOUT + NR_MIXOUT))
&& (source >= OFFSET_MIXOUT))
vortex_mixer_addWTD(vortex,
(source - OFFSET_MIXOUT), channel);
} else {
vortex_adb_delroutes(vortex, channel, route, route);
if ((source < (OFFSET_SRCOUT + NR_SRC))
&& (source >= OFFSET_SRCOUT))
vortex_src_delWTD(vortex, (source - OFFSET_SRCOUT),
channel);
else if ((source < (OFFSET_MIXOUT + NR_MIXOUT))
&& (source >= OFFSET_MIXOUT))
vortex_mixer_delWTD(vortex,
(source - OFFSET_MIXOUT), channel);
}
}
#if 0
static void
vortex_routes(vortex_t * vortex, int en, unsigned char channel,
unsigned char source, unsigned char dest0, unsigned char dest1)
{
ADBRamLink route[2];
route[0] = ((source & ADB_MASK) << ADB_SHIFT) | (dest0 & ADB_MASK);
route[1] = ((source & ADB_MASK) << ADB_SHIFT) | (dest1 & ADB_MASK);
if (en) {
vortex_adb_addroutes(vortex, channel, route, 2);
if ((source < (OFFSET_SRCOUT + NR_SRC))
&& (source >= (OFFSET_SRCOUT)))
vortex_src_addWTD(vortex, (source - OFFSET_SRCOUT),
channel);
else if ((source < (OFFSET_MIXOUT + NR_MIXOUT))
&& (source >= (OFFSET_MIXOUT)))
vortex_mixer_addWTD(vortex,
(source - OFFSET_MIXOUT), channel);
} else {
vortex_adb_delroutes(vortex, channel, route[0], route[1]);
if ((source < (OFFSET_SRCOUT + NR_SRC))
&& (source >= (OFFSET_SRCOUT)))
vortex_src_delWTD(vortex, (source - OFFSET_SRCOUT),
channel);
else if ((source < (OFFSET_MIXOUT + NR_MIXOUT))
&& (source >= (OFFSET_MIXOUT)))
vortex_mixer_delWTD(vortex,
(source - OFFSET_MIXOUT), channel);
}
}
#endif
/* Route two sources to the same target. Sources must be of the same class !!! */
static void
vortex_routeLRT(vortex_t * vortex, int en, unsigned char ch,
unsigned char source0, unsigned char source1,
unsigned char dest)
{
ADBRamLink route[2];
route[0] = ((source0 & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK);
route[1] = ((source1 & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK);
if (dest < 0x10)
route[1] = (route[1] & ~ADB_MASK) | (dest + 0x20); /* fifo A */
if (en) {
vortex_adb_addroutes(vortex, ch, route, 2);
if ((source0 < (OFFSET_SRCOUT + NR_SRC))
&& (source0 >= OFFSET_SRCOUT)) {
vortex_src_addWTD(vortex,
(source0 - OFFSET_SRCOUT), ch);
vortex_src_addWTD(vortex,
(source1 - OFFSET_SRCOUT), ch);
} else if ((source0 < (OFFSET_MIXOUT + NR_MIXOUT))
&& (source0 >= OFFSET_MIXOUT)) {
vortex_mixer_addWTD(vortex,
(source0 - OFFSET_MIXOUT), ch);
vortex_mixer_addWTD(vortex,
(source1 - OFFSET_MIXOUT), ch);
}
} else {
vortex_adb_delroutes(vortex, ch, route[0], route[1]);
if ((source0 < (OFFSET_SRCOUT + NR_SRC))
&& (source0 >= OFFSET_SRCOUT)) {
vortex_src_delWTD(vortex,
(source0 - OFFSET_SRCOUT), ch);
vortex_src_delWTD(vortex,
(source1 - OFFSET_SRCOUT), ch);
} else if ((source0 < (OFFSET_MIXOUT + NR_MIXOUT))
&& (source0 >= OFFSET_MIXOUT)) {
vortex_mixer_delWTD(vortex,
(source0 - OFFSET_MIXOUT), ch);
vortex_mixer_delWTD(vortex,
(source1 - OFFSET_MIXOUT), ch);
}
}
}
/* Connection stuff */
// Connect adbdma to src('s).
static void
vortex_connection_adbdma_src(vortex_t * vortex, int en, unsigned char ch,
unsigned char adbdma, unsigned char src)
{
vortex_route(vortex, en, ch, ADB_DMA(adbdma), ADB_SRCIN(src));
}
// Connect SRC to mixin.
static void
vortex_connection_src_mixin(vortex_t * vortex, int en,
unsigned char channel, unsigned char src,
unsigned char mixin)
{
vortex_route(vortex, en, channel, ADB_SRCOUT(src), ADB_MIXIN(mixin));
}
// Connect mixin with mix output.
static void
vortex_connection_mixin_mix(vortex_t * vortex, int en, unsigned char mixin,
unsigned char mix, int a)
{
if (en) {
vortex_mix_enableinput(vortex, mix, mixin);
vortex_mix_setinputvolumebyte(vortex, mix, mixin, MIX_DEFIGAIN); // added to original code.
} else
vortex_mix_disableinput(vortex, mix, mixin, a);
}
// Connect absolute address to mixin.
static void
vortex_connection_adb_mixin(vortex_t * vortex, int en,
unsigned char channel, unsigned char source,
unsigned char mixin)
{
vortex_route(vortex, en, channel, source, ADB_MIXIN(mixin));
}
static void
vortex_connection_src_adbdma(vortex_t * vortex, int en, unsigned char ch,
unsigned char src, unsigned char adbdma)
{
vortex_route(vortex, en, ch, ADB_SRCOUT(src), ADB_DMA(adbdma));
}
static void
vortex_connection_src_src_adbdma(vortex_t * vortex, int en,
unsigned char ch, unsigned char src0,
unsigned char src1, unsigned char adbdma)
{
vortex_routeLRT(vortex, en, ch, ADB_SRCOUT(src0), ADB_SRCOUT(src1),
ADB_DMA(adbdma));
}
// mix to absolute address.
static void
vortex_connection_mix_adb(vortex_t * vortex, int en, unsigned char ch,
unsigned char mix, unsigned char dest)
{
vortex_route(vortex, en, ch, ADB_MIXOUT(mix), dest);
vortex_mix_setvolumebyte(vortex, mix, MIX_DEFOGAIN); // added to original code.
}
// mixer to src.
static void
vortex_connection_mix_src(vortex_t * vortex, int en, unsigned char ch,
unsigned char mix, unsigned char src)
{
vortex_route(vortex, en, ch, ADB_MIXOUT(mix), ADB_SRCIN(src));
vortex_mix_setvolumebyte(vortex, mix, MIX_DEFOGAIN); // added to original code.
}
#if 0
static void
vortex_connection_adbdma_src_src(vortex_t * vortex, int en,
unsigned char channel,
unsigned char adbdma, unsigned char src0,
unsigned char src1)
{
vortex_routes(vortex, en, channel, ADB_DMA(adbdma),
ADB_SRCIN(src0), ADB_SRCIN(src1));
}
// Connect two mixers to AdbDma.
static void
vortex_connection_mix_mix_adbdma(vortex_t * vortex, int en,
unsigned char ch, unsigned char mix0,
unsigned char mix1, unsigned char adbdma)
{
ADBRamLink routes[2];
routes[0] =
(((mix0 +
OFFSET_MIXOUT) & ADB_MASK) << ADB_SHIFT) | (adbdma & ADB_MASK);
routes[1] =
(((mix1 + OFFSET_MIXOUT) & ADB_MASK) << ADB_SHIFT) | ((adbdma +
0x20) &
ADB_MASK);
if (en) {
vortex_adb_addroutes(vortex, ch, routes, 0x2);
vortex_mixer_addWTD(vortex, mix0, ch);
vortex_mixer_addWTD(vortex, mix1, ch);
} else {
vortex_adb_delroutes(vortex, ch, routes[0], routes[1]);
vortex_mixer_delWTD(vortex, mix0, ch);
vortex_mixer_delWTD(vortex, mix1, ch);
}
}
#endif
/* CODEC connect. */
static void
vortex_connect_codecplay(vortex_t * vortex, int en, unsigned char mixers[])
{
#ifdef CHIP_AU8820
vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_CODECOUT(0));
vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_CODECOUT(1));
#else
#if 1
// Connect front channels through EQ.
vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_EQIN(0));
vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_EQIN(1));
/* Lower volume, since EQ has some gain. */
vortex_mix_setvolumebyte(vortex, mixers[0], 0);
vortex_mix_setvolumebyte(vortex, mixers[1], 0);
vortex_route(vortex, en, 0x11, ADB_EQOUT(0), ADB_CODECOUT(0));
vortex_route(vortex, en, 0x11, ADB_EQOUT(1), ADB_CODECOUT(1));
/* Check if reg 0x28 has SDAC bit set. */
if (VORTEX_IS_QUAD(vortex)) {
/* Rear channel. Note: ADB_CODECOUT(0+2) and (1+2) is for AC97 modem */
vortex_connection_mix_adb(vortex, en, 0x11, mixers[2],
ADB_CODECOUT(0 + 4));
vortex_connection_mix_adb(vortex, en, 0x11, mixers[3],
ADB_CODECOUT(1 + 4));
/* pr_debug( "SDAC detected "); */
}
#else
// Use plain direct output to codec.
vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_CODECOUT(0));
vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_CODECOUT(1));
#endif
#endif
}
static void
vortex_connect_codecrec(vortex_t * vortex, int en, unsigned char mixin0,
unsigned char mixin1)
{
	/*
	 Enable: 0x1, 0x1
	 Channel: 0x11, 0x11
	 ADB Source address: 0x48, 0x49
	 Destination: Asp4Topology_0x9c, 0x98
	 */
vortex_connection_adb_mixin(vortex, en, 0x11, ADB_CODECIN(0), mixin0);
vortex_connection_adb_mixin(vortex, en, 0x11, ADB_CODECIN(1), mixin1);
}
// Higher level ADB audio path (de)allocator.
/* Resource manager */
static const int resnum[VORTEX_RESOURCE_LAST] =
{ NR_ADB, NR_SRC, NR_MIXIN, NR_MIXOUT, NR_A3D };
/*
 Check out/check in a resource of the given type.
 resmap: resource map to be used. NULL means that we want to allocate
 a DMA resource (the root of all other resources of a DMA channel).
 out: check out a resource if != 0, otherwise check one in.
 restype: indicates the type of resource to be checked in or out.
*/
static char
vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype)
{
int i, qty = resnum[restype], resinuse = 0;
if (out) {
/* Gather used resources by all streams. */
for (i = 0; i < NR_ADB; i++) {
resinuse |= vortex->dma_adb[i].resources[restype];
}
resinuse |= vortex->fixed_res[restype];
/* Find and take free resource. */
for (i = 0; i < qty; i++) {
if ((resinuse & (1 << i)) == 0) {
if (resmap != NULL)
resmap[restype] |= (1 << i);
else
vortex->dma_adb[i].resources[restype] |= (1 << i);
/*
pr_debug(
"vortex: ResManager: type %d out %d\n",
restype, i);
*/
return i;
}
}
} else {
if (resmap == NULL)
return -EINVAL;
/* Checkin first resource of type restype. */
for (i = 0; i < qty; i++) {
if (resmap[restype] & (1 << i)) {
resmap[restype] &= ~(1 << i);
/*
pr_debug(
"vortex: ResManager: type %d in %d\n",
restype, i);
*/
return i;
}
}
}
dev_err(vortex->card->dev,
"FATAL: ResManager: resource type %d exhausted.\n",
restype);
return -ENOMEM;
}
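/*
 * Hedged usage sketch (not compiled; stream and src are placeholders):
 * checking one SRC out for a stream and checking it back in later.
 */
#if 0
	src = vortex_adb_checkinout(vortex, stream->resources, 1,
				    VORTEX_RESOURCE_SRC);	/* check out */
	if (src >= 0)	/* a negative return means the pool is exhausted */
		vortex_adb_checkinout(vortex, stream->resources, 0,
				      VORTEX_RESOURCE_SRC);	/* check in */
#endif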
/* Default Connections */
static void vortex_connect_default(vortex_t * vortex, int en)
{
// Connect AC97 codec.
vortex->mixplayb[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en,
VORTEX_RESOURCE_MIXOUT);
vortex->mixplayb[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en,
VORTEX_RESOURCE_MIXOUT);
if (VORTEX_IS_QUAD(vortex)) {
vortex->mixplayb[2] = vortex_adb_checkinout(vortex, vortex->fixed_res, en,
VORTEX_RESOURCE_MIXOUT);
vortex->mixplayb[3] = vortex_adb_checkinout(vortex, vortex->fixed_res, en,
VORTEX_RESOURCE_MIXOUT);
}
vortex_connect_codecplay(vortex, en, vortex->mixplayb);
vortex->mixcapt[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en,
VORTEX_RESOURCE_MIXIN);
vortex->mixcapt[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en,
VORTEX_RESOURCE_MIXIN);
vortex_connect_codecrec(vortex, en, MIX_CAPT(0), MIX_CAPT(1));
// Connect SPDIF
#ifndef CHIP_AU8820
vortex->mixspdif[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en,
VORTEX_RESOURCE_MIXOUT);
vortex->mixspdif[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en,
VORTEX_RESOURCE_MIXOUT);
vortex_connection_mix_adb(vortex, en, 0x14, vortex->mixspdif[0],
ADB_SPDIFOUT(0));
vortex_connection_mix_adb(vortex, en, 0x14, vortex->mixspdif[1],
ADB_SPDIFOUT(1));
#endif
// Connect WT
#ifndef CHIP_AU8810
vortex_wt_connect(vortex, en);
#endif
// A3D (crosstalk canceler and A3D slices). AU8810 disabled for now.
#ifndef CHIP_AU8820
vortex_Vort3D_connect(vortex, en);
#endif
	// Connect I2S
	// Connect DSP interface for SQ3500 turbo (not here I think...)
	// Connect AC97 modem codec
}
/*
  Allocate nr_ch pcm audio routes if dma < 0. If dma >= 0, existing routes
  are deallocated.
  dma: DMA engine whose routes are to be deallocated when dma >= 0.
  nr_ch: number of channels to be de/allocated.
  dir: direction of stream. Uses the same values as substream->stream.
  type: type of audio output/source (codec, spdif, i2s, dsp, etc.)
  Return: the allocated DMA, or the same DMA passed in as "dma" when dma >= 0.
*/
static int
vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
int type, int subdev)
{
stream_t *stream;
int i, en;
struct pcm_vol *p;
if (dma >= 0) {
en = 0;
vortex_adb_checkinout(vortex,
vortex->dma_adb[dma].resources, en,
VORTEX_RESOURCE_DMA);
} else {
en = 1;
if ((dma =
vortex_adb_checkinout(vortex, NULL, en,
VORTEX_RESOURCE_DMA)) < 0)
return -EBUSY;
}
stream = &vortex->dma_adb[dma];
stream->dma = dma;
stream->dir = dir;
stream->type = type;
/* PLAYBACK ROUTES. */
if (dir == SNDRV_PCM_STREAM_PLAYBACK) {
int src[4], mix[4], ch_top;
#ifndef CHIP_AU8820
int a3d = 0;
#endif
/* Get SRC and MIXER hardware resources. */
if (stream->type != VORTEX_PCM_SPDIF) {
for (i = 0; i < nr_ch; i++) {
if ((src[i] = vortex_adb_checkinout(vortex,
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
sizeof(stream->resources));
return -EBUSY;
}
if (stream->type != VORTEX_PCM_A3D) {
if ((mix[i] = vortex_adb_checkinout(vortex,
stream->resources,
en,
VORTEX_RESOURCE_MIXIN)) < 0) {
memset(stream->resources,
0,
sizeof(stream->resources));
return -EBUSY;
}
}
}
}
#ifndef CHIP_AU8820
if (stream->type == VORTEX_PCM_A3D) {
if ((a3d =
vortex_adb_checkinout(vortex,
stream->resources, en,
VORTEX_RESOURCE_A3D)) < 0) {
memset(stream->resources, 0,
sizeof(stream->resources));
dev_err(vortex->card->dev,
"out of A3D sources. Sorry\n");
return -EBUSY;
}
/* (De)Initialize A3D hardware source. */
vortex_Vort3D_InitializeSource(&vortex->a3d[a3d], en,
vortex);
}
/* Make SPDIF out exclusive to "spdif" device when in use. */
if ((stream->type == VORTEX_PCM_SPDIF) && (en)) {
vortex_route(vortex, 0, 0x14,
ADB_MIXOUT(vortex->mixspdif[0]),
ADB_SPDIFOUT(0));
vortex_route(vortex, 0, 0x14,
ADB_MIXOUT(vortex->mixspdif[1]),
ADB_SPDIFOUT(1));
}
#endif
/* Make playback routes. */
for (i = 0; i < nr_ch; i++) {
if (stream->type == VORTEX_PCM_ADB) {
vortex_connection_adbdma_src(vortex, en,
src[nr_ch - 1],
dma,
src[i]);
vortex_connection_src_mixin(vortex, en,
0x11, src[i],
mix[i]);
vortex_connection_mixin_mix(vortex, en,
mix[i],
MIX_PLAYB(i), 0);
#ifndef CHIP_AU8820
vortex_connection_mixin_mix(vortex, en,
mix[i],
MIX_SPDIF(i % 2), 0);
vortex_mix_setinputvolumebyte(vortex,
MIX_SPDIF(i % 2),
mix[i],
MIX_DEFIGAIN);
#endif
}
#ifndef CHIP_AU8820
if (stream->type == VORTEX_PCM_A3D) {
vortex_connection_adbdma_src(vortex, en,
src[nr_ch - 1],
dma,
src[i]);
vortex_route(vortex, en, 0x11, ADB_SRCOUT(src[i]), ADB_A3DIN(a3d));
/* XTalk test. */
//vortex_route(vortex, en, 0x11, dma, ADB_XTALKIN(i?9:4));
//vortex_route(vortex, en, 0x11, ADB_SRCOUT(src[i]), ADB_XTALKIN(i?4:9));
}
if (stream->type == VORTEX_PCM_SPDIF)
vortex_route(vortex, en, 0x14,
ADB_DMA(stream->dma),
ADB_SPDIFOUT(i));
#endif
}
if (stream->type != VORTEX_PCM_SPDIF && stream->type != VORTEX_PCM_A3D) {
ch_top = (VORTEX_IS_QUAD(vortex) ? 4 : 2);
for (i = nr_ch; i < ch_top; i++) {
vortex_connection_mixin_mix(vortex, en,
mix[i % nr_ch],
MIX_PLAYB(i), 0);
#ifndef CHIP_AU8820
vortex_connection_mixin_mix(vortex, en,
mix[i % nr_ch],
MIX_SPDIF(i % 2),
0);
vortex_mix_setinputvolumebyte(vortex,
MIX_SPDIF(i % 2),
mix[i % nr_ch],
MIX_DEFIGAIN);
#endif
}
if (stream->type == VORTEX_PCM_ADB && en) {
p = &vortex->pcm_vol[subdev];
p->dma = dma;
for (i = 0; i < nr_ch; i++)
p->mixin[i] = mix[i];
for (i = 0; i < ch_top; i++)
p->vol[i] = 0;
}
}
#ifndef CHIP_AU8820
else {
if (nr_ch == 1 && stream->type == VORTEX_PCM_SPDIF)
vortex_route(vortex, en, 0x14,
ADB_DMA(stream->dma),
ADB_SPDIFOUT(1));
}
/* Reconnect SPDIF out when "spdif" device is down. */
if ((stream->type == VORTEX_PCM_SPDIF) && (!en)) {
vortex_route(vortex, 1, 0x14,
ADB_MIXOUT(vortex->mixspdif[0]),
ADB_SPDIFOUT(0));
vortex_route(vortex, 1, 0x14,
ADB_MIXOUT(vortex->mixspdif[1]),
ADB_SPDIFOUT(1));
}
#endif
/* CAPTURE ROUTES. */
} else {
int src[2], mix[2];
if (nr_ch < 1)
return -EINVAL;
/* Get SRC and MIXER hardware resources. */
for (i = 0; i < nr_ch; i++) {
if ((mix[i] =
vortex_adb_checkinout(vortex,
stream->resources, en,
VORTEX_RESOURCE_MIXOUT))
< 0) {
memset(stream->resources, 0,
sizeof(stream->resources));
return -EBUSY;
}
if ((src[i] =
vortex_adb_checkinout(vortex,
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
sizeof(stream->resources));
return -EBUSY;
}
}
/* Make capture routes. */
vortex_connection_mixin_mix(vortex, en, MIX_CAPT(0), mix[0], 0);
vortex_connection_mix_src(vortex, en, 0x11, mix[0], src[0]);
if (nr_ch == 1) {
vortex_connection_mixin_mix(vortex, en,
MIX_CAPT(1), mix[0], 0);
vortex_connection_src_adbdma(vortex, en,
src[0],
src[0], dma);
} else {
vortex_connection_mixin_mix(vortex, en,
MIX_CAPT(1), mix[1], 0);
vortex_connection_mix_src(vortex, en, 0x11, mix[1],
src[1]);
vortex_connection_src_src_adbdma(vortex, en,
src[1], src[0],
src[1], dma);
}
}
vortex->dma_adb[dma].nr_ch = nr_ch;
#if 0
/* AC97 Codec channel setup. FIXME: this has no effect on some cards !! */
if (nr_ch < 4) {
/* Copy stereo to rear channel (surround) */
snd_ac97_write_cache(vortex->codec,
AC97_SIGMATEL_DAC2INVERT,
snd_ac97_read(vortex->codec,
AC97_SIGMATEL_DAC2INVERT)
| 4);
} else {
/* Allow separate front and rear channels. */
snd_ac97_write_cache(vortex->codec,
AC97_SIGMATEL_DAC2INVERT,
snd_ac97_read(vortex->codec,
AC97_SIGMATEL_DAC2INVERT)
& ~((u32)
4));
}
#endif
return dma;
}
/*
 Set the sample rate of the SRCs attached to the given DMA engine.
 */
static void
vortex_adb_setsrc(vortex_t * vortex, int adbdma, unsigned int rate, int dir)
{
stream_t *stream = &(vortex->dma_adb[adbdma]);
int i, cvrt;
/* dir=1:play ; dir=0:rec */
if (dir)
cvrt = SRC_RATIO(rate, 48000);
else
cvrt = SRC_RATIO(48000, rate);
/* Setup SRC's */
for (i = 0; i < NR_SRC; i++) {
if (stream->resources[VORTEX_RESOURCE_SRC] & (1 << i))
vortex_src_setupchannel(vortex, i, cvrt, 0, 0, i, dir, 1, cvrt, dir);
}
}
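/*
 * Example (illustrative): an 8 kHz capture stream uses the inverse
 * factor, cvrt = SRC_RATIO(48000, 8000), since on record the SRC
 * converts from the 48 kHz bus rate down to the stream rate.
 */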
// Timer and ISR functions.
static void vortex_settimer(vortex_t * vortex, int period)
{
//set the timer period to <period> 48000ths of a second.
hwwrite(vortex->mmio, VORTEX_IRQ_STAT, period);
}
#if 0
static void vortex_enable_timer_int(vortex_t * card)
{
hwwrite(card->mmio, VORTEX_IRQ_CTRL,
hwread(card->mmio, VORTEX_IRQ_CTRL) | IRQ_TIMER | 0x60);
}
static void vortex_disable_timer_int(vortex_t * card)
{
hwwrite(card->mmio, VORTEX_IRQ_CTRL,
hwread(card->mmio, VORTEX_IRQ_CTRL) & ~IRQ_TIMER);
}
#endif
static void vortex_enable_int(vortex_t * card)
{
// CAsp4ISR__EnableVortexInt_void_
hwwrite(card->mmio, VORTEX_CTRL,
hwread(card->mmio, VORTEX_CTRL) | CTRL_IRQ_ENABLE);
hwwrite(card->mmio, VORTEX_IRQ_CTRL,
(hwread(card->mmio, VORTEX_IRQ_CTRL) & 0xffffefc0) | 0x24);
}
static void vortex_disable_int(vortex_t * card)
{
hwwrite(card->mmio, VORTEX_CTRL,
hwread(card->mmio, VORTEX_CTRL) & ~CTRL_IRQ_ENABLE);
}
static irqreturn_t vortex_interrupt(int irq, void *dev_id)
{
vortex_t *vortex = dev_id;
int i, handled;
u32 source;
//check if the interrupt is ours.
if (!(hwread(vortex->mmio, VORTEX_STAT) & 0x1))
return IRQ_NONE;
// This is the Interrupt Enable flag we set before (consistency check).
if (!(hwread(vortex->mmio, VORTEX_CTRL) & CTRL_IRQ_ENABLE))
return IRQ_NONE;
source = hwread(vortex->mmio, VORTEX_IRQ_SOURCE);
// Reset IRQ flags.
hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, source);
hwread(vortex->mmio, VORTEX_IRQ_SOURCE);
// Is at least one IRQ flag set?
if (source == 0) {
dev_err(vortex->card->dev, "missing irq source\n");
return IRQ_NONE;
}
handled = 0;
// Attend every interrupt source.
if (unlikely(source & IRQ_ERR_MASK)) {
if (source & IRQ_FATAL) {
dev_err(vortex->card->dev, "IRQ fatal error\n");
}
if (source & IRQ_PARITY) {
dev_err(vortex->card->dev, "IRQ parity error\n");
}
if (source & IRQ_REG) {
dev_err(vortex->card->dev, "IRQ reg error\n");
}
if (source & IRQ_FIFO) {
dev_err(vortex->card->dev, "IRQ fifo error\n");
}
if (source & IRQ_DMA) {
dev_err(vortex->card->dev, "IRQ dma error\n");
}
handled = 1;
}
if (source & IRQ_PCMOUT) {
/* ALSA period acknowledge. */
spin_lock(&vortex->lock);
for (i = 0; i < NR_ADB; i++) {
if (vortex->dma_adb[i].fifo_status == FIFO_START) {
if (!vortex_adbdma_bufshift(vortex, i))
continue;
spin_unlock(&vortex->lock);
snd_pcm_period_elapsed(vortex->dma_adb[i].
substream);
spin_lock(&vortex->lock);
}
}
#ifndef CHIP_AU8810
for (i = 0; i < NR_WT; i++) {
if (vortex->dma_wt[i].fifo_status == FIFO_START) {
				/* FIXME: we ignore the return value from
				 * vortex_wtdma_bufshift() below as the delta
				 * calculation doesn't seem to work for
				 * wavetable for some reason
				 */
vortex_wtdma_bufshift(vortex, i);
spin_unlock(&vortex->lock);
snd_pcm_period_elapsed(vortex->dma_wt[i].
substream);
spin_lock(&vortex->lock);
}
}
#endif
spin_unlock(&vortex->lock);
handled = 1;
}
//Acknowledge the Timer interrupt
if (source & IRQ_TIMER) {
hwread(vortex->mmio, VORTEX_IRQ_STAT);
handled = 1;
}
if ((source & IRQ_MIDI) && vortex->rmidi) {
snd_mpu401_uart_interrupt(vortex->irq,
vortex->rmidi->private_data);
handled = 1;
}
if (!handled) {
dev_err(vortex->card->dev, "unknown irq source %x\n", source);
}
return IRQ_RETVAL(handled);
}
/* Codec */
#define POLL_COUNT 1000
static void vortex_codec_init(vortex_t * vortex)
{
int i;
for (i = 0; i < 32; i++) {
/* the windows driver writes -i, so we write -i */
hwwrite(vortex->mmio, (VORTEX_CODEC_CHN + (i << 2)), -i);
msleep(2);
}
if (0) {
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x8068);
msleep(1);
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00e8);
msleep(1);
} else {
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00a8);
msleep(2);
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80a8);
msleep(2);
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80e8);
msleep(2);
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80a8);
msleep(2);
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00a8);
msleep(2);
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00e8);
}
for (i = 0; i < 32; i++) {
hwwrite(vortex->mmio, (VORTEX_CODEC_CHN + (i << 2)), -i);
msleep(5);
}
hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0xe8);
msleep(1);
/* Enable codec channels 0 and 1. */
hwwrite(vortex->mmio, VORTEX_CODEC_EN,
hwread(vortex->mmio, VORTEX_CODEC_EN) | EN_CODEC);
}
static void
vortex_codec_write(struct snd_ac97 * codec, unsigned short addr, unsigned short data)
{
vortex_t *card = (vortex_t *) codec->private_data;
unsigned int lifeboat = 0;
/* wait for transactions to clear */
while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) {
udelay(100);
if (lifeboat++ > POLL_COUNT) {
dev_err(card->card->dev, "ac97 codec stuck busy\n");
return;
}
}
/* write register */
hwwrite(card->mmio, VORTEX_CODEC_IO,
((addr << VORTEX_CODEC_ADDSHIFT) & VORTEX_CODEC_ADDMASK) |
((data << VORTEX_CODEC_DATSHIFT) & VORTEX_CODEC_DATMASK) |
VORTEX_CODEC_WRITE |
(codec->num << VORTEX_CODEC_ID_SHIFT) );
/* Flush Caches. */
hwread(card->mmio, VORTEX_CODEC_IO);
}
static unsigned short vortex_codec_read(struct snd_ac97 * codec, unsigned short addr)
{
vortex_t *card = (vortex_t *) codec->private_data;
u32 read_addr, data;
unsigned lifeboat = 0;
/* wait for transactions to clear */
while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) {
udelay(100);
if (lifeboat++ > POLL_COUNT) {
dev_err(card->card->dev, "ac97 codec stuck busy\n");
return 0xffff;
}
}
/* set up read address */
read_addr = ((addr << VORTEX_CODEC_ADDSHIFT) & VORTEX_CODEC_ADDMASK) |
(codec->num << VORTEX_CODEC_ID_SHIFT) ;
hwwrite(card->mmio, VORTEX_CODEC_IO, read_addr);
/* wait for address */
do {
udelay(100);
data = hwread(card->mmio, VORTEX_CODEC_IO);
if (lifeboat++ > POLL_COUNT) {
dev_err(card->card->dev,
"ac97 address never arrived\n");
return 0xffff;
}
} while ((data & VORTEX_CODEC_ADDMASK) !=
(addr << VORTEX_CODEC_ADDSHIFT));
/* return data. */
return (u16) (data & VORTEX_CODEC_DATMASK);
}
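/*
 * Summary of the read handshake above: the register address is written
 * to VORTEX_CODEC_IO without the VORTEX_CODEC_WRITE bit, then the port
 * is polled until the address field echoes back, at which point the
 * data field carries the codec register value.
 */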
/* SPDIF support */
static void vortex_spdif_init(vortex_t * vortex, int spdif_sr, int spdif_mode)
{
int i, this_38 = 0, this_04 = 0, this_08 = 0, this_0c = 0;
/* CAsp4Spdif::InitializeSpdifHardware(void) */
hwwrite(vortex->mmio, VORTEX_SPDIF_FLAGS,
hwread(vortex->mmio, VORTEX_SPDIF_FLAGS) & 0xfff3fffd);
//for (i=0x291D4; i<0x29200; i+=4)
for (i = 0; i < 11; i++)
hwwrite(vortex->mmio, VORTEX_SPDIF_CFG1 + (i << 2), 0);
//hwwrite(vortex->mmio, 0x29190, hwread(vortex->mmio, 0x29190) | 0xc0000);
hwwrite(vortex->mmio, VORTEX_CODEC_EN,
hwread(vortex->mmio, VORTEX_CODEC_EN) | EN_SPDIF);
/* CAsp4Spdif::ProgramSRCInHardware(enum SPDIF_SR,enum SPDIFMODE) */
if (this_04 && this_08) {
int edi;
i = (((0x5DC00000 / spdif_sr) + 1) >> 1);
if (i > 0x800) {
if (i < 0x1ffff)
edi = (i >> 1);
else
edi = 0x1ffff;
} else {
edi = 0x800;
}
/* this_04 and this_08 are the CASp4Src's (samplerate converters) */
vortex_src_setupchannel(vortex, this_04, edi, 0, 1,
this_0c, 1, 0, edi, 1);
vortex_src_setupchannel(vortex, this_08, edi, 0, 1,
this_0c, 1, 0, edi, 1);
}
i = spdif_sr;
spdif_sr |= 0x8c;
switch (i) {
case 32000:
this_38 &= 0xFFFFFFFE;
this_38 &= 0xFFFFFFFD;
this_38 &= 0xF3FFFFFF;
this_38 |= 0x03000000; /* set 32khz samplerate */
this_38 &= 0xFFFFFF3F;
spdif_sr &= 0xFFFFFFFD;
spdif_sr |= 1;
break;
case 44100:
this_38 &= 0xFFFFFFFE;
this_38 &= 0xFFFFFFFD;
this_38 &= 0xF0FFFFFF;
this_38 |= 0x03000000;
this_38 &= 0xFFFFFF3F;
spdif_sr &= 0xFFFFFFFC;
break;
case 48000:
if (spdif_mode == 1) {
this_38 &= 0xFFFFFFFE;
this_38 &= 0xFFFFFFFD;
this_38 &= 0xF2FFFFFF;
this_38 |= 0x02000000; /* set 48khz samplerate */
this_38 &= 0xFFFFFF3F;
} else {
/* J. Gordon Wolfe: I think this stuff is for AC3 */
this_38 |= 0x00000003;
this_38 &= 0xFFFFFFBF;
this_38 |= 0x80;
}
spdif_sr |= 2;
spdif_sr &= 0xFFFFFFFE;
break;
}
	/* looks like the next 2 lines transfer the 32-bit value into 2 16-bit
	   registers. seems to be for the standard IEC/SPDIF initialization
	   stuff */
hwwrite(vortex->mmio, VORTEX_SPDIF_CFG0, this_38 & 0xffff);
hwwrite(vortex->mmio, VORTEX_SPDIF_CFG1, this_38 >> 0x10);
hwwrite(vortex->mmio, VORTEX_SPDIF_SMPRATE, spdif_sr);
}
/* Initialization */
static int vortex_core_init(vortex_t *vortex)
{
dev_info(vortex->card->dev, "init started\n");
/* Hardware Init. */
hwwrite(vortex->mmio, VORTEX_CTRL, 0xffffffff);
msleep(5);
hwwrite(vortex->mmio, VORTEX_CTRL,
hwread(vortex->mmio, VORTEX_CTRL) & 0xffdfffff);
msleep(5);
/* Reset IRQ flags */
hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, 0xffffffff);
hwread(vortex->mmio, VORTEX_IRQ_STAT);
vortex_codec_init(vortex);
#ifdef CHIP_AU8830
hwwrite(vortex->mmio, VORTEX_CTRL,
hwread(vortex->mmio, VORTEX_CTRL) | 0x1000000);
#endif
/* Init audio engine. */
vortex_adbdma_init(vortex);
hwwrite(vortex->mmio, VORTEX_ENGINE_CTRL, 0x0); //, 0xc83c7e58, 0xc5f93e58
vortex_adb_init(vortex);
/* Init processing blocks. */
vortex_fifo_init(vortex);
vortex_mixer_init(vortex);
vortex_srcblock_init(vortex);
#ifndef CHIP_AU8820
vortex_eq_init(vortex);
vortex_spdif_init(vortex, 48000, 1);
vortex_Vort3D_enable(vortex);
#endif
#ifndef CHIP_AU8810
vortex_wt_init(vortex);
#endif
// Moved to au88x0.c
//vortex_connect_default(vortex, 1);
vortex_settimer(vortex, 0x90);
// Enable Interrupts.
// vortex_enable_int() must be first !!
// hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, 0);
// vortex_enable_int(vortex);
//vortex_enable_timer_int(vortex);
//vortex_disable_timer_int(vortex);
dev_info(vortex->card->dev, "init.... done.\n");
spin_lock_init(&vortex->lock);
return 0;
}
static int vortex_core_shutdown(vortex_t * vortex)
{
dev_info(vortex->card->dev, "shutdown started\n");
#ifndef CHIP_AU8820
vortex_eq_free(vortex);
vortex_Vort3D_disable(vortex);
#endif
//vortex_disable_timer_int(vortex);
vortex_disable_int(vortex);
vortex_connect_default(vortex, 0);
/* Reset all DMA fifos. */
vortex_fifo_init(vortex);
/* Erase all audio routes. */
vortex_adb_init(vortex);
/* Disable MPU401 */
//hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, hwread(vortex->mmio, VORTEX_IRQ_CTRL) & ~IRQ_MIDI);
//hwwrite(vortex->mmio, VORTEX_CTRL, hwread(vortex->mmio, VORTEX_CTRL) & ~CTRL_MIDI_EN);
hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, 0);
hwwrite(vortex->mmio, VORTEX_CTRL, 0);
msleep(5);
hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, 0xffff);
dev_info(vortex->card->dev, "shutdown.... done.\n");
return 0;
}
/* Alsa support. */
static int vortex_alsafmt_aspfmt(snd_pcm_format_t alsafmt, vortex_t *v)
{
int fmt;
switch (alsafmt) {
case SNDRV_PCM_FORMAT_U8:
fmt = 0x1;
break;
case SNDRV_PCM_FORMAT_MU_LAW:
fmt = 0x2;
break;
case SNDRV_PCM_FORMAT_A_LAW:
fmt = 0x3;
break;
case SNDRV_PCM_FORMAT_SPECIAL:
fmt = 0x4; /* guess. */
break;
case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
fmt = 0x5; /* guess. */
break;
case SNDRV_PCM_FORMAT_S16_LE:
fmt = 0x8;
break;
case SNDRV_PCM_FORMAT_S16_BE:
fmt = 0x9; /* check this... */
break;
default:
fmt = 0x8;
dev_err(v->card->dev,
"format unsupported %d\n", alsafmt);
break;
}
return fmt;
}
/* Some not yet useful translations. */
#if 0
typedef enum {
ASPFMTLINEAR16 = 0, /* 0x8 */
ASPFMTLINEAR8, /* 0x1 */
ASPFMTULAW, /* 0x2 */
ASPFMTALAW, /* 0x3 */
ASPFMTSPORT, /* ? */
ASPFMTSPDIF, /* ? */
} ASPENCODING;
static int
vortex_translateformat(vortex_t * vortex, char bits, char nch, int encod)
{
int a, this_194;
if ((bits != 8) && (bits != 16))
return -1;
switch (encod) {
case 0:
if (bits == 0x10)
a = 8; // 16 bit
break;
case 1:
if (bits == 8)
a = 1; // 8 bit
break;
case 2:
a = 2; // U_LAW
break;
case 3:
a = 3; // A_LAW
break;
}
switch (nch) {
case 1:
this_194 = 0;
break;
case 2:
this_194 = 1;
break;
case 4:
this_194 = 1;
break;
case 6:
this_194 = 1;
break;
}
return (a);
}
static void vortex_cdmacore_setformat(vortex_t * vortex, int bits, int nch)
{
short int d, this_148;
d = ((bits >> 3) * nch);
this_148 = 0xbb80 / d;
}
#endif
| gpl-2.0 |
gglinux/node.js | webChat/node_modules/qs/test/stringify.js | 3198 |
if (require.register) {
var qs = require('querystring');
} else {
var qs = require('../')
, expect = require('expect.js');
}
var date = new Date(0);
var str_identities = {
'basics': [
{ str: 'foo=bar', obj: {'foo' : 'bar'}},
{ str: 'foo=%22bar%22', obj: {'foo' : '\"bar\"'}},
{ str: 'foo=', obj: {'foo': ''}},
{ str: 'foo=1&bar=2', obj: {'foo' : '1', 'bar' : '2'}},
{ str: 'my%20weird%20field=q1!2%22\'w%245%267%2Fz8)%3F', obj: {'my weird field': "q1!2\"'w$5&7/z8)?"}},
{ str: 'foo%3Dbaz=bar', obj: {'foo=baz': 'bar'}},
{ str: 'foo=bar&bar=baz', obj: {foo: 'bar', bar: 'baz'}}
],
'escaping': [
{ str: 'foo=foo%20bar', obj: {foo: 'foo bar'}},
{ str: 'cht=p3&chd=t%3A60%2C40&chs=250x100&chl=Hello%7CWorld', obj: {
cht: 'p3'
, chd: 't:60,40'
, chs: '250x100'
, chl: 'Hello|World'
}}
],
'nested': [
{ str: 'foo[0]=bar&foo[1]=quux', obj: {'foo' : ['bar', 'quux']}},
{ str: 'foo[0]=bar', obj: {foo: ['bar']}},
{ str: 'foo[0]=1&foo[1]=2', obj: {'foo' : ['1', '2']}},
{ str: 'foo=bar&baz[0]=1&baz[1]=2&baz[2]=3', obj: {'foo' : 'bar', 'baz' : ['1', '2', '3']}},
{ str: 'foo[0]=bar&baz[0]=1&baz[1]=2&baz[2]=3', obj: {'foo' : ['bar'], 'baz' : ['1', '2', '3']}},
{ str: 'x[y][z]=1', obj: {'x' : {'y' : {'z' : '1'}}}},
{ str: 'x[y][z][0]=1', obj: {'x' : {'y' : {'z' : ['1']}}}},
{ str: 'x[y][z]=2', obj: {'x' : {'y' : {'z' : '2'}}}},
{ str: 'x[y][z][0]=1&x[y][z][1]=2', obj: {'x' : {'y' : {'z' : ['1', '2']}}}},
{ str: 'x[y][0][z]=1', obj: {'x' : {'y' : [{'z' : '1'}]}}},
{ str: 'x[y][0][z][0]=1', obj: {'x' : {'y' : [{'z' : ['1']}]}}},
{ str: 'x[y][0][z]=1&x[y][0][w]=2', obj: {'x' : {'y' : [{'z' : '1', 'w' : '2'}]}}},
{ str: 'x[y][0][v][w]=1', obj: {'x' : {'y' : [{'v' : {'w' : '1'}}]}}},
{ str: 'x[y][0][z]=1&x[y][0][v][w]=2', obj: {'x' : {'y' : [{'z' : '1', 'v' : {'w' : '2'}}]}}},
{ str: 'x[y][0][z]=1&x[y][1][z]=2', obj: {'x' : {'y' : [{'z' : '1'}, {'z' : '2'}]}}},
{ str: 'x[y][0][z]=1&x[y][0][w]=a&x[y][1][z]=2&x[y][1][w]=3', obj: {'x' : {'y' : [{'z' : '1', 'w' : 'a'}, {'z' : '2', 'w' : '3'}]}}},
{ str: 'user[name][first]=tj&user[name][last]=holowaychuk', obj: { user: { name: { first: 'tj', last: 'holowaychuk' }}}}
],
'errors': [
{ obj: 'foo=bar', message: 'stringify expects an object' },
{ obj: ['foo', 'bar'], message: 'stringify expects an object' }
],
'numbers': [
{ str: 'limit[0]=1&limit[1]=2&limit[2]=3', obj: { limit: [1, 2, '3'] }},
{ str: 'limit=1', obj: { limit: 1 }}
],
'others': [
{ str: 'at=' + encodeURIComponent(date), obj: { at: date } }
]
};
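// test(type) builds a test body that feeds every fixture in the named
// group through qs.stringify() and asserts the expected canonical string.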
function test(type) {
return function(){
var str, obj;
for (var i = 0; i < str_identities[type].length; i++) {
str = str_identities[type][i].str;
obj = str_identities[type][i].obj;
expect(qs.stringify(obj)).to.eql(str);
}
}
}
describe('qs.stringify()', function(){
it('should support the basics', test('basics'))
it('should support escapes', test('escaping'))
it('should support nesting', test('nested'))
it('should support numbers', test('numbers'))
it('should support others', test('others'))
})
| apache-2.0 |
jessekl/flixr | venv/lib/python2.7/site-packages/sqlalchemy/databases/__init__.py | 881 | # databases/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Include imports from the sqlalchemy.dialects package for backwards
compatibility with pre 0.6 versions.
"""
from ..dialects.sqlite import base as sqlite
from ..dialects.postgresql import base as postgresql
postgres = postgresql
from ..dialects.mysql import base as mysql
from ..dialects.drizzle import base as drizzle
from ..dialects.oracle import base as oracle
from ..dialects.firebird import base as firebird
from ..dialects.mssql import base as mssql
from ..dialects.sybase import base as sybase
__all__ = (
'drizzle',
'firebird',
'mssql',
'mysql',
'postgresql',
'sqlite',
'oracle',
'sybase',
)
| mit |
Coveros/starcanada2016 | www-db/cookbooks/compat_resource/files/lib/chef_compat/copied_from_chef.rb | 784 | module ChefCompat
module CopiedFromChef
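    # extend_chef_module mixes chef_module into target and forwards any
    # missing methods/constants back to it, so the wrapper behaves like
    # the real Chef module.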
def self.extend_chef_module(chef_module, target)
target.instance_eval do
include chef_module
@chef_module = chef_module
def self.method_missing(name, *args, &block)
@chef_module.send(name, *args, &block)
end
def self.const_missing(name)
@chef_module.const_get(name)
end
end
end
# This patch to CopiedFromChef's ActionClass is necessary for the include to work
require 'chef/resource'
class Chef < ::Chef
class Resource < ::Chef::Resource
module ActionClass
def self.use_inline_resources
end
def self.include_resource_dsl(include_resource_dsl)
end
end
end
end
end
end
| apache-2.0 |
LauriM/PropellerEngine | thirdparty/Bullet/src/BulletDynamics/Featherstone/btMultiBodyDynamicsWorld.cpp | 27139 | /*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2013 Erwin Coumans http://bulletphysics.org
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "btMultiBodyDynamicsWorld.h"
#include "btMultiBodyConstraintSolver.h"
#include "btMultiBody.h"
#include "btMultiBodyLinkCollider.h"
#include "BulletCollision/CollisionDispatch/btSimulationIslandManager.h"
#include "LinearMath/btQuickprof.h"
#include "btMultiBodyConstraint.h"
#include "LinearMath/btIDebugDraw.h"
void btMultiBodyDynamicsWorld::addMultiBody(btMultiBody* body, short group, short mask)
{
m_multiBodies.push_back(body);
}
void btMultiBodyDynamicsWorld::removeMultiBody(btMultiBody* body)
{
m_multiBodies.remove(body);
}
void btMultiBodyDynamicsWorld::calculateSimulationIslands()
{
BT_PROFILE("calculateSimulationIslands");
getSimulationIslandManager()->updateActivationState(getCollisionWorld(),getCollisionWorld()->getDispatcher());
{
//merge islands based on speculative contact manifolds too
for (int i=0;i<this->m_predictiveManifolds.size();i++)
{
btPersistentManifold* manifold = m_predictiveManifolds[i];
const btCollisionObject* colObj0 = manifold->getBody0();
const btCollisionObject* colObj1 = manifold->getBody1();
if (((colObj0) && (!(colObj0)->isStaticOrKinematicObject())) &&
((colObj1) && (!(colObj1)->isStaticOrKinematicObject())))
{
getSimulationIslandManager()->getUnionFind().unite((colObj0)->getIslandTag(),(colObj1)->getIslandTag());
}
}
}
{
int i;
int numConstraints = int(m_constraints.size());
for (i=0;i< numConstraints ; i++ )
{
btTypedConstraint* constraint = m_constraints[i];
if (constraint->isEnabled())
{
const btRigidBody* colObj0 = &constraint->getRigidBodyA();
const btRigidBody* colObj1 = &constraint->getRigidBodyB();
if (((colObj0) && (!(colObj0)->isStaticOrKinematicObject())) &&
((colObj1) && (!(colObj1)->isStaticOrKinematicObject())))
{
getSimulationIslandManager()->getUnionFind().unite((colObj0)->getIslandTag(),(colObj1)->getIslandTag());
}
}
}
}
//merge islands linked by Featherstone link colliders
for (int i=0;i<m_multiBodies.size();i++)
{
btMultiBody* body = m_multiBodies[i];
{
btMultiBodyLinkCollider* prev = body->getBaseCollider();
for (int b=0;b<body->getNumLinks();b++)
{
btMultiBodyLinkCollider* cur = body->getLink(b).m_collider;
if (((cur) && (!(cur)->isStaticOrKinematicObject())) &&
((prev) && (!(prev)->isStaticOrKinematicObject())))
{
int tagPrev = prev->getIslandTag();
int tagCur = cur->getIslandTag();
getSimulationIslandManager()->getUnionFind().unite(tagPrev, tagCur);
}
if (cur && !cur->isStaticOrKinematicObject())
prev = cur;
}
}
}
//merge islands linked by multibody constraints
{
for (int i=0;i<this->m_multiBodyConstraints.size();i++)
{
btMultiBodyConstraint* c = m_multiBodyConstraints[i];
int tagA = c->getIslandIdA();
int tagB = c->getIslandIdB();
if (tagA>=0 && tagB>=0)
getSimulationIslandManager()->getUnionFind().unite(tagA, tagB);
}
}
//Store the island id in each body
getSimulationIslandManager()->storeIslandActivationState(getCollisionWorld());
}
void btMultiBodyDynamicsWorld::updateActivationState(btScalar timeStep)
{
BT_PROFILE("btMultiBodyDynamicsWorld::updateActivationState");
for ( int i=0;i<m_multiBodies.size();i++)
{
btMultiBody* body = m_multiBodies[i];
if (body)
{
body->checkMotionAndSleepIfRequired(timeStep);
if (!body->isAwake())
{
btMultiBodyLinkCollider* col = body->getBaseCollider();
if (col && col->getActivationState() == ACTIVE_TAG)
{
col->setActivationState( WANTS_DEACTIVATION);
col->setDeactivationTime(0.f);
}
for (int b=0;b<body->getNumLinks();b++)
{
btMultiBodyLinkCollider* col = body->getLink(b).m_collider;
if (col && col->getActivationState() == ACTIVE_TAG)
{
col->setActivationState( WANTS_DEACTIVATION);
col->setDeactivationTime(0.f);
}
}
} else
{
btMultiBodyLinkCollider* col = body->getBaseCollider();
if (col && col->getActivationState() != DISABLE_DEACTIVATION)
col->setActivationState( ACTIVE_TAG );
for (int b=0;b<body->getNumLinks();b++)
{
btMultiBodyLinkCollider* col = body->getLink(b).m_collider;
if (col && col->getActivationState() != DISABLE_DEACTIVATION)
col->setActivationState( ACTIVE_TAG );
}
}
}
}
btDiscreteDynamicsWorld::updateActivationState(timeStep);
}
SIMD_FORCE_INLINE int btGetConstraintIslandId2(const btTypedConstraint* lhs)
{
int islandId;
const btCollisionObject& rcolObj0 = lhs->getRigidBodyA();
const btCollisionObject& rcolObj1 = lhs->getRigidBodyB();
islandId= rcolObj0.getIslandTag()>=0?rcolObj0.getIslandTag():rcolObj1.getIslandTag();
return islandId;
}
class btSortConstraintOnIslandPredicate2
{
public:
bool operator() ( const btTypedConstraint* lhs, const btTypedConstraint* rhs ) const
{
int rIslandId0,lIslandId0;
rIslandId0 = btGetConstraintIslandId2(rhs);
lIslandId0 = btGetConstraintIslandId2(lhs);
return lIslandId0 < rIslandId0;
}
};
SIMD_FORCE_INLINE int btGetMultiBodyConstraintIslandId(const btMultiBodyConstraint* lhs)
{
int islandId;
int islandTagA = lhs->getIslandIdA();
int islandTagB = lhs->getIslandIdB();
islandId= islandTagA>=0?islandTagA:islandTagB;
return islandId;
}
class btSortMultiBodyConstraintOnIslandPredicate
{
public:
bool operator() ( const btMultiBodyConstraint* lhs, const btMultiBodyConstraint* rhs ) const
{
int rIslandId0,lIslandId0;
rIslandId0 = btGetMultiBodyConstraintIslandId(rhs);
lIslandId0 = btGetMultiBodyConstraintIslandId(lhs);
return lIslandId0 < rIslandId0;
}
};
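//Island callback used by solveConstraints: small islands are batched into
//m_bodies/m_manifolds/m_constraints until m_minimumSolverBatchSize is
//reached, then flushed through processConstraints()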
struct MultiBodyInplaceSolverIslandCallback : public btSimulationIslandManager::IslandCallback
{
btContactSolverInfo* m_solverInfo;
btMultiBodyConstraintSolver* m_solver;
btMultiBodyConstraint** m_multiBodySortedConstraints;
int m_numMultiBodyConstraints;
btTypedConstraint** m_sortedConstraints;
int m_numConstraints;
btIDebugDraw* m_debugDrawer;
btDispatcher* m_dispatcher;
btAlignedObjectArray<btCollisionObject*> m_bodies;
btAlignedObjectArray<btPersistentManifold*> m_manifolds;
btAlignedObjectArray<btTypedConstraint*> m_constraints;
btAlignedObjectArray<btMultiBodyConstraint*> m_multiBodyConstraints;
MultiBodyInplaceSolverIslandCallback( btMultiBodyConstraintSolver* solver,
btDispatcher* dispatcher)
		:m_solverInfo(NULL),
		m_solver(solver),
		m_multiBodySortedConstraints(NULL),
		m_numMultiBodyConstraints(0),
		m_sortedConstraints(NULL),
		m_numConstraints(0),
		m_debugDrawer(NULL),
		m_dispatcher(dispatcher)
{
}
MultiBodyInplaceSolverIslandCallback& operator=(MultiBodyInplaceSolverIslandCallback& other)
{
btAssert(0);
(void)other;
return *this;
}
SIMD_FORCE_INLINE void setup ( btContactSolverInfo* solverInfo, btTypedConstraint** sortedConstraints, int numConstraints, btMultiBodyConstraint** sortedMultiBodyConstraints, int numMultiBodyConstraints, btIDebugDraw* debugDrawer)
{
btAssert(solverInfo);
m_solverInfo = solverInfo;
m_multiBodySortedConstraints = sortedMultiBodyConstraints;
m_numMultiBodyConstraints = numMultiBodyConstraints;
m_sortedConstraints = sortedConstraints;
m_numConstraints = numConstraints;
m_debugDrawer = debugDrawer;
m_bodies.resize (0);
m_manifolds.resize (0);
m_constraints.resize (0);
m_multiBodyConstraints.resize(0);
}
virtual void processIsland(btCollisionObject** bodies,int numBodies,btPersistentManifold** manifolds,int numManifolds, int islandId)
{
if (islandId<0)
{
///we don't split islands, so all constraints/contact manifolds/bodies are passed into the solver regardless the island id
			m_solver->solveMultiBodyGroup( bodies,numBodies,manifolds, numManifolds,m_sortedConstraints, m_numConstraints, &m_multiBodySortedConstraints[0],m_numMultiBodyConstraints,*m_solverInfo,m_debugDrawer,m_dispatcher);
} else
{
//also add all non-contact constraints/joints for this island
btTypedConstraint** startConstraint = 0;
btMultiBodyConstraint** startMultiBodyConstraint = 0;
int numCurConstraints = 0;
int numCurMultiBodyConstraints = 0;
int i;
//find the first constraint for this island
for (i=0;i<m_numConstraints;i++)
{
if (btGetConstraintIslandId2(m_sortedConstraints[i]) == islandId)
{
startConstraint = &m_sortedConstraints[i];
break;
}
}
//count the number of constraints in this island
for (;i<m_numConstraints;i++)
{
if (btGetConstraintIslandId2(m_sortedConstraints[i]) == islandId)
{
numCurConstraints++;
}
}
for (i=0;i<m_numMultiBodyConstraints;i++)
{
if (btGetMultiBodyConstraintIslandId(m_multiBodySortedConstraints[i]) == islandId)
{
startMultiBodyConstraint = &m_multiBodySortedConstraints[i];
break;
}
}
//count the number of multi body constraints in this island
for (;i<m_numMultiBodyConstraints;i++)
{
if (btGetMultiBodyConstraintIslandId(m_multiBodySortedConstraints[i]) == islandId)
{
numCurMultiBodyConstraints++;
}
}
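			//a batch size <= 1 means "solve every island immediately"; otherwise
			//the island is accumulated and flushed once the threshold is exceeded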
if (m_solverInfo->m_minimumSolverBatchSize<=1)
{
m_solver->solveGroup( bodies,numBodies,manifolds, numManifolds,startConstraint,numCurConstraints,*m_solverInfo,m_debugDrawer,m_dispatcher);
} else
{
for (i=0;i<numBodies;i++)
m_bodies.push_back(bodies[i]);
for (i=0;i<numManifolds;i++)
m_manifolds.push_back(manifolds[i]);
for (i=0;i<numCurConstraints;i++)
m_constraints.push_back(startConstraint[i]);
for (i=0;i<numCurMultiBodyConstraints;i++)
m_multiBodyConstraints.push_back(startMultiBodyConstraint[i]);
if ((m_constraints.size()+m_manifolds.size())>m_solverInfo->m_minimumSolverBatchSize)
{
processConstraints();
} else
{
//printf("deferred\n");
}
}
}
}
void processConstraints()
{
btCollisionObject** bodies = m_bodies.size()? &m_bodies[0]:0;
btPersistentManifold** manifold = m_manifolds.size()?&m_manifolds[0]:0;
btTypedConstraint** constraints = m_constraints.size()?&m_constraints[0]:0;
btMultiBodyConstraint** multiBodyConstraints = m_multiBodyConstraints.size() ? &m_multiBodyConstraints[0] : 0;
//printf("mb contacts = %d, mb constraints = %d\n", mbContacts, m_multiBodyConstraints.size());
m_solver->solveMultiBodyGroup( bodies,m_bodies.size(),manifold, m_manifolds.size(),constraints, m_constraints.size() ,multiBodyConstraints, m_multiBodyConstraints.size(), *m_solverInfo,m_debugDrawer,m_dispatcher);
m_bodies.resize(0);
m_manifolds.resize(0);
m_constraints.resize(0);
m_multiBodyConstraints.resize(0);
}
};
btMultiBodyDynamicsWorld::btMultiBodyDynamicsWorld(btDispatcher* dispatcher,btBroadphaseInterface* pairCache,btMultiBodyConstraintSolver* constraintSolver,btCollisionConfiguration* collisionConfiguration)
:btDiscreteDynamicsWorld(dispatcher,pairCache,constraintSolver,collisionConfiguration),
m_multiBodyConstraintSolver(constraintSolver)
{
//split impulse is not yet supported for Featherstone hierarchies
getSolverInfo().m_splitImpulse = false;
getSolverInfo().m_solverMode |=SOLVER_USE_2_FRICTION_DIRECTIONS;
m_solverMultiBodyIslandCallback = new MultiBodyInplaceSolverIslandCallback(constraintSolver,dispatcher);
}
btMultiBodyDynamicsWorld::~btMultiBodyDynamicsWorld ()
{
delete m_solverMultiBodyIslandCallback;
}
void btMultiBodyDynamicsWorld::solveConstraints(btContactSolverInfo& solverInfo)
{
btAlignedObjectArray<btScalar> scratch_r;
btAlignedObjectArray<btVector3> scratch_v;
btAlignedObjectArray<btMatrix3x3> scratch_m;
BT_PROFILE("solveConstraints");
m_sortedConstraints.resize( m_constraints.size());
int i;
for (i=0;i<getNumConstraints();i++)
{
m_sortedConstraints[i] = m_constraints[i];
}
m_sortedConstraints.quickSort(btSortConstraintOnIslandPredicate2());
btTypedConstraint** constraintsPtr = getNumConstraints() ? &m_sortedConstraints[0] : 0;
m_sortedMultiBodyConstraints.resize(m_multiBodyConstraints.size());
for (i=0;i<m_multiBodyConstraints.size();i++)
{
m_sortedMultiBodyConstraints[i] = m_multiBodyConstraints[i];
}
m_sortedMultiBodyConstraints.quickSort(btSortMultiBodyConstraintOnIslandPredicate());
btMultiBodyConstraint** sortedMultiBodyConstraints = m_sortedMultiBodyConstraints.size() ? &m_sortedMultiBodyConstraints[0] : 0;
m_solverMultiBodyIslandCallback->setup(&solverInfo,constraintsPtr,m_sortedConstraints.size(),sortedMultiBodyConstraints,m_sortedMultiBodyConstraints.size(), getDebugDrawer());
m_constraintSolver->prepareSolve(getCollisionWorld()->getNumCollisionObjects(), getCollisionWorld()->getDispatcher()->getNumManifolds());
/// solve all the constraints for this island
m_islandManager->buildAndProcessIslands(getCollisionWorld()->getDispatcher(),getCollisionWorld(),m_solverMultiBodyIslandCallback);
{
BT_PROFILE("btMultiBody addForce and stepVelocities");
for (int i=0;i<this->m_multiBodies.size();i++)
{
btMultiBody* bod = m_multiBodies[i];
bool isSleeping = false;
if (bod->getBaseCollider() && bod->getBaseCollider()->getActivationState() == ISLAND_SLEEPING)
{
isSleeping = true;
}
for (int b=0;b<bod->getNumLinks();b++)
{
if (bod->getLink(b).m_collider && bod->getLink(b).m_collider->getActivationState()==ISLAND_SLEEPING)
isSleeping = true;
}
if (!isSleeping)
{
//useless? they get resized in stepVelocities once again (AND DIFFERENTLY)
scratch_r.resize(bod->getNumLinks()+1); //multidof? ("Y"s use it and it is used to store qdd)
scratch_v.resize(bod->getNumLinks()+1);
scratch_m.resize(bod->getNumLinks()+1);
bod->addBaseForce(m_gravity * bod->getBaseMass());
for (int j = 0; j < bod->getNumLinks(); ++j)
{
bod->addLinkForce(j, m_gravity * bod->getLinkMass(j));
}
bool doNotUpdatePos = false;
if(bod->isMultiDof())
{
if(!bod->isUsingRK4Integration())
{
bod->stepVelocitiesMultiDof(solverInfo.m_timeStep, scratch_r, scratch_v, scratch_m);
}
else
{
//
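					//explicit RK4: evaluate accelerations at t0, twice at t0+h/2 and
					//once at t0+h, then combine them below with weights
					//h/6*(k0 + 2*k1 + 2*k2 + k3)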
int numDofs = bod->getNumDofs() + 6;
int numPosVars = bod->getNumPosVars() + 7;
btAlignedObjectArray<btScalar> scratch_r2; scratch_r2.resize(2*numPosVars + 8*numDofs);
//convenience
btScalar *pMem = &scratch_r2[0];
btScalar *scratch_q0 = pMem; pMem += numPosVars;
btScalar *scratch_qx = pMem; pMem += numPosVars;
btScalar *scratch_qd0 = pMem; pMem += numDofs;
btScalar *scratch_qd1 = pMem; pMem += numDofs;
btScalar *scratch_qd2 = pMem; pMem += numDofs;
btScalar *scratch_qd3 = pMem; pMem += numDofs;
btScalar *scratch_qdd0 = pMem; pMem += numDofs;
btScalar *scratch_qdd1 = pMem; pMem += numDofs;
btScalar *scratch_qdd2 = pMem; pMem += numDofs;
btScalar *scratch_qdd3 = pMem; pMem += numDofs;
btAssert((pMem - (2*numPosVars + 8*numDofs)) == &scratch_r2[0]);
/////
//copy q0 to scratch_q0 and qd0 to scratch_qd0
scratch_q0[0] = bod->getWorldToBaseRot().x();
scratch_q0[1] = bod->getWorldToBaseRot().y();
scratch_q0[2] = bod->getWorldToBaseRot().z();
scratch_q0[3] = bod->getWorldToBaseRot().w();
scratch_q0[4] = bod->getBasePos().x();
scratch_q0[5] = bod->getBasePos().y();
scratch_q0[6] = bod->getBasePos().z();
//
for(int link = 0; link < bod->getNumLinks(); ++link)
{
for(int dof = 0; dof < bod->getLink(link).m_posVarCount; ++dof)
scratch_q0[7 + bod->getLink(link).m_cfgOffset + dof] = bod->getLink(link).m_jointPos[dof];
}
//
for(int dof = 0; dof < numDofs; ++dof)
scratch_qd0[dof] = bod->getVelocityVector()[dof];
////
struct
{
btMultiBody *bod;
btScalar *scratch_qx, *scratch_q0;
void operator()()
{
for(int dof = 0; dof < bod->getNumPosVars() + 7; ++dof)
scratch_qx[dof] = scratch_q0[dof];
}
} pResetQx = {bod, scratch_qx, scratch_q0};
//
struct
{
void operator()(btScalar dt, const btScalar *pDer, const btScalar *pCurVal, btScalar *pVal, int size)
{
for(int i = 0; i < size; ++i)
pVal[i] = pCurVal[i] + dt * pDer[i];
}
} pEulerIntegrate;
//
struct
{
void operator()(btMultiBody *pBody, const btScalar *pData)
{
btScalar *pVel = const_cast<btScalar*>(pBody->getVelocityVector());
for(int i = 0; i < pBody->getNumDofs() + 6; ++i)
pVel[i] = pData[i];
}
} pCopyToVelocityVector;
//
struct
{
void operator()(const btScalar *pSrc, btScalar *pDst, int start, int size)
{
for(int i = 0; i < size; ++i)
pDst[i] = pSrc[start + i];
}
} pCopy;
//
btScalar h = solverInfo.m_timeStep;
#define output &scratch_r[bod->getNumDofs()]
//calc qdd0 from: q0 & qd0
bod->stepVelocitiesMultiDof(0., scratch_r, scratch_v, scratch_m);
pCopy(output, scratch_qdd0, 0, numDofs);
//calc q1 = q0 + h/2 * qd0
pResetQx();
bod->stepPositionsMultiDof(btScalar(.5)*h, scratch_qx, scratch_qd0);
//calc qd1 = qd0 + h/2 * qdd0
pEulerIntegrate(btScalar(.5)*h, scratch_qdd0, scratch_qd0, scratch_qd1, numDofs);
//
//calc qdd1 from: q1 & qd1
pCopyToVelocityVector(bod, scratch_qd1);
bod->stepVelocitiesMultiDof(0., scratch_r, scratch_v, scratch_m);
pCopy(output, scratch_qdd1, 0, numDofs);
//calc q2 = q0 + h/2 * qd1
pResetQx();
bod->stepPositionsMultiDof(btScalar(.5)*h, scratch_qx, scratch_qd1);
//calc qd2 = qd0 + h/2 * qdd1
pEulerIntegrate(btScalar(.5)*h, scratch_qdd1, scratch_qd0, scratch_qd2, numDofs);
//
//calc qdd2 from: q2 & qd2
pCopyToVelocityVector(bod, scratch_qd2);
bod->stepVelocitiesMultiDof(0., scratch_r, scratch_v, scratch_m);
pCopy(output, scratch_qdd2, 0, numDofs);
//calc q3 = q0 + h * qd2
pResetQx();
bod->stepPositionsMultiDof(h, scratch_qx, scratch_qd2);
//calc qd3 = qd0 + h * qdd2
pEulerIntegrate(h, scratch_qdd2, scratch_qd0, scratch_qd3, numDofs);
//
//calc qdd3 from: q3 & qd3
pCopyToVelocityVector(bod, scratch_qd3);
bod->stepVelocitiesMultiDof(0., scratch_r, scratch_v, scratch_m);
					pCopy(output, scratch_qdd3, 0, numDofs);
#undef output
//
//calc q = q0 + h/6(qd0 + 2*(qd1 + qd2) + qd3)
//calc qd = qd0 + h/6(qdd0 + 2*(qdd1 + qdd2) + qdd3)
btAlignedObjectArray<btScalar> delta_q; delta_q.resize(numDofs);
btAlignedObjectArray<btScalar> delta_qd; delta_qd.resize(numDofs);
for(int i = 0; i < numDofs; ++i)
{
delta_q[i] = h/btScalar(6.)*(scratch_qd0[i] + 2*scratch_qd1[i] + 2*scratch_qd2[i] + scratch_qd3[i]);
delta_qd[i] = h/btScalar(6.)*(scratch_qdd0[i] + 2*scratch_qdd1[i] + 2*scratch_qdd2[i] + scratch_qdd3[i]);
//delta_q[i] = h*scratch_qd0[i];
//delta_qd[i] = h*scratch_qdd0[i];
}
//
pCopyToVelocityVector(bod, scratch_qd0);
bod->applyDeltaVeeMultiDof(&delta_qd[0], 1);
//
if(!doNotUpdatePos)
{
btScalar *pRealBuf = const_cast<btScalar *>(bod->getVelocityVector());
pRealBuf += 6 + bod->getNumDofs() + bod->getNumDofs()*bod->getNumDofs();
for(int i = 0; i < numDofs; ++i)
pRealBuf[i] = delta_q[i];
//bod->stepPositionsMultiDof(1, 0, &delta_q[0]);
bod->setPosUpdated(true);
}
//ugly hack which resets the cached data to t0 (needed for constraint solver)
{
for(int link = 0; link < bod->getNumLinks(); ++link)
bod->getLink(link).updateCacheMultiDof();
bod->stepVelocitiesMultiDof(0, scratch_r, scratch_v, scratch_m);
}
}
}
else//if(bod->isMultiDof())
{
bod->stepVelocities(solverInfo.m_timeStep, scratch_r, scratch_v, scratch_m);
}
bod->clearForcesAndTorques();
}//if (!isSleeping)
}
}
m_solverMultiBodyIslandCallback->processConstraints();
m_constraintSolver->allSolved(solverInfo, m_debugDrawer);
}
void btMultiBodyDynamicsWorld::integrateTransforms(btScalar timeStep)
{
btDiscreteDynamicsWorld::integrateTransforms(timeStep);
{
BT_PROFILE("btMultiBody stepPositions");
//integrate and update the Featherstone hierarchies
btAlignedObjectArray<btQuaternion> world_to_local;
btAlignedObjectArray<btVector3> local_origin;
for (int b=0;b<m_multiBodies.size();b++)
{
btMultiBody* bod = m_multiBodies[b];
bool isSleeping = false;
if (bod->getBaseCollider() && bod->getBaseCollider()->getActivationState() == ISLAND_SLEEPING)
{
isSleeping = true;
}
for (int b=0;b<bod->getNumLinks();b++)
{
if (bod->getLink(b).m_collider && bod->getLink(b).m_collider->getActivationState()==ISLAND_SLEEPING)
isSleeping = true;
}
if (!isSleeping)
{
int nLinks = bod->getNumLinks();
///base + num m_links
world_to_local.resize(nLinks+1);
local_origin.resize(nLinks+1);
if(bod->isMultiDof())
{
if(!bod->isPosUpdated())
bod->stepPositionsMultiDof(timeStep);
else
{
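					//the RK4 path in solveConstraints stashed delta_q past the
					//velocity vector's scratch area and flagged the body via
					//setPosUpdated(true); apply the stored position delta here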
btScalar *pRealBuf = const_cast<btScalar *>(bod->getVelocityVector());
pRealBuf += 6 + bod->getNumDofs() + bod->getNumDofs()*bod->getNumDofs();
bod->stepPositionsMultiDof(1, 0, pRealBuf);
bod->setPosUpdated(false);
}
}
else
bod->stepPositions(timeStep);
world_to_local[0] = bod->getWorldToBaseRot();
local_origin[0] = bod->getBasePos();
if (bod->getBaseCollider())
{
btVector3 posr = local_origin[0];
// float pos[4]={posr.x(),posr.y(),posr.z(),1};
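				// negating x,y,z takes the conjugate of world_to_local[0], i.e. the
				// local-to-world rotation used for the collider's world transform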
btScalar quat[4]={-world_to_local[0].x(),-world_to_local[0].y(),-world_to_local[0].z(),world_to_local[0].w()};
btTransform tr;
tr.setIdentity();
tr.setOrigin(posr);
tr.setRotation(btQuaternion(quat[0],quat[1],quat[2],quat[3]));
bod->getBaseCollider()->setWorldTransform(tr);
}
for (int k=0;k<bod->getNumLinks();k++)
{
const int parent = bod->getParent(k);
world_to_local[k+1] = bod->getParentToLocalRot(k) * world_to_local[parent+1];
local_origin[k+1] = local_origin[parent+1] + (quatRotate(world_to_local[k+1].inverse() , bod->getRVector(k)));
}
for (int m=0;m<bod->getNumLinks();m++)
{
btMultiBodyLinkCollider* col = bod->getLink(m).m_collider;
if (col)
{
int link = col->m_link;
btAssert(link == m);
int index = link+1;
btVector3 posr = local_origin[index];
// float pos[4]={posr.x(),posr.y(),posr.z(),1};
btScalar quat[4]={-world_to_local[index].x(),-world_to_local[index].y(),-world_to_local[index].z(),world_to_local[index].w()};
btTransform tr;
tr.setIdentity();
tr.setOrigin(posr);
tr.setRotation(btQuaternion(quat[0],quat[1],quat[2],quat[3]));
col->setWorldTransform(tr);
}
}
} else
{
bod->clearVelocities();
}
}
}
}
void btMultiBodyDynamicsWorld::addMultiBodyConstraint( btMultiBodyConstraint* constraint)
{
m_multiBodyConstraints.push_back(constraint);
}
void btMultiBodyDynamicsWorld::removeMultiBodyConstraint( btMultiBodyConstraint* constraint)
{
m_multiBodyConstraints.remove(constraint);
}
void btMultiBodyDynamicsWorld::debugDrawMultiBodyConstraint(btMultiBodyConstraint* constraint)
{
constraint->debugDraw(getDebugDrawer());
}
void btMultiBodyDynamicsWorld::debugDrawWorld()
{
BT_PROFILE("btMultiBodyDynamicsWorld debugDrawWorld");
bool drawConstraints = false;
if (getDebugDrawer())
{
int mode = getDebugDrawer()->getDebugMode();
if (mode & (btIDebugDraw::DBG_DrawConstraints | btIDebugDraw::DBG_DrawConstraintLimits))
{
drawConstraints = true;
}
if (drawConstraints)
{
BT_PROFILE("btMultiBody debugDrawWorld");
btAlignedObjectArray<btQuaternion> world_to_local;
btAlignedObjectArray<btVector3> local_origin;
for (int c=0;c<m_multiBodyConstraints.size();c++)
{
btMultiBodyConstraint* constraint = m_multiBodyConstraints[c];
debugDrawMultiBodyConstraint(constraint);
}
for (int b = 0; b<m_multiBodies.size(); b++)
{
btMultiBody* bod = m_multiBodies[b];
int nLinks = bod->getNumLinks();
///base + num m_links
world_to_local.resize(nLinks + 1);
local_origin.resize(nLinks + 1);
world_to_local[0] = bod->getWorldToBaseRot();
local_origin[0] = bod->getBasePos();
{
btVector3 posr = local_origin[0];
// float pos[4]={posr.x(),posr.y(),posr.z(),1};
btScalar quat[4] = { -world_to_local[0].x(), -world_to_local[0].y(), -world_to_local[0].z(), world_to_local[0].w() };
btTransform tr;
tr.setIdentity();
tr.setOrigin(posr);
tr.setRotation(btQuaternion(quat[0], quat[1], quat[2], quat[3]));
getDebugDrawer()->drawTransform(tr, 0.1);
}
for (int k = 0; k<bod->getNumLinks(); k++)
{
const int parent = bod->getParent(k);
world_to_local[k + 1] = bod->getParentToLocalRot(k) * world_to_local[parent + 1];
local_origin[k + 1] = local_origin[parent + 1] + (quatRotate(world_to_local[k + 1].inverse(), bod->getRVector(k)));
}
for (int m = 0; m<bod->getNumLinks(); m++)
{
int link = m;
int index = link + 1;
btVector3 posr = local_origin[index];
// float pos[4]={posr.x(),posr.y(),posr.z(),1};
btScalar quat[4] = { -world_to_local[index].x(), -world_to_local[index].y(), -world_to_local[index].z(), world_to_local[index].w() };
btTransform tr;
tr.setIdentity();
tr.setOrigin(posr);
tr.setRotation(btQuaternion(quat[0], quat[1], quat[2], quat[3]));
getDebugDrawer()->drawTransform(tr, 0.1);
}
}
}
}
btDiscreteDynamicsWorld::debugDrawWorld();
}
| bsd-2-clause |
Urbannet/cleanclean | yii2/vendor/zendframework/zend-i18n/src/Validator/PhoneNumber/NF.php | 720 | <?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
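// "NF" is the ISO 3166-1 code for Norfolk Island (country calling code 672).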
return [
'code' => '672',
'patterns' => [
'national' => [
'general' => '/^[13]\\d{5}$/',
'fixed' => '/^(?:1(?:06|17|28|39)|3[012]\\d)\\d{3}$/',
'mobile' => '/^38\\d{4}$/',
'emergency' => '/^9(?:11|55|77)$/',
],
'possible' => [
'general' => '/^\\d{5,6}$/',
'emergency' => '/^\\d{3}$/',
],
],
];
| bsd-3-clause |
him2him2/cdnjs | ajax/libs/fukol-grids/0.1.1/fukol.min.css | 133 | .fukol-grid{display:flex;flex-wrap:wrap;margin:-.5em}.fukol-grid>*{flex:1 0 5em;margin:.5em}/*# sourceMappingURL=fukol.min.css.map */ | mit |
evltuma/moodle | mod/glossary/edit.php | 4125 | <?php
require_once('../../config.php');
require_once('lib.php');
require_once('edit_form.php');
$cmid = required_param('cmid', PARAM_INT); // Course Module ID
$id = optional_param('id', 0, PARAM_INT); // EntryID
if (!$cm = get_coursemodule_from_id('glossary', $cmid)) {
print_error('invalidcoursemodule');
}
if (!$course = $DB->get_record('course', array('id'=>$cm->course))) {
print_error('coursemisconf');
}
require_login($course, false, $cm);
$context = context_module::instance($cm->id);
if (!$glossary = $DB->get_record('glossary', array('id'=>$cm->instance))) {
print_error('invalidid', 'glossary');
}
$url = new moodle_url('/mod/glossary/edit.php', array('cmid'=>$cm->id));
if (!empty($id)) {
$url->param('id', $id);
}
$PAGE->set_url($url);
if ($id) { // if entry is specified
if (isguestuser()) {
print_error('guestnoedit', 'glossary', "$CFG->wwwroot/mod/glossary/view.php?id=$cmid");
}
if (!$entry = $DB->get_record('glossary_entries', array('id'=>$id, 'glossaryid'=>$glossary->id))) {
print_error('invalidentry');
}
$ineditperiod = ((time() - $entry->timecreated < $CFG->maxeditingtime) || $glossary->editalways);
if (!has_capability('mod/glossary:manageentries', $context) and !($entry->userid == $USER->id and ($ineditperiod and has_capability('mod/glossary:write', $context)))) {
if ($USER->id != $entry->userid) {
print_error('errcannoteditothers', 'glossary', "view.php?id=$cm->id&mode=entry&hook=$id");
} elseif (!$ineditperiod) {
print_error('erredittimeexpired', 'glossary', "view.php?id=$cm->id&mode=entry&hook=$id");
}
}
//prepare extra data
if ($aliases = $DB->get_records_menu("glossary_alias", array("entryid"=>$id), '', 'id, alias')) {
$entry->aliases = implode("\n", $aliases) . "\n";
}
if ($categoriesarr = $DB->get_records_menu("glossary_entries_categories", array('entryid'=>$id), '', 'id, categoryid')) {
// TODO: this fetches cats from both main and secondary glossary :-(
$entry->categories = array_values($categoriesarr);
}
} else { // new entry
require_capability('mod/glossary:write', $context);
// note: guest user does not have any write capability
$entry = new stdClass();
$entry->id = null;
}
list($definitionoptions, $attachmentoptions) = glossary_get_editor_and_attachment_options($course, $context, $entry);
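// Load any existing definition text and attachments into draft file areas so
// the editor and filemanager elements can display them.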
$entry = file_prepare_standard_editor($entry, 'definition', $definitionoptions, $context, 'mod_glossary', 'entry', $entry->id);
$entry = file_prepare_standard_filemanager($entry, 'attachment', $attachmentoptions, $context, 'mod_glossary', 'attachment', $entry->id);
$entry->cmid = $cm->id;
// create form and set initial data
$mform = new mod_glossary_entry_form(null, array('current'=>$entry, 'cm'=>$cm, 'glossary'=>$glossary,
'definitionoptions'=>$definitionoptions, 'attachmentoptions'=>$attachmentoptions));
if ($mform->is_cancelled()){
if ($id){
redirect("view.php?id=$cm->id&mode=entry&hook=$id");
} else {
redirect("view.php?id=$cm->id");
}
} else if ($data = $mform->get_data()) {
$entry = glossary_edit_entry($data, $course, $cm, $glossary, $context);
if (core_tag_tag::is_enabled('mod_glossary', 'glossary_entries') && isset($data->tags)) {
core_tag_tag::set_item_tags('mod_glossary', 'glossary_entries', $data->id, $context, $data->tags);
}
redirect("view.php?id=$cm->id&mode=entry&hook=$entry->id");
}
if (!empty($id)) {
$PAGE->navbar->add(get_string('edit'));
}
$PAGE->set_title($glossary->name);
$PAGE->set_heading($course->fullname);
echo $OUTPUT->header();
echo $OUTPUT->heading(format_string($glossary->name), 2);
if ($glossary->intro) {
echo $OUTPUT->box(format_module_intro('glossary', $glossary, $cm->id), 'generalbox', 'intro');
}
$data = new stdClass();
$data->tags = core_tag_tag::get_item_tags_array('mod_glossary', 'glossary_entries', $id);
$mform->set_data($data);
$mform->display();
echo $OUTPUT->footer();
| gpl-3.0 |
victorzhao/miniblink49 | v8_4_5/test/webkit/fast/js/parser-syntax-check.js | 13137 | // Copyright 2013 the V8 project authors. All rights reserved.
// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
description(
"This test checks that the following expressions or statements are valid ECMASCRIPT code or should throw parse error"
);
function runTest(_a, errorType)
{
var success;
if (typeof _a != "string")
testFailed("runTest expects string argument: " + _a);
try {
eval(_a);
success = true;
} catch (e) {
success = !(e instanceof SyntaxError);
}
if ((!!errorType) == !success) {
if (errorType)
testPassed('Invalid: "' + _a + '"');
else
testPassed('Valid: "' + _a + '"');
} else {
if (errorType)
testFailed('Invalid: "' + _a + '" should throw ' + errorType.name);
else
testFailed('Valid: "' + _a + '" should NOT throw ');
}
}
function valid(_a)
{
// Test both the grammar and the syntax checker
runTest(_a, false);
runTest("function f() { " + _a + " }", false);
}
function invalid(_a, _type)
{
_type = _type || SyntaxError;
// Test both the grammar and the syntax checker
runTest(_a, true);
runTest("function f() { " + _a + " }", true);
}
// known issue:
// some statements require a statement as argument, and
// it seems the End-Of-File terminator is converted to a semicolon:
// "a:[EOF]" is not a parse error, while "{ a: }" is a parse error
// "if (a)[EOF]" is not a parse error, while "{ if (a) }" is a parse error
// known issues of the bison parser:
// it accepts: 'function f() { return 6 + }' (only inside a function declaration)
// and some comma expressions: see reparsing-semicolon-insertion.js
debug ("Unary operators and member access");
valid ("");
invalid("(a");
invalid("a[5");
invalid("a[5 + 6");
invalid("a.");
invalid("()");
invalid("a.'l'");
valid ("a: +~!new a");
invalid("new -a");
valid ("new (-1)")
valid ("a: b: c: new f(x++)++")
valid ("(a)++");
valid ("(1--).x");
invalid("a-- ++");
invalid("(a:) --b");
valid ("++ -- ++ a");
valid ("++ new new a ++");
valid ("delete void 0");
invalid("delete the void");
invalid("(a++");
valid ("++a--");
valid ("++((a))--");
valid ("(a.x++)++");
invalid("1: null");
invalid("+-!~");
invalid("+-!~((");
invalid("a)");
invalid("a]");
invalid(".l");
invalid("1.l");
valid ("1 .l");
debug ("Binary and conditional operators");
valid ("a + + typeof this");
invalid("a + * b");
invalid("a ? b");
invalid("a ? b :");
invalid("%a");
invalid("a-");
valid ("a = b ? b = c : d = e");
valid ("s: a[1].l ? b.l['s'] ? c++ : d : true");
valid ("a ? b + 1 ? c + 3 * d.l : d[5][6] : e");
valid ("a in b instanceof delete -c");
invalid("a in instanceof b.l");
valid ("- - true % 5");
invalid("- false = 3");
valid ("a: b: c: (1 + null) = 3");
valid ("a[2] = b.l += c /= 4 * 7 ^ !6");
invalid("a + typeof b += c in d");
invalid("typeof a &= typeof b");
valid ("a: ((typeof (a))) >>>= a || b.l && c");
valid ("a: b: c[a /= f[a %= b]].l[c[x] = 7] -= a ? b <<= f : g");
valid ("-void+x['y'].l == x.l != 5 - f[7]");
debug ("Function calls (and new with arguments)");
valid ("a()()()");
valid ("s: l: a[2](4 == 6, 5 = 6)(f[4], 6)");
valid ("s: eval(a.apply(), b.call(c[5] - f[7]))");
invalid("a(");
invalid("a(5");
invalid("a(5,");
invalid("a(5,)");
invalid("a(5,6");
valid ("a(b[7], c <d> e.l, new a() > b)");
invalid("a(b[5)");
invalid("a(b.)");
valid ("~new new a(1)(i++)(c[l])");
invalid("a(*a)");
valid ("((((a))((b)()).l))()");
valid ("(a)[b + (c) / (d())].l--");
valid ("new (5)");
invalid("new a(5");
valid ("new (f + 5)(6, (g)() - 'l'() - true(false))");
invalid("a(.length)");
debug ("function declaration and expression");
valid ("function f() {}");
valid ("function f(a,b) {}");
invalid("function () {}");
invalid("function f(a b) {}");
invalid("function f(a,) {}");
invalid("function f(a,");
invalid("function f(a, 1) {}");
valid ("function g(arguments, eval) {}");
valid ("function f() {} + function g() {}");
invalid("(function a{})");
invalid("(function this(){})");
valid ("(delete new function f(){} + function(a,b){}(5)(6))");
valid ("6 - function (m) { function g() {} }");
invalid("function l() {");
invalid("function l++(){}");
debug ("Array and object literal, comma operator");
// Note these are tested elsewhere, no need to repeat those tests here
valid ("[] in [5,6] * [,5,] / [,,5,,] || [a,] && new [,b] % [,,]");
invalid("[5,");
invalid("[,");
invalid("(a,)");
valid ("1 + {get get(){}, set set(a){}, get1:4, set1:get-set, }");
invalid("1 + {a");
invalid("1 + {a:");
invalid("1 + {get l(");
invalid(",a");
valid ("(4,(5,a(3,4))),f[4,a-6]");
invalid("(,f)");
invalid("a,,b");
invalid("a ? b, c : d");
debug ("simple statements");
valid ("{ }");
invalid("{ { }");
valid ("{ ; ; ; }");
valid ("a: { ; }");
invalid("{ a: }");
valid ("{} f; { 6 + f() }");
valid ("{ a[5],6; {} ++b-new (-5)() } c().l++");
valid ("{ l1: l2: l3: { this } a = 32 ; { i++ ; { { { } } ++i } } }");
valid ("if (a) ;");
invalid("{ if (a) }");
invalid("if a {}");
invalid("if (a");
invalid("if (a { }");
valid ("x: s: if (a) ; else b");
invalid("else {}");
valid ("if (a) if (b) y; else {} else ;");
invalid("if (a) {} else x; else");
invalid("if (a) { else }");
valid ("if (a.l + new b()) 4 + 5 - f()");
valid ("if (a) with (x) ; else with (y) ;");
invalid("with a.b { }");
valid ("while (a() - new b) ;");
invalid("while a {}");
valid ("do ; while(0) i++"); // Is this REALLY valid? (Firefox also accepts this)
valid ("do if (a) x; else y; while(z)");
invalid("do g; while 4");
invalid("do g; while ((4)");
valid ("{ { do do do ; while(0) while(0) while(0) } }");
valid ("do while (0) if (a) {} else y; while(0)");
valid ("if (a) while (b) if (c) with(d) {} else e; else f");
invalid("break ; break your_limits ; continue ; continue living ; debugger");
invalid("debugger X");
invalid("break 0.2");
invalid("continue a++");
invalid("continue (my_friend)");
valid ("while (1) break");
valid ("do if (a) with (b) continue; else debugger; while (false)");
invalid("do if (a) while (false) else debugger");
invalid("while if (a) ;");
valid ("if (a) function f() {} else function g() {}");
valid ("if (a()) while(0) function f() {} else function g() {}");
invalid("if (a()) function f() { else function g() }");
invalid("if (a) if (b) ; else function f {}");
invalid("if (a) if (b) ; else function (){}");
valid ("throw a");
valid ("throw a + b in void c");
invalid("throw");
debug ("var and const statements");
valid ("var a, b = null");
valid ("const a = 5, b, c");
invalid("var");
invalid("var = 7");
invalid("var c (6)");
valid ("if (a) var a,b; else const b, c");
invalid("var 5 = 6");
valid ("while (0) var a, b, c=6, d, e, f=5*6, g=f*h, h");
invalid("var a = if (b) { c }");
invalid("var a = var b");
valid ("const a = b += c, a, a, a = (b - f())");
invalid("var a %= b | 5");
invalid("var (a) = 5");
invalid("var a = (4, b = 6");
invalid("const 'l' = 3");
invalid("var var = 3");
valid ("var varr = 3 in 1");
valid ("const a, a, a = void 7 - typeof 8, a = 8");
valid ("const x_x = 6 /= 7 ? e : f");
invalid("var a = ?");
invalid("const a = *7");
invalid("var a = :)");
valid ("var a = a in b in c instanceof d");
invalid("var a = b ? c, b");
invalid("const a = b : c");
debug ("for statement");
valid ("for ( ; ; ) { break }");
valid ("for ( a ; ; ) { break }");
valid ("for ( ; a ; ) { break }");
valid ("for ( ; ; a ) { break }");
valid ("for ( a ; a ; ) break");
valid ("for ( a ; ; a ) break");
valid ("for ( ; a ; a ) break");
invalid("for () { }");
invalid("for ( a ) { }");
invalid("for ( ; ) ;");
invalid("for a ; b ; c { }");
invalid("for (a ; { }");
invalid("for ( a ; ) ;");
invalid("for ( ; a ) break");
valid ("for (var a, b ; ; ) { break } ");
valid ("for (var a = b, b = a ; ; ) break");
valid ("for (var a = b, c, d, b = a ; x in b ; ) { break }");
valid ("for (var a = b, c, d ; ; 1 in a()) break");
invalid("for ( ; var a ; ) break");
invalid("for (const a; ; ) break");
invalid("for ( %a ; ; ) { }");
valid ("for (a in b) break");
valid ("for (a() in b) break");
valid ("for (a().l[4] in b) break");
valid ("for (new a in b in c in d) break");
valid ("for (new new new a in b) break");
invalid("for (delete new a() in b) break");
invalid("for (a * a in b) break");
valid ("for ((a * a) in b) break");
invalid("for (a++ in b) break");
valid ("for ((a++) in b) break");
invalid("for (++a in b) break");
valid ("for ((++a) in b) break");
invalid("for (a, b in c) break");
invalid("for (a,b in c ;;) break");
valid ("for (a,(b in c) ;;) break");
valid ("for ((a, b) in c) break");
invalid("for (a ? b : c in c) break");
valid ("for ((a ? b : c) in c) break");
valid ("for (var a in b in c) break");
valid ("for (var a = 5 += 6 in b) break");
invalid("for (var a += 5 in b) break");
invalid("for (var a = in b) break");
invalid("for (var a, b in b) break");
invalid("for (var a = -6, b in b) break");
invalid("for (var a, b = 8 in b) break");
valid ("for (var a = (b in c) in d) break");
invalid("for (var a = (b in c in d) break");
invalid("for (var (a) in b) { }");
valid ("for (var a = 7, b = c < d >= d ; f()[6]++ ; --i()[1]++ ) {}");
debug ("try statement");
invalid("try { break } catch(e) {}");
valid ("try {} finally { c++ }");
valid ("try { with (x) { } } catch(e) {} finally { if (a) ; }");
invalid("try {}");
invalid("catch(e) {}");
invalid("finally {}");
invalid("try a; catch(e) {}");
invalid("try {} catch(e) a()");
invalid("try {} finally a()");
invalid("try {} catch(e)");
invalid("try {} finally");
invalid("try {} finally {} catch(e) {}");
invalid("try {} catch (...) {}");
invalid("try {} catch {}");
valid ("if (a) try {} finally {} else b;");
valid ("if (--a()) do with(1) try {} catch(ke) { f() ; g() } while (a in b) else {}");
invalid("if (a) try {} else b; catch (e) { }");
invalid("try { finally {}");
debug ("switch statement");
valid ("switch (a) {}");
invalid("switch () {}");
invalid("case 5:");
invalid("default:");
invalid("switch (a) b;");
invalid("switch (a) case 3: b;");
valid ("switch (f()) { case 5 * f(): default: case '6' - 9: ++i }");
invalid("switch (true) { default: case 6: default: }");
invalid("switch (l) { f(); }");
invalid("switch (l) { case 1: ; a: case 5: }");
valid ("switch (g() - h[5].l) { case 1 + 6: a: b: c: ++f }");
invalid("switch (g) { case 1: a: }");
invalid("switch (g) { case 1: a: default: }");
invalid("switch g { case 1: l() }");
invalid("switch (g) { case 1:");
valid ("switch (l) { case a = b ? c : d : }");
valid ("switch (sw) { case a ? b - 7[1] ? [c,,] : d = 6 : { } : }");
invalid("switch (l) { case b ? c : }");
valid ("switch (l) { case 1: a: with(g) switch (g) { case 2: default: } default: }");
invalid("switch (4 - ) { }");
invalid("switch (l) { default case: 5; }");
invalid("L: L: ;");
invalid("L: L1: L: ;");
invalid("L: L1: L2: L3: L4: L: ;");
invalid("for(var a,b 'this shouldn\'t be allowed' false ; ) ;");
invalid("for(var a,b '");
valid("function __proto__(){}")
valid("(function __proto__(){})")
valid("'use strict'; function __proto__(){}")
valid("'use strict'; (function __proto__(){})")
valid("if (0) $foo; ")
valid("if (0) _foo; ")
valid("if (0) foo$; ")
valid("if (0) foo_; ")
valid("if (0) obj.$foo; ")
valid("if (0) obj._foo; ")
valid("if (0) obj.foo$; ")
valid("if (0) obj.foo_; ")
valid("if (0) obj.foo\\u03bb; ")
valid("if (0) new a(b+c).d = 5");
valid("if (0) new a(b+c) = 5");
valid("([1 || 1].a = 1)");
valid("({a: 1 || 1}.a = 1)");
invalid("var a.b = c");
invalid("var a.b;");
try { eval("a.b.c = {};"); } catch(e1) { e=e1; shouldBe("e.line", "1") }
foo = 'FAIL';
bar = 'PASS';
try {
eval("foo = 'PASS'; a.b.c = {}; bar = 'FAIL';");
} catch(e) {
shouldBe("foo", "'PASS'");
shouldBe("bar", "'PASS'");
}
| gpl-3.0 |
takano32/rubinius | spec/ruby/library/date/ajd_to_amjd_spec.rb | 160 | require File.expand_path('../../../spec_helper', __FILE__)
require 'date'
describe "Date.ajd_to_amjd" do
it "needs to be reviewed for spec completeness"
end
| bsd-3-clause |
tmthrgd/pagespeed-libraries-cdnjs | packages/bootbox.js/2.3.0/bootbox.min.js | 6055 | /**
* bootbox.js v2.3.0
*
* The MIT License
*
* Copyright (C) 2011-2012 by Nick Payne <nick@kurai.co.uk>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE
*/
var bootbox=window.bootbox||function(){function j(b,a){null==a&&(a=k);return"string"==typeof h[a][b]?h[a][b]:a!=l?j(b,l):b}var k="en",l="en",o=!0,g={},f={},h={en:{OK:"OK",CANCEL:"Cancel",CONFIRM:"OK"},fr:{OK:"OK",CANCEL:"Annuler",CONFIRM:"D'accord"},de:{OK:"OK",CANCEL:"Abbrechen",CONFIRM:"Akzeptieren"},es:{OK:"OK",CANCEL:"Cancelar",CONFIRM:"Aceptar"},br:{OK:"OK",CANCEL:"Cancelar",CONFIRM:"Sim"},nl:{OK:"OK",CANCEL:"Annuleren",CONFIRM:"Accepteren"},ru:{OK:"OK",CANCEL:"\u041e\u0442\u043c\u0435\u043d\u0430",
CONFIRM:"\u041f\u0440\u0438\u043c\u0435\u043d\u0438\u0442\u044c"}};f.setLocale=function(b){for(var a in h)if(a==b){k=b;return}throw Error("Invalid locale: "+b);};f.addLocale=function(b,a){"undefined"==typeof h[b]&&(h[b]={});for(var c in a)h[b][c]=a[c]};f.setIcons=function(b){g=b;if("object"!==typeof g||null==g)g={}};f.alert=function(){var b="",a=j("OK"),c=null;switch(arguments.length){case 1:b=arguments[0];break;case 2:b=arguments[0];"function"==typeof arguments[1]?c=arguments[1]:a=arguments[1];break;
case 3:b=arguments[0];a=arguments[1];c=arguments[2];break;default:throw Error("Incorrect number of arguments: expected 1-3");}return f.dialog(b,{label:a,icon:g.OK,callback:c},{onEscape:c})};f.confirm=function(){var b="",a=j("CANCEL"),c=j("CONFIRM"),e=null;switch(arguments.length){case 1:b=arguments[0];break;case 2:b=arguments[0];"function"==typeof arguments[1]?e=arguments[1]:a=arguments[1];break;case 3:b=arguments[0];a=arguments[1];"function"==typeof arguments[2]?e=arguments[2]:c=arguments[2];break;
case 4:b=arguments[0];a=arguments[1];c=arguments[2];e=arguments[3];break;default:throw Error("Incorrect number of arguments: expected 1-4");}return f.dialog(b,[{label:a,icon:g.CANCEL,callback:function(){"function"==typeof e&&e(!1)}},{label:c,icon:g.CONFIRM,callback:function(){"function"==typeof e&&e(!0)}}])};f.prompt=function(){var b="",a=j("CANCEL"),c=j("CONFIRM"),e=null;switch(arguments.length){case 1:b=arguments[0];break;case 2:b=arguments[0];"function"==typeof arguments[1]?e=arguments[1]:a=arguments[1];
break;case 3:b=arguments[0];a=arguments[1];"function"==typeof arguments[2]?e=arguments[2]:c=arguments[2];break;case 4:b=arguments[0];a=arguments[1];c=arguments[2];e=arguments[3];break;default:throw Error("Incorrect number of arguments: expected 1-4");}var m=$("<form></form>");m.append("<input type=text />");var h=f.dialog(m,[{label:a,icon:g.CANCEL,callback:function(){"function"==typeof e&&e(null)}},{label:c,icon:g.CONFIRM,callback:function(){"function"==typeof e&&e(m.find("input[type=text]").val())}}],
{header:b});m.on("submit",function(a){a.preventDefault();h.find(".btn-primary").click()});return h};f.modal=function(){var b,a,c,e={onEscape:null,keyboard:!0,backdrop:!0};switch(arguments.length){case 1:b=arguments[0];break;case 2:b=arguments[0];"object"==typeof arguments[1]?c=arguments[1]:a=arguments[1];break;case 3:b=arguments[0];a=arguments[1];c=arguments[2];break;default:throw Error("Incorrect number of arguments: expected 1-3");}e.header=a;c="object"==typeof c?$.extend(e,c):e;return f.dialog(b,
[],c)};f.dialog=function(b,a,c){var e=null,f="",h=[],c=c||{};null==a?a=[]:"undefined"==typeof a.length&&(a=[a]);for(var d=a.length;d--;){var g=null,j=null,k="",l=null;if("undefined"==typeof a[d].label&&"undefined"==typeof a[d]["class"]&&"undefined"==typeof a[d].callback){var g=0,p=null,n;for(n in a[d])if(p=n,1<++g)break;1==g&&"function"==typeof a[d][n]&&(a[d].label=p,a[d].callback=a[d][n])}"function"==typeof a[d].callback&&(l=a[d].callback);a[d]["class"]?j=a[d]["class"]:d==a.length-1&&2>=a.length&&
(j="btn-primary");g=a[d].label?a[d].label:"Option "+(d+1);a[d].icon&&(k="<i class='"+a[d].icon+"'></i> ");f+="<a data-handler='"+d+"' class='btn "+j+"' href='#'>"+k+""+g+"</a>";h[d]=l}a=["<div class='bootbox modal'>"];if(c.header){d="";if("undefined"==typeof c.headerCloseButton||c.headerCloseButton)d="<a href='#' class='close'>×</a>";a.push("<div class='modal-header'>"+d+"<h3>"+c.header+"</h3></div>")}a.push("<div class='modal-body'></div>");f&&a.push("<div class='modal-footer'>"+f+"</div>");
a.push("</div>");var i=$(a.join("\n"));("undefined"===typeof c.animate?o:c.animate)&&i.addClass("fade");$(".modal-body",i).html(b);i.bind("hidden",function(){i.remove()});i.bind("hide",function(){if("escape"==e&&"function"==typeof c.onEscape)c.onEscape()});$(document).bind("keyup.modal",function(a){27==a.which&&(e="escape")});i.bind("shown",function(){$("a.btn-primary:last",i).focus()});i.on("click",".modal-footer a, a.close",function(a){var b=$(this).data("handler"),b=h[b],c=null;"function"==typeof b&&
(c=b());!1!==c&&(a.preventDefault(),e="button",i.modal("hide"))});null==c.keyboard&&(c.keyboard="function"==typeof c.onEscape);$("body").append(i);i.modal({backdrop:c.backdrop||!0,keyboard:c.keyboard});return i};f.hideAll=function(){$(".bootbox").modal("hide")};f.animate=function(b){o=b};return f}();
| mit |
Dokaponteam/ITF_Project | xampp/phpMyAdmin/libraries/plugins/TransformationsPlugin.class.php | 1804 | <?php
/* vim: set expandtab sw=4 ts=4 sts=4: */
/**
* Abstract class for the transformations plugins
*
* @package PhpMyAdmin
*/
if (! defined('PHPMYADMIN')) {
exit;
}
/* It also implements the transformations interface */
require_once 'TransformationsInterface.int.php';
/**
* Provides a common interface that will have to
* be implemented by all of the transformations plugins.
*
* @package PhpMyAdmin
*/
abstract class TransformationsPlugin implements TransformationsInterface
{
    /**
     * Hook for transformations whose output should not receive the
     * usual HTML wrapping; the base implementation is a no-op and
     * plugins override it when they need this behaviour.
     *
     * @param array $options transformation options
     *
     * @return void
     */
public function applyTransformationNoWrap($options = array())
{
        // intentionally left empty; overridden by plugins as needed
}
/**
* Does the actual work of each specific transformations plugin.
*
* @param string $buffer text to be transformed
* @param array $options transformation options
* @param string $meta meta information
*
* @return string the transformed text
*/
abstract public function applyTransformation(
$buffer, $options = array(), $meta = ''
);
/**
* Returns passed options or default values if they were not set
*
* @param string[] $options List of passed options
* @param string[] $defaults List of default values
*
* @return string[] List of options possibly filled in by defaults.
*/
public function getOptions($options, $defaults)
{
$result = array();
foreach ($defaults as $key => $value) {
if (isset($options[$key]) && $options[$key] !== '') {
$result[$key] = $options[$key];
} else {
$result[$key] = $value;
}
}
return $result;
}
}
| mit |
qtekfun/htcDesire820Kernel | external/chromium/chrome/browser/printing/cloud_print/resources/cloud_print_setup_login.html | 2695 | <!DOCTYPE HTML>
<html i18n-values="dir:textdirection;">
<head>
<title></title>
<link rel="stylesheet" type="text/css" href="cloud_print_setup_login.css" />
<script src="chrome://resources/js/cr.js"></script>
<script src="chrome://resources/js/util.js"></script>
<script type="text/javascript" src="cloud_print_setup_login.js"></script>
</head>
<body i18n-values=".style.fontFamily:fontfamily;.style.fontSize:fontsize"
onload="cloudprint.fixUpTemplateLink();">
<table class="cloudprint-contents" id="cloudprint-contents">
<tbody>
<tr>
<td class="cloudprint-signup" id="cloudprint-signup">
<table class="cloudprint-intro">
<tbody>
<tr><td><div class="cloudprint-header" id="header"
i18n-content="header"></div></td></tr>
<tr><td><div class="cloudprint-explain" id="explain"
i18n-content="explain"></div></td></tr>
</tbody>
</table>
<table class="cloudprint-body">
<tbody>
<tr>
<td class="cloudprint-item-image">
<img src="cell_phone.png" /></td>
<td>
<div class="cloudprint-item-header" id="anywhere-header"
i18n-content="anywhereheader"></div>
<div class="cloudprint-item-explain" id="anywhere-explain"
i18n-content="anywhereexplain"></div></td>
</tr>
<tr>
<td class="cloudprint-item-image">
<img src="cloud_printer.png" /></td>
<td>
<div class="cloudprint-item-header" id="printer-header"
i18n-content="printerheader"></div>
<div class="cloudprint-item-explain" id="printer-explain"
i18n-content="printerexplain"></div></td>
</tr>
<tr>
<td class="cloudprint-item-image">
<img src="sharing.png" /></td>
<td>
<div class="cloudprint-item-header" id="sharing-header"
i18n-content="sharingheader"></div>
<div class="cloudprint-item-explain" id="sharing-explain"
i18n-content="sharingexplain"></div></td>
</tr>
</tbody>
</table>
</td>
<td class="cloudprint-login">
<iframe id="gaialogin" frameborder="0"
width="100%" scrolling="no" height="100%"
src="chrome://cloudprintsetup/gaialogin"></iframe>
</td>
</tr>
</tbody>
</table>
</body>
</html>
| gpl-2.0 |
mericon/Xp_Kernel_LGH850 | virt/drivers/net/wireless/mwifiex/tdls.c | 31286 | /* Marvell Wireless LAN device driver: TDLS handling
*
* Copyright (C) 2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available on the worldwide web at
* http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11n_rxreorder.h"
#include "11ac.h"
#define TDLS_REQ_FIX_LEN 6
#define TDLS_RESP_FIX_LEN 8
#define TDLS_CONFIRM_FIX_LEN 6
static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
const u8 *mac, u8 status)
{
struct mwifiex_ra_list_tbl *ra_list;
struct list_head *tid_list;
struct sk_buff *skb, *tmp;
struct mwifiex_txinfo *tx_info;
unsigned long flags;
u32 tid;
u8 tid_down;
dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
if (!ether_addr_equal(mac, skb->data))
continue;
__skb_unlink(skb, &priv->tdls_txq);
tx_info = MWIFIEX_SKB_TXCB(skb);
tid = skb->priority;
tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
if (status == TDLS_SETUP_COMPLETE) {
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
ra_list->tdls_link = true;
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
} else {
tid_list = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
if (!list_empty(tid_list))
ra_list = list_first_entry(tid_list,
struct mwifiex_ra_list_tbl, list);
else
ra_list = NULL;
tx_info->flags &= ~MWIFIEX_BUF_FLAG_TDLS_PKT;
}
if (!ra_list) {
mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
continue;
}
skb_queue_tail(&ra_list->skb_head, skb);
ra_list->ba_pkt_count++;
ra_list->total_pkt_count++;
if (atomic_read(&priv->wmm.highest_queued_prio) <
tos_to_tid_inv[tid_down])
atomic_set(&priv->wmm.highest_queued_prio,
tos_to_tid_inv[tid_down]);
atomic_inc(&priv->wmm.tx_pkts_queued);
}
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
return;
}
static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
const u8 *mac)
{
struct mwifiex_ra_list_tbl *ra_list;
struct list_head *ra_list_head;
struct sk_buff *skb, *tmp;
unsigned long flags;
int i;
dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
for (i = 0; i < MAX_NUM_TID; i++) {
if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
ra_list_head = &priv->wmm.tid_tbl_ptr[i].ra_list;
list_for_each_entry(ra_list, ra_list_head, list) {
skb_queue_walk_safe(&ra_list->skb_head, skb,
tmp) {
if (!ether_addr_equal(mac, skb->data))
continue;
__skb_unlink(skb, &ra_list->skb_head);
atomic_dec(&priv->wmm.tx_pkts_queued);
ra_list->total_pkt_count--;
skb_queue_tail(&priv->tdls_txq, skb);
}
}
}
}
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
return;
}
/* This function appends supported rates IEs to the TDLS frame. */
static int
mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
struct sk_buff *skb)
{
u8 rates[MWIFIEX_SUPPORTED_RATES], *pos;
u16 rates_size, supp_rates_size, ext_rates_size;
memset(rates, 0, sizeof(rates));
rates_size = mwifiex_get_supported_rates(priv, rates);
supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
if (skb_tailroom(skb) < rates_size + 4) {
dev_err(priv->adapter->dev,
"Insuffient space while adding rates\n");
return -ENOMEM;
}
pos = skb_put(skb, supp_rates_size + 2);
*pos++ = WLAN_EID_SUPP_RATES;
*pos++ = supp_rates_size;
memcpy(pos, rates, supp_rates_size);
if (rates_size > MWIFIEX_TDLS_SUPPORTED_RATES) {
ext_rates_size = rates_size - MWIFIEX_TDLS_SUPPORTED_RATES;
pos = skb_put(skb, ext_rates_size + 2);
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = ext_rates_size;
memcpy(pos, rates + MWIFIEX_TDLS_SUPPORTED_RATES,
ext_rates_size);
}
return 0;
}
static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
struct sk_buff *skb)
{
struct ieee_types_assoc_rsp *assoc_rsp;
u8 *pos;
assoc_rsp = (struct ieee_types_assoc_rsp *)&priv->assoc_rsp_buf;
pos = (void *)skb_put(skb, 4);
*pos++ = WLAN_EID_AID;
*pos++ = 2;
*pos++ = le16_to_cpu(assoc_rsp->a_id);
return;
}
static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
struct sk_buff *skb)
{
struct ieee80211_vht_cap vht_cap;
u8 *pos;
pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
*pos++ = WLAN_EID_VHT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_vht_cap);
memset(&vht_cap, 0, sizeof(struct ieee80211_vht_cap));
mwifiex_fill_vht_cap_tlv(priv, &vht_cap, priv->curr_bss_params.band);
memcpy(pos, &vht_cap, sizeof(vht_cap));
return 0;
}
static int
mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
u8 vht_enabled, struct sk_buff *skb)
{
struct ieee80211_ht_operation *ht_oper;
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_bssdescriptor *bss_desc =
&priv->curr_bss_params.bss_descriptor;
u8 *pos;
sta_ptr = mwifiex_get_sta_entry(priv, mac);
if (unlikely(!sta_ptr)) {
dev_warn(priv->adapter->dev,
"TDLS peer station not found in list\n");
return -1;
}
pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
*pos++ = WLAN_EID_HT_OPERATION;
*pos++ = sizeof(struct ieee80211_ht_operation);
ht_oper = (void *)pos;
ht_oper->primary_chan = bss_desc->channel;
/* follow AP's channel bandwidth */
if (ISSUPP_CHANWIDTH40(priv->adapter->hw_dot_11n_dev_cap) &&
bss_desc->bcn_ht_cap &&
ISALLOWED_CHANWIDTH40(bss_desc->bcn_ht_oper->ht_param))
ht_oper->ht_param = bss_desc->bcn_ht_oper->ht_param;
if (vht_enabled) {
ht_oper->ht_param =
mwifiex_get_sec_chan_offset(bss_desc->channel);
ht_oper->ht_param |= BIT(2);
}
memcpy(&sta_ptr->tdls_cap.ht_oper, ht_oper,
sizeof(struct ieee80211_ht_operation));
return 0;
}
static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
const u8 *mac, struct sk_buff *skb)
{
struct mwifiex_bssdescriptor *bss_desc;
struct ieee80211_vht_operation *vht_oper;
struct ieee80211_vht_cap *vht_cap, *ap_vht_cap = NULL;
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_adapter *adapter = priv->adapter;
u8 supp_chwd_set, peer_supp_chwd_set;
u8 *pos, ap_supp_chwd_set, chan_bw;
u16 mcs_map_user, mcs_map_resp, mcs_map_result;
u16 mcs_user, mcs_resp, nss;
u32 usr_vht_cap_info;
bss_desc = &priv->curr_bss_params.bss_descriptor;
sta_ptr = mwifiex_get_sta_entry(priv, mac);
if (unlikely(!sta_ptr)) {
dev_warn(adapter->dev, "TDLS peer station not found in list\n");
return -1;
}
if (!mwifiex_is_bss_in_11ac_mode(priv)) {
if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
dev_dbg(adapter->dev,
"TDLS peer doesn't support wider bandwitdh\n");
return 0;
}
} else {
ap_vht_cap = bss_desc->bcn_vht_cap;
}
pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
*pos++ = WLAN_EID_VHT_OPERATION;
*pos++ = sizeof(struct ieee80211_vht_operation);
vht_oper = (struct ieee80211_vht_operation *)pos;
if (bss_desc->bss_band & BAND_A)
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
else
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
	/* find the minimum bandwidth between AP/TDLS peers */
vht_cap = &sta_ptr->tdls_cap.vhtcap;
supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
peer_supp_chwd_set =
GET_VHTCAP_CHWDSET(le32_to_cpu(vht_cap->vht_cap_info));
supp_chwd_set = min_t(u8, supp_chwd_set, peer_supp_chwd_set);
	/* We need to check the AP's bandwidth when TDLS_WIDER_BANDWIDTH is off */
if (ap_vht_cap && sta_ptr->tdls_cap.extcap.ext_capab[7] &
WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
ap_supp_chwd_set =
GET_VHTCAP_CHWDSET(le32_to_cpu(ap_vht_cap->vht_cap_info));
supp_chwd_set = min_t(u8, supp_chwd_set, ap_supp_chwd_set);
}
switch (supp_chwd_set) {
case IEEE80211_VHT_CHANWIDTH_80MHZ:
vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
break;
case IEEE80211_VHT_CHANWIDTH_160MHZ:
vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
break;
case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
break;
default:
vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
break;
}
mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
mcs_map_result = 0;
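	/* intersect our Rx MCS map with the peer's: per NSS keep the lower
	 * supported MCS, or mark the stream unsupported if either side is.
	 */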
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
(mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
SET_VHTNSSMCS(mcs_map_result, nss,
IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min_t(u16, mcs_user, mcs_resp));
}
vht_oper->basic_mcs_set = cpu_to_le16(mcs_map_result);
switch (vht_oper->chan_width) {
case IEEE80211_VHT_CHANWIDTH_80MHZ:
chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
break;
case IEEE80211_VHT_CHANWIDTH_160MHZ:
chan_bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
break;
case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
break;
default:
chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
break;
}
vht_oper->center_freq_seg1_idx =
mwifiex_get_center_freq_index(priv, BAND_AAC,
bss_desc->channel,
chan_bw);
return 0;
}
static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
struct sk_buff *skb)
{
struct ieee_types_extcap *extcap;
extcap = (void *)skb_put(skb, sizeof(struct ieee_types_extcap));
extcap->ieee_hdr.element_id = WLAN_EID_EXT_CAPABILITY;
extcap->ieee_hdr.len = 8;
memset(extcap->ext_capab, 0, 8);
extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
if (priv->adapter->is_hw_11ac_capable)
extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
}
static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
{
u8 *pos = (void *)skb_put(skb, 3);
*pos++ = WLAN_EID_QOS_CAPA;
*pos++ = 1;
*pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
}
static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
const u8 *peer, u8 action_code,
u8 dialog_token,
u16 status_code, struct sk_buff *skb)
{
struct ieee80211_tdls_data *tf;
int ret;
u16 capab;
struct ieee80211_ht_cap *ht_cap;
u8 radio, *pos;
capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
memcpy(tf->da, peer, ETH_ALEN);
memcpy(tf->sa, priv->curr_addr, ETH_ALEN);
tf->ether_type = cpu_to_be16(ETH_P_TDLS);
tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_SETUP_REQUEST;
skb_put(skb, sizeof(tf->u.setup_req));
tf->u.setup_req.dialog_token = dialog_token;
tf->u.setup_req.capability = cpu_to_le16(capab);
ret = mwifiex_tdls_append_rates_ie(priv, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
ht_cap = (void *)pos;
radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
if (priv->adapter->is_hw_11ac_capable) {
ret = mwifiex_tdls_add_vht_capab(priv, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
mwifiex_tdls_add_aid(priv, skb);
}
mwifiex_tdls_add_ext_capab(priv, skb);
mwifiex_tdls_add_qos_capab(skb);
break;
case WLAN_TDLS_SETUP_RESPONSE:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
skb_put(skb, sizeof(tf->u.setup_resp));
tf->u.setup_resp.status_code = cpu_to_le16(status_code);
tf->u.setup_resp.dialog_token = dialog_token;
tf->u.setup_resp.capability = cpu_to_le16(capab);
ret = mwifiex_tdls_append_rates_ie(priv, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
ht_cap = (void *)pos;
radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
if (priv->adapter->is_hw_11ac_capable) {
ret = mwifiex_tdls_add_vht_capab(priv, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
mwifiex_tdls_add_aid(priv, skb);
}
mwifiex_tdls_add_ext_capab(priv, skb);
mwifiex_tdls_add_qos_capab(skb);
break;
case WLAN_TDLS_SETUP_CONFIRM:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
skb_put(skb, sizeof(tf->u.setup_cfm));
tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
tf->u.setup_cfm.dialog_token = dialog_token;
if (priv->adapter->is_hw_11ac_capable) {
ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
ret = mwifiex_tdls_add_ht_oper(priv, peer, 1, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
} else {
ret = mwifiex_tdls_add_ht_oper(priv, peer, 0, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
}
break;
case WLAN_TDLS_TEARDOWN:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_TEARDOWN;
skb_put(skb, sizeof(tf->u.teardown));
tf->u.teardown.reason_code = cpu_to_le16(status_code);
break;
case WLAN_TDLS_DISCOVERY_REQUEST:
tf->category = WLAN_CATEGORY_TDLS;
tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
skb_put(skb, sizeof(tf->u.discover_req));
tf->u.discover_req.dialog_token = dialog_token;
break;
default:
dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
return -EINVAL;
}
return 0;
}
static void
mwifiex_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
const u8 *peer, const u8 *bssid)
{
struct ieee80211_tdls_lnkie *lnkid;
lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
lnkid->ie_type = WLAN_EID_LINK_ID;
lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) -
sizeof(struct ieee_types_header);
memcpy(lnkid->bssid, bssid, ETH_ALEN);
memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
memcpy(lnkid->resp_sta, peer, ETH_ALEN);
}
int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, const u8 *extra_ies,
size_t extra_ies_len)
{
struct sk_buff *skb;
struct mwifiex_txinfo *tx_info;
int ret;
u16 skb_len;
skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
max(sizeof(struct ieee80211_mgmt),
sizeof(struct ieee80211_tdls_data)) +
MWIFIEX_MGMT_FRAME_HEADER_SIZE +
MWIFIEX_SUPPORTED_RATES +
3 + /* Qos Info */
sizeof(struct ieee_types_extcap) +
sizeof(struct ieee80211_ht_cap) +
sizeof(struct ieee_types_bss_co_2040) +
sizeof(struct ieee80211_ht_operation) +
sizeof(struct ieee80211_tdls_lnkie) +
extra_ies_len;
if (priv->adapter->is_hw_11ac_capable)
skb_len += sizeof(struct ieee_types_vht_cap) +
sizeof(struct ieee_types_vht_oper) +
sizeof(struct ieee_types_aid);
skb = dev_alloc_skb(skb_len);
if (!skb) {
dev_err(priv->adapter->dev,
"allocate skb failed for management frame\n");
return -ENOMEM;
}
skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_CONFIRM:
case WLAN_TDLS_TEARDOWN:
case WLAN_TDLS_DISCOVERY_REQUEST:
ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
dialog_token, status_code,
skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
if (extra_ies_len)
memcpy(skb_put(skb, extra_ies_len), extra_ies,
extra_ies_len);
mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
priv->cfg_bssid);
break;
case WLAN_TDLS_SETUP_RESPONSE:
ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
dialog_token, status_code,
skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
if (extra_ies_len)
memcpy(skb_put(skb, extra_ies_len), extra_ies,
extra_ies_len);
mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
priv->cfg_bssid);
break;
}
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_RESPONSE:
skb->priority = MWIFIEX_PRIO_BK;
break;
default:
skb->priority = MWIFIEX_PRIO_VI;
break;
}
tx_info = MWIFIEX_SKB_TXCB(skb);
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
__net_timestamp(skb);
mwifiex_queue_tx_pkt(priv, skb);
return 0;
}
static int
mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt;
u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
int ret;
u16 capab;
struct ieee80211_ht_cap *ht_cap;
u8 radio, *pos;
capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
mgmt = (void *)skb_put(skb, offsetof(struct ieee80211_mgmt, u));
memset(mgmt, 0, 24);
memcpy(mgmt->da, peer, ETH_ALEN);
memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
/* add address 4 */
pos = skb_put(skb, ETH_ALEN);
switch (action_code) {
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
mgmt->u.action.u.tdls_discover_resp.action_code =
WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
mgmt->u.action.u.tdls_discover_resp.dialog_token =
dialog_token;
mgmt->u.action.u.tdls_discover_resp.capability =
cpu_to_le16(capab);
/* move back for addr4 */
memmove(pos + ETH_ALEN, &mgmt->u.action.category,
sizeof(mgmt->u.action.u.tdls_discover_resp));
/* init address 4 */
memcpy(pos, bc_addr, ETH_ALEN);
ret = mwifiex_tdls_append_rates_ie(priv, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
ht_cap = (void *)pos;
radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
if (priv->adapter->is_hw_11ac_capable) {
ret = mwifiex_tdls_add_vht_capab(priv, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
mwifiex_tdls_add_aid(priv, skb);
}
mwifiex_tdls_add_ext_capab(priv, skb);
mwifiex_tdls_add_qos_capab(skb);
break;
default:
dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
return -EINVAL;
}
return 0;
}
int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, const u8 *extra_ies,
size_t extra_ies_len)
{
struct sk_buff *skb;
struct mwifiex_txinfo *tx_info;
u8 *pos;
u32 pkt_type, tx_control;
u16 pkt_len, skb_len;
skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
max(sizeof(struct ieee80211_mgmt),
sizeof(struct ieee80211_tdls_data)) +
MWIFIEX_MGMT_FRAME_HEADER_SIZE +
MWIFIEX_SUPPORTED_RATES +
sizeof(struct ieee_types_extcap) +
sizeof(struct ieee80211_ht_cap) +
sizeof(struct ieee_types_bss_co_2040) +
sizeof(struct ieee80211_ht_operation) +
sizeof(struct ieee80211_tdls_lnkie) +
extra_ies_len +
3 + /* Qos Info */
ETH_ALEN; /* Address4 */
if (priv->adapter->is_hw_11ac_capable)
skb_len += sizeof(struct ieee_types_vht_cap) +
sizeof(struct ieee_types_vht_oper) +
sizeof(struct ieee_types_aid);
skb = dev_alloc_skb(skb_len);
if (!skb) {
dev_err(priv->adapter->dev,
"allocate skb failed for management frame\n");
return -ENOMEM;
}
skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
pkt_type = PKT_TYPE_MGMT;
tx_control = 0;
pos = skb_put(skb, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
memset(pos, 0, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
memcpy(pos, &pkt_type, sizeof(pkt_type));
memcpy(pos + sizeof(pkt_type), &tx_control, sizeof(tx_control));
if (mwifiex_construct_tdls_action_frame(priv, peer, action_code,
dialog_token, status_code,
skb)) {
dev_kfree_skb_any(skb);
return -EINVAL;
}
if (extra_ies_len)
memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
	/* the TDLS link IE is always added last because we are the responder */
mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
priv->cfg_bssid);
skb->priority = MWIFIEX_PRIO_VI;
tx_info = MWIFIEX_SKB_TXCB(skb);
memset(tx_info, 0, sizeof(*tx_info));
tx_info->bss_num = priv->bss_num;
tx_info->bss_type = priv->bss_type;
tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
sizeof(pkt_len));
__net_timestamp(skb);
mwifiex_queue_tx_pkt(priv, skb);
return 0;
}
/* This function processes a TDLS action frame from a peer.
 * Peer capabilities are stored in the station node structure.
*/
void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
u8 *buf, int len)
{
struct mwifiex_sta_node *sta_ptr;
u8 *peer, *pos, *end;
u8 i, action, basic;
__le16 cap = 0;
int ie_len = 0;
if (len < (sizeof(struct ethhdr) + 3))
return;
if (*(buf + sizeof(struct ethhdr)) != WLAN_TDLS_SNAP_RFTYPE)
return;
if (*(buf + sizeof(struct ethhdr) + 1) != WLAN_CATEGORY_TDLS)
return;
peer = buf + ETH_ALEN;
action = *(buf + sizeof(struct ethhdr) + 2);
dev_dbg(priv->adapter->dev,
"rx:tdls action: peer=%pM, action=%d\n", peer, action);
switch (action) {
case WLAN_TDLS_SETUP_REQUEST:
if (len < (sizeof(struct ethhdr) + TDLS_REQ_FIX_LEN))
return;
pos = buf + sizeof(struct ethhdr) + 4;
		/* payload 1 + category 1 + action 1 + dialog 1 */
cap = cpu_to_le16(*(u16 *)pos);
ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
pos += 2;
break;
case WLAN_TDLS_SETUP_RESPONSE:
if (len < (sizeof(struct ethhdr) + TDLS_RESP_FIX_LEN))
return;
		/* payload 1 + category 1 + action 1 + dialog 1 + status code 2 */
pos = buf + sizeof(struct ethhdr) + 6;
cap = cpu_to_le16(*(u16 *)pos);
ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
pos += 2;
break;
case WLAN_TDLS_SETUP_CONFIRM:
if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
return;
pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
break;
default:
dev_dbg(priv->adapter->dev, "Unknown TDLS frame type.\n");
return;
}
sta_ptr = mwifiex_add_sta_entry(priv, peer);
if (!sta_ptr)
return;
sta_ptr->tdls_cap.capab = cap;
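	/* walk the remaining IEs: each element is | id | len | payload |,
	 * so advance by 2 + pos[1] and stop if an advertised length would
	 * run past the end of the frame.
	 */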
for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
if (pos + 2 + pos[1] > end)
break;
switch (*pos) {
case WLAN_EID_SUPP_RATES:
sta_ptr->tdls_cap.rates_len = pos[1];
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[i] = pos[i + 2];
break;
case WLAN_EID_EXT_SUPP_RATES:
basic = sta_ptr->tdls_cap.rates_len;
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
sta_ptr->tdls_cap.rates_len += pos[1];
break;
case WLAN_EID_HT_CAPABILITY:
memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
sizeof(struct ieee80211_ht_cap));
sta_ptr->is_11n_enabled = 1;
break;
case WLAN_EID_HT_OPERATION:
memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
sizeof(struct ieee80211_ht_operation));
break;
case WLAN_EID_BSS_COEX_2040:
sta_ptr->tdls_cap.coex_2040 = pos[2];
break;
case WLAN_EID_EXT_CAPABILITY:
memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], 8));
break;
case WLAN_EID_RSN:
memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header)));
break;
case WLAN_EID_QOS_CAPA:
sta_ptr->tdls_cap.qos_info = pos[2];
break;
case WLAN_EID_VHT_OPERATION:
if (priv->adapter->is_hw_11ac_capable)
memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
sizeof(struct ieee80211_vht_operation));
break;
case WLAN_EID_VHT_CAPABILITY:
if (priv->adapter->is_hw_11ac_capable) {
memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
sizeof(struct ieee80211_vht_cap));
sta_ptr->is_11ac_enabled = 1;
}
break;
case WLAN_EID_AID:
if (priv->adapter->is_hw_11ac_capable)
sta_ptr->tdls_cap.aid =
le16_to_cpu(*(__le16 *)(pos + 2));
default:
break;
}
}
return;
}
static int
mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
dev_err(priv->adapter->dev,
"link absent for peer %pM; cannot config\n", peer);
return -EINVAL;
}
memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
tdls_oper.tdls_action = MWIFIEX_TDLS_CONFIG_LINK;
return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
}
static int
mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
dev_dbg(priv->adapter->dev,
"Setup already in progress for peer %pM\n", peer);
return 0;
}
sta_ptr = mwifiex_add_sta_entry(priv, peer);
if (!sta_ptr)
return -ENOMEM;
sta_ptr->tdls_status = TDLS_SETUP_INPROGRESS;
mwifiex_hold_tdls_packets(priv, peer);
memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
tdls_oper.tdls_action = MWIFIEX_TDLS_CREATE_LINK;
return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
}
static int
mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
unsigned long flags;
memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (sta_ptr) {
if (sta_ptr->is_11n_enabled) {
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
flags);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
flags);
}
mwifiex_del_sta_entry(priv, peer);
}
mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
}
static int
mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct ieee80211_mcs_info mcs;
unsigned long flags;
int i;
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
dev_dbg(priv->adapter->dev,
"tdls: enable link %pM success\n", peer);
sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
mcs = sta_ptr->tdls_cap.ht_capb.mcs;
if (mcs.rx_mask[0] != 0xff)
sta_ptr->is_11n_enabled = true;
if (sta_ptr->is_11n_enabled) {
if (le16_to_cpu(sta_ptr->tdls_cap.ht_capb.cap_info) &
IEEE80211_HT_CAP_MAX_AMSDU)
sta_ptr->max_amsdu =
MWIFIEX_TX_DATA_BUF_SIZE_8K;
else
sta_ptr->max_amsdu =
MWIFIEX_TX_DATA_BUF_SIZE_4K;
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] =
priv->aggr_prio_tbl[i].ampdu_user;
} else {
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
}
memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
} else {
dev_dbg(priv->adapter->dev,
"tdls: enable link %pM failed\n", peer);
if (sta_ptr) {
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
flags);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
flags);
mwifiex_del_sta_entry(priv, peer);
}
mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
return -1;
}
return 0;
}
int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action)
{
switch (action) {
case MWIFIEX_TDLS_ENABLE_LINK:
return mwifiex_tdls_process_enable_link(priv, peer);
case MWIFIEX_TDLS_DISABLE_LINK:
return mwifiex_tdls_process_disable_link(priv, peer);
case MWIFIEX_TDLS_CREATE_LINK:
return mwifiex_tdls_process_create_link(priv, peer);
case MWIFIEX_TDLS_CONFIG_LINK:
return mwifiex_tdls_process_config_link(priv, peer);
}
return 0;
}
int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *sta_ptr;
sta_ptr = mwifiex_get_sta_entry(priv, mac);
if (sta_ptr)
return sta_ptr->tdls_status;
return TDLS_NOT_SETUP;
}
void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
unsigned long flags;
if (list_empty(&priv->sta_list))
return;
list_for_each_entry(sta_ptr, &priv->sta_list, list) {
memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
if (sta_ptr->is_11n_enabled) {
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
flags);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
flags);
}
mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
TDLS_LINK_TEARDOWN);
memcpy(&tdls_oper.peer_mac, sta_ptr->mac_addr, ETH_ALEN);
tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
dev_warn(priv->adapter->dev,
"Disable link failed for TDLS peer %pM",
sta_ptr->mac_addr);
}
mwifiex_del_all_sta_list(priv);
}
| gpl-2.0 |
MinecraftSnowServer/Server-Docker | web/wiki/vendor/easybook/geshi/geshi/m68k.php | 4666 | <?php
/*************************************************************************************
* m68k.php
* --------
* Author: Benny Baumann (BenBE@omorphia.de)
* Copyright: (c) 2007 Benny Baumann (http://www.omorphia.de/), Nigel McNie (http://qbnz.com/highlighter)
* Release Version: 1.0.8.11
* Date Started: 2007/02/06
*
* Motorola 68000 Assembler language file for GeSHi.
*
 * Syntax definition as commonly used by the Motorola documentation for the
* MC68HC908GP32 Microcontroller (and maybe others).
*
* CHANGES
* -------
* 2008/05/23 (1.0.7.22)
* - Added description of extra language features (SF#1970248)
* 2007/06/02 (1.0.0)
* - First Release
*
* TODO (updated 2007/06/02)
* -------------------------
*
*************************************************************************************
*
* This file is part of GeSHi.
*
* GeSHi is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GeSHi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GeSHi; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
************************************************************************************/
$language_data = array (
'LANG_NAME' => 'Motorola 68000 Assembler',
'COMMENT_SINGLE' => array(1 => ';'),
'COMMENT_MULTI' => array(),
'CASE_KEYWORDS' => GESHI_CAPS_NO_CHANGE,
'QUOTEMARKS' => array("'", '"'),
'ESCAPE_CHAR' => '',
'KEYWORDS' => array(
/*CPU*/
1 => array(
'adc','add','ais','aix','and','asl','asr','bcc','bclr','bcs','beq',
'bge','bgt','bhcc','bhcs','bhi','bhs','bih','bil','bit','ble','blo',
'bls','blt','bmc','bmi','bms','bne','bpl','bra','brclr','brn',
'brset','bset','bsr','cbeq','clc','cli','clr','cmp','com','cphx',
'cpx','daa','dbnz','dec','div','eor','inc','jmp','jsr','lda','ldhx',
'ldx','lsl','lsr','mov','mul','neg','nop','nsa','ora','psha','pshh',
'pshx','pula','pulh','pulx','rol','ror','rsp','rti','rts','sbc',
'sec','sei','sta','sthx','stop','stx','sub','swi','tap','tax','tpa',
'tst','tsx','txa','txs','wait'
),
/*registers*/
2 => array(
'a','h','x',
'hx','sp'
),
/*Directive*/
3 => array(
'#define','#endif','#else','#ifdef','#ifndef','#include','#undef',
'.db','.dd','.df','.dq','.dt','.dw','.end','.org','equ'
),
),
'SYMBOLS' => array(
','
),
'CASE_SENSITIVE' => array(
GESHI_COMMENTS => false,
1 => false,
2 => false,
3 => false,
),
'STYLES' => array(
'KEYWORDS' => array(
1 => 'color: #0000ff; font-weight:bold;',
2 => 'color: #0000ff;',
3 => 'color: #46aa03; font-weight:bold;'
),
'COMMENTS' => array(
1 => 'color: #adadad; font-style: italic;',
),
'ESCAPE_CHAR' => array(
0 => 'color: #000099; font-weight: bold;'
),
'BRACKETS' => array(
0 => 'color: #0000ff;'
),
'STRINGS' => array(
0 => 'color: #7f007f;'
),
'NUMBERS' => array(
0 => 'color: #dd22dd;'
),
'METHODS' => array(
),
'SYMBOLS' => array(
0 => 'color: #008000;'
),
'REGEXPS' => array(
0 => 'color: #22bbff;',
1 => 'color: #22bbff;',
2 => 'color: #993333;'
),
'SCRIPT' => array(
)
),
'URLS' => array(
1 => '',
2 => '',
3 => ''
),
'OOLANG' => false,
'OBJECT_SPLITTERS' => array(
),
'REGEXPS' => array(
//Hex numbers
0 => '#?0[0-9a-fA-F]{1,32}[hH]',
//Binary numbers
1 => '\%[01]{1,64}[bB]',
//Labels
2 => '^[_a-zA-Z][_a-zA-Z0-9]*?\:'
),
'STRICT_MODE_APPLIES' => GESHI_NEVER,
'SCRIPT_DELIMITERS' => array(
),
'HIGHLIGHT_STRICT_BLOCK' => array(
),
'TAB_WIDTH' => 8
);
| mit |
unigent/openwrt-3.10.14 | package/ramips/ui/luci-mtk/src/applications/luci-asterisk/luasrc/model/cbi/asterisk-mod-format.lua | 3636 | --[[
LuCI - Lua Configuration Interface
Copyright 2008 Steven Barth <steven@midlink.org>
Copyright 2008 Jo-Philipp Wich <xm@leipzig.freifunk.net>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
$Id$
]]--
cbimap = Map("asterisk", "asterisk", "")
module = cbimap:section(TypedSection, "module", "Modules", "")
module.anonymous = true
format_au = module:option(ListValue, "format_au", "Sun Microsystems AU format (signed linear)", "")
format_au:value("yes", "Load")
format_au:value("no", "Do Not Load")
format_au:value("auto", "Load as Required")
format_au.rmempty = true
format_g723 = module:option(ListValue, "format_g723", "G.723.1 Simple Timestamp File Format", "")
format_g723:value("yes", "Load")
format_g723:value("no", "Do Not Load")
format_g723:value("auto", "Load as Required")
format_g723.rmempty = true
format_g726 = module:option(ListValue, "format_g726", "Raw G.726 (16/24/32/40kbps) data", "")
format_g726:value("yes", "Load")
format_g726:value("no", "Do Not Load")
format_g726:value("auto", "Load as Required")
format_g726.rmempty = true
format_g729 = module:option(ListValue, "format_g729", "Raw G729 data", "")
format_g729:value("yes", "Load")
format_g729:value("no", "Do Not Load")
format_g729:value("auto", "Load as Required")
format_g729.rmempty = true
format_gsm = module:option(ListValue, "format_gsm", "Raw GSM data", "")
format_gsm:value("yes", "Load")
format_gsm:value("no", "Do Not Load")
format_gsm:value("auto", "Load as Required")
format_gsm.rmempty = true
format_h263 = module:option(ListValue, "format_h263", "Raw h263 data", "")
format_h263:value("yes", "Load")
format_h263:value("no", "Do Not Load")
format_h263:value("auto", "Load as Required")
format_h263.rmempty = true
format_jpeg = module:option(ListValue, "format_jpeg", "JPEG (Joint Picture Experts Group) Image", "")
format_jpeg:value("yes", "Load")
format_jpeg:value("no", "Do Not Load")
format_jpeg:value("auto", "Load as Required")
format_jpeg.rmempty = true
format_pcm = module:option(ListValue, "format_pcm", "Raw uLaw 8khz Audio support (PCM)", "")
format_pcm:value("yes", "Load")
format_pcm:value("no", "Do Not Load")
format_pcm:value("auto", "Load as Required")
format_pcm.rmempty = true
format_pcm_alaw = module:option(ListValue, "format_pcm_alaw", "Raw aLaw 8khz PCM Audio support", "")
format_pcm_alaw:value("yes", "Load")
format_pcm_alaw:value("no", "Do Not Load")
format_pcm_alaw:value("auto", "Load as Required")
format_pcm_alaw.rmempty = true
format_sln = module:option(ListValue, "format_sln", "Raw Signed Linear Audio support (SLN)", "")
format_sln:value("yes", "Load")
format_sln:value("no", "Do Not Load")
format_sln:value("auto", "Load as Required")
format_sln.rmempty = true
format_vox = module:option(ListValue, "format_vox", "Dialogic VOX (ADPCM) File Format", "")
format_vox:value("yes", "Load")
format_vox:value("no", "Do Not Load")
format_vox:value("auto", "Load as Required")
format_vox.rmempty = true
format_wav = module:option(ListValue, "format_wav", "Microsoft WAV format (8000hz Signed Line", "")
format_wav:value("yes", "Load")
format_wav:value("no", "Do Not Load")
format_wav:value("auto", "Load as Required")
format_wav.rmempty = true
format_wav_gsm = module:option(ListValue, "format_wav_gsm", "Microsoft WAV format (Proprietary GSM)", "")
format_wav_gsm:value("yes", "Load")
format_wav_gsm:value("no", "Do Not Load")
format_wav_gsm:value("auto", "Load as Required")
format_wav_gsm.rmempty = true
return cbimap
| gpl-2.0 |
sc0ttkclark/elasticsearch | core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java | 19930 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
public class DynamicMappingTests extends ESSingleNodeTestCase {
public void testDynamicTrue() throws IOException {
String mapping = jsonBuilder().startObject().startObject("type")
.field("dynamic", "true")
.startObject("properties")
.startObject("field1").field("type", "string").endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
ParsedDocument doc = defaultMapper.parse("test", "type", "1", jsonBuilder()
.startObject()
.field("field1", "value1")
.field("field2", "value2")
.bytes());
assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
}
public void testDynamicFalse() throws IOException {
String mapping = jsonBuilder().startObject().startObject("type")
.field("dynamic", "false")
.startObject("properties")
.startObject("field1").field("type", "string").endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
ParsedDocument doc = defaultMapper.parse("test", "type", "1", jsonBuilder()
.startObject()
.field("field1", "value1")
.field("field2", "value2")
.bytes());
assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
assertThat(doc.rootDoc().get("field2"), nullValue());
}
public void testDynamicStrict() throws IOException {
String mapping = jsonBuilder().startObject().startObject("type")
.field("dynamic", "strict")
.startObject("properties")
.startObject("field1").field("type", "string").endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
try {
defaultMapper.parse("test", "type", "1", jsonBuilder()
.startObject()
.field("field1", "value1")
.field("field2", "value2")
.bytes());
fail();
} catch (StrictDynamicMappingException e) {
// all is well
}
try {
defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()
.field("field1", "value1")
.field("field2", (String) null)
.bytes());
fail();
} catch (StrictDynamicMappingException e) {
// all is well
}
}
public void testDynamicFalseWithInnerObjectButDynamicSetOnRoot() throws IOException {
String mapping = jsonBuilder().startObject().startObject("type")
.field("dynamic", "false")
.startObject("properties")
.startObject("obj1").startObject("properties")
.startObject("field1").field("type", "string").endObject()
.endObject().endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
ParsedDocument doc = defaultMapper.parse("test", "type", "1", jsonBuilder()
.startObject().startObject("obj1")
.field("field1", "value1")
.field("field2", "value2")
.endObject()
.bytes());
assertThat(doc.rootDoc().get("obj1.field1"), equalTo("value1"));
assertThat(doc.rootDoc().get("obj1.field2"), nullValue());
}
public void testDynamicStrictWithInnerObjectButDynamicSetOnRoot() throws IOException {
String mapping = jsonBuilder().startObject().startObject("type")
.field("dynamic", "strict")
.startObject("properties")
.startObject("obj1").startObject("properties")
.startObject("field1").field("type", "string").endObject()
.endObject().endObject()
.endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
try {
defaultMapper.parse("test", "type", "1", jsonBuilder()
.startObject().startObject("obj1")
.field("field1", "value1")
.field("field2", "value2")
.endObject()
.bytes());
fail();
} catch (StrictDynamicMappingException e) {
// all is well
}
}
public void testDynamicMappingOnEmptyString() throws Exception {
IndexService service = createIndex("test");
client().prepareIndex("test", "type").setSource("empty_field", "").get();
MappedFieldType fieldType = service.mapperService().fullName("empty_field");
assertNotNull(fieldType);
}
public void testTypeNotCreatedOnIndexFailure() throws IOException, InterruptedException {
XContentBuilder mapping = jsonBuilder().startObject().startObject("_default_")
.field("dynamic", "strict")
.endObject().endObject();
IndexService indexService = createIndex("test", Settings.EMPTY, "_default_", mapping);
try {
client().prepareIndex().setIndex("test").setType("type").setSource(jsonBuilder().startObject().field("test", "test").endObject()).get();
fail();
        } catch (StrictDynamicMappingException e) {
            // expected: the strict _default_ mapping rejects the unknown field
        }
GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
assertNull(getMappingsResponse.getMappings().get("test").get("type"));
}
private String serialize(ToXContent mapper) throws Exception {
XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
mapper.toXContent(builder, new ToXContent.MapParams(ImmutableMap.<String, String>of()));
return builder.endObject().string();
}
private Mapper parse(DocumentMapper mapper, DocumentMapperParser parser, XContentBuilder builder) throws Exception {
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
ParseContext.InternalParseContext ctx = new ParseContext.InternalParseContext(settings, parser, mapper, new ContentPath(0));
SourceToParse source = SourceToParse.source(builder.bytes());
ctx.reset(XContentHelper.createParser(source.source()), new ParseContext.Document(), source);
assertEquals(XContentParser.Token.START_OBJECT, ctx.parser().nextToken());
ctx.parser().nextToken();
return DocumentParser.parseObject(ctx, mapper.root());
}
public void testDynamicMappingsNotNeeded() throws Exception {
IndexService indexService = createIndex("test");
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("foo").field("type", "string").endObject().endObject()
.endObject().string();
DocumentMapper mapper = parser.parse(mapping);
Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject());
// foo is already defined in the mappings
assertNull(update);
}
public void testField() throws Exception {
IndexService indexService = createIndex("test");
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
String mapping = XContentFactory.jsonBuilder().startObject()
.startObject("type").endObject()
.endObject().string();
DocumentMapper mapper = parser.parse(mapping);
assertEquals(mapping, serialize(mapper));
Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject());
assertNotNull(update);
// original mapping not modified
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals("{\"type\":{\"properties\":{\"foo\":{\"type\":\"string\"}}}}", serialize(update));
}
public void testIncremental() throws Exception {
IndexService indexService = createIndex("test");
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
// Make sure that mapping updates are incremental, this is important for performance otherwise
// every new field introduction runs in linear time with the total number of fields
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("foo").field("type", "string").endObject().endObject()
.endObject().string();
DocumentMapper mapper = parser.parse(mapping);
assertEquals(mapping, serialize(mapper));
Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").field("bar", "baz").endObject());
assertNotNull(update);
// original mapping not modified
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
// foo is NOT in the update
.startObject("bar").field("type", "string").endObject()
.endObject().endObject().string(), serialize(update));
}
public void testIntroduceTwoFields() throws Exception {
IndexService indexService = createIndex("test");
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
String mapping = XContentFactory.jsonBuilder().startObject()
.startObject("type").endObject()
.endObject().string();
DocumentMapper mapper = parser.parse(mapping);
assertEquals(mapping, serialize(mapper));
Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").field("bar", "baz").endObject());
assertNotNull(update);
// original mapping not modified
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("bar").field("type", "string").endObject()
.startObject("foo").field("type", "string").endObject()
.endObject().endObject().string(), serialize(update));
}
public void testObject() throws Exception {
IndexService indexService = createIndex("test");
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
String mapping = XContentFactory.jsonBuilder().startObject()
.startObject("type").endObject()
.endObject().string();
DocumentMapper mapper = parser.parse(mapping);
assertEquals(mapping, serialize(mapper));
Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo").startObject("bar").field("baz", "foo").endObject().endObject().endObject());
assertNotNull(update);
// original mapping not modified
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "string").endObject().endObject().endObject().endObject().endObject()
.endObject().endObject().endObject().string(), serialize(update));
}
public void testArray() throws Exception {
IndexService indexService = createIndex("test");
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
String mapping = XContentFactory.jsonBuilder().startObject()
.startObject("type").endObject()
.endObject().string();
DocumentMapper mapper = parser.parse(mapping);
assertEquals(mapping, serialize(mapper));
Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startArray("foo").value("bar").value("baz").endArray().endObject());
assertNotNull(update);
// original mapping not modified
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("foo").field("type", "string").endObject()
.endObject().endObject().endObject().string(), serialize(update));
}
public void testInnerDynamicMapping() throws Exception {
IndexService indexService = createIndex("test");
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("foo").field("type", "object").endObject()
.endObject().endObject().endObject().string();
DocumentMapper mapper = parser.parse(mapping);
assertEquals(mapping, serialize(mapper));
Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo").startObject("bar").field("baz", "foo").endObject().endObject().endObject());
assertNotNull(update);
// original mapping not modified
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "string").endObject().endObject().endObject().endObject().endObject()
.endObject().endObject().endObject().string(), serialize(update));
}
public void testComplexArray() throws Exception {
IndexService indexService = createIndex("test");
DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
String mapping = XContentFactory.jsonBuilder().startObject()
.startObject("type").endObject()
.endObject().string();
DocumentMapper mapper = parser.parse(mapping);
assertEquals(mapping, serialize(mapper));
Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startArray("foo")
.startObject().field("bar", "baz").endObject()
.startObject().field("baz", 3).endObject()
.endArray().endObject());
assertEquals(mapping, serialize(mapper));
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("foo").startObject("properties")
.startObject("bar").field("type", "string").endObject()
.startObject("baz").field("type", "long").endObject()
.endObject().endObject()
.endObject().endObject().endObject().string(), serialize(update));
}
    public void testReuseExistingMappings() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY, "type", "my_field1", "type=string,store=yes", "my_field2", "type=integer,precision_step=10");
// Even if the dynamic type of our new field is long, we already have a mapping for the same field
// of type string so it should be mapped as a string
DocumentMapper newMapper = indexService.mapperService().documentMapperWithAutoCreate("type2").getDocumentMapper();
Mapper update = parse(newMapper, indexService.mapperService().documentMapperParser(),
XContentFactory.jsonBuilder().startObject().field("my_field1", 42).endObject());
Mapper myField1Mapper = null;
for (Mapper m : update) {
if (m.name().equals("my_field1")) {
myField1Mapper = m;
}
}
assertNotNull(myField1Mapper);
// same type
assertTrue(myField1Mapper instanceof StringFieldMapper);
// and same option
assertTrue(((StringFieldMapper) myField1Mapper).fieldType().stored());
// Even if dynamic mappings would map a numeric field as a long, here it should map it as an integer
// since we already have a mapping of type integer
update = parse(newMapper, indexService.mapperService().documentMapperParser(),
XContentFactory.jsonBuilder().startObject().field("my_field2", 42).endObject());
Mapper myField2Mapper = null;
for (Mapper m : update) {
if (m.name().equals("my_field2")) {
myField2Mapper = m;
}
}
assertNotNull(myField2Mapper);
// same type
assertTrue(myField2Mapper instanceof IntegerFieldMapper);
// and same option
assertEquals(10, ((IntegerFieldMapper) myField2Mapper).fieldType().numericPrecisionStep());
// This can't work
try {
parse(newMapper, indexService.mapperService().documentMapperParser(),
XContentFactory.jsonBuilder().startObject().field("my_field2", "foobar").endObject());
fail("Cannot succeed, incompatible types");
} catch (MapperParsingException e) {
// expected
}
}
}
| apache-2.0 |
sekys/ivda | web/libs/vis/node_modules/webpack/node_modules/node-libs-browser/node_modules/timers-browserify/README.md | 916 | # Overview
Adds support for the `timers` module to browserify.
## Wait, isn't it already supported in the browser?
The public methods of the `timers` module are:
* `setTimeout(callback, delay, [arg], [...])`
* `clearTimeout(timeoutId)`
* `setInterval(callback, delay, [arg], [...])`
* `clearInterval(intervalId)`
and indeed, browsers support these already.
## So, why does this exist?
The `timers` module also includes some private methods used in other built-in
Node.js modules:
* `enroll(item, delay)`
* `unenroll(item)`
* `active(item)`
These are used to efficiently support a large quantity of timers with the same
timeouts by creating only a few timers under the covers.
Node.js also offers the `immediate` APIs, which aren't yet available cross-browser, so we polyfill those:
* `setImmediate(callback, [arg], [...])`
* `clearImmediate(immediateId)`
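A quick usage sketch (the `Connection` type here is hypothetical; `_onTimeout`
is the callback hook the enroll machinery invokes):

```js
var timers = require('timers');

// An object that wants an idle timeout. When the timeout fires, the
// enroll machinery calls the object's _onTimeout() method.
function Connection() {}
Connection.prototype._onTimeout = function () {
  console.log('connection idle for 5 seconds');
};

var conn = new Connection();
timers.enroll(conn, 5000); // associate a 5 s timeout with the object
timers.active(conn);       // start the countdown

// On activity, push the deadline back out instead of re-creating a timer:
timers.active(conn);

// When done, detach the timeout entirely:
timers.unenroll(conn);

// The polyfilled immediate APIs behave like Node's:
var id = setImmediate(function () { console.log('ran "immediately"'); });
clearImmediate(id);
```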
# License
[MIT](http://jryans.mit-license.org/)
| mit |
vfalico/hydra-rpmsg | drivers/media/pci/cx25821/cx25821-core.c | 36572 | /*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <shu.lin@conexant.com>, <hiep.huynh@conexant.com>
* Based on Steven Toth <stoth@linuxtv.org> cx23885 driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/i2c.h>
#include <linux/slab.h>
#include "cx25821.h"
#include "cx25821-sram.h"
#include "cx25821-video.h"
MODULE_DESCRIPTION("Driver for Athena cards");
MODULE_AUTHOR("Shu Lin - Hiep Huynh");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
static unsigned int card[] = {[0 ... (CX25821_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
const struct sram_channel cx25821_sram_channels[] = {
[SRAM_CH00] = {
.i = SRAM_CH00,
.name = "VID A",
.cmds_start = VID_A_DOWN_CMDS,
.ctrl_start = VID_A_IQ,
.cdt = VID_A_CDT,
.fifo_start = VID_A_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA1_PTR1,
.ptr2_reg = DMA1_PTR2,
.cnt1_reg = DMA1_CNT1,
.cnt2_reg = DMA1_CNT2,
.int_msk = VID_A_INT_MSK,
.int_stat = VID_A_INT_STAT,
.int_mstat = VID_A_INT_MSTAT,
.dma_ctl = VID_DST_A_DMA_CTL,
.gpcnt_ctl = VID_DST_A_GPCNT_CTL,
.gpcnt = VID_DST_A_GPCNT,
.vip_ctl = VID_DST_A_VIP_CTL,
.pix_frmt = VID_DST_A_PIX_FRMT,
},
[SRAM_CH01] = {
.i = SRAM_CH01,
.name = "VID B",
.cmds_start = VID_B_DOWN_CMDS,
.ctrl_start = VID_B_IQ,
.cdt = VID_B_CDT,
.fifo_start = VID_B_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA2_PTR1,
.ptr2_reg = DMA2_PTR2,
.cnt1_reg = DMA2_CNT1,
.cnt2_reg = DMA2_CNT2,
.int_msk = VID_B_INT_MSK,
.int_stat = VID_B_INT_STAT,
.int_mstat = VID_B_INT_MSTAT,
.dma_ctl = VID_DST_B_DMA_CTL,
.gpcnt_ctl = VID_DST_B_GPCNT_CTL,
.gpcnt = VID_DST_B_GPCNT,
.vip_ctl = VID_DST_B_VIP_CTL,
.pix_frmt = VID_DST_B_PIX_FRMT,
},
[SRAM_CH02] = {
.i = SRAM_CH02,
.name = "VID C",
.cmds_start = VID_C_DOWN_CMDS,
.ctrl_start = VID_C_IQ,
.cdt = VID_C_CDT,
.fifo_start = VID_C_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA3_PTR1,
.ptr2_reg = DMA3_PTR2,
.cnt1_reg = DMA3_CNT1,
.cnt2_reg = DMA3_CNT2,
.int_msk = VID_C_INT_MSK,
.int_stat = VID_C_INT_STAT,
.int_mstat = VID_C_INT_MSTAT,
.dma_ctl = VID_DST_C_DMA_CTL,
.gpcnt_ctl = VID_DST_C_GPCNT_CTL,
.gpcnt = VID_DST_C_GPCNT,
.vip_ctl = VID_DST_C_VIP_CTL,
.pix_frmt = VID_DST_C_PIX_FRMT,
},
[SRAM_CH03] = {
.i = SRAM_CH03,
.name = "VID D",
.cmds_start = VID_D_DOWN_CMDS,
.ctrl_start = VID_D_IQ,
.cdt = VID_D_CDT,
.fifo_start = VID_D_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA4_PTR1,
.ptr2_reg = DMA4_PTR2,
.cnt1_reg = DMA4_CNT1,
.cnt2_reg = DMA4_CNT2,
.int_msk = VID_D_INT_MSK,
.int_stat = VID_D_INT_STAT,
.int_mstat = VID_D_INT_MSTAT,
.dma_ctl = VID_DST_D_DMA_CTL,
.gpcnt_ctl = VID_DST_D_GPCNT_CTL,
.gpcnt = VID_DST_D_GPCNT,
.vip_ctl = VID_DST_D_VIP_CTL,
.pix_frmt = VID_DST_D_PIX_FRMT,
},
[SRAM_CH04] = {
.i = SRAM_CH04,
.name = "VID E",
.cmds_start = VID_E_DOWN_CMDS,
.ctrl_start = VID_E_IQ,
.cdt = VID_E_CDT,
.fifo_start = VID_E_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA5_PTR1,
.ptr2_reg = DMA5_PTR2,
.cnt1_reg = DMA5_CNT1,
.cnt2_reg = DMA5_CNT2,
.int_msk = VID_E_INT_MSK,
.int_stat = VID_E_INT_STAT,
.int_mstat = VID_E_INT_MSTAT,
.dma_ctl = VID_DST_E_DMA_CTL,
.gpcnt_ctl = VID_DST_E_GPCNT_CTL,
.gpcnt = VID_DST_E_GPCNT,
.vip_ctl = VID_DST_E_VIP_CTL,
.pix_frmt = VID_DST_E_PIX_FRMT,
},
[SRAM_CH05] = {
.i = SRAM_CH05,
.name = "VID F",
.cmds_start = VID_F_DOWN_CMDS,
.ctrl_start = VID_F_IQ,
.cdt = VID_F_CDT,
.fifo_start = VID_F_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA6_PTR1,
.ptr2_reg = DMA6_PTR2,
.cnt1_reg = DMA6_CNT1,
.cnt2_reg = DMA6_CNT2,
.int_msk = VID_F_INT_MSK,
.int_stat = VID_F_INT_STAT,
.int_mstat = VID_F_INT_MSTAT,
.dma_ctl = VID_DST_F_DMA_CTL,
.gpcnt_ctl = VID_DST_F_GPCNT_CTL,
.gpcnt = VID_DST_F_GPCNT,
.vip_ctl = VID_DST_F_VIP_CTL,
.pix_frmt = VID_DST_F_PIX_FRMT,
},
[SRAM_CH06] = {
.i = SRAM_CH06,
.name = "VID G",
.cmds_start = VID_G_DOWN_CMDS,
.ctrl_start = VID_G_IQ,
.cdt = VID_G_CDT,
.fifo_start = VID_G_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA7_PTR1,
.ptr2_reg = DMA7_PTR2,
.cnt1_reg = DMA7_CNT1,
.cnt2_reg = DMA7_CNT2,
.int_msk = VID_G_INT_MSK,
.int_stat = VID_G_INT_STAT,
.int_mstat = VID_G_INT_MSTAT,
.dma_ctl = VID_DST_G_DMA_CTL,
.gpcnt_ctl = VID_DST_G_GPCNT_CTL,
.gpcnt = VID_DST_G_GPCNT,
.vip_ctl = VID_DST_G_VIP_CTL,
.pix_frmt = VID_DST_G_PIX_FRMT,
},
[SRAM_CH07] = {
.i = SRAM_CH07,
.name = "VID H",
.cmds_start = VID_H_DOWN_CMDS,
.ctrl_start = VID_H_IQ,
.cdt = VID_H_CDT,
.fifo_start = VID_H_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA8_PTR1,
.ptr2_reg = DMA8_PTR2,
.cnt1_reg = DMA8_CNT1,
.cnt2_reg = DMA8_CNT2,
.int_msk = VID_H_INT_MSK,
.int_stat = VID_H_INT_STAT,
.int_mstat = VID_H_INT_MSTAT,
.dma_ctl = VID_DST_H_DMA_CTL,
.gpcnt_ctl = VID_DST_H_GPCNT_CTL,
.gpcnt = VID_DST_H_GPCNT,
.vip_ctl = VID_DST_H_VIP_CTL,
.pix_frmt = VID_DST_H_PIX_FRMT,
},
[SRAM_CH08] = {
.name = "audio from",
.cmds_start = AUD_A_DOWN_CMDS,
.ctrl_start = AUD_A_IQ,
.cdt = AUD_A_CDT,
.fifo_start = AUD_A_DOWN_CLUSTER_1,
.fifo_size = AUDIO_CLUSTER_SIZE * 3,
.ptr1_reg = DMA17_PTR1,
.ptr2_reg = DMA17_PTR2,
.cnt1_reg = DMA17_CNT1,
.cnt2_reg = DMA17_CNT2,
},
[SRAM_CH09] = {
.i = SRAM_CH09,
.name = "VID Upstream I",
.cmds_start = VID_I_UP_CMDS,
.ctrl_start = VID_I_IQ,
.cdt = VID_I_CDT,
.fifo_start = VID_I_UP_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA15_PTR1,
.ptr2_reg = DMA15_PTR2,
.cnt1_reg = DMA15_CNT1,
.cnt2_reg = DMA15_CNT2,
.int_msk = VID_I_INT_MSK,
.int_stat = VID_I_INT_STAT,
.int_mstat = VID_I_INT_MSTAT,
.dma_ctl = VID_SRC_I_DMA_CTL,
.gpcnt_ctl = VID_SRC_I_GPCNT_CTL,
.gpcnt = VID_SRC_I_GPCNT,
.vid_fmt_ctl = VID_SRC_I_FMT_CTL,
.vid_active_ctl1 = VID_SRC_I_ACTIVE_CTL1,
.vid_active_ctl2 = VID_SRC_I_ACTIVE_CTL2,
.vid_cdt_size = VID_SRC_I_CDT_SZ,
.irq_bit = 8,
},
[SRAM_CH10] = {
.i = SRAM_CH10,
.name = "VID Upstream J",
.cmds_start = VID_J_UP_CMDS,
.ctrl_start = VID_J_IQ,
.cdt = VID_J_CDT,
.fifo_start = VID_J_UP_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA16_PTR1,
.ptr2_reg = DMA16_PTR2,
.cnt1_reg = DMA16_CNT1,
.cnt2_reg = DMA16_CNT2,
.int_msk = VID_J_INT_MSK,
.int_stat = VID_J_INT_STAT,
.int_mstat = VID_J_INT_MSTAT,
.dma_ctl = VID_SRC_J_DMA_CTL,
.gpcnt_ctl = VID_SRC_J_GPCNT_CTL,
.gpcnt = VID_SRC_J_GPCNT,
.vid_fmt_ctl = VID_SRC_J_FMT_CTL,
.vid_active_ctl1 = VID_SRC_J_ACTIVE_CTL1,
.vid_active_ctl2 = VID_SRC_J_ACTIVE_CTL2,
.vid_cdt_size = VID_SRC_J_CDT_SZ,
.irq_bit = 9,
},
[SRAM_CH11] = {
.i = SRAM_CH11,
.name = "Audio Upstream Channel B",
.cmds_start = AUD_B_UP_CMDS,
.ctrl_start = AUD_B_IQ,
.cdt = AUD_B_CDT,
.fifo_start = AUD_B_UP_CLUSTER_1,
.fifo_size = (AUDIO_CLUSTER_SIZE * 3),
.ptr1_reg = DMA22_PTR1,
.ptr2_reg = DMA22_PTR2,
.cnt1_reg = DMA22_CNT1,
.cnt2_reg = DMA22_CNT2,
.int_msk = AUD_B_INT_MSK,
.int_stat = AUD_B_INT_STAT,
.int_mstat = AUD_B_INT_MSTAT,
.dma_ctl = AUD_INT_DMA_CTL,
.gpcnt_ctl = AUD_B_GPCNT_CTL,
.gpcnt = AUD_B_GPCNT,
.aud_length = AUD_B_LNGTH,
.aud_cfg = AUD_B_CFG,
.fld_aud_fifo_en = FLD_AUD_SRC_B_FIFO_EN,
.fld_aud_risc_en = FLD_AUD_SRC_B_RISC_EN,
.irq_bit = 11,
},
};
EXPORT_SYMBOL(cx25821_sram_channels);
static int cx25821_risc_decode(u32 risc)
{
static const char * const instr[16] = {
[RISC_SYNC >> 28] = "sync",
[RISC_WRITE >> 28] = "write",
[RISC_WRITEC >> 28] = "writec",
[RISC_READ >> 28] = "read",
[RISC_READC >> 28] = "readc",
[RISC_JUMP >> 28] = "jump",
[RISC_SKIP >> 28] = "skip",
[RISC_WRITERM >> 28] = "writerm",
[RISC_WRITECM >> 28] = "writecm",
[RISC_WRITECR >> 28] = "writecr",
};
static const int incr[16] = {
[RISC_WRITE >> 28] = 3,
[RISC_JUMP >> 28] = 3,
[RISC_SKIP >> 28] = 1,
[RISC_SYNC >> 28] = 1,
[RISC_WRITERM >> 28] = 3,
[RISC_WRITECM >> 28] = 3,
[RISC_WRITECR >> 28] = 4,
};
static const char * const bits[] = {
"12", "13", "14", "resync",
"cnt0", "cnt1", "18", "19",
"20", "21", "22", "23",
"irq1", "irq2", "eol", "sol",
};
int i;
pr_cont("0x%08x [ %s",
risc, instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--) {
if (risc & (1 << (i + 12)))
pr_cont(" %s", bits[i]);
}
pr_cont(" count=%d ]\n", risc & 0xfff);
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
static inline int i2c_slave_did_ack(struct i2c_adapter *i2c_adap)
{
struct cx25821_i2c *bus = i2c_adap->algo_data;
struct cx25821_dev *dev = bus->dev;
return cx_read(bus->reg_stat) & 0x01;
}
static void cx25821_registers_init(struct cx25821_dev *dev)
{
u32 tmp;
/* enable RUN_RISC in Pecos */
cx_write(DEV_CNTRL2, 0x20);
/* Set the master PCI interrupt masks to enable video, audio, MBIF,
* and GPIO interrupts
* I2C interrupt masking is handled by the I2C objects themselves. */
cx_write(PCI_INT_MSK, 0x2001FFFF);
tmp = cx_read(RDR_TLCTL0);
tmp &= ~FLD_CFG_RCB_CK_EN; /* Clear the RCB_CK_EN bit */
cx_write(RDR_TLCTL0, tmp);
/* PLL-A setting for the Audio Master Clock */
cx_write(PLL_A_INT_FRAC, 0x9807A58B);
/* PLL_A_POST = 0x1C, PLL_A_OUT_TO_PIN = 0x1 */
cx_write(PLL_A_POST_STAT_BIST, 0x8000019C);
/* clear reset bit [31] */
tmp = cx_read(PLL_A_INT_FRAC);
cx_write(PLL_A_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-B setting for Mobilygen Host Bus Interface */
cx_write(PLL_B_INT_FRAC, 0x9883A86F);
/* PLL_B_POST = 0xD, PLL_B_OUT_TO_PIN = 0x0 */
cx_write(PLL_B_POST_STAT_BIST, 0x8000018D);
/* clear reset bit [31] */
tmp = cx_read(PLL_B_INT_FRAC);
cx_write(PLL_B_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-C setting for video upstream channel */
cx_write(PLL_C_INT_FRAC, 0x96A0EA3F);
/* PLL_C_POST = 0x3, PLL_C_OUT_TO_PIN = 0x0 */
cx_write(PLL_C_POST_STAT_BIST, 0x80000103);
/* clear reset bit [31] */
tmp = cx_read(PLL_C_INT_FRAC);
cx_write(PLL_C_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-D setting for audio upstream channel */
cx_write(PLL_D_INT_FRAC, 0x98757F5B);
/* PLL_D_POST = 0x13, PLL_D_OUT_TO_PIN = 0x0 */
cx_write(PLL_D_POST_STAT_BIST, 0x80000113);
/* clear reset bit [31] */
tmp = cx_read(PLL_D_INT_FRAC);
cx_write(PLL_D_INT_FRAC, tmp & 0x7FFFFFFF);
/* This selects the PLL C clock source for the video upstream channel
* I and J */
tmp = cx_read(VID_CH_CLK_SEL);
cx_write(VID_CH_CLK_SEL, (tmp & 0x00FFFFFF) | 0x24000000);
/* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
* channel A-C
* select 656/VIP DST for downstream Channel A - C */
tmp = cx_read(VID_CH_MODE_SEL);
/* cx_write( VID_CH_MODE_SEL, tmp | 0x1B0001FF); */
cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00);
/* enables 656 port I and J as output */
tmp = cx_read(CLK_RST);
/* use external ALT_PLL_REF pin as its reference clock instead */
tmp |= FLD_USE_ALT_PLL_REF;
cx_write(CLK_RST, tmp & ~(FLD_VID_I_CLK_NOE | FLD_VID_J_CLK_NOE));
mdelay(100);
}
int cx25821_sram_channel_setup(struct cx25821_dev *dev,
const struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
unsigned int i, lines;
u32 cdt;
if (ch->cmds_start == 0) {
cx_write(ch->ptr1_reg, 0);
cx_write(ch->ptr2_reg, 0);
cx_write(ch->cnt2_reg, 0);
cx_write(ch->cnt1_reg, 0);
return 0;
}
bpl = (bpl + 7) & ~7; /* alignment */
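/* e.g. (illustrative) an odd bpl of 1441 rounds up to 1448, the next
 * multiple of 8, so each FIFO line stays 8-byte aligned */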
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
if (lines > 4)
lines = 4;
BUG_ON(lines < 2);
cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
cx_write(8 + 4, 8);
cx_write(8 + 8, 0);
/* write CDT */
for (i = 0; i < lines; i++) {
cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
cx_write(cdt + 16 * i + 4, 0);
cx_write(cdt + 16 * i + 8, 0);
cx_write(cdt + 16 * i + 12, 0);
}
/* init the first cdt buffer */
for (i = 0; i < 128; i++)
cx_write(ch->fifo_start + 4 * i, i);
/* write CMDS */
if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
else
cx_write(ch->cmds_start + 0, risc);
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
if (ch->jumponly)
cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
else
cx_write(ch->cmds_start + 20, 64 >> 2);
for (i = 24; i < 80; i += 4)
cx_write(ch->cmds_start + i, 0);
/* fill registers */
cx_write(ch->ptr1_reg, ch->fifo_start);
cx_write(ch->ptr2_reg, cdt);
cx_write(ch->cnt2_reg, (lines * 16) >> 3);
cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
return 0;
}
int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
const struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
unsigned int i, lines;
u32 cdt;
if (ch->cmds_start == 0) {
cx_write(ch->ptr1_reg, 0);
cx_write(ch->ptr2_reg, 0);
cx_write(ch->cnt2_reg, 0);
cx_write(ch->cnt1_reg, 0);
return 0;
}
bpl = (bpl + 7) & ~7; /* alignment */
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
if (lines > 3)
lines = 3; /* for AUDIO */
BUG_ON(lines < 2);
cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
cx_write(8 + 4, 8);
cx_write(8 + 8, 0);
/* write CDT */
for (i = 0; i < lines; i++) {
cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
cx_write(cdt + 16 * i + 4, 0);
cx_write(cdt + 16 * i + 8, 0);
cx_write(cdt + 16 * i + 12, 0);
}
/* write CMDS */
if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
else
cx_write(ch->cmds_start + 0, risc);
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
/* IQ size */
if (ch->jumponly)
cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
else
cx_write(ch->cmds_start + 20, 64 >> 2);
/* zero out */
for (i = 24; i < 80; i += 4)
cx_write(ch->cmds_start + i, 0);
/* fill registers */
cx_write(ch->ptr1_reg, ch->fifo_start);
cx_write(ch->ptr2_reg, cdt);
cx_write(ch->cnt2_reg, (lines * 16) >> 3);
cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
return 0;
}
EXPORT_SYMBOL(cx25821_sram_channel_setup_audio);
void cx25821_sram_channel_dump(struct cx25821_dev *dev, const struct sram_channel *ch)
{
static char *name[] = {
"init risc lo",
"init risc hi",
"cdt base",
"cdt size",
"iq base",
"iq size",
"risc pc lo",
"risc pc hi",
"iq wr ptr",
"iq rd ptr",
"cdt current",
"pci target lo",
"pci target hi",
"line / byte",
};
u32 risc;
unsigned int i, j, n;
pr_warn("%s: %s - dma channel status dump\n", dev->name, ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
pr_warn("cmds + 0x%2x: %-15s: 0x%08x\n",
i * 4, name[i], cx_read(ch->cmds_start + 4 * i));
j = i * 4;
for (i = 0; i < 4;) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
pr_warn("cmds + 0x%2x: risc%d: ", j + i * 4, i);
i += cx25821_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
pr_warn("ctrl + 0x%2x (0x%08x): iq %x: ",
i * 4, ch->ctrl_start + 4 * i, i);
n = cx25821_risc_decode(risc);
for (j = 1; j < n; j++) {
risc = cx_read(ch->ctrl_start + 4 * (i + j));
pr_warn("ctrl + 0x%2x : iq %x: 0x%08x [ arg #%d ]\n",
4 * (i + j), i + j, risc, j);
}
}
pr_warn(" : fifo: 0x%08x -> 0x%x\n",
ch->fifo_start, ch->fifo_start + ch->fifo_size);
pr_warn(" : ctrl: 0x%08x -> 0x%x\n",
ch->ctrl_start, ch->ctrl_start + 6 * 16);
pr_warn(" : ptr1_reg: 0x%08x\n",
cx_read(ch->ptr1_reg));
pr_warn(" : ptr2_reg: 0x%08x\n",
cx_read(ch->ptr2_reg));
pr_warn(" : cnt1_reg: 0x%08x\n",
cx_read(ch->cnt1_reg));
pr_warn(" : cnt2_reg: 0x%08x\n",
cx_read(ch->cnt2_reg));
}
void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
const struct sram_channel *ch)
{
static const char * const name[] = {
"init risc lo",
"init risc hi",
"cdt base",
"cdt size",
"iq base",
"iq size",
"risc pc lo",
"risc pc hi",
"iq wr ptr",
"iq rd ptr",
"cdt current",
"pci target lo",
"pci target hi",
"line / byte",
};
u32 risc, value, tmp;
unsigned int i, j, n;
pr_info("\n%s: %s - dma Audio channel status dump\n",
dev->name, ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
pr_info("%s: cmds + 0x%2x: %-15s: 0x%08x\n",
dev->name, i * 4, name[i],
cx_read(ch->cmds_start + 4 * i));
j = i * 4;
for (i = 0; i < 4;) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
pr_warn("cmds + 0x%2x: risc%d: ", j + i * 4, i);
i += cx25821_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
pr_warn("ctrl + 0x%2x (0x%08x): iq %x: ",
i * 4, ch->ctrl_start + 4 * i, i);
n = cx25821_risc_decode(risc);
for (j = 1; j < n; j++) {
risc = cx_read(ch->ctrl_start + 4 * (i + j));
pr_warn("ctrl + 0x%2x : iq %x: 0x%08x [ arg #%d ]\n",
4 * (i + j), i + j, risc, j);
}
}
pr_warn(" : fifo: 0x%08x -> 0x%x\n",
ch->fifo_start, ch->fifo_start + ch->fifo_size);
pr_warn(" : ctrl: 0x%08x -> 0x%x\n",
ch->ctrl_start, ch->ctrl_start + 6 * 16);
pr_warn(" : ptr1_reg: 0x%08x\n",
cx_read(ch->ptr1_reg));
pr_warn(" : ptr2_reg: 0x%08x\n",
cx_read(ch->ptr2_reg));
pr_warn(" : cnt1_reg: 0x%08x\n",
cx_read(ch->cnt1_reg));
pr_warn(" : cnt2_reg: 0x%08x\n",
cx_read(ch->cnt2_reg));
for (i = 0; i < 4; i++) {
risc = cx_read(ch->cmds_start + 56 + (i * 4));
pr_warn("instruction %d = 0x%x\n", i, risc);
}
/* read data from the first cdt buffer */
risc = cx_read(AUD_A_CDT);
pr_warn("\nread cdt loc=0x%x\n", risc);
for (i = 0; i < 8; i++) {
n = cx_read(risc + i * 4);
pr_cont("0x%x ", n);
}
pr_cont("\n\n");
value = cx_read(CLK_RST);
CX25821_INFO(" CLK_RST = 0x%x\n\n", value);
value = cx_read(PLL_A_POST_STAT_BIST);
CX25821_INFO(" PLL_A_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_A_INT_FRAC);
CX25821_INFO(" PLL_A_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_B_POST_STAT_BIST);
CX25821_INFO(" PLL_B_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_B_INT_FRAC);
CX25821_INFO(" PLL_B_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_C_POST_STAT_BIST);
CX25821_INFO(" PLL_C_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_C_INT_FRAC);
CX25821_INFO(" PLL_C_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_D_POST_STAT_BIST);
CX25821_INFO(" PLL_D_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_D_INT_FRAC);
CX25821_INFO(" PLL_D_INT_FRAC = 0x%x\n\n", value);
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
CX25821_INFO(" AFE_AB_DIAG_CTRL (0x10900090) = 0x%x\n\n", value);
}
EXPORT_SYMBOL(cx25821_sram_channel_dump_audio);
static void cx25821_shutdown(struct cx25821_dev *dev)
{
int i;
/* disable RISC controller */
cx_write(DEV_CNTRL2, 0);
/* Disable Video A/B activity */
for (i = 0; i < VID_CHANNEL_NUM; i++) {
cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
cx_write(dev->channels[i].sram_channels->int_msk, 0);
}
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= VID_UPSTREAM_SRAM_CHANNEL_J; i++) {
cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
cx_write(dev->channels[i].sram_channels->int_msk, 0);
}
/* Disable Audio activity */
cx_write(AUD_INT_DMA_CTL, 0);
/* Disable Serial port */
cx_write(UART_CTL, 0);
/* Disable Interrupts */
cx_write(PCI_INT_MSK, 0);
cx_write(AUD_A_INT_MSK, 0);
}
void cx25821_set_pixel_format(struct cx25821_dev *dev, int channel_select,
u32 format)
{
if (channel_select <= 7 && channel_select >= 0) {
cx_write(dev->channels[channel_select].sram_channels->pix_frmt,
format);
/* only record the format for a valid channel index; this also
 * avoids an out-of-bounds write when channel_select is bogus */
dev->channels[channel_select].pixel_formats = format;
}
}
static void cx25821_set_vip_mode(struct cx25821_dev *dev,
const struct sram_channel *ch)
{
cx_write(ch->pix_frmt, PIXEL_FRMT_422);
cx_write(ch->vip_ctl, PIXEL_ENGINE_VIP1);
}
static void cx25821_initialize(struct cx25821_dev *dev)
{
int i;
dprintk(1, "%s()\n", __func__);
cx25821_shutdown(dev);
cx_write(PCI_INT_STAT, 0xffffffff);
for (i = 0; i < VID_CHANNEL_NUM; i++)
cx_write(dev->channels[i].sram_channels->int_stat, 0xffffffff);
cx_write(AUD_A_INT_STAT, 0xffffffff);
cx_write(AUD_B_INT_STAT, 0xffffffff);
cx_write(AUD_C_INT_STAT, 0xffffffff);
cx_write(AUD_D_INT_STAT, 0xffffffff);
cx_write(AUD_E_INT_STAT, 0xffffffff);
cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
cx_write(PAD_CTRL, 0x12); /* for I2C */
cx25821_registers_init(dev); /* init Pecos registers */
mdelay(100);
for (i = 0; i < VID_CHANNEL_NUM; i++) {
cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
cx25821_sram_channel_setup(dev, dev->channels[i].sram_channels,
1440, 0);
dev->channels[i].pixel_formats = PIXEL_FRMT_422;
dev->channels[i].use_cif_resolution = 0;
}
/* Probably only affect Downstream */
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= VID_UPSTREAM_SRAM_CHANNEL_J; i++) {
dev->channels[i].pixel_formats = PIXEL_FRMT_422;
cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
}
cx25821_sram_channel_setup_audio(dev,
dev->channels[SRAM_CH08].sram_channels, 128, 0);
cx25821_gpio_init(dev);
}
static int cx25821_get_resources(struct cx25821_dev *dev)
{
if (request_mem_region(pci_resource_start(dev->pci, 0),
pci_resource_len(dev->pci, 0), dev->name))
return 0;
pr_err("%s: can't get MMIO memory @ 0x%llx\n",
dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
return -EBUSY;
}
static void cx25821_dev_checkrevision(struct cx25821_dev *dev)
{
dev->hwrevision = cx_read(RDR_CFG2) & 0xff;
pr_info("Hardware revision = 0x%02x\n", dev->hwrevision);
}
static void cx25821_iounmap(struct cx25821_dev *dev)
{
if (dev == NULL)
return;
/* Releasing IO memory */
if (dev->lmmio != NULL) {
iounmap(dev->lmmio);
dev->lmmio = NULL;
}
}
static int cx25821_dev_setup(struct cx25821_dev *dev)
{
static unsigned int cx25821_devcount;
int i;
mutex_init(&dev->lock);
dev->nr = ++cx25821_devcount;
sprintf(dev->name, "cx25821[%d]", dev->nr);
if (dev->pci->device != 0x8210) {
pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
__func__, dev->pci->device);
return -1;
} else {
pr_info("Athena Hardware device = 0x%02x\n", dev->pci->device);
}
/* Apply a sensible clock frequency for the PCIe bridge */
dev->clk_freq = 28000000;
for (i = 0; i < MAX_VID_CHANNEL_NUM; i++) {
dev->channels[i].dev = dev;
dev->channels[i].id = i;
dev->channels[i].sram_channels = &cx25821_sram_channels[i];
}
if (dev->nr > 1)
CX25821_INFO("dev->nr > 1!");
/* board config */
dev->board = 1; /* card[dev->nr]; */
dev->_max_num_decoders = MAX_DECODERS;
dev->pci_bus = dev->pci->bus->number;
dev->pci_slot = PCI_SLOT(dev->pci->devfn);
dev->pci_irqmask = 0x001f00;
/* External Master 1 Bus */
dev->i2c_bus[0].nr = 0;
dev->i2c_bus[0].dev = dev;
dev->i2c_bus[0].reg_stat = I2C1_STAT;
dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
dev->i2c_bus[0].reg_addr = I2C1_ADDR;
dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
dev->i2c_bus[0].i2c_period = (0x07 << 24); /* 1.95MHz */
if (cx25821_get_resources(dev) < 0) {
pr_err("%s: No more PCIe resources for subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device);
cx25821_devcount--;
return -EBUSY;
}
/* PCIe stuff */
dev->base_io_addr = pci_resource_start(dev->pci, 0);
if (!dev->base_io_addr) {
CX25821_ERR("No PCI Memory resources, exiting!\n");
return -ENODEV;
}
dev->lmmio = ioremap(dev->base_io_addr, pci_resource_len(dev->pci, 0));
if (!dev->lmmio) {
CX25821_ERR("ioremap failed, maybe increasing __VMALLOC_RESERVE in page.h\n");
cx25821_iounmap(dev);
return -ENOMEM;
}
dev->bmmio = (u8 __iomem *) dev->lmmio;
pr_info("%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device, cx25821_boards[dev->board].name,
dev->board, card[dev->nr] == dev->board ?
"insmod option" : "autodetected");
/* init hardware */
cx25821_initialize(dev);
cx25821_i2c_register(&dev->i2c_bus[0]);
/* cx25821_i2c_register(&dev->i2c_bus[1]);
* cx25821_i2c_register(&dev->i2c_bus[2]); */
if (medusa_video_init(dev) < 0)
CX25821_ERR("%s(): Failed to initialize medusa!\n", __func__);
cx25821_video_register(dev);
cx25821_dev_checkrevision(dev);
return 0;
}
void cx25821_dev_unregister(struct cx25821_dev *dev)
{
int i;
if (!dev->base_io_addr)
return;
release_mem_region(dev->base_io_addr, pci_resource_len(dev->pci, 0));
for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; i++) {
if (i == SRAM_CH08) /* audio channel */
continue;
if (i == SRAM_CH09 || i == SRAM_CH10)
cx25821_free_mem_upstream(&dev->channels[i]);
cx25821_video_unregister(dev, i);
}
cx25821_i2c_unregister(&dev->i2c_bus[0]);
cx25821_iounmap(dev);
}
EXPORT_SYMBOL(cx25821_dev_unregister);
static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
unsigned int lines)
{
struct scatterlist *sg;
unsigned int line, todo;
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
/* scan lines */
sg = sglist;
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
sg = sg_next(sg);
}
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL | RISC_EOL |
bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL |
(sg_dma_len(sg) - offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
offset = 0;
sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
sg = sg_next(sg);
}
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += todo;
}
offset += padding;
}
return rp;
}
int cx25821_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
struct scatterlist *sglist, unsigned int top_offset,
unsigned int bottom_offset, unsigned int bpl,
unsigned int padding, unsigned int lines)
{
u32 instructions;
u32 fields;
__le32 *rp;
int rc;
fields = 0;
if (UNSET != top_offset)
fields++;
if (UNSET != bottom_offset)
fields++;
/* estimate risc mem: worst case is one write per page border +
one write per scan line + syncs + jump (all 2 dwords). Padding
can cause next bpl to start close to a page border. First DMA
region may be smaller than PAGE_SIZE */
/* write and jump need an extra dword */
instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE +
lines);
instructions += 2;
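/*
 * Worked example (illustrative): with bpl = 720, padding = 0,
 * lines = 240 and PAGE_SIZE = 4096, one field needs
 * 1 + (720 * 240) / 4096 + 240 = 283 instructions, so two fields
 * plus the final sync/jump give 2 * 283 + 2 = 568 slots of 12 bytes.
 */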
rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
if (rc < 0)
return rc;
/* write risc instructions */
rp = risc->cpu;
if (UNSET != top_offset) {
rp = cx25821_risc_field(rp, sglist, top_offset, 0, bpl, padding,
lines);
}
if (UNSET != bottom_offset) {
rp = cx25821_risc_field(rp, sglist, bottom_offset, 0x200, bpl,
padding, lines);
}
/* save pointer to jmp instruction address */
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
unsigned int lines, unsigned int lpi)
{
struct scatterlist *sg;
unsigned int line, todo, sol;
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
/* scan lines */
sg = sglist;
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
sg = sg_next(sg);
}
if (lpi && line > 0 && !(line % lpi))
sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
else
sol = RISC_SOL;
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
*(rp++) = cpu_to_le32(RISC_WRITE | sol | RISC_EOL |
bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
*(rp++) = cpu_to_le32(RISC_WRITE | sol |
(sg_dma_len(sg) - offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
offset = 0;
sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
sg = sg_next(sg);
}
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += todo;
}
offset += padding;
}
return rp;
}
int cx25821_risc_databuffer_audio(struct pci_dev *pci,
struct btcx_riscmem *risc,
struct scatterlist *sglist,
unsigned int bpl,
unsigned int lines, unsigned int lpi)
{
u32 instructions;
__le32 *rp;
int rc;
/* estimate risc mem: worst case is one write per page border +
one write per scan line + syncs + jump (all 2 dwords). Here
there is no padding and no sync. First DMA region may be smaller
than PAGE_SIZE */
/* Jump and write need an extra dword */
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
instructions += 1;
rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
if (rc < 0)
return rc;
/* write risc instructions */
rp = risc->cpu;
rp = cx25821_risc_field_audio(rp, sglist, 0, NO_SYNC_LINE, bpl, 0,
lines, lpi);
/* save pointer to jmp instruction address */
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
EXPORT_SYMBOL(cx25821_risc_databuffer_audio);
int cx25821_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
u32 reg, u32 mask, u32 value)
{
__le32 *rp;
int rc;
rc = btcx_riscmem_alloc(pci, risc, 4 * 16);
if (rc < 0)
return rc;
/* write risc instructions */
rp = risc->cpu;
*(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ1);
*(rp++) = cpu_to_le32(reg);
*(rp++) = cpu_to_le32(value);
*(rp++) = cpu_to_le32(mask);
*(rp++) = cpu_to_le32(RISC_JUMP);
*(rp++) = cpu_to_le32(risc->dma);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
return 0;
}
void cx25821_free_buffer(struct videobuf_queue *q, struct cx25821_buffer *buf)
{
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
static irqreturn_t cx25821_irq(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
u32 pci_status;
u32 vid_status;
int i, handled = 0;
u32 mask[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
pci_status = cx_read(PCI_INT_STAT);
if (pci_status == 0)
goto out;
for (i = 0; i < VID_CHANNEL_NUM; i++) {
if (pci_status & mask[i]) {
vid_status = cx_read(dev->channels[i].
sram_channels->int_stat);
if (vid_status)
handled += cx25821_video_irq(dev, i,
vid_status);
cx_write(PCI_INT_STAT, mask[i]);
}
}
out:
return IRQ_RETVAL(handled);
}
void cx25821_print_irqbits(char *name, char *tag, char **strings,
int len, u32 bits, u32 mask)
{
unsigned int i;
printk(KERN_DEBUG pr_fmt("%s: %s [0x%x]"), name, tag, bits);
for (i = 0; i < len; i++) {
if (!(bits & (1 << i)))
continue;
if (strings[i])
pr_cont(" %s", strings[i]);
else
pr_cont(" %d", i);
if (!(mask & (1 << i)))
continue;
pr_cont("*");
}
pr_cont("\n");
}
EXPORT_SYMBOL(cx25821_print_irqbits);
struct cx25821_dev *cx25821_dev_get(struct pci_dev *pci)
{
struct cx25821_dev *dev = pci_get_drvdata(pci);
return dev;
}
EXPORT_SYMBOL(cx25821_dev_get);
static int cx25821_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
struct cx25821_dev *dev;
int err = 0;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (NULL == dev)
return -ENOMEM;
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err < 0)
goto fail_free;
/* pci init */
dev->pci = pci_dev;
if (pci_enable_device(pci_dev)) {
err = -EIO;
pr_info("pci enable failed!\n");
goto fail_unregister_device;
}
err = cx25821_dev_setup(dev);
if (err) {
if (err == -EBUSY)
goto fail_unregister_device;
else
goto fail_unregister_pci;
}
/* print pci info */
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat, (unsigned long long)dev->base_io_addr);
pci_set_master(pci_dev);
if (!pci_dma_supported(pci_dev, 0xffffffff)) {
pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
err = -EIO;
goto fail_irq;
}
err = request_irq(pci_dev->irq, cx25821_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n", dev->name, pci_dev->irq);
goto fail_irq;
}
return 0;
fail_irq:
pr_info("cx25821_initdev() can't get IRQ !\n");
cx25821_dev_unregister(dev);
fail_unregister_pci:
pci_disable_device(pci_dev);
fail_unregister_device:
v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
kfree(dev);
return err;
}
static void cx25821_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct cx25821_dev *dev = get_cx25821(v4l2_dev);
cx25821_shutdown(dev);
pci_disable_device(pci_dev);
/* unregister stuff */
if (pci_dev->irq)
free_irq(pci_dev->irq, dev);
cx25821_dev_unregister(dev);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
}
static const struct pci_device_id cx25821_pci_tbl[] = {
{
/* CX25821 Athena */
.vendor = 0x14f1,
.device = 0x8210,
.subvendor = 0x14f1,
.subdevice = 0x0920,
}, {
/* CX25821 No Brand */
.vendor = 0x14f1,
.device = 0x8210,
.subvendor = 0x0000,
.subdevice = 0x0000,
}, {
/* --- end of list --- */
}
};
MODULE_DEVICE_TABLE(pci, cx25821_pci_tbl);
static struct pci_driver cx25821_pci_driver = {
.name = "cx25821",
.id_table = cx25821_pci_tbl,
.probe = cx25821_initdev,
.remove = cx25821_finidev,
/* TODO */
.suspend = NULL,
.resume = NULL,
};
static int __init cx25821_init(void)
{
pr_info("driver version %d.%d.%d loaded\n",
(CX25821_VERSION_CODE >> 16) & 0xff,
(CX25821_VERSION_CODE >> 8) & 0xff,
CX25821_VERSION_CODE & 0xff);
return pci_register_driver(&cx25821_pci_driver);
}
static void __exit cx25821_fini(void)
{
pci_unregister_driver(&cx25821_pci_driver);
}
module_init(cx25821_init);
module_exit(cx25821_fini);
| gpl-2.0 |
KubaKaszycki/kubux | xz-utils/tests/test_filter_flags.c | 5506 | ///////////////////////////////////////////////////////////////////////////////
//
/// \file test_filter_flags.c
/// \brief Tests Filter Flags coders
//
// Author: Lasse Collin
//
// This file has been put into the public domain.
// You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////
#include "tests.h"
static uint8_t buffer[4096];
static lzma_filter known_flags;
static lzma_filter decoded_flags;
static lzma_stream strm = LZMA_STREAM_INIT;
static bool
encode(uint32_t known_size)
{
memcrap(buffer, sizeof(buffer));
uint32_t tmp;
if (lzma_filter_flags_size(&tmp, &known_flags) != LZMA_OK)
return true;
if (tmp != known_size)
return true;
size_t out_pos = 0;
if (lzma_filter_flags_encode(&known_flags,
buffer, &out_pos, known_size) != LZMA_OK)
return true;
if (out_pos != known_size)
return true;
return false;
}
static bool
decode_ret(uint32_t known_size, lzma_ret expected_ret)
{
memcrap(&decoded_flags, sizeof(decoded_flags));
size_t pos = 0;
if (lzma_filter_flags_decode(&decoded_flags, NULL,
buffer, &pos, known_size) != expected_ret
|| pos != known_size)
return true;
return false;
}
static bool
decode(uint32_t known_size)
{
if (decode_ret(known_size, LZMA_OK))
return true;
if (known_flags.id != decoded_flags.id)
return true;
return false;
}
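/*
 * The tests below round-trip each filter's flags: encode() checks that
 * lzma_filter_flags_size() agrees with the expected encoded size and
 * that encoding fills exactly that many bytes, while decode() checks
 * that decoding them back yields the same filter ID. Option contents
 * are validated by each individual test.
 */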
#if defined(HAVE_ENCODER_X86) && defined(HAVE_DECODER_X86)
static void
test_bcj(void)
{
// Test 1
known_flags.id = LZMA_FILTER_X86;
known_flags.options = NULL;
expect(!encode(2));
expect(!decode(2));
expect(decoded_flags.options == NULL);
// Test 2
lzma_options_bcj options;
options.start_offset = 0;
known_flags.options = &options;
expect(!encode(2));
expect(!decode(2));
expect(decoded_flags.options == NULL);
// Test 3
options.start_offset = 123456;
known_flags.options = &options;
expect(!encode(6));
expect(!decode(6));
expect(decoded_flags.options != NULL);
lzma_options_bcj *decoded = decoded_flags.options;
expect(decoded->start_offset == options.start_offset);
free(decoded);
}
#endif
#if defined(HAVE_ENCODER_DELTA) && defined(HAVE_DECODER_DELTA)
static void
test_delta(void)
{
// Test 1
known_flags.id = LZMA_FILTER_DELTA;
known_flags.options = NULL;
expect(encode(99));
// Test 2
lzma_options_delta options = {
.type = LZMA_DELTA_TYPE_BYTE,
.dist = 0
};
known_flags.options = &options;
expect(encode(99));
// Test 3
options.dist = LZMA_DELTA_DIST_MIN;
expect(!encode(3));
expect(!decode(3));
expect(((lzma_options_delta *)(decoded_flags.options))->dist
== options.dist);
free(decoded_flags.options);
// Test 4
options.dist = LZMA_DELTA_DIST_MAX;
expect(!encode(3));
expect(!decode(3));
expect(((lzma_options_delta *)(decoded_flags.options))->dist
== options.dist);
free(decoded_flags.options);
// Test 5
options.dist = LZMA_DELTA_DIST_MAX + 1;
expect(encode(99));
}
#endif
/*
#ifdef HAVE_FILTER_LZMA
static void
validate_lzma(void)
{
const lzma_options_lzma *known = known_flags.options;
const lzma_options_lzma *decoded = decoded_flags.options;
expect(known->dictionary_size <= decoded->dictionary_size);
if (known->dictionary_size == 1)
expect(decoded->dictionary_size == 1);
else
expect(known->dictionary_size + known->dictionary_size / 2
> decoded->dictionary_size);
expect(known->literal_context_bits == decoded->literal_context_bits);
expect(known->literal_pos_bits == decoded->literal_pos_bits);
expect(known->pos_bits == decoded->pos_bits);
}
static void
test_lzma(void)
{
// Test 1
known_flags.id = LZMA_FILTER_LZMA1;
known_flags.options = NULL;
expect(encode(99));
// Test 2
lzma_options_lzma options = {
.dictionary_size = 0,
.literal_context_bits = 0,
.literal_pos_bits = 0,
.pos_bits = 0,
.preset_dictionary = NULL,
.preset_dictionary_size = 0,
.mode = LZMA_MODE_INVALID,
.fast_bytes = 0,
.match_finder = LZMA_MF_INVALID,
.match_finder_cycles = 0,
};
// Test 3 (empty dictionary not allowed)
known_flags.options = &options;
expect(encode(99));
// Test 4 (brute-force test some valid dictionary sizes)
options.dictionary_size = LZMA_DICTIONARY_SIZE_MIN;
while (options.dictionary_size != LZMA_DICTIONARY_SIZE_MAX) {
if (++options.dictionary_size == 5000)
options.dictionary_size = LZMA_DICTIONARY_SIZE_MAX - 5;
expect(!encode(4));
expect(!decode(4));
validate_lzma();
free(decoded_flags.options);
}
// Test 5 (too big dictionary size)
options.dictionary_size = LZMA_DICTIONARY_SIZE_MAX + 1;
expect(encode(99));
// Test 6 (brute-force test lc/lp/pb)
options.dictionary_size = LZMA_DICTIONARY_SIZE_MIN;
for (uint32_t lc = LZMA_LITERAL_CONTEXT_BITS_MIN;
lc <= LZMA_LITERAL_CONTEXT_BITS_MAX; ++lc) {
for (uint32_t lp = LZMA_LITERAL_POS_BITS_MIN;
lp <= LZMA_LITERAL_POS_BITS_MAX; ++lp) {
for (uint32_t pb = LZMA_POS_BITS_MIN;
pb <= LZMA_POS_BITS_MAX; ++pb) {
if (lc + lp > LZMA_LITERAL_BITS_MAX)
continue;
options.literal_context_bits = lc;
options.literal_pos_bits = lp;
options.pos_bits = pb;
expect(!encode(4));
expect(!decode(4));
validate_lzma();
free(decoded_flags.options);
}
}
}
}
#endif
*/
int
main(void)
{
#if defined(HAVE_ENCODER_X86) && defined(HAVE_DECODER_X86)
test_bcj();
#endif
#if defined(HAVE_ENCODER_DELTA) && defined(HAVE_DECODER_DELTA)
test_delta();
#endif
// #ifdef HAVE_FILTER_LZMA
// test_lzma();
// #endif
lzma_end(&strm);
return 0;
}
| gpl-3.0 |
bzhouduke123/sakai | portal/portal-api/api/src/java/org/sakaiproject/portal/api/PageFilter.java | 1396 | /**********************************************************************************
* $URL$
* $Id$
***********************************************************************************
*
* Copyright (c) 2003, 2004, 2005, 2006, 2007, 2008 The Sakai Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************/
package org.sakaiproject.portal.api;
import java.util.List;
import java.util.Map;
import org.sakaiproject.site.api.Site;
/**
* @author ieb
*
*/
public interface PageFilter
{
/**
* @param newPages the list of pages to filter
* @param site the site the pages belong to
* @return the filtered list of pages
*/
List filter(List newPages, Site site);
/**
* Filter the list of placements, potentially making them hierarchical if required
* @param l the list of placement maps to filter
* @param site the site the placements belong to
* @return the filtered (possibly hierarchical) list of placement maps
*/
List<Map> filterPlacements(List<Map> l, Site site);
}
| apache-2.0 |
geekzoo/linux | linux-3.14.31/drivers/md/md.c | 231628 | /*
md.c : Multiple Devices driver for Linux
Copyright (C) 1998, 1999, 2000 Ingo Molnar
completely rewritten, based on the MD driver code from Marc Zyngier
Changes:
- RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
- RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
- boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
- kerneld support by Boris Tobotras <boris@xtalk.msk.su>
- kmod support by: Cyrus Durgin
- RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
- Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
- lots of fixes and improvements to the RAID1/RAID5 and generic
RAID code (such as request based resynchronization):
Neil Brown <neilb@cse.unsw.edu.au>.
- persistent bitmap code
Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#ifndef MODULE
static void autostart_arrays(int part);
#endif
/* pers_list is a list of registered personalities protected
* by pers_lock.
* pers_lock does extra service to protect accesses to
* mddev->thread when the mutex cannot be held.
*/
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
static void md_print_devices(void);
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
* count by 2 for every hour elapsed between read errors.
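 * (For example, a stored error count of 16 decays to 8 after one
 * idle hour and to 4 after two.)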
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that
* the RAID driver will use the maximum available bandwidth if the IO
* subsystem is idle. There is also an 'absolute maximum' reconstruction
* speed limit - in case reconstruction slows down your system despite
* idle IO detection.
*
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
* or /sys/block/mdX/md/sync_speed_{min,max}
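 * e.g. (illustrative): echo 50000 > /proc/sys/dev/raid/speed_limit_min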
*/
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
return mddev->sync_speed_min ?
mddev->sync_speed_min : sysctl_speed_limit_min;
}
static inline int speed_max(struct mddev *mddev)
{
return mddev->sync_speed_max ?
mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;
static struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{ }
};
static struct ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
.mode = S_IRUGO|S_IXUGO,
.child = raid_table,
},
{ }
};
static struct ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
.mode = 0555,
.child = raid_dir_table,
},
{ }
};
static const struct block_device_operations md_fops;
static int start_readonly;
/* bio_clone_mddev
* like bio_clone, but with a local bio set
*/
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
struct bio *b;
if (!mddev || !mddev->bio_set)
return bio_alloc(gfp_mask, nr_iovecs);
b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
if (!b)
return NULL;
return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev)
{
if (!mddev || !mddev->bio_set)
return bio_clone(bio, gfp_mask);
return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
* can use 'poll' or 'select' to find out when the event
* count increases.
*
* Events are:
* start array, stop array, error, add device, remove device,
* start build, activate spare
*/
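/* Userspace (e.g. a monitoring daemon) can watch for these events by
 * opening /proc/mdstat and using select()/poll() on the fd, which
 * signals each time md_new_event() bumps the count -- for instance
 * when a member device fails or a resync starts.
 */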
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
* when calling sysfs_notify isn't needed.
*/
static void md_new_event_inintr(struct mddev *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
/*
* Enables to iterate over all existing md arrays
* all_mddevs_lock protects this list.
*/
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
* iterates through all used mddevs in the system.
* We take care to grab the all_mddevs_lock whenever navigating
* the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
*/
#define for_each_mddev(_mddev,_tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
_tmp = all_mddevs.next; \
_mddev = NULL;}); \
({ if (_tmp != &all_mddevs) \
mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
spin_unlock(&all_mddevs_lock); \
if (_mddev) mddev_put(_mddev); \
_mddev = list_entry(_tmp, struct mddev, all_mddevs); \
_tmp != &all_mddevs;}); \
({ spin_lock(&all_mddevs_lock); \
_tmp = _tmp->next;}) \
)
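/*
 * Typical use (sketch) -- a refcount on the current mddev is held
 * while the loop body runs:
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		do_something(mddev);
 */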
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
* We hold a refcount over the call to ->make_request. By the time that
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
static void md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
int cpu;
unsigned int sectors;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
bio_io_error(bio);
return;
}
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
return;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
if (!mddev->suspended)
break;
rcu_read_unlock();
schedule();
rcu_read_lock();
}
finish_wait(&mddev->sb_wait, &__wait);
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
/*
* save the sectors now since our bio can
* go away inside make_request
*/
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
}
/* mddev_suspend makes sure no new requests are submitted
* to the device, and that any requests that have been submitted
* are completely handled.
* Once ->stop is called and completes, the module will be completely
* unused.
*/
void mddev_suspend(struct mddev *mddev)
{
BUG_ON(mddev->suspended);
mddev->suspended = 1;
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
int mddev_congested(struct mddev *mddev, int bits)
{
return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
/*
* Generic flush handling for md
*/
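/*
 * Flow summary: md_flush_request() parks the incoming bio in
 * ->flush_bio and schedules submit_flushes(), which sends an empty
 * WRITE_FLUSH bio to every active, non-faulty member device; once the
 * last of those completes, md_submit_flush_data() either ends an empty
 * barrier bio directly or clears REQ_FLUSH and hands the data portion
 * to the personality's make_request.
 */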
static void md_end_flush(struct bio *bio, int err)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&mddev->flush_pending)) {
/* The pre-request flush has finished */
queue_work(md_wq, &mddev->flush_work);
}
bio_put(bio);
}
static void md_submit_flush_data(struct work_struct *ws);
static void submit_flushes(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
INIT_WORK(&mddev->flush_work, md_submit_flush_data);
atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags)) {
/* Take two references, one is dropped
* when request finishes, one after
* we reclaim rcu_read_lock
*/
struct bio *bi;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
atomic_inc(&mddev->flush_pending);
submit_bio(WRITE_FLUSH, bi);
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
if (atomic_dec_and_test(&mddev->flush_pending))
queue_work(md_wq, &mddev->flush_work);
}
static void md_submit_flush_data(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
bio->bi_rw &= ~REQ_FLUSH;
mddev->pers->make_request(mddev, bio);
}
mddev->flush_bio = NULL;
wake_up(&mddev->sb_wait);
}
void md_flush_request(struct mddev *mddev, struct bio *bio)
{
spin_lock_irq(&mddev->write_lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
mddev->write_lock);
mddev->flush_bio = bio;
spin_unlock_irq(&mddev->write_lock);
INIT_WORK(&mddev->flush_work, submit_flushes);
queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct mddev *mddev = cb->data;
md_wakeup_thread(mddev->thread);
kfree(cb);
}
EXPORT_SYMBOL(md_unplug);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
atomic_inc(&mddev->active);
return mddev;
}
static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(struct mddev *mddev)
{
struct bio_set *bs = NULL;
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
/* We did a probe so need to clean up. Call
* queue_work inside the spinlock so that
* flush_workqueue() after mddev_find will
* succeed in waiting for the work to be done.
*/
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
queue_work(md_misc_wq, &mddev->del_work);
} else
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
if (bs)
bioset_free(bs);
}
void mddev_init(struct mddev *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
init_timer(&mddev->safemode_timer);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
spin_lock_init(&mddev->write_lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
mddev->reshape_backwards = 0;
mddev->last_sync_action = "none";
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev * mddev_find(dev_t unit)
{
struct mddev *mddev, *new = NULL;
if (unit && MAJOR(unit) != MD_MAJOR)
unit &= ~((1<<MdpMinorShift)-1);
retry:
spin_lock(&all_mddevs_lock);
if (unit) {
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == unit) {
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
kfree(new);
return mddev;
}
if (new) {
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
new->hold_active = UNTIL_IOCTL;
return new;
}
} else if (new) {
/* find an unused unit number */
static int next_minor = 512;
int start = next_minor;
int is_free = 0;
int dev = 0;
while (!is_free) {
dev = MKDEV(MD_MAJOR, next_minor);
next_minor++;
if (next_minor > MINORMASK)
next_minor = 0;
if (next_minor == start) {
/* Oh dear, all in use. */
spin_unlock(&all_mddevs_lock);
kfree(new);
return NULL;
}
is_free = 1;
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == dev) {
is_free = 0;
break;
}
}
new->unit = dev;
new->md_minor = MINOR(dev);
new->hold_active = UNTIL_STOP;
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
return new;
}
spin_unlock(&all_mddevs_lock);
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
new->unit = unit;
if (MAJOR(unit) == MD_MAJOR)
new->md_minor = MINOR(unit);
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
mddev_init(new);
goto retry;
}
static inline int __must_check mddev_lock(struct mddev * mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
/* Sometimes we need to take the lock in a situation where
* failure due to interrupts is not acceptable.
*/
static inline void mddev_lock_nointr(struct mddev * mddev)
{
mutex_lock(&mddev->reconfig_mutex);
}
static inline int mddev_is_locked(struct mddev *mddev)
{
return mutex_is_locked(&mddev->reconfig_mutex);
}
static inline int mddev_trylock(struct mddev * mddev)
{
return mutex_trylock(&mddev->reconfig_mutex);
}
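/*
 * Typical usage of the reconfig mutex (minimal sketch; this pattern
 * appears throughout the ioctl and sysfs handlers):
 *
 *	err = mddev_lock(mddev);
 *	if (err)
 *		return err;
 *	... reconfigure the array ...
 *	mddev_unlock(mddev);
 */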
static struct attribute_group md_redundancy_group;
static void mddev_unlock(struct mddev * mddev)
{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
* an access to the files will try to take reconfig_mutex
* while holding the file unremovable, which leads to
* a deadlock.
	 * So we set sysfs_active while the remove is happening,
	 * and anything else which might set ->to_remove or might
* otherwise change the sysfs namespace will fail with
* -EBUSY if sysfs_active is still set.
* We set sysfs_active under reconfig_mutex and elsewhere
* test it under the same mutex to ensure its correct value
* is seen.
*/
struct attribute_group *to_remove = mddev->to_remove;
mddev->to_remove = NULL;
mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
if (mddev->kobj.sd) {
if (to_remove != &md_redundancy_group)
sysfs_remove_group(&mddev->kobj, to_remove);
if (mddev->pers == NULL ||
mddev->pers->sync_request == NULL) {
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
mddev->sysfs_action = NULL;
}
}
mddev->sysfs_active = 0;
} else
mutex_unlock(&mddev->reconfig_mutex);
/* As we've dropped the mutex we need a spinlock to
* make sure the thread doesn't disappear
*/
spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
spin_unlock(&pers_lock);
}
static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
if (rdev->desc_nr == nr)
return rdev;
return NULL;
}
static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
rdev_for_each_rcu(rdev, mddev)
if (rdev->desc_nr == nr)
return rdev;
return NULL;
}
static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
if (rdev->bdev->bd_dev == dev)
return rdev;
return NULL;
}
static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
rdev_for_each_rcu(rdev, mddev)
if (rdev->bdev->bd_dev == dev)
return rdev;
return NULL;
}
static struct md_personality *find_pers(int level, char *clevel)
{
struct md_personality *pers;
list_for_each_entry(pers, &pers_list, list) {
if (level != LEVEL_NONE && pers->level == level)
return pers;
if (strcmp(pers->name, clevel)==0)
return pers;
}
return NULL;
}
/* return the offset of the super block in 512-byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
return MD_NEW_SIZE_SECTORS(num_sectors);
}
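/*
 * Worked example (assuming MD_RESERVED_SECTORS is 128, i.e. 64K, as
 * defined in md_p.h): a 1000000-sector device gives
 * MD_NEW_SIZE_SECTORS(1000000) = (1000000 & ~127) - 128 = 999808, so
 * the 0.90 superblock sits 192 sectors (96K) from the end of the
 * device: 64K-aligned, and between 64K and 128K from the end.
 */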
static int alloc_disk_sb(struct md_rdev * rdev)
{
if (rdev->sb_page)
MD_BUG();
rdev->sb_page = alloc_page(GFP_KERNEL);
if (!rdev->sb_page) {
printk(KERN_ALERT "md: out of memory.\n");
return -ENOMEM;
}
return 0;
}
void md_rdev_clear(struct md_rdev *rdev)
{
if (rdev->sb_page) {
put_page(rdev->sb_page);
rdev->sb_loaded = 0;
rdev->sb_page = NULL;
rdev->sb_start = 0;
rdev->sectors = 0;
}
if (rdev->bb_page) {
put_page(rdev->bb_page);
rdev->bb_page = NULL;
}
kfree(rdev->badblocks.page);
rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);
static void super_written(struct bio *bio, int error)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
error, test_bit(BIO_UPTODATE, &bio->bi_flags));
WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
md_error(mddev, rdev);
}
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
}
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page)
{
/* write first size bytes of page to sector of rdev
* Increment mddev->pending_writes before returning
* and decrement it on completion, waking up sb_wait
* if zero is reached.
* If an error occurred, call md_error
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
submit_bio(WRITE_FLUSH_FUA, bio);
}
void md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
DEFINE_WAIT(wq);
for(;;) {
prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
if (atomic_read(&mddev->pending_writes)==0)
break;
schedule();
}
finish_wait(&mddev->sb_wait, &wq);
}
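/*
 * sync_page_io() performs a synchronous single-page read or write.
 * Metadata operations go to the metadata device (if separate) at
 * sector + sb_start; data operations are offset by data_offset, or by
 * new_data_offset when the sector lies on the already-reshaped side of
 * an active reshape.
 */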
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
int ret;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
(rdev->mddev->reshape_backwards ==
(sector >= rdev->mddev->reshape_position)))
bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
else
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
submit_bio_wait(rw, bio);
ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio);
return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev * rdev, int size)
{
char b[BDEVNAME_SIZE];
if (!rdev->sb_page) {
MD_BUG();
return -EINVAL;
}
if (rdev->sb_loaded)
return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
fail:
printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
return sb1->set_uuid0 == sb2->set_uuid0 &&
sb1->set_uuid1 == sb2->set_uuid1 &&
sb1->set_uuid2 == sb2->set_uuid2 &&
sb1->set_uuid3 == sb2->set_uuid3;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
int ret;
mdp_super_t *tmp1, *tmp2;
tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
if (!tmp1 || !tmp2) {
ret = 0;
printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
*tmp1 = *sb1;
*tmp2 = *sb2;
/*
* nr_disks is not constant
*/
tmp1->nr_disks = 0;
tmp2->nr_disks = 0;
ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
kfree(tmp1);
kfree(tmp2);
return ret;
}
static u32 md_csum_fold(u32 csum)
{
csum = (csum & 0xffff) + (csum >> 16);
return (csum & 0xffff) + (csum >> 16);
}
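/*
 * Example: md_csum_fold(0x12345678) computes 0x5678 + 0x1234 = 0x68ac;
 * the second fold adds any carry back in, so the result always fits in
 * 16 bits regardless of input.
 */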
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
u64 newcsum = 0;
u32 *sb32 = (u32*)sb;
int i;
unsigned int disk_csum, csum;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
for (i = 0; i < MD_SB_BYTES/4 ; i++)
newcsum += sb32[i];
csum = (newcsum & 0xffffffff) + (newcsum>>32);
#ifdef CONFIG_ALPHA
/* This used to use csum_partial, which was wrong for several
* reasons including that different results are returned on
* different architectures. It isn't critical that we get exactly
* the same return value as before (we always csum_fold before
* testing, and that removes any differences). However as we
* know that csum_partial always returned a 16bit value on
* alphas, do a fold to maximise conformity to previous behaviour.
*/
sb->sb_csum = md_csum_fold(disk_csum);
#else
sb->sb_csum = disk_csum;
#endif
return csum;
}
/*
* Handle superblock details.
* We want to be able to handle multiple superblock formats
* so we have a common interface to them all, and an array of
* different handlers.
* We rely on user-space to write the initial superblock, and support
* reading and updating of superblocks.
* Interface methods are:
* int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
* loads and validates a superblock on dev.
* if refdev != NULL, compare superblocks on both devices
* Return:
* 0 - dev has a superblock that is compatible with refdev
* 1 - dev has a superblock that is compatible and newer than refdev
* so dev should be used as the refdev in future
* -EINVAL superblock incompatible or invalid
* -othererror e.g. -EIO
*
* int validate_super(struct mddev *mddev, struct md_rdev *dev)
* Verify that dev is acceptable into mddev.
* The first time, mddev->raid_disks will be 0, and data from
* dev should be merged in. Subsequent calls check that dev
* is new enough. Return 0 or -EINVAL
*
* void sync_super(struct mddev *mddev, struct md_rdev *dev)
* Update the superblock for rdev with data in mddev
* This does not write to disc.
*
*/
struct super_type {
char *name;
struct module *owner;
int (*load_super)(struct md_rdev *rdev,
struct md_rdev *refdev,
int minor_version);
int (*validate_super)(struct mddev *mddev,
struct md_rdev *rdev);
void (*sync_super)(struct mddev *mddev,
struct md_rdev *rdev);
unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
sector_t num_sectors);
int (*allow_new_offset)(struct md_rdev *rdev,
unsigned long long new_offset);
};
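/*
 * A minimal sketch of how this interface is driven during assembly
 * (see md_import_device() and analyse_sbs() for the real callers;
 * "ver" and "minor" below are illustrative only):
 *
 *	err = super_types[ver].load_super(rdev, refdev, minor);
 *	if (err >= 0)
 *		err = super_types[ver].validate_super(mddev, rdev);
 */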
/*
* Check that the given mddev has no bitmap.
*
* This function is called from the run method of all personalities that do not
* support bitmaps. It prints an error message and returns non-zero if mddev
* has a bitmap. Otherwise, it returns 0.
*
*/
int md_check_no_bitmap(struct mddev *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
* load_super for 0.90.0
*/
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
/*
* Calculate the position of the superblock (512byte sectors),
* it's at the end of the disk.
*
* It also happens to be a multiple of 4Kb.
*/
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
if (ret) return ret;
ret = -EINVAL;
bdevname(rdev->bdev, b);
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
printk(KERN_WARNING "Bad version number %d.%d on %s\n",
sb->major_version, sb->minor_version,
b);
goto abort;
}
if (sb->raid_disks <= 0)
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
b);
goto abort;
}
rdev->preferred_minor = sb->md_minor;
rdev->data_offset = 0;
rdev->new_data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
rdev->badblocks.shift = -1;
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
else
rdev->desc_nr = sb->this_disk.number;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has same UUID"
" but different superblock to %s\n",
b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
ev2 = md_event(refsb);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
rdev->sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that.
* (not needed for Linear and RAID0 as metadata doesn't
* record this size)
*/
if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
/* "this cannot possibly happen" ... */
ret = -EINVAL;
abort:
return ret;
}
/*
* validate_super for 0.90.0
*/
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
mdp_disk_t *desc;
mdp_super_t *sb = page_address(rdev->sb_page);
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 0;
mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version;
mddev->external = 0;
mddev->chunk_sectors = sb->chunk_size >> 9;
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
mddev->level = sb->level;
mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
mddev->dev_sectors = ((sector_t)sb->size) * 2;
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
		/* bitmap can use 60K after the 4K superblock */
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev->reshape_backwards = 0;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
mddev->delta_disks = sb->delta_disks;
mddev->new_level = sb->new_level;
mddev->new_layout = sb->new_layout;
mddev->new_chunk_sectors = sb->new_chunk >> 9;
if (mddev->delta_disks < 0)
mddev->reshape_backwards = 1;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
mddev->recovery_cp = sb->recovery_cp;
} else
mddev->recovery_cp = 0;
}
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
}
} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except
* for spares (which don't need an event count) */
++ev1;
if (sb->disks[rdev->desc_nr].state & (
(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
desc = sb->disks + rdev->desc_nr;
if (desc->state & (1<<MD_DISK_FAULTY))
set_bit(Faulty, &rdev->flags);
else if (desc->state & (1<<MD_DISK_SYNC) /* &&
desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
rdev->saved_raid_disk = desc->raid_disk;
} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
/* active but not in sync implies recovery up to
* reshape position. We don't know exactly where
* that is, so set to zero for now */
if (mddev->minor_version >= 91) {
rdev->recovery_offset = 0;
rdev->raid_disk = desc->raid_disk;
}
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
}
/*
* sync_super for 0.90.0
*/
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
mdp_super_t *sb;
struct md_rdev *rdev2;
int next_spare = mddev->raid_disks;
	/* make rdev->sb match mddev data:
*
* 1/ zero out disks
* 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
* 3/ any empty disks < next_spare become removed
*
* disks[0] gets initialised to REMOVED because
* we cannot be sure from other fields if it has
* been initialised or not.
*/
int i;
int active=0, working=0,failed=0,spare=0,nr_disks=0;
rdev->sb_size = MD_SB_BYTES;
sb = page_address(rdev->sb_page);
memset(sb, 0, sizeof(*sb));
sb->md_magic = MD_SB_MAGIC;
sb->major_version = mddev->major_version;
sb->patch_version = mddev->patch_version;
sb->gvalid_words = 0; /* ignored */
memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
memcpy(&sb->set_uuid3, mddev->uuid+12,4);
sb->ctime = mddev->ctime;
sb->level = mddev->level;
sb->size = mddev->dev_sectors / 2;
sb->raid_disks = mddev->raid_disks;
sb->md_minor = mddev->md_minor;
sb->not_persistent = 0;
sb->utime = mddev->utime;
sb->state = 0;
sb->events_hi = (mddev->events>>32);
sb->events_lo = (u32)mddev->events;
if (mddev->reshape_position == MaxSector)
sb->minor_version = 90;
else {
sb->minor_version = 91;
sb->reshape_position = mddev->reshape_position;
sb->new_level = mddev->new_level;
sb->delta_disks = mddev->delta_disks;
sb->new_layout = mddev->new_layout;
sb->new_chunk = mddev->new_chunk_sectors << 9;
}
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
sb->recovery_cp = mddev->recovery_cp;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
if (mddev->recovery_cp == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
if (mddev->bitmap && mddev->bitmap_info.file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
rdev_for_each(rdev2, mddev) {
mdp_disk_t *d;
int desc_nr;
int is_active = test_bit(In_sync, &rdev2->flags);
if (rdev2->raid_disk >= 0 &&
sb->minor_version >= 91)
/* we have nowhere to store the recovery_offset,
* but if it is not below the reshape_position,
* we can piggy-back on that.
*/
is_active = 1;
if (rdev2->raid_disk < 0 ||
test_bit(Faulty, &rdev2->flags))
is_active = 0;
if (is_active)
desc_nr = rdev2->raid_disk;
else
desc_nr = next_spare++;
rdev2->desc_nr = desc_nr;
d = &sb->disks[rdev2->desc_nr];
nr_disks++;
d->number = rdev2->desc_nr;
d->major = MAJOR(rdev2->bdev->bd_dev);
d->minor = MINOR(rdev2->bdev->bd_dev);
if (is_active)
d->raid_disk = rdev2->raid_disk;
else
d->raid_disk = rdev2->desc_nr; /* compatibility */
if (test_bit(Faulty, &rdev2->flags))
d->state = (1<<MD_DISK_FAULTY);
else if (is_active) {
d->state = (1<<MD_DISK_ACTIVE);
if (test_bit(In_sync, &rdev2->flags))
d->state |= (1<<MD_DISK_SYNC);
active++;
working++;
} else {
d->state = 0;
spare++;
working++;
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
mdp_disk_t *d = &sb->disks[i];
if (d->state == 0 && d->number == 0) {
d->number = i;
d->raid_disk = i;
d->state = (1<<MD_DISK_REMOVED);
d->state |= (1<<MD_DISK_FAULTY);
failed++;
}
}
sb->nr_disks = nr_disks;
sb->active_disks = active;
sb->working_disks = working;
sb->failed_disks = failed;
sb->spare_disks = spare;
sb->this_disk = sb->disks[rdev->desc_nr];
sb->sb_csum = calc_sb_csum(sb);
}
/*
* rdev_size_change for 0.90.0
*/
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->mddev->bitmap_info.offset)
return 0; /* can't move bitmap */
rdev->sb_start = calc_dev_sboffset(rdev);
if (!num_sectors || num_sectors > rdev->sb_start)
num_sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
/* non-zero offset changes not possible with v0.90 */
return new_offset == 0;
}
/*
* version 1 superblock
*/
static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
__le32 disk_csum;
u32 csum;
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
__le32 *isuper = (__le32*)sb;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
newcsum = 0;
for (; size >= 4; size -= 4)
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
newcsum += le16_to_cpu(*(__le16*) isuper);
csum = (newcsum & 0xffffffff) + (newcsum >> 32);
sb->sb_csum = disk_csum;
return cpu_to_le32(csum);
}
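/*
 * The checksummed area is the 256-byte fixed superblock plus two bytes
 * per entry in dev_roles[]; e.g. max_dev == 128 gives 256 + 256 = 512
 * bytes, processed as little-endian 32-bit words with a trailing
 * 16-bit word if the size is not a multiple of four.
 */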
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
struct mdp_superblock_1 *sb;
int ret;
sector_t sb_start;
sector_t sectors;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and,
	 * depending on minor_version, it can be:
* 0: At least 8K, but less than 12K, from end of device
* 1: At start of device
* 2: 4K from start of device.
*/
switch(minor_version) {
case 0:
sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
sb_start -= 8*2;
sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
sb_start = 0;
break;
case 2:
sb_start = 8;
break;
default:
return -EINVAL;
}
rdev->sb_start = sb_start;
/* superblock is rarely larger than 1K, but it can be larger,
* and it is safe to read 4k, so we do that
*/
ret = read_disk_sb(rdev, 4096);
if (ret) return ret;
sb = page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
le64_to_cpu(sb->super_offset) != rdev->sb_start ||
(le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
printk("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
printk("md: data_size too small on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (sb->pad0 ||
sb->pad3[0] ||
memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
/* Some padding is non-zero, might be a new feature */
return -EINVAL;
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
rdev->new_data_offset = rdev->data_offset;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
(le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
if (minor_version
&& rdev->data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (minor_version
&& rdev->new_data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
rdev->desc_nr = -1;
else
rdev->desc_nr = le32_to_cpu(sb->dev_number);
if (!rdev->bb_page) {
rdev->bb_page = alloc_page(GFP_KERNEL);
if (!rdev->bb_page)
return -ENOMEM;
}
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
rdev->badblocks.count == 0) {
/* need to load the bad block list.
* Currently we limit it to one page.
*/
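		/* On-disk layout (as decoded below): each little-endian u64
		 * packs a start sector in the high 54 bits and a length in
		 * the low 10 bits, both scaled by bblog_shift; an all-ones
		 * entry terminates the list. E.g. (100 << 10) | 8 records
		 * an 8-sector bad range starting at sector 100 (pre-shift).
		 */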
s32 offset;
sector_t bb_sector;
u64 *bbp;
int i;
int sectors = le16_to_cpu(sb->bblog_size);
if (sectors > (PAGE_SIZE / 512))
return -EINVAL;
offset = le32_to_cpu(sb->bblog_offset);
if (offset == 0)
return -EINVAL;
bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9,
rdev->bb_page, READ, true))
return -EIO;
bbp = (u64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
u64 bb = le64_to_cpu(*bbp);
int count = bb & (0x3ff);
u64 sector = bb >> 10;
sector <<= sb->bblog_shift;
count <<= sb->bblog_shift;
if (bb + 1 == 0)
break;
if (md_set_badblocks(&rdev->badblocks,
sector, count, 1) == 0)
return -EINVAL;
}
} else if (sb->bblog_offset != 0)
rdev->badblocks.shift = 0;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
printk(KERN_WARNING "md: %s has strangely different"
" superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
}
ev1 = le64_to_cpu(sb->events);
ev2 = le64_to_cpu(refsb->events);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
if (minor_version) {
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
sectors -= rdev->data_offset;
} else
sectors = rdev->sb_start;
if (sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
rdev->sectors = le64_to_cpu(sb->data_size);
return ret;
}
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 1;
mddev->patch_version = 0;
mddev->external = 0;
mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level);
mddev->clevel[0] = 0;
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
/* Default location for bitmap is 1K after superblock
* using 3K - total of 4K
*/
mddev->bitmap_info.default_offset = 1024 >> 9;
mddev->bitmap_info.default_space = (4096-1024) >> 9;
mddev->reshape_backwards = 0;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, assume the bitmap can use up to the superblock
			 * if it sits before it, else up to 4K beyond the superblock.
			 * For others, assume no change is possible.
*/
if (mddev->minor_version > 0)
mddev->bitmap_info.space = 0;
else if (mddev->bitmap_info.offset > 0)
mddev->bitmap_info.space =
8 - mddev->bitmap_info.offset;
else
mddev->bitmap_info.space =
-mddev->bitmap_info.offset;
}
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
mddev->delta_disks = le32_to_cpu(sb->delta_disks);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
if (mddev->delta_disks < 0 ||
(mddev->delta_disks == 0 &&
(le32_to_cpu(sb->feature_map)
& MD_FEATURE_RESHAPE_BACKWARDS)))
mddev->reshape_backwards = 1;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
* spares (which don't need an event count) */
++ev1;
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
if (ev1 < mddev->events)
set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
int role;
if (rdev->desc_nr < 0 ||
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
role = 0xffff;
rdev->desc_nr = -1;
} else
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
switch(role) {
case 0xffff: /* spare */
break;
case 0xfffe: /* faulty */
set_bit(Faulty, &rdev->flags);
break;
default:
rdev->saved_raid_disk = role;
if ((le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_OFFSET)) {
rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
if (!(le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_BITMAP))
rdev->saved_raid_disk = -1;
} else
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
}
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb;
struct md_rdev *rdev2;
int max_dev, i;
/* make rdev->sb match mddev and rdev data. */
sb = page_address(rdev->sb_page);
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
else
sb->resync_offset = cpu_to_le64(0);
sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
else
sb->devflags &= ~WriteMostly1;
sb->data_offset = cpu_to_le64(rdev->data_offset);
sb->data_size = cpu_to_le64(rdev->sectors);
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
}
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
cpu_to_le32(MD_FEATURE_REPLACEMENT);
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
sb->reshape_position = cpu_to_le64(mddev->reshape_position);
sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
if (mddev->delta_disks == 0 &&
mddev->reshape_backwards)
sb->feature_map
|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
if (rdev->new_data_offset != rdev->data_offset) {
sb->feature_map
|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
- rdev->data_offset));
}
}
if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks */ ;
else if (sb->bblog_offset == 0)
/* Cannot record bad blocks on this device */
md_error(mddev, rdev);
else {
struct badblocks *bb = &rdev->badblocks;
u64 *bbp = (u64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
unsigned seq;
retry:
seq = read_seqbegin(&bb->lock);
memset(bbp, 0xff, PAGE_SIZE);
for (i = 0 ; i < bb->count ; i++) {
u64 internal_bb = p[i];
u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
| BB_LEN(internal_bb));
bbp[i] = cpu_to_le64(store_bb);
}
bb->changed = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
bb->sector = (rdev->sb_start +
(int)le32_to_cpu(sb->bblog_offset));
bb->size = le16_to_cpu(sb->bblog_size);
}
}
max_dev = 0;
rdev_for_each(rdev2, mddev)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
if (max_dev > le32_to_cpu(sb->max_dev)) {
int bmask;
sb->max_dev = cpu_to_le32(max_dev);
rdev->sb_size = max_dev * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
} else
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
}
sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
struct mdp_superblock_1 *sb;
sector_t max_sectors;
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->data_offset != rdev->new_data_offset)
return 0; /* too confusing */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
max_sectors -= rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
} else if (rdev->mddev->bitmap_info.offset) {
/* minor version 0 with bitmap we can't move */
return 0;
} else {
/* minor version 0; superblock after data */
sector_t sb_start;
sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
sb_start &= ~(sector_t)(4*2 - 1);
max_sectors = rdev->sectors + sb_start - rdev->sb_start;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
	/* super_offset is little-endian on disk */
	sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
static int
super_1_allow_new_offset(struct md_rdev *rdev,
unsigned long long new_offset)
{
/* All necessary checks on new >= old have been done */
struct bitmap *bitmap;
if (new_offset >= rdev->data_offset)
return 1;
/* with 1.0 metadata, there is no metadata to tread on
* so we can always move back */
if (rdev->mddev->minor_version == 0)
return 1;
/* otherwise we must be sure not to step on
* any metadata, so stay:
* 36K beyond start of superblock
* beyond end of badblocks
* beyond write-intent bitmap
*/
if (rdev->sb_start + (32+4)*2 > new_offset)
return 0;
bitmap = rdev->mddev->bitmap;
if (bitmap && !rdev->mddev->bitmap_info.file &&
rdev->sb_start + rdev->mddev->bitmap_info.offset +
bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
return 0;
if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
return 0;
return 1;
}
static struct super_type super_types[] = {
[0] = {
.name = "0.90.0",
.owner = THIS_MODULE,
.load_super = super_90_load,
.validate_super = super_90_validate,
.sync_super = super_90_sync,
.rdev_size_change = super_90_rdev_size_change,
.allow_new_offset = super_90_allow_new_offset,
},
[1] = {
.name = "md-1",
.owner = THIS_MODULE,
.load_super = super_1_load,
.validate_super = super_1_validate,
.sync_super = super_1_sync,
.rdev_size_change = super_1_rdev_size_change,
.allow_new_offset = super_1_allow_new_offset,
},
};
static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
if (mddev->sync_super) {
mddev->sync_super(mddev, rdev);
return;
}
BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
super_types[mddev->major_version].sync_super(mddev, rdev);
}
static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
struct md_rdev *rdev, *rdev2;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev1)
rdev_for_each_rcu(rdev2, mddev2)
if (rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
rcu_read_unlock();
return 1;
}
rcu_read_unlock();
return 0;
}
static LIST_HEAD(pending_raid_disks);
/*
* Try to register data integrity profile for an mddev
*
* This is called when an array is started and after a disk has been kicked
* from the array. It only succeeds if all working and active component devices
* are integrity capable with matching profiles.
*/
int md_integrity_register(struct mddev *mddev)
{
struct md_rdev *rdev, *reference = NULL;
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register, or already is */
rdev_for_each(rdev, mddev) {
/* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->raid_disk < 0)
continue;
if (!reference) {
/* Use the first rdev as the reference */
reference = rdev;
continue;
}
/* does this rdev's profile match the reference profile? */
if (blk_integrity_compare(reference->bdev->bd_disk,
rdev->bdev->bd_disk) < 0)
return -EINVAL;
}
if (!reference || !bdev_get_integrity(reference->bdev))
return 0;
/*
* All component devices are integrity capable and have matching
* profiles, register the common profile for the md device.
*/
if (blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev)) != 0) {
printk(KERN_ERR "md: failed to register integrity for %s\n",
mdname(mddev));
return -EINVAL;
}
printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
printk(KERN_ERR "md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_rdev;
struct blk_integrity *bi_mddev;
if (!mddev->gendisk)
return;
bi_rdev = bdev_get_integrity(rdev->bdev);
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
return;
if (rdev->raid_disk < 0) /* skip spares */
return;
if (bi_rdev && blk_integrity_compare(mddev->gendisk,
rdev->bdev->bd_disk) >= 0)
return;
printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev)
{
char b[BDEVNAME_SIZE];
struct kobject *ko;
char *s;
int err;
if (rdev->mddev) {
MD_BUG();
return -EINVAL;
}
/* prevent duplicates */
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
/* make sure rdev->sectors exceeds mddev->dev_sectors */
if (rdev->sectors && (mddev->dev_sectors == 0 ||
rdev->sectors < mddev->dev_sectors)) {
if (mddev->pers) {
/* Cannot change size, so fail
* If mddev->level <= 0, then we don't care
* about aligning sizes (e.g. linear)
*/
if (mddev->level > 0)
return -ENOSPC;
} else
mddev->dev_sectors = rdev->sectors;
}
/* Verify rdev->desc_nr is unique.
* If it is -1, assign a free number, else
* check number is not in use
*/
if (rdev->desc_nr < 0) {
int choice = 0;
if (mddev->pers) choice = mddev->raid_disks;
while (find_rdev_nr(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
if (find_rdev_nr(mddev, rdev->desc_nr))
return -EBUSY;
}
if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
while ( (s=strchr(b, '/')) != NULL)
*s = '!';
rdev->mddev = mddev;
printk(KERN_INFO "md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
if (sysfs_create_link(&rdev->kobj, ko, "block"))
/* failure here is OK */;
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
/* May as well allow recovery to be retried once */
mddev->recovery_disabled++;
return 0;
fail:
printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
b, mdname(mddev));
return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
kobject_del(&rdev->kobj);
kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(struct md_rdev * rdev)
{
char b[BDEVNAME_SIZE];
if (!rdev->mddev) {
MD_BUG();
return;
}
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
rdev->sysfs_state = NULL;
rdev->badblocks.count = 0;
/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state". We also need
* to delay it due to rcu usage.
*/
synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
queue_work(md_misc_wq, &rdev->del_work);
}
/*
* prevent the device from being mounted, repartitioned or
* otherwise reused by a RAID array (or any other kernel
* subsystem), by bd_claiming the device.
*/
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
int err = 0;
struct block_device *bdev;
char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
return err;
}
static void unlock_rdev(struct md_rdev *rdev)
{
struct block_device *bdev = rdev->bdev;
rdev->bdev = NULL;
if (!bdev)
MD_BUG();
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);
static void export_rdev(struct md_rdev * rdev)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: export_rdev(%s)\n",
bdevname(rdev->bdev,b));
if (rdev->mddev)
MD_BUG();
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
unlock_rdev(rdev);
kobject_put(&rdev->kobj);
}
static void kick_rdev_from_array(struct md_rdev * rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
}
static void export_array(struct mddev *mddev)
{
struct md_rdev *rdev, *tmp;
rdev_for_each_safe(rdev, tmp, mddev) {
if (!rdev->mddev) {
MD_BUG();
continue;
}
kick_rdev_from_array(rdev);
}
if (!list_empty(&mddev->disks))
MD_BUG();
mddev->raid_disks = 0;
mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
desc->major,desc->minor,desc->raid_disk,desc->state);
}
static void print_sb_90(mdp_super_t *sb)
{
int i;
printk(KERN_INFO
"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
sb->major_version, sb->minor_version, sb->patch_version,
sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
sb->ctime);
printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
sb->level, sb->size, sb->nr_disks, sb->raid_disks,
sb->md_minor, sb->layout, sb->chunk_size);
printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
" FD:%d SD:%d CSUM:%08x E:%08lx\n",
sb->utime, sb->state, sb->active_disks, sb->working_disks,
sb->failed_disks, sb->spare_disks,
sb->sb_csum, (unsigned long)sb->events_lo);
printk(KERN_INFO);
for (i = 0; i < MD_SB_DISKS; i++) {
mdp_disk_t *desc;
desc = sb->disks + i;
if (desc->number || desc->major || desc->minor ||
desc->raid_disk || (desc->state && (desc->state != 4))) {
printk(" D %2d: ", i);
print_desc(desc);
}
}
printk(KERN_INFO "md: THIS: ");
print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
__u8 *uuid;
uuid = sb->set_uuid;
printk(KERN_INFO
"md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
"md: Name: \"%s\" CT:%llu\n",
le32_to_cpu(sb->major_version),
le32_to_cpu(sb->feature_map),
uuid,
sb->set_name,
(unsigned long long)le64_to_cpu(sb->ctime)
& MD_SUPERBLOCK_1_TIME_SEC_MASK);
uuid = sb->device_uuid;
printk(KERN_INFO
"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
" RO:%llu\n"
"md: Dev:%08x UUID: %pU\n"
"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
"md: (MaxDev:%u) \n",
le32_to_cpu(sb->level),
(unsigned long long)le64_to_cpu(sb->size),
le32_to_cpu(sb->raid_disks),
le32_to_cpu(sb->layout),
le32_to_cpu(sb->chunksize),
(unsigned long long)le64_to_cpu(sb->data_offset),
(unsigned long long)le64_to_cpu(sb->data_size),
(unsigned long long)le64_to_cpu(sb->super_offset),
(unsigned long long)le64_to_cpu(sb->recovery_offset),
le32_to_cpu(sb->dev_number),
uuid,
sb->devflags,
(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
(unsigned long long)le64_to_cpu(sb->events),
(unsigned long long)le64_to_cpu(sb->resync_offset),
le32_to_cpu(sb->sb_csum),
le32_to_cpu(sb->max_dev)
);
}
static void print_rdev(struct md_rdev *rdev, int major_version)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
rdev->desc_nr);
if (rdev->sb_loaded) {
printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
switch (major_version) {
case 0:
print_sb_90(page_address(rdev->sb_page));
break;
case 1:
print_sb_1(page_address(rdev->sb_page));
break;
}
} else
printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
struct list_head *tmp;
struct md_rdev *rdev;
struct mddev *mddev;
char b[BDEVNAME_SIZE];
printk("\n");
printk("md: **********************************\n");
printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
printk("md: **********************************\n");
for_each_mddev(mddev, tmp) {
if (mddev->bitmap)
bitmap_print_sb(mddev->bitmap);
else
printk("%s: ", mdname(mddev));
rdev_for_each(rdev, mddev)
printk("<%s>", bdevname(rdev->bdev,b));
printk("\n");
rdev_for_each(rdev, mddev)
print_rdev(rdev, mddev->major_version);
}
printk("md: **********************************\n");
printk("\n");
}
static void sync_sbs(struct mddev * mddev, int nospares)
{
/* Update each superblock (in-memory image), but
* if we are allowed to, skip spares which already
* have the right event counter, or have one earlier
* (which would mean they aren't being marked as dirty
* with the rest of the array)
*/
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
rdev->sb_events+1 == mddev->events)) {
/* Don't update this superblock */
rdev->sb_loaded = 2;
} else {
sync_super(mddev, rdev);
rdev->sb_loaded = 1;
}
}
}
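/*
 * md_update_sb() overview: bring per-device recovery_offsets up to
 * date, bump (or, for a clean<->dirty flip, possibly roll back) the
 * event counter under write_lock, regenerate the in-memory superblocks
 * via sync_sbs(), then write them (and any changed bad-block logs) to
 * every non-faulty device and wait. If the array state changed while
 * writing, the whole sequence repeats.
 */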
static void md_update_sb(struct mddev * mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
int nospares = 0;
int any_badblocks_changed = 0;
if (mddev->ro) {
if (force_change)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
return;
}
repeat:
/* First make sure individual recovery_offsets are correct */
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
rdev->recovery_offset = mddev->curr_resync_completed;
}
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
if (!mddev->external) {
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed) {
rdev->badblocks.changed = 0;
md_ack_all_badblocks(&rdev->badblocks);
md_error(mddev, rdev);
}
clear_bit(Blocked, &rdev->flags);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
}
wake_up(&mddev->sb_wait);
return;
}
spin_lock_irq(&mddev->write_lock);
mddev->utime = get_seconds();
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
* though if events isn't the right even/odd, we will have to do
* spares after all
*/
nospares = 1;
if (force_change)
nospares = 0;
if (mddev->degraded)
/* If the array is degraded, then skipping spares is both
* dangerous and fairly pointless.
* Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
* so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more, and the spare can go back to sleep.
*/
nospares = 0;
sync_req = mddev->in_sync;
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
&& mddev->can_decrease_events
&& mddev->events != 1) {
mddev->events--;
mddev->can_decrease_events = 0;
} else {
/* otherwise we have to go forward and ... */
mddev->events ++;
mddev->can_decrease_events = nospares;
}
if (!mddev->events) {
/*
* oops, this 64-bit counter should never wrap.
* Either we are in around ~1 trillion A.C., assuming
* 1 reboot per second, or we have a bug:
*/
MD_BUG();
mddev->events --;
}
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed)
any_badblocks_changed++;
if (test_bit(Faulty, &rdev->flags))
set_bit(FaultRecorded, &rdev->flags);
}
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
pr_debug("md: (write) %s's sb offset: %llu\n",
bdevname(rdev->bdev, b),
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
md_super_write(mddev, rdev,
rdev->badblocks.sector,
rdev->badblocks.size << 9,
rdev->bb_page);
rdev->badblocks.size = 0;
}
} else
pr_debug("md: %s (skipping faulty)\n",
bdevname(rdev->bdev, b));
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
break;
}
md_super_wait(mddev);
/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync != sync_req ||
test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
/* have to write it out again */
spin_unlock_irq(&mddev->write_lock);
goto repeat;
}
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
rdev_for_each(rdev, mddev) {
if (test_and_clear_bit(FaultRecorded, &rdev->flags))
clear_bit(Blocked, &rdev->flags);
if (any_badblocks_changed)
md_ack_all_badblocks(&rdev->badblocks);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
*/
static int cmd_match(const char *cmd, const char *str)
{
/* See if cmd, written into a sysfs file, matches
* str. They must either be the same, or cmd can
* have a trailing newline
*/
while (*cmd && *str && *cmd == *str) {
cmd++;
str++;
}
if (*cmd == '\n')
cmd++;
if (*str || *cmd)
return 0;
return 1;
}
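/*
 * Examples: cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1, while a mere prefix
 * such as cmd_match("fault", "faulty") returns 0.
 */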
struct rdev_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct md_rdev *, char *);
ssize_t (*store)(struct md_rdev *, const char *, size_t);
};
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
char *sep = "";
size_t len = 0;
if (test_bit(Faulty, &rdev->flags) ||
rdev->badblocks.unacked_exist) {
len+= sprintf(page+len, "%sfaulty",sep);
sep = ",";
}
if (test_bit(In_sync, &rdev->flags)) {
len += sprintf(page+len, "%sin_sync",sep);
sep = ",";
}
if (test_bit(WriteMostly, &rdev->flags)) {
len += sprintf(page+len, "%swrite_mostly",sep);
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
(rdev->badblocks.unacked_exist
&& !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
if (!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags)) {
len += sprintf(page+len, "%sspare", sep);
sep = ",";
}
if (test_bit(WriteErrorSeen, &rdev->flags)) {
len += sprintf(page+len, "%swrite_error", sep);
sep = ",";
}
if (test_bit(WantReplacement, &rdev->flags)) {
len += sprintf(page+len, "%swant_replacement", sep);
sep = ",";
}
if (test_bit(Replacement, &rdev->flags)) {
len += sprintf(page+len, "%sreplacement", sep);
sep = ",";
}
return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(struct md_rdev *rdev, const char *buf, size_t len)
{
/* can write
* faulty - simulates an error
* remove - disconnects the device
* writemostly - sets write_mostly
* -writemostly - clears write_mostly
* blocked - sets the Blocked flags
* -blocked - clears the Blocked and possibly simulates an error
* insync - sets Insync providing device isn't active
* -insync - clear Insync for a device with a slot assigned,
* so that it gets rebuilt based on bitmap
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
*/
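	/*
	 * e.g. (sketch; path assumed from the "dev-%s" kobject added in
	 * bind_rdev_to_array()):
	 *	echo faulty > /sys/block/md0/md/dev-sda/state
	 */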
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
err = 0;
else
err = -EBUSY;
} else if (cmd_match(buf, "remove")) {
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
kick_rdev_from_array(rdev);
if (mddev->pers)
md_update_sb(mddev, 1);
md_new_event(mddev);
err = 0;
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-blocked")) {
if (!test_bit(Faulty, &rdev->flags) &&
rdev->badblocks.unacked_exist) {
/* metadata handler doesn't understand badblocks,
* so we need to fail the device
*/
md_error(rdev->mddev, rdev);
}
clear_bit(Blocked, &rdev->flags);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
clear_bit(In_sync, &rdev->flags);
rdev->saved_raid_disk = rdev->raid_disk;
rdev->raid_disk = -1;
err = 0;
} else if (cmd_match(buf, "write_error")) {
set_bit(WriteErrorSeen, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-write_error")) {
clear_bit(WriteErrorSeen, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "want_replacement")) {
/* Any non-spare device that is not a replacement can
* become want_replacement at any time, but we then need to
* check if recovery is needed.
*/
if (rdev->raid_disk >= 0 &&
!test_bit(Replacement, &rdev->flags))
set_bit(WantReplacement, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "-want_replacement")) {
/* Clearing 'want_replacement' is always allowed.
* Once replacements starts it is too late though.
*/
err = 0;
clear_bit(WantReplacement, &rdev->flags);
} else if (cmd_match(buf, "replacement")) {
/* Can only set a device as a replacement when array has not
* yet been started. Once running, replacement is automatic
* from spares, or by assigning 'slot'.
*/
if (rdev->mddev->pers)
err = -EBUSY;
else {
set_bit(Replacement, &rdev->flags);
err = 0;
}
} else if (cmd_match(buf, "-replacement")) {
/* Similarly, can only clear Replacement before start */
if (rdev->mddev->pers)
err = -EBUSY;
else {
clear_bit(Replacement, &rdev->flags);
err = 0;
}
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}
static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (*buf && (*e == 0 || *e == '\n')) {
atomic_set(&rdev->corrected_errors, n);
return len;
}
return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
if (rdev->raid_disk < 0)
return sprintf(page, "none\n");
else
return sprintf(page, "%d\n", rdev->raid_disk);
}
static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
char *e;
int err;
int slot = simple_strtoul(buf, &e, 10);
if (strncmp(buf, "none", 4)==0)
slot = -1;
	else if (e==buf || (*e && *e != '\n'))
return -EINVAL;
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
* updating the 'rd%d' link, and communicating
* with the personality with ->hot_*_disk.
* For now we only support removing
* failed/spare devices. This normally happens automatically,
* but not when the metadata is externally managed.
*/
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(rdev->mddev, rdev);
if (rdev->raid_disk >= 0)
return -EBUSY;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
if (rdev->raid_disk != -1)
return -EBUSY;
if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
return -EBUSY;
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
if (test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = slot;
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
rdev->raid_disk = -1;
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
if (sysfs_link_rdev(rdev->mddev, rdev))
/* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
/* assume it is working */
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
return len;
}
static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}
static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long offset;
if (kstrtoull(buf, 10, &offset) < 0)
return -EINVAL;
if (rdev->mddev->pers && rdev->raid_disk >= 0)
return -EBUSY;
if (rdev->sectors && rdev->mddev->external)
/* Must set offset before size, so overlap checks
* can be sane */
return -EBUSY;
rdev->data_offset = offset;
rdev->new_data_offset = offset;
return len;
}
static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)rdev->new_data_offset);
}
static ssize_t new_offset_store(struct md_rdev *rdev,
const char *buf, size_t len)
{
unsigned long long new_offset;
struct mddev *mddev = rdev->mddev;
if (kstrtoull(buf, 10, &new_offset) < 0)
return -EINVAL;
if (mddev->sync_thread)
return -EBUSY;
if (new_offset == rdev->data_offset)
/* reset is always permitted */
;
else if (new_offset > rdev->data_offset) {
/* must not push array size beyond rdev_sectors */
if (new_offset - rdev->data_offset
+ mddev->dev_sectors > rdev->sectors)
return -E2BIG;
}
/* Metadata worries about other space details. */
/* decreasing the offset is inconsistent with a backwards
* reshape.
*/
if (new_offset < rdev->data_offset &&
mddev->reshape_backwards)
return -EINVAL;
/* Increasing offset is inconsistent with forwards
* reshape. reshape_direction should be set to
* 'backwards' first.
*/
if (new_offset > rdev->data_offset &&
!mddev->reshape_backwards)
return -EINVAL;
if (mddev->pers && mddev->persistent &&
!super_types[mddev->major_version]
.allow_new_offset(rdev, new_offset))
return -E2BIG;
rdev->new_data_offset = new_offset;
if (new_offset > rdev->data_offset)
mddev->reshape_backwards = 1;
else if (new_offset < rdev->data_offset)
mddev->reshape_backwards = 0;
return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
/* check if two start/length pairs overlap */
if (s1+l1 <= s2)
return 0;
if (s2+l2 <= s1)
return 0;
return 1;
}
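/* Illustrative: the intervals are treated as half-open, so
 * overlaps(0, 100, 50, 10) == 1 but overlaps(0, 100, 100, 10) == 0. */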
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
unsigned long long blocks;
sector_t new;
if (kstrtoull(buf, 10, &blocks) < 0)
return -EINVAL;
if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
return -EINVAL; /* sector conversion overflow */
new = blocks * 2;
if (new != blocks * 2)
return -EINVAL; /* unsigned long long to sector_t overflow */
*sectors = new;
return 0;
}
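/* Illustrative: for input "1024\n" this sets *sectors = 2048, i.e.
 * 1K blocks are converted to 512-byte sectors (x2), with overflow
 * rejected both before and after the shift. */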
static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
struct mddev *my_mddev = rdev->mddev;
sector_t oldsectors = rdev->sectors;
sector_t sectors;
	if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (rdev->data_offset != rdev->new_data_offset)
return -EINVAL; /* too confusing */
if (my_mddev->pers && rdev->raid_disk >= 0) {
if (my_mddev->persistent) {
sectors = super_types[my_mddev->major_version].
rdev_size_change(rdev, sectors);
if (!sectors)
return -EBUSY;
} else if (!sectors)
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
if (!my_mddev->pers->resize)
/* Cannot change size for RAID0 or Linear etc */
return -EINVAL;
}
if (sectors < my_mddev->dev_sectors)
return -EINVAL; /* component must fit device */
rdev->sectors = sectors;
if (sectors > oldsectors && my_mddev->external) {
/* need to check that all other rdevs with the same ->bdev
* do not overlap. We need to unlock the mddev to avoid
* a deadlock. We have already changed rdev->sectors, and if
* we have to change it back, we will have the lock again.
*/
struct mddev *mddev;
int overlap = 0;
struct list_head *tmp;
mddev_unlock(my_mddev);
for_each_mddev(mddev, tmp) {
struct md_rdev *rdev2;
mddev_lock_nointr(mddev);
rdev_for_each(rdev2, mddev)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
overlaps(rdev->data_offset, rdev->sectors,
rdev2->data_offset,
rdev2->sectors)) {
overlap = 1;
break;
}
mddev_unlock(mddev);
if (overlap) {
mddev_put(mddev);
break;
}
}
mddev_lock_nointr(my_mddev);
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
* We put oldsectors back because we *know* it is
* safe, and trust userspace not to race with
* itself
*/
rdev->sectors = oldsectors;
return -EBUSY;
}
}
return len;
}
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
unsigned long long recovery_start = rdev->recovery_offset;
if (test_bit(In_sync, &rdev->flags) ||
recovery_start == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", recovery_start);
}
static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long recovery_start;
if (cmd_match(buf, "none"))
recovery_start = MaxSector;
else if (kstrtoull(buf, 10, &recovery_start))
return -EINVAL;
if (rdev->mddev->pers &&
rdev->raid_disk >= 0)
return -EBUSY;
rdev->recovery_offset = recovery_start;
if (recovery_start == MaxSector)
set_bit(In_sync, &rdev->flags);
else
clear_bit(In_sync, &rdev->flags);
return len;
}
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
int rv = badblocks_store(&rdev->badblocks, page, len, 0);
/* Maybe that ack was all we needed */
if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
wake_up(&rdev->blocked_wait);
return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
&rdev_new_offset.attr,
&rdev_size.attr,
&rdev_recovery_start.attr,
&rdev_bad_blocks.attr,
&rdev_unack_bad_blocks.attr,
NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
struct mddev *mddev = rdev->mddev;
ssize_t rv;
if (!entry->show)
return -EIO;
rv = mddev ? mddev_lock(mddev) : -EBUSY;
if (!rv) {
if (rdev->mddev == NULL)
rv = -EBUSY;
else
rv = entry->show(rdev, page);
mddev_unlock(mddev);
}
return rv;
}
static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
ssize_t rv;
struct mddev *mddev = rdev->mddev;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
rv = mddev ? mddev_lock(mddev): -EBUSY;
if (!rv) {
if (rdev->mddev == NULL)
rv = -EBUSY;
else
rv = entry->store(rdev, page, length);
mddev_unlock(mddev);
}
return rv;
}
static void rdev_free(struct kobject *ko)
{
struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
.show = rdev_attr_show,
.store = rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
.release = rdev_free,
.sysfs_ops = &rdev_sysfs_ops,
.default_attrs = rdev_default_attrs,
};
int md_rdev_init(struct md_rdev *rdev)
{
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
rdev->raid_disk = -1;
rdev->flags = 0;
rdev->data_offset = 0;
rdev->new_data_offset = 0;
rdev->sb_events = 0;
rdev->last_read_error.tv_sec = 0;
rdev->last_read_error.tv_nsec = 0;
rdev->sb_loaded = 0;
rdev->bb_page = NULL;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
/* Add space to store bad block list.
* This reserves the space even on arrays where it cannot
* be used - I wonder if that matters
*/
rdev->badblocks.count = 0;
rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
seqlock_init(&rdev->badblocks.lock);
if (rdev->badblocks.page == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
* mark the device faulty if:
*
* - the device is nonexistent (zero size)
* - the device has no valid superblock
*
* a faulty rdev _never_ has rdev->sb set.
*/
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
printk(KERN_ERR "md: could not alloc mem for new device!\n");
return ERR_PTR(-ENOMEM);
}
err = md_rdev_init(rdev);
if (err)
goto abort_free;
err = alloc_disk_sb(rdev);
if (err)
goto abort_free;
err = lock_rdev(rdev, newdev, super_format == -2);
if (err)
goto abort_free;
kobject_init(&rdev->kobj, &rdev_ktype);
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
}
if (super_format >= 0) {
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
printk(KERN_WARNING
"md: %s does not have a valid v%d.%d "
"superblock, not importing!\n",
bdevname(rdev->bdev,b),
super_format, super_minor);
goto abort_free;
}
if (err < 0) {
printk(KERN_WARNING
"md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
}
return rdev;
abort_free:
if (rdev->bdev)
unlock_rdev(rdev);
md_rdev_clear(rdev);
kfree(rdev);
return ERR_PTR(err);
}
/*
* Check a full RAID array for plausibility
*/
static void analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
char b[BDEVNAME_SIZE];
freshest = NULL;
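	/* load_super() compares each candidate against the freshest
	 * superblock seen so far: it returns 1 if this rdev is newer,
	 * 0 if it is not newer, and a negative errno if the superblock
	 * is inconsistent. */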
rdev_for_each_safe(rdev, tmp, mddev)
switch (super_types[mddev->major_version].
load_super(rdev, freshest, mddev->minor_version)) {
case 1:
freshest = rdev;
break;
case 0:
break;
default:
			printk(KERN_ERR
			       "md: fatal superblock inconsistency in %s"
			       " -- removing from array\n",
			       bdevname(rdev->bdev,b));
kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
validate_super(mddev, freshest);
i = 0;
rdev_for_each_safe(rdev, tmp, mddev) {
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
mddev->max_disks);
kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest)
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s"
" from array!\n",
bdevname(rdev->bdev,b));
kick_rdev_from_array(rdev);
continue;
}
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
set_bit(In_sync, &rdev->flags);
} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
rdev->raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
}
}
}
/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale' --
 * all without any floating-point arithmetic.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
unsigned long result = 0;
long decimals = -1;
while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
if (*cp == '.')
decimals = 0;
else if (decimals < scale) {
unsigned int value;
value = *cp - '0';
result = result * 10 + value;
if (decimals >= 0)
decimals++;
}
cp++;
}
if (*cp == '\n')
cp++;
if (*cp)
return -EINVAL;
if (decimals < 0)
decimals = 0;
while (decimals < scale) {
result *= 10;
		decimals++;
}
*res = result;
return 0;
}
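/* Illustrative: strict_strtoul_scaled("0.2", &res, 3) stores 200 in res,
 * and strict_strtoul_scaled("15", &res, 3) stores 15000 -- i.e. the
 * result is in units of 10^-scale (milliseconds for scale == 3). */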
static void md_safemode_timeout(unsigned long data);
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
int msec = (mddev->safemode_delay*1000)/HZ;
return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
unsigned long msec;
if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
return -EINVAL;
if (msec == 0)
mddev->safemode_delay = 0;
else {
unsigned long old_delay = mddev->safemode_delay;
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
if (mddev->safemode_delay < old_delay || old_delay == 0)
md_safemode_timeout((unsigned long)mddev);
}
return len;
}
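/* Illustrative: "echo 0.200 > safe_mode_delay" requests a 200ms delay.
 * The value is parsed in milliseconds (scale 3) and converted to jiffies;
 * 0 disables safemode, and sub-jiffy values are rounded up to 1. */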
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
static ssize_t
level_show(struct mddev *mddev, char *page)
{
struct md_personality *p = mddev->pers;
if (p)
return sprintf(page, "%s\n", p->name);
else if (mddev->clevel[0])
return sprintf(page, "%s\n", mddev->clevel);
else if (mddev->level != LEVEL_NONE)
return sprintf(page, "%d\n", mddev->level);
else
return 0;
}
static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
char clevel[16];
ssize_t rv = len;
struct md_personality *pers;
long level;
void *priv;
struct md_rdev *rdev;
if (mddev->pers == NULL) {
if (len == 0)
return 0;
if (len >= sizeof(mddev->clevel))
return -ENOSPC;
strncpy(mddev->clevel, buf, len);
if (mddev->clevel[len-1] == '\n')
len--;
mddev->clevel[len] = 0;
mddev->level = LEVEL_NONE;
return rv;
}
	/* request to change the personality. Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality can take over the array.
	 */
if (mddev->sync_thread ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
return -EBUSY;
if (!mddev->pers->quiesce) {
printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
mdname(mddev), mddev->pers->name);
return -EINVAL;
}
/* Now find the new personality */
if (len == 0 || len >= sizeof(clevel))
return -EINVAL;
strncpy(clevel, buf, len);
if (clevel[len-1] == '\n')
len--;
clevel[len] = 0;
if (kstrtol(clevel, 10, &level))
level = LEVEL_NONE;
if (request_module("md-%s", clevel) != 0)
request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
module_put(pers->owner);
return rv;
}
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
return -EINVAL;
}
rdev_for_each(rdev, mddev)
rdev->new_raid_disk = rdev->raid_disk;
/* ->takeover must set new_* and/or delta_disks
* if it succeeds, and may set them when it fails.
*/
priv = pers->takeover(mddev);
if (IS_ERR(priv)) {
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
mdname(mddev), clevel);
return PTR_ERR(priv);
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
if (mddev->pers->sync_request == NULL &&
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (mddev->pers->sync_request != NULL &&
pers->sync_request == NULL) {
/* need to remove the md_redundancy_group */
if (mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
}
if (mddev->pers->sync_request == NULL &&
mddev->external) {
		/* We are converting from a no-redundancy array to a
		 * redundancy array, and metadata is managed externally,
		 * so we need to be sure that writes won't block due to
		 * a need to transition clean->dirty until external
		 * management is started.
		 */
mddev->in_sync = 0;
mddev->safemode_delay = 0;
mddev->safemode = 0;
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
sysfs_unlink_rdev(mddev, rdev);
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
rdev->raid_disk = rdev->new_raid_disk;
if (rdev->raid_disk < 0)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
printk(KERN_WARNING "md: cannot register rd%d"
" for %s after level change\n",
rdev->raid_disk, mdname(mddev));
}
}
module_put(mddev->pers->owner);
mddev->pers = pers;
mddev->private = priv;
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->degraded = 0;
if (mddev->pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
*/
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
return rv;
}
static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(struct mddev *mddev, char *page)
{
/* just a number, not meaningful for all levels */
if (mddev->reshape_position != MaxSector &&
mddev->layout != mddev->new_layout)
return sprintf(page, "%d (%d)\n",
mddev->new_layout, mddev->layout);
return sprintf(page, "%d\n", mddev->layout);
}
static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers) {
int err;
if (mddev->pers->check_reshape == NULL)
return -EBUSY;
mddev->new_layout = n;
err = mddev->pers->check_reshape(mddev);
if (err) {
mddev->new_layout = mddev->layout;
return err;
}
} else {
mddev->new_layout = n;
if (mddev->reshape_position == MaxSector)
mddev->layout = n;
}
return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
if (mddev->raid_disks == 0)
return 0;
if (mddev->reshape_position != MaxSector &&
mddev->delta_disks != 0)
return sprintf(page, "%d (%d)\n", mddev->raid_disks,
mddev->raid_disks - mddev->delta_disks);
return sprintf(page, "%d\n", mddev->raid_disks);
}
static int update_raid_disks(struct mddev *mddev, int raid_disks);
static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
int rv = 0;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers)
rv = update_raid_disks(mddev, n);
else if (mddev->reshape_position != MaxSector) {
struct md_rdev *rdev;
int olddisks = mddev->raid_disks - mddev->delta_disks;
rdev_for_each(rdev, mddev) {
if (olddisks < n &&
rdev->data_offset < rdev->new_data_offset)
return -EINVAL;
if (olddisks > n &&
rdev->data_offset > rdev->new_data_offset)
return -EINVAL;
}
mddev->delta_disks = n - olddisks;
mddev->raid_disks = n;
mddev->reshape_backwards = (mddev->delta_disks < 0);
} else
mddev->raid_disks = n;
return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector &&
mddev->chunk_sectors != mddev->new_chunk_sectors)
return sprintf(page, "%d (%d)\n",
mddev->new_chunk_sectors << 9,
mddev->chunk_sectors << 9);
return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}
static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers) {
int err;
if (mddev->pers->check_reshape == NULL)
return -EBUSY;
mddev->new_chunk_sectors = n >> 9;
err = mddev->pers->check_reshape(mddev);
if (err) {
mddev->new_chunk_sectors = mddev->chunk_sectors;
return err;
}
} else {
mddev->new_chunk_sectors = n >> 9;
if (mddev->reshape_position == MaxSector)
mddev->chunk_sectors = n >> 9;
}
return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
if (mddev->recovery_cp == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}
static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long n = simple_strtoull(buf, &e, 10);
if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
return -EBUSY;
if (cmd_match(buf, "none"))
n = MaxSector;
else if (!*buf || (*e && *e != '\n'))
return -EINVAL;
mddev->recovery_cp = n;
if (mddev->pers)
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
* The array state can be:
*
* clear
* No devices, no size, no level
* Equivalent to STOP_ARRAY ioctl
* inactive
* May have some settings, but array is not active
* all IO results in error
* When written, doesn't tear down array, but just stops it
* suspended (not supported yet)
* All IO requests will block. The array can be reconfigured.
* Writing this, if accepted, will block until array is quiescent
* readonly
* no resync can happen. no superblocks get written.
* write requests fail
* read-auto
* like readonly, but behaves like 'clean' on a write request.
*
 * clean
 *     no pending writes, but otherwise active.
 *     When written to an inactive array, starts without resync.
 *     If a write request arrives then:
 *         if metadata is known, mark 'dirty' and switch to 'active';
 *         if not known, block and switch to write-pending.
 *     If written to an active array that has pending writes, it fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to an inactive array, starts with resync.
*
* write-pending
* clean, but writes are blocked waiting for 'active' to be written.
*
 * active-idle
 *     like active, but no writes have been seen for the safe_mode_delay
 *     (200 msec by default).
*
*/
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
write_pending, active_idle, bad_word};
static char *array_states[] = {
"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
int n;
for (n=0; list[n]; n++)
if (cmd_match(word, list[n]))
break;
return n;
}
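/* Note: array_states[] must stay in the same order as enum array_state;
 * match_word() returns the matching index, and a failed match returns
 * the index of the NULL terminator, which is bad_word. */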
static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
enum array_state st = inactive;
if (mddev->pers)
switch(mddev->ro) {
case 1:
st = readonly;
break;
case 2:
st = read_auto;
break;
case 0:
if (mddev->in_sync)
st = clean;
else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
else
st = active;
}
else {
if (list_empty(&mddev->disks) &&
mddev->raid_disks == 0 &&
mddev->dev_sectors == 0)
st = clear;
else
st = inactive;
}
return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
int err = -EINVAL;
enum array_state st = match_word(buf, array_states);
switch(st) {
case bad_word:
break;
case clear:
/* stopping an active array */
err = do_md_stop(mddev, 0, NULL);
break;
case inactive:
/* stopping an active array */
if (mddev->pers)
err = do_md_stop(mddev, 2, NULL);
else
err = 0; /* already inactive */
break;
case suspended:
break; /* not supported yet */
case readonly:
if (mddev->pers)
err = md_set_readonly(mddev, NULL);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
err = do_md_run(mddev);
}
break;
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
err = md_set_readonly(mddev, NULL);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
} else {
mddev->ro = 2;
err = do_md_run(mddev);
}
break;
case clean:
if (mddev->pers) {
restart_array(mddev);
spin_lock_irq(&mddev->write_lock);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
err = -EBUSY;
spin_unlock_irq(&mddev->write_lock);
} else
err = -EINVAL;
break;
case active:
if (mddev->pers) {
restart_array(mddev);
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
mddev->ro = 0;
set_disk_ro(mddev->gendisk, 0);
err = do_md_run(mddev);
}
break;
case write_pending:
case active_idle:
/* these cannot be set */
break;
}
if (err)
return err;
else {
if (mddev->hold_active == UNTIL_IOCTL)
mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n",
atomic_read(&mddev->max_corr_read_errors));
}
static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (*buf && (*e == 0 || *e == '\n')) {
atomic_set(&mddev->max_corr_read_errors, n);
return len;
}
return -EINVAL;
}
static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
max_corrected_read_errors_store);
static ssize_t
null_show(struct mddev *mddev, char *page)
{
return -EINVAL;
}
static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* buf must be "%d:%d", optionally newline-terminated, giving major and minor numbers */
/* The new device is added to the array.
* If the array has a persistent superblock, we read the
* superblock to initialise info and check validity.
* Otherwise, only checking done is that in bind_rdev_to_array,
* which mainly checks size.
*/
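	/* Illustrative: "echo 8:16 > /sys/block/md0/md/new_dev" adds the
	 * device with major 8, minor 16 (typically /dev/sdb) to the array.
	 */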
char *e;
int major = simple_strtoul(buf, &e, 10);
int minor;
dev_t dev;
struct md_rdev *rdev;
int err;
if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
return -EINVAL;
minor = simple_strtoul(e+1, &e, 10);
if (*e && *e != '\n')
return -EINVAL;
dev = MKDEV(major, minor);
if (major != MAJOR(dev) ||
minor != MINOR(dev))
return -EOVERFLOW;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0)
goto out;
}
} else if (mddev->external)
rdev = md_import_device(dev, -2, -1);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
err = bind_rdev_to_array(rdev, mddev);
out:
if (err)
export_rdev(rdev);
return err ? err : len;
}
static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
char *end;
unsigned long chunk, end_chunk;
if (!mddev->bitmap)
goto out;
/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
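	/* e.g. writing "100 200-300" marks chunk 100 and chunks 200..300
	 * dirty in the bitmap (illustrative). */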
while (*buf) {
chunk = end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
if (*end == '-') { /* range */
buf = end + 1;
end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
}
if (*end && !isspace(*end)) break;
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end);
}
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
return len;
}
static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
static ssize_t
size_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->dev_sectors / 2);
}
static int update_size(struct mddev *mddev, sector_t num_sectors);
static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
/* If array is inactive, we can reduce the component size, but
* not increase it (except from 0).
* If array is active, we can try an on-line resize
*/
sector_t sectors;
int err = strict_blocks_to_sectors(buf, §ors);
if (err < 0)
return err;
if (mddev->pers) {
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
mddev->dev_sectors = sectors;
else
err = -ENOSPC;
}
return err ? err : len;
}
static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
* 'external' for arrays with externally managed metadata,
* or N.M for internally known formats
*/
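/* Illustrative writes: "none", "0.90", "1.2", or "external:imsm". */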
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
if (mddev->persistent)
return sprintf(page, "%d.%d\n",
mddev->major_version, mddev->minor_version);
else if (mddev->external)
return sprintf(page, "external:%s\n", mddev->metadata_type);
else
return sprintf(page, "none\n");
}
static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
int major, minor;
char *e;
/* Changing the details of 'external' metadata is
* always permitted. Otherwise there must be
* no devices attached to the array.
*/
if (mddev->external && strncmp(buf, "external:", 9) == 0)
;
else if (!list_empty(&mddev->disks))
return -EBUSY;
if (cmd_match(buf, "none")) {
mddev->persistent = 0;
mddev->external = 0;
mddev->major_version = 0;
mddev->minor_version = 90;
return len;
}
if (strncmp(buf, "external:", 9) == 0) {
size_t namelen = len-9;
if (namelen >= sizeof(mddev->metadata_type))
namelen = sizeof(mddev->metadata_type)-1;
strncpy(mddev->metadata_type, buf+9, namelen);
mddev->metadata_type[namelen] = 0;
if (namelen && mddev->metadata_type[namelen-1] == '\n')
mddev->metadata_type[--namelen] = 0;
mddev->persistent = 0;
mddev->external = 1;
mddev->major_version = 0;
mddev->minor_version = 90;
return len;
}
major = simple_strtoul(buf, &e, 10);
if (e==buf || *e != '.')
return -EINVAL;
buf = e+1;
minor = simple_strtoul(buf, &e, 10);
if (e==buf || (*e && *e != '\n') )
return -EINVAL;
if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
return -ENOENT;
mddev->major_version = major;
mddev->minor_version = minor;
mddev->persistent = 1;
mddev->external = 0;
return len;
}
static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
{
char *type = "idle";
if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
type = "frozen";
else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
type = "reshape";
else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
type = "resync";
else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
type = "check";
else
type = "repair";
} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
type = "recover";
}
return sprintf(page, "%s\n", type);
}
static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
if (cmd_match(page, "frozen"))
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
else if (cmd_match(page, "recover")) {
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
} else if (cmd_match(page, "reshape")) {
int err;
if (mddev->pers->start_reshape == NULL)
return -EINVAL;
err = mddev->pers->start_reshape(mddev);
if (err)
return err;
sysfs_notify(&mddev->kobj, NULL, "degraded");
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
else if (!cmd_match(page, "repair"))
return -EINVAL;
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
}
if (mddev->ro == 2) {
/* A write to sync_action is enough to justify
* canceling read-auto mode
*/
mddev->ro = 0;
md_wakeup_thread(mddev->sync_thread);
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
return len;
}
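/* Illustrative: "echo check > /sys/block/md0/md/sync_action" starts a
 * read-only scrub that only counts mismatches, while "repair" also
 * rewrites them; "idle" interrupts a running sync thread. */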
static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%s\n", mddev->last_sync_action);
}
static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)
atomic64_read(&mddev->resync_mismatches));
}
static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
mddev->sync_speed_min ? "local": "system");
}
static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
int min;
char *e;
if (strncmp(buf, "system", 6)==0) {
mddev->sync_speed_min = 0;
return len;
}
min = simple_strtoul(buf, &e, 10);
if (buf == e || (*e && *e != '\n') || min <= 0)
return -EINVAL;
mddev->sync_speed_min = min;
return len;
}
static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
mddev->sync_speed_max ? "local": "system");
}
static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
int max;
char *e;
if (strncmp(buf, "system", 6)==0) {
mddev->sync_speed_max = 0;
return len;
}
max = simple_strtoul(buf, &e, 10);
if (buf == e || (*e && *e != '\n') || max <= 0)
return -EINVAL;
mddev->sync_speed_max = max;
return len;
}
static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->parallel_resync);
}
static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
long n;
if (kstrtol(buf, 10, &n))
return -EINVAL;
if (n != 0 && n != 1)
return -EINVAL;
mddev->parallel_resync = n;
if (mddev->sync_thread)
wake_up(&resync_wait);
return len;
}
/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
unsigned long resync, dt, db;
if (mddev->curr_resync == 0)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
if (!dt) dt++;
db = resync - mddev->resync_mark_cnt;
return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}
static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
unsigned long long max_sectors, resync;
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
if (mddev->curr_resync == 1 ||
mddev->curr_resync == 2)
return sprintf(page, "delayed\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync_completed;
return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long min;
if (kstrtoull(buf, 10, &min))
return -EINVAL;
if (min > mddev->resync_max)
return -EINVAL;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
/* Must be a multiple of chunk_size */
if (mddev->chunk_sectors) {
sector_t temp = min;
if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL;
}
mddev->resync_min = min;
return len;
}
static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
if (mddev->resync_max == MaxSector)
return sprintf(page, "max\n");
else
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
if (strncmp(buf, "max", 3) == 0)
mddev->resync_max = MaxSector;
else {
unsigned long long max;
if (kstrtoull(buf, 10, &max))
return -EINVAL;
if (max < mddev->resync_min)
return -EINVAL;
if (max < mddev->resync_max &&
mddev->ro == 0 &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
/* Must be a multiple of chunk_size */
if (mddev->chunk_sectors) {
sector_t temp = max;
if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL;
}
mddev->resync_max = max;
}
wake_up(&mddev->recovery_wait);
return len;
}
static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}
static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
unsigned long long old = mddev->suspend_lo;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->suspend_lo = new;
if (new >= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}
static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
unsigned long long old = mddev->suspend_hi;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->suspend_hi = new;
if (new <= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->reshape_position);
strcpy(page, "none\n");
return 5;
}
static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
struct md_rdev *rdev;
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
if (mddev->pers)
return -EBUSY;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->reshape_position = new;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
return len;
}
static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
reshape_position_store);
static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%s\n",
mddev->reshape_backwards ? "backwards" : "forwards");
}
static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
int backwards = 0;
if (cmd_match(buf, "forwards"))
backwards = 0;
else if (cmd_match(buf, "backwards"))
backwards = 1;
else
return -EINVAL;
if (mddev->reshape_backwards == backwards)
return len;
/* check if we are allowed to change */
if (mddev->delta_disks)
return -EBUSY;
if (mddev->persistent &&
mddev->major_version == 0)
return -EINVAL;
mddev->reshape_backwards = backwards;
return len;
}
static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
reshape_direction_store);
static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
if (mddev->external_size)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->array_sectors/2);
else
return sprintf(page, "default\n");
}
static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
sector_t sectors;
if (strncmp(buf, "default", 7) == 0) {
if (mddev->pers)
sectors = mddev->pers->size(mddev, 0, 0);
else
sectors = mddev->array_sectors;
mddev->external_size = 0;
} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
return -E2BIG;
mddev->external_size = 1;
}
mddev->array_sectors = sectors;
if (mddev->pers) {
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
}
return len;
}
static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
array_size_store);
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_chunk_size.attr,
&md_size.attr,
&md_resync_start.attr,
&md_metadata.attr,
&md_new_device.attr,
&md_safe_delay.attr,
&md_array_state.attr,
&md_reshape_position.attr,
&md_reshape_direction.attr,
&md_array_size.attr,
&max_corr_read_errors.attr,
NULL,
};
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_last_scan_mode.attr,
&md_mismatches.attr,
&md_sync_min.attr,
&md_sync_max.attr,
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
&md_min_sync.attr,
&md_max_sync.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
&md_bitmap.attr,
&md_degraded.attr,
NULL,
};
static struct attribute_group md_redundancy_group = {
.name = NULL,
.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
if (!entry->show)
return -EIO;
spin_lock(&all_mddevs_lock);
if (list_empty(&mddev->all_mddevs)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
mddev_put(mddev);
return rv;
}
static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock(&all_mddevs_lock);
if (list_empty(&mddev->all_mddevs)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
if (entry->store == new_dev_store)
flush_workqueue(md_misc_wq);
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
mddev_put(mddev);
return rv;
}
static void md_free(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
if (mddev->queue)
blk_cleanup_queue(mddev->queue);
kfree(mddev);
}
static const struct sysfs_ops md_sysfs_ops = {
.show = md_attr_show,
.store = md_attr_store,
};
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
.default_attrs = md_default_attrs,
};
int mdp_major = 0;
static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
static int md_alloc(dev_t dev, char *name)
{
static DEFINE_MUTEX(disks_mutex);
struct mddev *mddev = mddev_find(dev);
struct gendisk *disk;
int partitioned;
int shift;
int unit;
int error;
if (!mddev)
return -ENODEV;
partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
shift = partitioned ? MdpMinorShift : 0;
unit = MINOR(mddev->unit) >> shift;
/* wait for any previous instance of this device to be
* completely removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
mutex_lock(&disks_mutex);
error = -EEXIST;
if (mddev->gendisk)
goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
*/
struct mddev *mddev2;
spin_lock(&all_mddevs_lock);
list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
goto abort;
}
spin_unlock(&all_mddevs_lock);
}
error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
if (!mddev->queue)
goto abort;
mddev->queue->queuedata = mddev;
blk_queue_make_request(mddev->queue, md_make_request);
blk_set_stacking_limits(&mddev->queue->limits);
disk = alloc_disk(1 << shift);
if (!disk) {
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
if (name)
strcpy(disk->disk_name, name);
else if (partitioned)
sprintf(disk->disk_name, "md_d%d", unit);
else
sprintf(disk->disk_name, "md%d", unit);
disk->fops = &md_fops;
disk->private_data = mddev;
disk->queue = mddev->queue;
blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
/* Allow extended partitions. This makes the
* 'mdp' device redundant, but we can't really
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
mddev->gendisk = disk;
/* As soon as we call add_disk(), another thread could get
* through to md_open, so make sure it doesn't get too far
*/
mutex_lock(&mddev->open_mutex);
add_disk(disk);
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
if (error) {
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
printk(KERN_DEBUG "pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
md_alloc(dev, NULL);
return NULL;
}
static int add_named_array(const char *val, struct kernel_param *kp)
{
/* val must be "md_*" where * is not all digits.
* We allocate an array with a large free minor number, and
* set the name to val. val must not already be an active name.
*/
int len = strlen(val);
char buf[DISK_NAME_LEN];
while (len && val[len-1] == '\n')
len--;
if (len >= DISK_NAME_LEN)
return -E2BIG;
strlcpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) != 0)
return -EINVAL;
return md_alloc(0, buf);
}
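/* Illustrative: "echo md_home > /sys/module/md_mod/parameters/new_array"
 * creates an array named md_home, assuming the new_array module
 * parameter is wired to this handler (as it is in md_mod). */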
static void md_safemode_timeout(unsigned long data)
{
struct mddev *mddev = (struct mddev *) data;
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;
int md_run(struct mddev *mddev)
{
int err;
struct md_rdev *rdev;
struct md_personality *pers;
if (list_empty(&mddev->disks))
/* cannot run an array with no devices.. */
return -EINVAL;
if (mddev->pers)
return -EBUSY;
/* Cannot run until previous stop completes properly */
if (mddev->sysfs_active)
return -EBUSY;
/*
* Analyze all RAID superblock(s)
*/
if (!mddev->raid_disks) {
if (!mddev->persistent)
return -EINVAL;
analyze_sbs(mddev);
}
if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0])
request_module("md-%s", mddev->clevel);
/*
* Drop all container device buffers, from now on
* the only valid external interface is through the md
* device.
*/
rdev_for_each(rdev, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata;
		 * internal bitmap issues have been handled elsewhere.
		 */
if (rdev->meta_bdev) {
/* Nothing to check */;
} else if (rdev->data_offset < rdev->sb_start) {
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
printk("md: %s: data overlaps metadata\n",
mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
printk("md: %s: metadata overlaps data\n",
mdname(mddev));
return -EINVAL;
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
if (mddev->bio_set == NULL)
mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
mddev->level);
else
printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
mddev->clevel);
return -EINVAL;
}
mddev->pers = pers;
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
mddev->level = pers->level;
mddev->new_level = pers->level;
}
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
mddev->pers = NULL;
module_put(pers->owner);
return -EINVAL;
}
if (pers->sync_request) {
/* Warn if this is a potentially silly
* configuration.
*/
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev2;
int warned = 0;
rdev_for_each(rdev, mddev)
rdev_for_each(rdev2, mddev) {
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
printk(KERN_WARNING
"%s: WARNING: %s appears to be"
" on the same physical disk as"
" %s.\n",
mdname(mddev),
bdevname(rdev->bdev,b),
bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
printk(KERN_WARNING
"True protection against single-disk"
" failure might be compromised.\n");
}
mddev->recovery = 0;
	/* may be overridden by the personality */
mddev->resync_max_sectors = mddev->dev_sectors;
mddev->ok_start_degraded = start_dirty_degraded;
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
err = mddev->pers->run(mddev);
if (err)
printk(KERN_ERR "md: pers->run() failed ...\n");
else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
WARN_ONCE(!mddev->external_size, "%s: default size too small,"
" but 'external_size' not in effect?\n", __func__);
printk(KERN_ERR
"md: invalid array_size %llu > default size %llu\n",
(unsigned long long)mddev->array_sectors / 2,
(unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
mddev->pers->stop(mddev);
}
if (err == 0 && mddev->pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
err = bitmap_create(mddev);
if (err) {
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
mddev->pers->stop(mddev);
}
}
if (err) {
module_put(mddev->pers->owner);
mddev->pers = NULL;
bitmap_destroy(mddev);
return err;
}
if (mddev->pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 + 1; /* 200 msec delay */
mddev->in_sync = 1;
smp_wmb();
mddev->ready = 1;
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
}
EXPORT_SYMBOL_GPL(md_run);
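/* Run the array and load its bitmap, then make the new capacity
 * visible to the block layer and notify userspace of the change.
 */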
static int do_md_run(struct mddev *mddev)
{
int err;
err = md_run(mddev);
if (err)
goto out;
err = bitmap_load(mddev);
if (err) {
bitmap_destroy(mddev);
goto out;
}
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
}
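/* Switch a read-only array back to read-write and kick off any
 * recovery or resync that may be needed.
 */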
static int restart_array(struct mddev *mddev)
{
struct gendisk *disk = mddev->gendisk;
/* Complain if it has no devices */
if (list_empty(&mddev->disks))
return -ENXIO;
if (!mddev->pers)
return -EINVAL;
if (!mddev->ro)
return -EBUSY;
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
printk(KERN_INFO "md: %s switched to read-write mode.\n",
mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
/* similar to deny_write_access, but accounts for our holding a reference
* to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
struct inode *inode = file->f_mapping->host;
spin_lock(&inode->i_lock);
if (atomic_read(&inode->i_writecount) > 1) {
spin_unlock(&inode->i_lock);
return -ETXTBSY;
}
atomic_set(&inode->i_writecount, -1);
spin_unlock(&inode->i_lock);
return 0;
}
void restore_bitmap_write_access(struct file *file)
{
struct inode *inode = file->f_mapping->host;
spin_lock(&inode->i_lock);
atomic_set(&inode->i_writecount, 1);
spin_unlock(&inode->i_lock);
}
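/* Reset all array configuration to its default state so the mddev
 * can be reused or released.
 */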
static void md_clean(struct mddev *mddev)
{
mddev->array_sectors = 0;
mddev->external_size = 0;
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
mddev->external = 0;
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0;
mddev->layout = 0;
mddev->max_disks = 0;
mddev->events = 0;
mddev->can_decrease_events = 0;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
mddev->curr_resync = 0;
atomic64_set(&mddev->resync_mismatches, 0);
mddev->suspend_lo = mddev->suspend_hi = 0;
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->merge_check_needed = 0;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
mddev->bitmap_info.default_space = 0;
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
}
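/* Quiesce all writes: freeze recovery, reap any sync thread, flush
 * the bitmap, and mark the array clean in the superblock if needed.
 */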
static void __md_stop_writes(struct mddev *mddev)
{
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
del_timer_sync(&mddev->safemode_timer);
bitmap_flush(mddev);
md_super_wait(mddev);
if (mddev->ro == 0 &&
(!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
}
void md_stop_writes(struct mddev *mddev)
{
mddev_lock_nointr(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);
static void __md_stop(struct mddev *mddev)
{
mddev->ready = 0;
mddev->pers->stop(mddev);
if (mddev->pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
module_put(mddev->pers->owner);
mddev->pers = NULL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
void md_stop(struct mddev *mddev)
{
	/* stop the array and free any attached data structures.
* This is called from dm-raid
*/
__md_stop(mddev);
bitmap_destroy(mddev);
if (mddev->bio_set)
bioset_free(mddev->bio_set);
}
EXPORT_SYMBOL_GPL(md_stop);
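/* Switch a running array to read-only mode, failing with -EBUSY if
 * it is still held open elsewhere or a sync thread is active.
 */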
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
int did_freeze = 0;
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
}
mddev_unlock(mddev);
wait_event(resync_wait, mddev->sync_thread == NULL);
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > !!bdev ||
mddev->sync_thread ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
err = -EBUSY;
goto out;
}
if (mddev->pers) {
__md_stop_writes(mddev);
err = -ENXIO;
if (mddev->ro==1)
goto out;
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
return err;
}
/* mode:
* 0 - completely stop and dis-assemble array
* 2 - stop but do not disassemble array
*/
static int do_md_stop(struct mddev * mddev, int mode,
struct block_device *bdev)
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
int did_freeze = 0;
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
/* Thread might be blocked waiting for metadata update
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
}
mddev_unlock(mddev);
wait_event(resync_wait, mddev->sync_thread == NULL);
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > !!bdev ||
mddev->sysfs_active ||
mddev->sync_thread ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
return -EBUSY;
}
if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
__md_stop_writes(mddev);
__md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
sysfs_unlink_rdev(mddev, rdev);
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
mddev->ro = 0;
} else
mutex_unlock(&mddev->open_mutex);
/*
* Free resources if final stop
*/
if (mode == 0) {
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
restore_bitmap_write_access(mddev->bitmap_info.file);
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
}
mddev->bitmap_info.offset = 0;
export_array(mddev);
md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
blk_integrity_unregister(disk);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
#ifndef MODULE
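/* Try to start a freshly assembled array; if the run fails, stop
 * and disassemble it again.
 */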
static void autorun_array(struct mddev *mddev)
{
struct md_rdev *rdev;
int err;
if (list_empty(&mddev->disks))
return;
printk(KERN_INFO "md: running: ");
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
printk("<%s>", bdevname(rdev->bdev,b));
}
printk("\n");
err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, NULL);
}
}
/*
 * let's try to run arrays based on all disks that have arrived
* until now. (those are in pending_raid_disks)
*
* the method: pick the first pending disk, collect all disks with
* the same UUID, remove all from the pending list and put them into
* the 'same_array' list. Then order this list based on superblock
* update time (freshest comes first), kick out 'old' disks and
* compare superblocks. If everything's fine then run it.
*
* If "unit" is allocated, then bump its reference count
*/
static void autorun_devices(int part)
{
struct md_rdev *rdev0, *rdev, *tmp;
struct mddev *mddev;
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
LIST_HEAD(candidates);
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
printk(KERN_INFO "md: considering %s ...\n",
bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
printk(KERN_INFO "md: adding %s ...\n",
bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
* now we have a set of devices, with all of them having
* mostly sane superblocks. It's time to allocate the
* mddev.
*/
if (part) {
dev = MKDEV(mdp_major,
rdev0->preferred_minor << MdpMinorShift);
unit = MINOR(dev) >> MdpMinorShift;
} else {
dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
printk(KERN_INFO "md: unit number in %s is bad: %d\n",
bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
md_probe(dev, NULL, NULL);
mddev = mddev_find(dev);
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
printk(KERN_ERR
"md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
printk(KERN_WARNING "md: %s locked, cannot run\n",
mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
printk(KERN_INFO "md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
if (bind_rdev_to_array(rdev, mddev))
export_rdev(rdev);
}
autorun_array(mddev);
mddev_unlock(mddev);
}
/* on success, candidates will be empty, on error
* it won't...
*/
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
export_rdev(rdev);
}
mddev_put(mddev);
}
printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
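/* RAID_VERSION ioctl: report the driver's version numbers. */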
static int get_version(void __user * arg)
{
mdu_version_t ver;
ver.major = MD_MAJOR_VERSION;
ver.minor = MD_MINOR_VERSION;
ver.patchlevel = MD_PATCHLEVEL_VERSION;
if (copy_to_user(arg, &ver, sizeof(ver)))
return -EFAULT;
return 0;
}
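/* GET_ARRAY_INFO ioctl: count the member devices under RCU and copy
 * an mdu_array_info_t describing the array out to userspace.
 */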
static int get_array_info(struct mddev * mddev, void __user * arg)
{
mdu_array_info_t info;
	int nr, working, insync, failed, spare;
struct md_rdev *rdev;
nr = working = insync = failed = spare = 0;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
nr++;
if (test_bit(Faulty, &rdev->flags))
failed++;
else {
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
else
spare++;
}
}
rcu_read_unlock();
info.major_version = mddev->major_version;
info.minor_version = mddev->minor_version;
info.patch_version = MD_PATCHLEVEL_VERSION;
info.ctime = mddev->ctime;
info.level = mddev->level;
info.size = mddev->dev_sectors / 2;
if (info.size != mddev->dev_sectors / 2) /* overflow */
info.size = -1;
info.nr_disks = nr;
info.raid_disks = mddev->raid_disks;
info.md_minor = mddev->md_minor;
	info.not_persistent = !mddev->persistent;
info.utime = mddev->utime;
info.state = 0;
if (mddev->in_sync)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
info.state = (1<<MD_SB_BITMAP_PRESENT);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
info.spare_disks = spare;
info.layout = mddev->layout;
info.chunk_size = mddev->chunk_sectors << 9;
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int get_bitmap_file(struct mddev * mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr, *buf = NULL;
int err = -ENOMEM;
file = kmalloc(sizeof(*file), GFP_NOIO);
if (!file)
goto out;
/* bitmap disabled, zero the first byte and copy out */
if (!mddev->bitmap || !mddev->bitmap->storage.file) {
file->pathname[0] = '\0';
goto copy_out;
}
buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
if (!buf)
goto out;
ptr = d_path(&mddev->bitmap->storage.file->f_path,
buf, sizeof(file->pathname));
if (IS_ERR(ptr))
goto out;
strcpy(file->pathname, ptr);
copy_out:
err = 0;
if (copy_to_user(arg, file, sizeof(*file)))
err = -EFAULT;
out:
kfree(buf);
kfree(file);
return err;
}
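/* GET_DISK_INFO ioctl: look up the rdev by number under RCU and
 * report its device numbers, role and state flags.
 */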
static int get_disk_info(struct mddev * mddev, void __user * arg)
{
mdu_disk_info_t info;
struct md_rdev *rdev;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
rcu_read_lock();
rdev = find_rdev_nr_rcu(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
info.raid_disk = rdev->raid_disk;
info.state = 0;
if (test_bit(Faulty, &rdev->flags))
info.state |= (1<<MD_DISK_FAULTY);
else if (test_bit(In_sync, &rdev->flags)) {
info.state |= (1<<MD_DISK_ACTIVE);
info.state |= (1<<MD_DISK_SYNC);
}
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
info.state = (1<<MD_DISK_REMOVED);
}
rcu_read_unlock();
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
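/* ADD_NEW_DISK ioctl. Three cases: adding a device to an array that
 * is still being assembled (raid_disks == 0), hot-adding a spare to
 * a running array, and adding a device to a stopped version-0 array.
 */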
static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
return -EOVERFLOW;
if (!mddev->raid_disks) {
int err;
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
if (!list_empty(&mddev->disks)) {
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
"md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
return -EINVAL;
}
}
err = bind_rdev_to_array(rdev, mddev);
if (err)
export_rdev(rdev);
return err;
}
/*
* add_new_disk can be used once the array is assembled
* to add "hot spares". They must already have a superblock
* written
*/
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
/* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
rdev->saved_raid_disk = rdev->raid_disk;
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
rdev->raid_disk != info->raid_disk) {
			/* This was a hot-add request, but the events
			 * don't match, so reject it.
*/
export_rdev(rdev);
return -EINVAL;
}
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (!err && !mddev->pers->hot_remove_disk) {
			/* If the personality has hot_add_disk but no
			 * hot_remove_disk, then disks are only added for
			 * geometry changes and should be activated
			 * immediately.
*/
super_types[mddev->major_version].
validate_super(mddev, rdev);
err = mddev->pers->hot_add_disk(mddev, rdev);
if (err)
unbind_rdev_from_array(rdev);
}
if (err)
export_rdev(rdev);
else
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (!err)
md_new_event(mddev);
md_wakeup_thread(mddev->thread);
return err;
}
/* otherwise, add_new_disk is only allowed
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
mdname(mddev));
return -EINVAL;
}
if (!(info->state & (1<<MD_DISK_FAULTY))) {
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
rdev->desc_nr = info->number;
if (info->raid_disk < mddev->raid_disks)
rdev->raid_disk = info->raid_disk;
else
rdev->raid_disk = -1;
if (rdev->raid_disk < mddev->raid_disks)
if (info->state & (1<<MD_DISK_SYNC))
set_bit(In_sync, &rdev->flags);
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
err = bind_rdev_to_array(rdev, mddev);
if (err) {
export_rdev(rdev);
return err;
}
}
return 0;
}
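/* HOT_REMOVE_DISK ioctl: detach the given device from the array,
 * refusing with -EBUSY if it is still an active member.
 */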
static int hot_remove_disk(struct mddev * mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(mddev, rdev);
if (rdev->raid_disk >= 0)
goto busy;
kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
return 0;
busy:
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
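/* HOT_ADD_DISK ioctl: import a device and bind it to a running
 * version-0 array as a spare, then kick off recovery.
 */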
static int hot_add_disk(struct mddev * mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
if (!mddev->pers)
return -ENODEV;
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: HOT_ADD may only be used with"
" version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev);
else
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
}
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
goto abort_export;
/*
	 * The rest had better be atomic; we can have disk failures
* noticed in interrupt contexts ...
*/
rdev->raid_disk = -1;
md_update_sb(mddev, 1);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_new_event(mddev);
return 0;
abort_export:
export_rdev(rdev);
return err;
}
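/* SET_BITMAP_FILE ioctl: attach a file-backed bitmap to the array
 * (fd >= 0) or remove the existing one (fd < 0).
 */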
static int set_bitmap_file(struct mddev *mddev, int fd)
{
int err;
if (mddev->pers) {
if (!mddev->pers->quiesce)
return -EBUSY;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
/* we should be able to change the bitmap.. */
}
if (fd >= 0) {
if (mddev->bitmap)
return -EEXIST; /* cannot add when bitmap is present */
mddev->bitmap_info.file = fget(fd);
if (mddev->bitmap_info.file == NULL) {
printk(KERN_ERR "%s: error: failed to get bitmap file\n",
mdname(mddev));
return -EBADF;
}
err = deny_bitmap_write_access(mddev->bitmap_info.file);
if (err) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
return err;
}
mddev->bitmap_info.offset = 0; /* file overrides offset */
} else if (mddev->bitmap == NULL)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
err = bitmap_create(mddev);
if (!err)
err = bitmap_load(mddev);
}
if (fd < 0 || err) {
bitmap_destroy(mddev);
fd = -1; /* make sure to put the file */
}
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
if (mddev->bitmap_info.file) {
restore_bitmap_write_access(mddev->bitmap_info.file);
fput(mddev->bitmap_info.file);
}
mddev->bitmap_info.file = NULL;
}
return err;
}
/*
 * set_array_info is used in two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it, together with
 *  level, size, not_persistent, layout and chunksize, determines the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 * In this case raid_disks will be 0, and the major_version field is
 * used to determine which style super-blocks are to be found on the devices.
 * The minor and patch _version numbers are also kept in case the
 * super_block handler wishes to interpret them.
*/
static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
{
if (info->raid_disks == 0) {
/* just setting version number for superblock loading */
if (info->major_version < 0 ||
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
printk(KERN_INFO
"md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
mddev->major_version = info->major_version;
mddev->minor_version = info->minor_version;
mddev->patch_version = info->patch_version;
mddev->persistent = !info->not_persistent;
/* ensure mddev_put doesn't delete this now that there
* is some minimal configuration.
*/
mddev->ctime = get_seconds();
return 0;
}
mddev->major_version = MD_MAJOR_VERSION;
mddev->minor_version = MD_MINOR_VERSION;
mddev->patch_version = MD_PATCHLEVEL_VERSION;
mddev->ctime = get_seconds();
mddev->level = info->level;
mddev->clevel[0] = 0;
mddev->dev_sectors = 2 * (sector_t)info->size;
mddev->raid_disks = info->raid_disks;
/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
*/
if (info->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else
mddev->recovery_cp = 0;
	mddev->persistent = !info->not_persistent;
mddev->external = 0;
mddev->layout = info->layout;
mddev->chunk_sectors = info->chunk_size >> 9;
mddev->max_disks = MD_SB_DISKS;
if (mddev->persistent)
mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev->bitmap_info.offset = 0;
mddev->reshape_position = MaxSector;
/*
* Generate a 128 bit UUID
*/
get_random_bytes(mddev->uuid, 16);
mddev->new_level = mddev->level;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
return 0;
}
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
if (mddev->external_size)
return;
mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(struct mddev *mddev, sector_t num_sectors)
{
struct md_rdev *rdev;
int rv;
int fit = (num_sectors == 0);
if (mddev->pers->resize == NULL)
return -EINVAL;
/* The "num_sectors" is the number of sectors of each device that
* is used. This can only make sense for arrays with redundancy.
* linear and raid0 always use whatever space is available. We can only
* consider changing this number if no resync or reconstruction is
* happening, and if the new size is acceptable. It must fit before the
* sb_start or, if that is <data_offset, it must fit before the size
* of each device. If num_sectors is zero, we find the largest size
* that fits.
*/
if (mddev->sync_thread)
return -EBUSY;
rdev_for_each(rdev, mddev) {
sector_t avail = rdev->sectors;
if (fit && (num_sectors == 0 || num_sectors > avail))
num_sectors = avail;
if (avail < num_sectors)
return -ENOSPC;
}
rv = mddev->pers->resize(mddev, num_sectors);
if (!rv)
revalidate_disk(mddev->gendisk);
return rv;
}
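/* Change the number of raid disks: set up delta_disks and the
 * reshape direction, then let the personality validate the reshape
 * via check_reshape(), reverting on failure.
 */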
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
int rv;
struct md_rdev *rdev;
/* change the number of raid disks */
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
return -EBUSY;
rdev_for_each(rdev, mddev) {
if (mddev->raid_disks < raid_disks &&
rdev->data_offset < rdev->new_data_offset)
return -EINVAL;
if (mddev->raid_disks > raid_disks &&
rdev->data_offset > rdev->new_data_offset)
return -EINVAL;
}
mddev->delta_disks = raid_disks - mddev->raid_disks;
if (mddev->delta_disks < 0)
mddev->reshape_backwards = 1;
else if (mddev->delta_disks > 0)
mddev->reshape_backwards = 0;
rv = mddev->pers->check_reshape(mddev);
if (rv < 0) {
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
}
return rv;
}
/*
* update_array_info is used to change the configuration of an
* on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
* fields in the info are checked against the array.
* Any differences that cannot be handled will cause an error.
* Normally, only one change can be managed at a time.
*/
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
int rv = 0;
int cnt = 0;
int state = 0;
	/* calculate expected state, ignoring low bits */
if (mddev->bitmap && mddev->bitmap_info.offset)
state |= (1 << MD_SB_BITMAP_PRESENT);
if (mddev->major_version != info->major_version ||
mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
mddev->ctime != info->ctime ||
mddev->level != info->level ||
/* mddev->layout != info->layout || */
!mddev->persistent != info->not_persistent||
mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00)
)
return -EINVAL;
/* Check there is only one change */
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
cnt++;
if (mddev->raid_disks != info->raid_disks)
cnt++;
if (mddev->layout != info->layout)
cnt++;
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
cnt++;
if (cnt == 0)
return 0;
if (cnt > 1)
return -EINVAL;
if (mddev->layout != info->layout) {
/* Change layout
* we don't need to do anything at the md level, the
* personality will take care of it all.
*/
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
else {
mddev->new_layout = info->layout;
rv = mddev->pers->check_reshape(mddev);
if (rv)
mddev->new_layout = mddev->layout;
return rv;
}
}
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
if (mddev->raid_disks != info->raid_disks)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
if (mddev->pers->quiesce == NULL)
return -EINVAL;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
/* add the bitmap */
if (mddev->bitmap)
return -EEXIST;
if (mddev->bitmap_info.default_offset == 0)
return -EINVAL;
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
if (!rv)
rv = bitmap_load(mddev);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
} else {
/* remove the bitmap */
if (!mddev->bitmap)
return -ENOENT;
if (mddev->bitmap->storage.file)
return -EINVAL;
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
mddev->bitmap_info.offset = 0;
}
}
md_update_sb(mddev, 1);
return rv;
}
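/* SET_DISK_FAULTY ioctl: mark the given device as failed via
 * md_error(), reporting -EBUSY if the device could not be failed.
 */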
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
int err = 0;
if (mddev->pers == NULL)
return -ENODEV;
rcu_read_lock();
rdev = find_rdev_rcu(mddev, dev);
if (!rdev)
err = -ENODEV;
else {
md_error(mddev, rdev);
if (!test_bit(Faulty, &rdev->flags))
err = -EBUSY;
}
rcu_read_unlock();
return err;
}
/*
 * We have a problem here: there is no easy way to give a CHS
 * virtual geometry. We currently pretend to have a 2-head,
 * 4-sector geometry (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mddev *mddev = bdev->bd_disk->private_data;
geo->heads = 2;
geo->sectors = 4;
geo->cylinders = mddev->array_sectors / 8;
return 0;
}
static inline bool md_ioctl_valid(unsigned int cmd)
{
switch (cmd) {
case ADD_NEW_DISK:
case BLKROSET:
case GET_ARRAY_INFO:
case GET_BITMAP_FILE:
case GET_DISK_INFO:
case HOT_ADD_DISK:
case HOT_REMOVE_DISK:
case PRINT_RAID_DEBUG:
case RAID_AUTORUN:
case RAID_VERSION:
case RESTART_ARRAY_RW:
case RUN_ARRAY:
case SET_ARRAY_INFO:
case SET_BITMAP_FILE:
case SET_DISK_FAULTY:
case STOP_ARRAY:
case STOP_ARRAY_RO:
return true;
default:
return false;
}
}
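/* Main ioctl entry point for md block devices. */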
static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
struct mddev *mddev = NULL;
int ro;
if (!md_ioctl_valid(cmd))
return -ENOTTY;
switch (cmd) {
case RAID_VERSION:
case GET_ARRAY_INFO:
case GET_DISK_INFO:
break;
default:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
}
/*
* Commands dealing with the RAID driver but not any
* particular array:
*/
switch (cmd) {
case RAID_VERSION:
err = get_version(argp);
goto done;
case PRINT_RAID_DEBUG:
err = 0;
md_print_devices();
goto done;
#ifndef MODULE
case RAID_AUTORUN:
err = 0;
autostart_arrays(arg);
goto done;
#endif
default:;
}
/*
* Commands creating/starting a new array:
*/
mddev = bdev->bd_disk->private_data;
if (!mddev) {
BUG();
goto abort;
}
	/* Some actions do not require the mutex */
switch (cmd) {
case GET_ARRAY_INFO:
if (!mddev->raid_disks && !mddev->external)
err = -ENODEV;
else
err = get_array_info(mddev, argp);
goto abort;
case GET_DISK_INFO:
if (!mddev->raid_disks && !mddev->external)
err = -ENODEV;
else
err = get_disk_info(mddev, argp);
goto abort;
case SET_DISK_FAULTY:
err = set_disk_faulty(mddev, new_decode_dev(arg));
goto abort;
}
if (cmd == ADD_NEW_DISK)
/* need to ensure md_delayed_delete() has completed */
flush_workqueue(md_misc_wq);
if (cmd == HOT_REMOVE_DISK)
/* need to ensure recovery thread has run */
wait_event_interruptible_timeout(mddev->sb_wait,
!test_bit(MD_RECOVERY_NEEDED,
&mddev->flags),
msecs_to_jiffies(5000));
if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
/* Need to flush page cache, and ensure no-one else opens
* and writes
*/
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > 1) {
mutex_unlock(&mddev->open_mutex);
err = -EBUSY;
goto abort;
}
set_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
sync_blockdev(bdev);
}
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
"md: ioctl lock interrupted, reason %d, cmd %d\n",
err, cmd);
goto abort;
}
if (cmd == SET_ARRAY_INFO) {
mdu_array_info_t info;
if (!arg)
memset(&info, 0, sizeof(info));
else if (copy_from_user(&info, argp, sizeof(info))) {
err = -EFAULT;
goto abort_unlock;
}
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't update"
" array info. %d\n", err);
goto abort_unlock;
}
goto done_unlock;
}
if (!list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: array %s already has disks!\n",
mdname(mddev));
err = -EBUSY;
goto abort_unlock;
}
if (mddev->raid_disks) {
printk(KERN_WARNING
"md: array %s already initialised!\n",
mdname(mddev));
err = -EBUSY;
goto abort_unlock;
}
err = set_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't set"
" array info. %d\n", err);
goto abort_unlock;
}
goto done_unlock;
}
/*
* Commands querying/configuring an existing array:
*/
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
* RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
if ((!mddev->raid_disks && !mddev->external)
&& cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
&& cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
&& cmd != GET_BITMAP_FILE) {
err = -ENODEV;
goto abort_unlock;
}
/*
* Commands even a read-only array can execute:
*/
switch (cmd) {
case GET_BITMAP_FILE:
err = get_bitmap_file(mddev, argp);
goto done_unlock;
case RESTART_ARRAY_RW:
err = restart_array(mddev);
goto done_unlock;
case STOP_ARRAY:
err = do_md_stop(mddev, 0, bdev);
goto done_unlock;
case STOP_ARRAY_RO:
err = md_set_readonly(mddev, bdev);
goto done_unlock;
case HOT_REMOVE_DISK:
err = hot_remove_disk(mddev, new_decode_dev(arg));
goto done_unlock;
case ADD_NEW_DISK:
/* We can support ADD_NEW_DISK on read-only arrays
		 * only if we are re-adding a preexisting device.
* So require mddev->pers and MD_DISK_SYNC.
*/
if (mddev->pers) {
mdu_disk_info_t info;
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else if (!(info.state & (1<<MD_DISK_SYNC)))
/* Need to clear read-only for this */
break;
else
err = add_new_disk(mddev, &info);
goto done_unlock;
}
break;
case BLKROSET:
if (get_user(ro, (int __user *)(arg))) {
err = -EFAULT;
goto done_unlock;
}
err = -EINVAL;
/* if the bdev is going readonly the value of mddev->ro
* does not matter, no writes are coming
*/
if (ro)
goto done_unlock;
		/* are we already prepared for writes? */
if (mddev->ro != 1)
goto done_unlock;
		/* transitioning to auto-read-only need only happen for
* arrays that call md_write_start
*/
if (mddev->pers) {
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
}
goto done_unlock;
}
/*
* The remaining ioctls are changing the state of the
* superblock, so we do not allow them on read-only arrays.
* However non-MD ioctls (e.g. get-size) will still come through
* here and hit the 'default' below, so only disallow
* 'md' ioctls, and switch to rw mode if started auto-readonly.
*/
if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
/* mddev_unlock will wake thread */
/* If a device failed while we were read-only, we
* need to make sure the metadata is updated now.
*/
if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
mddev_unlock(mddev);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
mddev_lock_nointr(mddev);
}
} else {
err = -EROFS;
goto abort_unlock;
}
}
switch (cmd) {
case ADD_NEW_DISK:
{
mdu_disk_info_t info;
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else
err = add_new_disk(mddev, &info);
goto done_unlock;
}
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
goto done_unlock;
case RUN_ARRAY:
err = do_md_run(mddev);
goto done_unlock;
case SET_BITMAP_FILE:
err = set_bitmap_file(mddev, (int)arg);
goto done_unlock;
default:
err = -EINVAL;
goto abort_unlock;
}
done_unlock:
abort_unlock:
if (mddev->hold_active == UNTIL_IOCTL &&
err != -EINVAL)
mddev->hold_active = 0;
mddev_unlock(mddev);
return err;
done:
if (err)
MD_BUG();
abort:
return err;
}
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case HOT_REMOVE_DISK:
case HOT_ADD_DISK:
case SET_DISK_FAULTY:
case SET_BITMAP_FILE:
		/* These take an integer arg; do not convert */
break;
default:
arg = (unsigned long)compat_ptr(arg);
break;
}
return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
* Succeed if we can lock the mddev, which confirms that
* it isn't being stopped right now.
*/
struct mddev *mddev = mddev_find(bdev->bd_dev);
int err;
if (!mddev)
return -ENODEV;
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
goto out;
err = 0;
atomic_inc(&mddev->openers);
clear_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
out:
return err;
}
static void md_release(struct gendisk *disk, fmode_t mode)
{
struct mddev *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
mddev_put(mddev);
}
static int md_media_changed(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
return mddev->changed;
}
static int md_revalidate(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
mddev->changed = 0;
return 0;
}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
{
struct md_thread *thread = arg;
/*
	 * md_thread is a 'system-thread', its priority should be very
* high. We avoid resource deadlocks individually in each
* raid personality. (RAID5 does preallocation) We also use RR and
* the very same RT priority as kswapd, thus we will never get
* into a priority inversion deadlock.
*
* we definitely have to have equal or higher priority than
* bdflush, otherwise bdflush will deadlock if there are too
* many dirty RAID5 blocks.
*/
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
/* We need to wait INTERRUPTIBLE so that
* we don't add to the load-average.
* That means we need to be sure no signals are
* pending
*/
if (signal_pending(current))
flush_signals(current);
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
|| kthread_should_stop(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
if (!kthread_should_stop())
thread->run(thread);
}
return 0;
}
void md_wakeup_thread(struct md_thread *thread)
{
if (thread) {
pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
set_bit(THREAD_WAKEUP, &thread->flags);
wake_up(&thread->wqueue);
}
}
struct md_thread *md_register_thread(void (*run) (struct md_thread *),
struct mddev *mddev, const char *name)
{
struct md_thread *thread;
thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
if (!thread)
return NULL;
init_waitqueue_head(&thread->wqueue);
thread->run = run;
thread->mddev = mddev;
thread->timeout = MAX_SCHEDULE_TIMEOUT;
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
}
return thread;
}
void md_unregister_thread(struct md_thread **threadp)
{
struct md_thread *thread = *threadp;
if (!thread)
return;
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
/* Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
*threadp = NULL;
spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
}
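/* Record that device 'rdev' has failed: let the personality handle
 * the error, then schedule recovery and notify userspace.
 */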
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
if (!mddev) {
MD_BUG();
return;
}
if (!rdev || test_bit(Faulty, &rdev->flags))
return;
if (!mddev->pers || !mddev->pers->error_handler)
return;
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
md_new_event_inintr(mddev);
}
/* seq_file implementation /proc/mdstat */
static void status_unused(struct seq_file *seq)
{
int i = 0;
struct md_rdev *rdev;
seq_printf(seq, "unused devices: ");
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
char b[BDEVNAME_SIZE];
i++;
seq_printf(seq, "%s ",
bdevname(rdev->bdev,b));
}
if (!i)
seq_printf(seq, "<none>");
seq_printf(seq, "\n");
}
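/* Print resync/recovery progress for one array in /proc/mdstat: a
 * progress bar, the percentage complete, an estimated finish time
 * and the current speed.
 */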
static void status_resync(struct seq_file *seq, struct mddev * mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
sector_t rt;
int scale;
unsigned int per_milli;
if (mddev->curr_resync <= 3)
resync = 0;
else
resync = mddev->curr_resync
- atomic_read(&mddev->recovery_active);
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
/*
* Should not happen.
*/
if (!max_sectors) {
MD_BUG();
return;
}
/* Pick 'scale' such that (resync>>scale)*1000 will fit
* in a sector_t, and (max_sectors>>scale) will fit in a
* u32, as those are the requirements for sector_div.
* Thus 'scale' must be at least 10
*/
scale = 10;
if (sizeof(sector_t) > sizeof(unsigned long)) {
while ( max_sectors/2 > (1ULL<<(scale+32)))
scale++;
}
res = (resync>>scale)*1000;
sector_div(res, (u32)((max_sectors>>scale)+1));
per_milli = res;
{
int i, x = per_milli/50, y = 20-x;
seq_printf(seq, "[");
for (i = 0; i < x; i++)
seq_printf(seq, "=");
seq_printf(seq, ">");
for (i = 0; i < y; i++)
seq_printf(seq, ".");
seq_printf(seq, "] ");
}
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
(test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
"reshape" :
(test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
"check" :
(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
"resync" : "recovery"))),
per_milli/10, per_milli % 10,
(unsigned long long) resync/2,
(unsigned long long) max_sectors/2);
/*
* dt: time from mark until now
* db: blocks written from mark until now
* rt: remaining time
*
* rt is a sector_t, so could be 32bit or 64bit.
* So we divide before multiply in case it is 32bit and close
* to the limit.
* We scale the divisor (db) by 32 to avoid losing precision
* near the end of resync when the number of remaining sectors
* is close to 'db'.
* We then divide rt by 32 after multiplying by db to compensate.
* The '+1' avoids division by zero if db is very small.
*/
dt = ((jiffies - mddev->resync_mark) / HZ);
if (!dt) dt++;
db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
- mddev->resync_mark_cnt;
rt = max_sectors - resync; /* number of remaining sectors */
sector_div(rt, db/32+1);
rt *= dt;
rt >>= 5;
seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
((unsigned long)rt % 60)/6);
seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
struct list_head *tmp;
loff_t l = *pos;
struct mddev *mddev;
if (l >= 0x10000)
return NULL;
if (!l--)
/* header */
return (void*)1;
spin_lock(&all_mddevs_lock);
list_for_each(tmp,&all_mddevs)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
return mddev;
}
spin_unlock(&all_mddevs_lock);
if (!l--)
return (void*)2;/* tail */
return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
++*pos;
if (v == (void*)2)
return NULL;
spin_lock(&all_mddevs_lock);
if (v == (void*)1)
tmp = all_mddevs.next;
else
tmp = mddev->all_mddevs.next;
if (tmp != &all_mddevs)
next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
else {
next_mddev = (void*)2;
*pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
if (v != (void*)1)
mddev_put(mddev);
return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
if (mddev && v != (void*)1 && v != (void*)2)
mddev_put(mddev);
}
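/* Show one /proc/mdstat entry: the personalities header (v == 1),
 * the trailing unused-devices list (v == 2), or one array's status.
 */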
static int md_seq_show(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
sector_t sectors;
struct md_rdev *rdev;
if (v == (void*)1) {
struct md_personality *pers;
seq_printf(seq, "Personalities : ");
spin_lock(&pers_lock);
list_for_each_entry(pers, &pers_list, list)
seq_printf(seq, "[%s] ", pers->name);
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
seq->poll_event = atomic_read(&md_event_count);
return 0;
}
if (v == (void*)2) {
status_unused(seq);
return 0;
}
if (mddev_lock(mddev) < 0)
return -EINTR;
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : %sactive", mdname(mddev),
mddev->pers ? "" : "in");
if (mddev->pers) {
if (mddev->ro==1)
seq_printf(seq, " (read-only)");
if (mddev->ro==2)
seq_printf(seq, " (auto-read-only)");
seq_printf(seq, " %s", mddev->pers->name);
}
sectors = 0;
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
seq_printf(seq, " %s[%d]",
bdevname(rdev->bdev,b), rdev->desc_nr);
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
}
if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
if (test_bit(Replacement, &rdev->flags))
seq_printf(seq, "(R)");
sectors += rdev->sectors;
}
if (!list_empty(&mddev->disks)) {
if (mddev->pers)
seq_printf(seq, "\n %llu blocks",
(unsigned long long)
mddev->array_sectors / 2);
else
seq_printf(seq, "\n %llu blocks",
(unsigned long long)sectors / 2);
}
if (mddev->persistent) {
if (mddev->major_version != 0 ||
mddev->minor_version != 90) {
seq_printf(seq," super %d.%d",
mddev->major_version,
mddev->minor_version);
}
} else if (mddev->external)
seq_printf(seq, " super external:%s",
mddev->metadata_type);
else
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
status_resync(seq, mddev);
seq_printf(seq, "\n ");
} else if (mddev->curr_resync >= 1)
seq_printf(seq, "\tresync=DELAYED\n ");
else if (mddev->recovery_cp < MaxSector)
seq_printf(seq, "\tresync=PENDING\n ");
}
} else
seq_printf(seq, "\n ");
bitmap_status(seq, mddev->bitmap);
seq_printf(seq, "\n");
}
mddev_unlock(mddev);
return 0;
}
static const struct seq_operations md_seq_ops = {
.start = md_seq_start,
.next = md_seq_next,
.stop = md_seq_stop,
.show = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int error;
error = seq_open(file, &md_seq_ops);
if (error)
return error;
seq = file->private_data;
seq->poll_event = atomic_read(&md_event_count);
return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
int mask;
poll_wait(filp, &md_event_waiters, wait);
/* always allow read */
mask = POLLIN | POLLRDNORM;
if (seq->poll_event != atomic_read(&md_event_count))
mask |= POLLERR | POLLPRI;
return mask;
}
static const struct file_operations md_seq_fops = {
.owner = THIS_MODULE,
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.poll = mdstat_poll,
};
int register_md_personality(struct md_personality *p)
{
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
spin_unlock(&pers_lock);
return 0;
}
int unregister_md_personality(struct md_personality *p)
{
printk(KERN_INFO "md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
return 0;
}
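/* Decide whether the array is idle enough for resync to proceed at
 * full speed, by comparing per-disk I/O event counts with the last
 * sample.
 */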
static int is_mddev_idle(struct mddev *mddev, int init)
{
struct md_rdev * rdev;
int idle;
int curr_events;
idle = 1;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
* disk_stats is counted when it completes.
* So resync activity will cause curr_events to be smaller than
* when there was no such activity.
* non-sync IO will cause disk_stat to increase without
* increasing sync_io so curr_events will (eventually)
* be larger than it was before. Once it becomes
* substantially larger, the test below will cause
* the array to appear non-idle, and resync will slow
* down.
* If there is a lot of outstanding resync activity when
* we set last_event to curr_events, then all that activity
* completing might cause the array to appear non-idle
* and resync will be slowed down even though there might
* not have been non-resync activity. This will only
* happen once though. 'last_events' will soon reflect
* the state where there is little or no outstanding
* resync requests, and further resync activity will
* always make curr_events less than last_events.
*
*/
if (init || curr_events - rdev->last_events > 64) {
rdev->last_events = curr_events;
idle = 0;
}
}
rcu_read_unlock();
return idle;
}
void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
/* another "blocks" (512byte) blocks have been synced */
atomic_sub(blocks, &mddev->recovery_active);
wake_up(&mddev->recovery_wait);
if (!ok) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
md_wakeup_thread(mddev->thread);
		/* stop recovery, signal do_sync ... */
}
}
/* md_write_start(mddev, bi)
* If we need to update some array metadata (e.g. 'active' flag
* in superblock) before writing, schedule a superblock update
* and wait for it to complete.
*/
void md_write_start(struct mddev *mddev, struct bio *bi)
{
int did_change = 0;
if (bio_data_dir(bi) != WRITE)
return;
BUG_ON(mddev->ro == 1);
if (mddev->ro == 2) {
/* need to switch to read/write */
mddev->ro = 0;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
did_change = 1;
}
atomic_inc(&mddev->writes_pending);
if (mddev->safemode == 1)
mddev->safemode = 0;
if (mddev->in_sync) {
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
spin_unlock_irq(&mddev->write_lock);
}
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(struct mddev *mddev)
{
if (atomic_dec_and_test(&mddev->writes_pending)) {
if (mddev->safemode == 2)
md_wakeup_thread(mddev->thread);
else if (mddev->safemode_delay)
mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
}
}
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
* In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
int md_allow_write(struct mddev *mddev)
{
if (!mddev->pers)
return 0;
if (mddev->ro)
return 0;
if (!mddev->pers->sync_request)
return 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock_irq(&mddev->write_lock);
md_update_sb(mddev, 0);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock_irq(&mddev->write_lock);
if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
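/* Main loop for resync/recovery/reshape/check, run from the array's
 * registered sync thread: repeatedly call the personality's
 * sync_request() while honouring the configured speed limits.
 */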
void md_do_sync(struct md_thread *thread)
{
struct mddev *mddev = thread->mddev;
struct mddev *mddev2;
unsigned int currspeed = 0,
window;
sector_t max_sectors,j, io_sectors;
unsigned long mark[SYNC_MARKS];
unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
char *desc, *action = NULL;
struct blk_plug plug;
	/* just in case the thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if (mddev->ro) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
return;
}
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
desc = "data-check";
action = "check";
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
desc = "requested-resync";
action = "repair";
} else
desc = "resync";
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
desc = "reshape";
else
desc = "recovery";
mddev->last_sync_action = action ?: desc;
/* we overload curr_resync somewhat here.
* 0 == not engaged in resync at all
* 2 == checking that there is no conflict with another sync
* 1 == like 2, but have yielded to allow conflicting resync to
	 *    commence
* other == active in resync - this many blocks
*
* Before starting a resync we must have set curr_resync to
* 2, and then checked that every "conflicting" array has curr_resync
* less than ours. When we find one that is the same or higher
* we wait on resync_wait. To avoid deadlock, we reduce curr_resync
* to 1 if we choose to yield (based arbitrarily on address of mddev structure).
* This will mean we have to start checking from the beginning again.
*
*/
do {
mddev->curr_resync = 2;
try_again:
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
if (!mddev->parallel_resync
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
if (mddev < mddev2 && mddev->curr_resync == 2) {
/* arbitrarily yield */
mddev->curr_resync = 1;
wake_up(&resync_wait);
}
if (mddev > mddev2 && mddev->curr_resync == 1)
/* no need to wait here, we can wait the next
* time 'round when curr_resync == 2
*/
continue;
/* We need to wait 'interruptible' so as not to
* contribute to the load average, and not to
* be caught by 'softlockup'
*/
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" until %s has finished (they"
" share one or more physical units)\n",
desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
if (signal_pending(current))
flush_signals(current);
schedule();
finish_wait(&resync_wait, &wq);
goto try_again;
}
finish_wait(&resync_wait, &wq);
}
}
} while (mddev->curr_resync < 2);
j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* resync follows the size requested by the personality,
* which defaults to physical size, but can be virtual size
*/
max_sectors = mddev->resync_max_sectors;
atomic64_set(&mddev->resync_mismatches, 0);
/* we don't use the checkpoint if there's a bitmap */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
j = mddev->resync_min;
else if (!mddev->bitmap)
j = mddev->recovery_cp;
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < j)
j = rdev->recovery_offset;
rcu_read_unlock();
/* If there is a bitmap, we need to make sure all
* writes that started before we added a spare
* complete before we start doing a recovery.
* Otherwise the write might complete and (via
* bitmap_endwrite) set a bit in the bitmap after the
* recovery has checked that bit and skipped that
* region.
*/
if (mddev->bitmap) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
}
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ speed:"
" %d KB/sec/disk.\n", speed_min(mddev));
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
"(but not more than %d KB/sec) for %s.\n",
speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
io_sectors = 0;
for (m = 0; m < SYNC_MARKS; m++) {
mark[m] = jiffies;
mark_cnt[m] = io_sectors;
}
last_mark = 0;
mddev->resync_mark = mark[last_mark];
mddev->resync_mark_cnt = mark_cnt[last_mark];
/*
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
printk(KERN_INFO
"md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
} else
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
md_new_event(mddev);
update_time = jiffies;
blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
skipped = 0;
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
j > mddev->recovery_cp)
mddev->recovery_cp = j;
update_time = jiffies;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
while (j >= mddev->resync_max &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* As this condition is controlled by user-space,
* we can block indefinitely, so use '_interruptible'
* to avoid triggering warnings.
*/
flush_signals(current); /* just in case */
wait_event_interruptible(mddev->recovery_wait,
mddev->resync_max > j
|| test_bit(MD_RECOVERY_INTR,
&mddev->recovery));
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
break;
}
if (!skipped) { /* actual IO requested */
io_sectors += sectors;
atomic_add(sectors, &mddev->recovery_active);
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
j += sectors;
if (j > 2)
mddev->curr_resync = j;
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
md_new_event(mddev);
if (last_check + window > io_sectors || j == max_sectors)
continue;
last_check = io_sectors;
repeat:
if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
/* step marks */
int next = (last_mark+1) % SYNC_MARKS;
mddev->resync_mark = mark[next];
mddev->resync_mark_cnt = mark_cnt[next];
mark[next] = jiffies;
mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
last_mark = next;
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
/*
 * this loop exits only when either we are slower than
* the 'hard' speed limit, or the system was IO-idle for
* a jiffy.
* the system might be non-idle CPU-wise, but we only care
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
cond_resched();
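/* recent throughput since the last mark, in KB/sec (sectors/2 == KB;
 * the two "+1"s avoid division by zero and a zero speed) */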
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
if ((currspeed > speed_max(mddev)) ||
!is_mddev_idle(mddev, 0)) {
msleep(500);
goto repeat;
}
}
}
printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
test_bit(MD_RECOVERY_INTR, &mddev->recovery)
? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
mddev->recovery_cp =
mddev->curr_resync_completed;
else
mddev->recovery_cp =
mddev->curr_resync;
}
} else
mddev->recovery_cp = MaxSector;
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
rdev->recovery_offset = mddev->curr_resync;
rcu_read_unlock();
}
}
skip:
set_bit(MD_CHANGE_DEVS, &mddev->flags);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this)
{
struct md_rdev *rdev;
int spares = 0;
int removed = 0;
rdev_for_each(rdev, mddev)
if ((this == NULL || rdev == this) &&
rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
(test_bit(Faulty, &rdev->flags) ||
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
removed++;
}
}
if (removed && mddev->kobj.sd)
sysfs_notify(&mddev->kobj, NULL, "degraded");
if (this)
goto no_add;
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
spares++;
if (rdev->raid_disk >= 0)
continue;
if (test_bit(Faulty, &rdev->flags))
continue;
if (mddev->ro &&
! (rdev->saved_raid_disk >= 0 &&
!test_bit(Bitmap_sync, &rdev->flags)))
continue;
if (rdev->saved_raid_disk < 0)
rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
no_add:
if (removed)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
return spares;
}
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
* Raid personalities that don't have a thread (linear/raid0) do not
* need this as they never do any recovery or update the superblock.
*
* It does not do any resync itself, but rather "forks" off other threads
* to do that as needed.
* When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
* "->recovery" and create a thread at ->sync_thread.
* When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
* This thread also removes any faulty devices (with nr_pending == 0).
*
* The overall approach is:
* 1/ if the superblock needs updating, update it.
* 2/ If a recovery thread is running, don't do anything else.
* 3/ If recovery has finished, clean up, possibly marking spares active.
* 4/ If there are any faulty devices, remove them.
 * 5/ If array is degraded, try to add spare devices
* 6/ If array has spares or is not in-sync, start a resync thread.
*/
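/*
 * Illustrative sequence (hedged, drawn from the code below): after a
 * disk fails and a spare is present, MD_RECOVERY_NEEDED gets set; the
 * next md_check_recovery() call removes the faulty device via
 * remove_and_add_spares(), adds the spare, sets MD_RECOVERY_RUNNING and
 * registers md_do_sync() as the sync thread. When that thread finishes
 * it sets MD_RECOVERY_DONE, and a later call reaps it through
 * md_reap_sync_thread(), activating the now in-sync spare.
 */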
void md_check_recovery(struct mddev *mddev)
{
if (mddev->suspended)
return;
if (mddev->bitmap)
bitmap_daemon_work(mddev);
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
printk(KERN_INFO "md: %s in immediate safe mode\n",
mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
}
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
(mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
&& !mddev->in_sync && mddev->recovery_cp == MaxSector)
))
return;
if (mddev_trylock(mddev)) {
int spares = 0;
if (mddev->ro) {
/* On a read-only array we can:
* - remove failed devices
* - add already-in_sync devices if the array itself
* is in-sync.
* As we only add devices that are already in-sync,
* we can activate the spares immediately.
*/
remove_and_add_spares(mddev, NULL);
/* There is no thread, but we need to call
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (!mddev->external) {
int did_change = 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->safemode &&
!atomic_read(&mddev->writes_pending) &&
!mddev->in_sync &&
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock_irq(&mddev->write_lock);
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (mddev->sync_thread) {
md_reap_sync_thread(mddev);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
* any transients in the value of "sync_action".
*/
mddev->curr_resync_completed = 0;
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
/* Clear some bits that don't mean anything, but
* might be left set
*/
clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
goto unlock;
/* no recovery is running.
* remove any failed drives, then
* add spares if possible.
* Spares are also removed and re-added, to allow
* the personality to fail the re-add.
*/
if (mddev->reshape_position != MaxSector) {
if (mddev->pers->check_reshape == NULL ||
mddev->pers->check_reshape(mddev) != 0)
/* Cannot proceed */
goto unlock;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if ((spares = remove_and_add_spares(mddev, NULL))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
/* nothing to be done ... */
goto unlock;
if (mddev->pers->sync_request) {
if (spares) {
/* We are adding a device or devices to an array
* which has the bitmap stored on all devices.
* So make sure all bitmap pages get written
*/
bitmap_write_all(mddev->bitmap);
}
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",
mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
}
unlock:
wake_up(&mddev->sb_wait);
if (!mddev->sync_thread) {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
mddev_unlock(mddev);
}
}
void md_reap_sync_thread(struct mddev *mddev)
{
struct md_rdev *rdev;
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
wake_up(&resync_wait);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev)) {
sysfs_notify(&mddev->kobj, NULL,
"degraded");
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
/* If array is no-longer degraded, then any saved_raid_disk
* information must be scrapped.
*/
if (!mddev->degraded)
rdev_for_each(rdev, mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
!test_bit(Blocked, &rdev->flags) &&
!test_bit(BlockedBadBlocks, &rdev->flags),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
void md_finish_reshape(struct mddev *mddev)
{
/* called by personality module when reshape completes. */
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
if (rdev->data_offset > rdev->new_data_offset)
rdev->sectors += rdev->data_offset - rdev->new_data_offset;
else
rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
rdev->data_offset = rdev->new_data_offset;
}
}
EXPORT_SYMBOL(md_finish_reshape);
/* Bad block management.
* We can record which blocks on each device are 'bad' and so just
* fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64 bits wide. Each comprises:
* Length of bad-range, in sectors: 0-511 for lengths 1-512
* Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
* A 'shift' can be set so that larger blocks are tracked and
* consequently larger devices can be covered.
* 'Acknowledged' flag - 1 bit. - the most significant bit.
*
* Locking of the bad-block table uses a seqlock so md_is_badblock
* might need to retry if it is very unlucky.
* We will sometimes want to check for bad blocks in a bi_end_io function,
* so we use the write_seqlock_irq variant.
*
* When looking for a bad block we specify a range and want to
* know if any block in the range is bad. So we binary-search
 * to the last range that starts at-or-before the given endpoint
 * (or "before the sector after the target range"),
* then see if it ends after the given start.
* We return
* 0 if there are no known bad blocks in the range
* 1 if there are known bad block which are all acknowledged
* -1 if there are bad blocks which have not yet been acknowledged in metadata.
* plus the start/length of the first bad section we overlap.
*/
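/*
 * Worked example (illustrative; assumes the BB_MAKE/BB_OFFSET/BB_LEN/
 * BB_ACK helpers from md.h and bb->shift == 0): an acknowledged bad
 * range of 8 sectors starting at sector 1000 packs into one u64:
 *
 *	u64 entry = BB_MAKE(1000, 8, 1);
 *	BB_OFFSET(entry);	// 1000, the 54-bit start
 *	BB_LEN(entry);		// 8; the 9-bit field stores length-1
 *	BB_ACK(entry);		// 1, the most significant bit
 *
 * md_is_badblock(bb, 1002, 4, &first, &len) then returns 1 and reports
 * the whole overlapping range: first == 1000, len == 8.
 */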
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors)
{
int hi;
int lo;
u64 *p = bb->page;
int rv;
sector_t target = s + sectors;
unsigned seq;
if (bb->shift > 0) {
/* round the start down, and the end up */
s >>= bb->shift;
target += (1<<bb->shift) - 1;
target >>= bb->shift;
sectors = target - s;
}
/* 'target' is now the first block after the bad range */
retry:
seq = read_seqbegin(&bb->lock);
lo = 0;
rv = 0;
hi = bb->count;
/* Binary search between lo and hi for 'target'
* i.e. for the last range that starts before 'target'
*/
/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
* are known not to be the last range before target.
* VARIANT: hi-lo is the number of possible
* ranges, and decreases until it reaches 1
*/
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a < target)
/* This could still be the one, earlier ranges
* could not. */
lo = mid;
else
/* This and later ranges are definitely out. */
hi = mid;
}
/* 'lo' might be the last that started before target, but 'hi' isn't */
if (hi > lo) {
/* need to check all ranges that end after 's' to see if
* any are unacknowledged.
*/
while (lo >= 0 &&
BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
if (BB_OFFSET(p[lo]) < target) {
/* starts before the end, and finishes after
* the start, so they must overlap
*/
if (rv != -1 && BB_ACK(p[lo]))
rv = 1;
else
rv = -1;
*first_bad = BB_OFFSET(p[lo]);
*bad_sectors = BB_LEN(p[lo]);
}
lo--;
}
}
if (read_seqretry(&bb->lock, seq))
goto retry;
return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
/*
* Add a range of bad blocks to the table.
* This might extend the table, or might contract it
* if two adjacent ranges can be merged.
* We binary-search to find the 'insertion' point, then
* decide how best to handle it.
*/
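/*
 * Illustrative merge (hedged): with existing ranges (1000, len 8) and
 * (1020, len 8), calling md_set_badblocks(bb, 1008, 12, ack) first
 * extends the lower range to (1000, len 20), leaving no sectors
 * unhandled, and the combine step below then folds the two adjacent
 * entries into a single (1000, len 28) record.
 */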
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
int acknowledged)
{
u64 *p;
int lo, hi;
int rv = 1;
unsigned long flags;
if (bb->shift < 0)
/* badblocks are disabled */
return 0;
if (bb->shift) {
/* round the start down, and the end up */
sector_t next = s + sectors;
s >>= bb->shift;
next += (1<<bb->shift) - 1;
next >>= bb->shift;
sectors = next - s;
}
write_seqlock_irqsave(&bb->lock, flags);
p = bb->page;
lo = 0;
hi = bb->count;
/* Find the last range that starts at-or-before 's' */
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a <= s)
lo = mid;
else
hi = mid;
}
if (hi > lo && BB_OFFSET(p[lo]) > s)
hi = lo;
if (hi > lo) {
/* we found a range that might merge with the start
* of our new range
*/
sector_t a = BB_OFFSET(p[lo]);
sector_t e = a + BB_LEN(p[lo]);
int ack = BB_ACK(p[lo]);
if (e >= s) {
/* Yes, we can merge with a previous range */
if (s == a && s + sectors >= e)
/* new range covers old */
ack = acknowledged;
else
ack = ack && acknowledged;
if (e < s + sectors)
e = s + sectors;
if (e - a <= BB_MAX_LEN) {
p[lo] = BB_MAKE(a, e-a, ack);
s = e;
} else {
/* does not all fit in one range,
* make p[lo] maximal
*/
if (BB_LEN(p[lo]) != BB_MAX_LEN)
p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
s = a + BB_MAX_LEN;
}
sectors = e - s;
}
}
if (sectors && hi < bb->count) {
/* 'hi' points to the first range that starts after 's'.
* Maybe we can merge with the start of that range */
sector_t a = BB_OFFSET(p[hi]);
sector_t e = a + BB_LEN(p[hi]);
int ack = BB_ACK(p[hi]);
if (a <= s + sectors) {
/* merging is possible */
if (e <= s + sectors) {
/* full overlap */
e = s + sectors;
ack = acknowledged;
} else
ack = ack && acknowledged;
a = s;
if (e - a <= BB_MAX_LEN) {
p[hi] = BB_MAKE(a, e-a, ack);
s = e;
} else {
p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
s = a + BB_MAX_LEN;
}
sectors = e - s;
lo = hi;
hi++;
}
}
if (sectors == 0 && hi < bb->count) {
/* we might be able to combine lo and hi */
/* Note: 's' is at the end of 'lo' */
sector_t a = BB_OFFSET(p[hi]);
int lolen = BB_LEN(p[lo]);
int hilen = BB_LEN(p[hi]);
int newlen = lolen + hilen - (s - a);
if (s >= a && newlen < BB_MAX_LEN) {
/* yes, we can combine them */
int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
memmove(p + hi, p + hi + 1,
(bb->count - hi - 1) * 8);
bb->count--;
}
}
while (sectors) {
/* didn't merge (or didn't merge all of it).
* Need to add a range just before 'hi' */
if (bb->count >= MD_MAX_BADBLOCKS) {
/* No room for more */
rv = 0;
break;
} else {
int this_sectors = sectors;
memmove(p + hi + 1, p + hi,
(bb->count - hi) * 8);
bb->count++;
if (this_sectors > BB_MAX_LEN)
this_sectors = BB_MAX_LEN;
p[hi] = BB_MAKE(s, this_sectors, acknowledged);
sectors -= this_sectors;
s += this_sectors;
}
}
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
}
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
rv = md_set_badblocks(&rdev->badblocks,
s, sectors, 0);
if (rv) {
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
/*
* Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
* but it must not fail. So if the table becomes full, we just
* drop the remove request.
*/
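/*
 * Illustrative split (hedged): clearing sectors 1008-1019 from a single
 * (1000, len 28) entry leaves the head and tail intact, so the table
 * grows by one entry and ends up as (1000, len 8) and (1020, len 8).
 */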
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
u64 *p;
int lo, hi;
sector_t target = s + sectors;
int rv = 0;
if (bb->shift > 0) {
/* When clearing we round the start up and the end down.
* This should not matter as the shift should align with
* the block size and no rounding should ever be needed.
 * However it is better to think a block is bad when it
* isn't than to think a block is not bad when it is.
*/
s += (1<<bb->shift) - 1;
s >>= bb->shift;
target >>= bb->shift;
sectors = target - s;
}
write_seqlock_irq(&bb->lock);
p = bb->page;
lo = 0;
hi = bb->count;
/* Find the last range that starts before 'target' */
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a < target)
lo = mid;
else
hi = mid;
}
if (hi > lo) {
/* p[lo] is the last range that could overlap the
* current range. Earlier ranges could also overlap,
* but only this one can overlap the end of the range.
*/
if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
/* Partial overlap, leave the tail of this range */
int ack = BB_ACK(p[lo]);
sector_t a = BB_OFFSET(p[lo]);
sector_t end = a + BB_LEN(p[lo]);
if (a < s) {
/* we need to split this range */
if (bb->count >= MD_MAX_BADBLOCKS) {
rv = 0;
goto out;
}
memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
bb->count++;
p[lo] = BB_MAKE(a, s-a, ack);
lo++;
}
p[lo] = BB_MAKE(target, end - target, ack);
/* there is no longer an overlap */
hi = lo;
lo--;
}
while (lo >= 0 &&
BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
/* This range does overlap */
if (BB_OFFSET(p[lo]) < s) {
/* Keep the early parts of this range. */
int ack = BB_ACK(p[lo]);
sector_t start = BB_OFFSET(p[lo]);
p[lo] = BB_MAKE(start, s - start, ack);
/* now lo doesn't overlap, so we can stop */
break;
}
lo--;
}
/* 'lo' is strictly before, 'hi' is strictly after,
* anything between needs to be discarded
*/
if (hi - lo > 1) {
memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
bb->count -= (hi - lo - 1);
}
}
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
return rv;
}
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
return md_clear_badblocks(&rdev->badblocks,
s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
/*
* Acknowledge all bad blocks in a list.
* This only succeeds if ->changed is clear. It is used by
* in-kernel metadata updates
*/
void md_ack_all_badblocks(struct badblocks *bb)
{
if (bb->page == NULL || bb->changed)
/* no point even trying */
return;
write_seqlock_irq(&bb->lock);
if (bb->changed == 0 && bb->unacked_exist) {
u64 *p = bb->page;
int i;
for (i = 0; i < bb->count ; i++) {
if (!BB_ACK(p[i])) {
sector_t start = BB_OFFSET(p[i]);
int len = BB_LEN(p[i]);
p[i] = BB_MAKE(start, len, 1);
}
}
bb->unacked_exist = 0;
}
write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
/* sysfs access to bad-blocks list.
* We present two files.
* 'bad-blocks' lists sector numbers and lengths of ranges that
* are recorded as bad. The list is truncated to fit within
* the one-page limit of sysfs.
* Writing "sector length" to this file adds an acknowledged
 * bad block range.
* 'unacknowledged-bad-blocks' lists bad blocks that have not yet
* been acknowledged. Writing to this file adds bad blocks
* without acknowledging them. This is largely for testing.
*/
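/*
 * Hedged usage sketch (the sysfs paths are illustrative, not taken from
 * this file):
 *
 *	# record 8 acknowledged bad sectors starting at sector 1000
 *	echo "1000 8" > /sys/block/md0/md/dev-sdb/bad_blocks
 *	# read back "sector length" lines
 *	cat /sys/block/md0/md/dev-sdb/bad_blocks
 *	# with DO_DEBUG compiled in, a leading '-' clears a range
 *	echo "-1000 8" > /sys/block/md0/md/dev-sdb/bad_blocks
 */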
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
size_t len;
int i;
u64 *p = bb->page;
unsigned seq;
if (bb->shift < 0)
return 0;
retry:
seq = read_seqbegin(&bb->lock);
len = 0;
i = 0;
while (len < PAGE_SIZE && i < bb->count) {
sector_t s = BB_OFFSET(p[i]);
unsigned int length = BB_LEN(p[i]);
int ack = BB_ACK(p[i]);
i++;
if (unack && ack)
continue;
len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
(unsigned long long)s << bb->shift,
length << bb->shift);
}
if (unack && len == 0)
bb->unacked_exist = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
return len;
}
#define DO_DEBUG 1
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
unsigned long long sector;
int length;
char newline;
#ifdef DO_DEBUG
/* Allow clearing via sysfs *only* for testing/debugging.
* Normally only a successful write may clear a badblock
*/
int clear = 0;
if (page[0] == '-') {
clear = 1;
page++;
}
#endif /* DO_DEBUG */
switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
case 3:
if (newline != '\n')
return -EINVAL;
case 2:
if (length <= 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
#ifdef DO_DEBUG
if (clear) {
md_clear_badblocks(bb, sector, length);
return len;
}
#endif /* DO_DEBUG */
if (md_set_badblocks(bb, sector, length, !unack))
return len;
else
return -ENOSPC;
}
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
struct list_head *tmp;
struct mddev *mddev;
int need_delay = 0;
for_each_mddev(mddev, tmp) {
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
if (mddev->persistent)
mddev->safemode = 2;
mddev_unlock(mddev);
}
need_delay = 1;
}
/*
* certain more exotic SCSI devices are known to be
* volatile wrt too early system reboots. While the
* right place to handle this issue is the given
* driver, we do want to have a safe RAID driver ...
*/
if (need_delay)
mdelay(1000*1);
return NOTIFY_DONE;
}
static struct notifier_block md_notifier = {
.notifier_call = md_notify_reboot,
.next = NULL,
.priority = INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
int ret = -ENOMEM;
md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
md_misc_wq = alloc_workqueue("md_misc", 0, 0);
if (!md_misc_wq)
goto err_misc_wq;
if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
goto err_md;
if ((ret = register_blkdev(0, "mdp")) < 0)
goto err_mdp;
mdp_major = ret;
blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
register_reboot_notifier(&md_notifier);
raid_table_header = register_sysctl_table(raid_root_table);
md_geninit();
return 0;
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
err_wq:
return ret;
}
#ifndef MODULE
/*
* Searches all registered partitions for autorun RAID arrays
* at boot time.
*/
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
struct list_head list;
dev_t dev;
};
void md_autodetect_dev(dev_t dev)
{
struct detected_devices_node *node_detected_dev;
node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
if (node_detected_dev) {
node_detected_dev->dev = dev;
list_add_tail(&node_detected_dev->list, &all_detected_devices);
} else {
printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
static void autostart_arrays(int part)
{
struct md_rdev *rdev;
struct detected_devices_node *node_detected_dev;
dev_t dev;
int i_scanned, i_passed;
i_scanned = 0;
i_passed = 0;
printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
i_scanned++;
node_detected_dev = list_entry(all_detected_devices.next,
struct detected_devices_node, list);
list_del(&node_detected_dev->list);
dev = node_detected_dev->dev;
kfree(node_detected_dev);
rdev = md_import_device(dev,0, 90);
if (IS_ERR(rdev))
continue;
if (test_bit(Faulty, &rdev->flags)) {
MD_BUG();
continue;
}
set_bit(AutoDetected, &rdev->flags);
list_add(&rdev->same_set, &pending_raid_disks);
i_passed++;
}
printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
i_scanned, i_passed);
autorun_devices(part);
}
#endif /* !MODULE */
static __exit void md_exit(void)
{
struct mddev *mddev;
struct list_head *tmp;
blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
unregister_blkdev(MD_MAJOR,"md");
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
remove_proc_entry("mdstat", NULL);
for_each_mddev(mddev, tmp) {
export_array(mddev);
mddev->hold_active = 0;
}
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
char *e;
int num = simple_strtoul(val, &e, 10);
if (*val && (*e == '\0' || *e == '\n')) {
start_readonly = num;
return 0;
}
return -EINVAL;
}
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
EXPORT_SYMBOL(md_reap_sync_thread);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
| gpl-2.0 |
AiJiaZone/linux-4.0 | virt/drivers/infiniband/hw/qib/qib_file_ops.c | 62473 | /*
* Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/uio.h>
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);
/*
* This is really, really weird shit - write() and writev() here
* have completely unrelated semantics. Sucky userland ABI,
* film at 11.
*/
static const struct file_operations qib_file_ops = {
.owner = THIS_MODULE,
.write = qib_write,
.write_iter = qib_write_iter,
.open = qib_open,
.release = qib_close,
.poll = qib_poll,
.mmap = qib_mmapf,
.llseek = noop_llseek,
};
/*
* Convert kernel virtual addresses to physical addresses so they don't
* potentially conflict with the chip addresses used as mmap offsets.
* It doesn't really matter what mmap offset we use as long as we can
* interpret it correctly.
*/
static u64 cvt_kvaddr(void *p)
{
struct page *page;
u64 paddr = 0;
page = vmalloc_to_page(p);
if (page)
paddr = page_to_pfn(page) << PAGE_SHIFT;
return paddr;
}
static int qib_get_base_info(struct file *fp, void __user *ubase,
size_t ubase_size)
{
struct qib_ctxtdata *rcd = ctxt_fp(fp);
int ret = 0;
struct qib_base_info *kinfo = NULL;
struct qib_devdata *dd = rcd->dd;
struct qib_pportdata *ppd = rcd->ppd;
unsigned subctxt_cnt;
int shared, master;
size_t sz;
subctxt_cnt = rcd->subctxt_cnt;
if (!subctxt_cnt) {
shared = 0;
master = 0;
subctxt_cnt = 1;
} else {
shared = 1;
master = !subctxt_fp(fp);
}
sz = sizeof(*kinfo);
/* If context sharing is not requested, allow the old size structure */
if (!shared)
sz -= 7 * sizeof(u64);
if (ubase_size < sz) {
ret = -EINVAL;
goto bail;
}
kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
if (kinfo == NULL) {
ret = -ENOMEM;
goto bail;
}
ret = dd->f_get_base_info(rcd, kinfo);
if (ret < 0)
goto bail;
kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
/*
* have to mmap whole thing
*/
kinfo->spi_rcv_egrbuftotlen =
rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
rcd->rcvegrbuf_chunks;
kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
if (master)
kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
/*
* for this use, may be cfgctxts summed over all chips that
 * are configured and present
*/
kinfo->spi_nctxts = dd->cfgctxts;
/* unit (chip/board) our context is on */
kinfo->spi_unit = dd->unit;
kinfo->spi_port = ppd->port;
/* for now, only a single page */
kinfo->spi_tid_maxsize = PAGE_SIZE;
/*
* Doing this per context, and based on the skip value, etc. This has
* to be the actual buffer size, since the protocol code treats it
* as an array.
*
* These have to be set to user addresses in the user code via mmap.
* These values are used on return to user code for the mmap target
* addresses only. For 32 bit, same 44 bit address problem, so use
* the physical address, not virtual. Before 2.6.11, using the
* page_address() macro worked, but in 2.6.11, even that returns the
* full 64 bit address (upper bits all 1's). So far, using the
* physical addresses (or chip offsets, for chip mapping) works, but
* no doubt some future kernel release will change that, and we'll be
* on to yet another method of dealing with this.
* Normally only one of rcvhdr_tailaddr or rhf_offset is useful
* since the chips with non-zero rhf_offset don't normally
* enable tail register updates to host memory, but for testing,
* both can be enabled and used.
*/
kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
kinfo->spi_rhf_offset = dd->rhf_offset;
kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
/* setup per-unit (not port) status area for user programs */
kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
(char *) ppd->statusp -
(char *) dd->pioavailregs_dma;
kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
if (!shared) {
kinfo->spi_piocnt = rcd->piocnt;
kinfo->spi_piobufbase = (u64) rcd->piobufs;
kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
} else if (master) {
kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
(rcd->piocnt % subctxt_cnt);
/* Master's PIO buffers are after all the slave's */
kinfo->spi_piobufbase = (u64) rcd->piobufs +
dd->palign *
(rcd->piocnt - kinfo->spi_piocnt);
} else {
unsigned slave = subctxt_fp(fp) - 1;
kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
kinfo->spi_piobufbase = (u64) rcd->piobufs +
dd->palign * kinfo->spi_piocnt * slave;
}
if (shared) {
kinfo->spi_sendbuf_status =
cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
/* only spi_subctxt_* fields should be set in this block! */
kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
kinfo->spi_subctxt_rcvegrbuf =
cvt_kvaddr(rcd->subctxt_rcvegrbuf);
kinfo->spi_subctxt_rcvhdr_base =
cvt_kvaddr(rcd->subctxt_rcvhdr_base);
}
/*
* All user buffers are 2KB buffers. If we ever support
* giving 4KB buffers to user processes, this will need some
* work. Can't use piobufbase directly, because it has
* both 2K and 4K buffer base values.
*/
kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
dd->palign;
kinfo->spi_pioalign = dd->palign;
kinfo->spi_qpair = QIB_KD_QP;
/*
* user mode PIO buffers are always 2KB, even when 4KB can
* be received, and sent via the kernel; this is ibmaxlen
* for 2K MTU.
*/
kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
kinfo->spi_ctxt = rcd->ctxt;
kinfo->spi_subctxt = subctxt_fp(fp);
kinfo->spi_sw_version = QIB_KERN_SWVERSION;
kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
kinfo->spi_hw_version = dd->revision;
if (master)
kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
if (copy_to_user(ubase, kinfo, sz))
ret = -EFAULT;
bail:
kfree(kinfo);
return ret;
}
/**
* qib_tid_update - update a context TID
* @rcd: the context
* @fp: the qib device file
* @ti: the TID information
*
* The new implementation as of Oct 2004 is that the driver assigns
* the tid and returns it to the caller. To reduce search time, we
* keep a cursor for each context, walking the shadow tid array to find
* one that's not in use.
*
* For now, if we can't allocate the full list, we fail, although
* in the long run, we'll allocate as many as we can, and the
* caller will deal with that by trying the remaining pages later.
* That means that when we fail, we have to mark the tids as not in
* use again, in our shadow copy.
*
* It's up to the caller to free the tids when they are done.
* We'll unlock the pages as they free them.
*
* Also, right now we are locking one page at a time, but since
* the intended use of this routine is for a single group of
* virtually contiguous pages, that should change to improve
* performance.
*/
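/*
 * Illustrative cursor behaviour (hedged): with tidcnt == 512 and the
 * context's cursor at 510, a request for four TIDs would typically hand
 * out 510, 511, 0 and 1 (skipping any entries still shadowed as in
 * use), then leave the cursor just past the last TID assigned.
 */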
static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
const struct qib_tid_info *ti)
{
int ret = 0, ntids;
u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
u16 *tidlist;
struct qib_devdata *dd = rcd->dd;
u64 physaddr;
unsigned long vaddr;
u64 __iomem *tidbase;
unsigned long tidmap[8];
struct page **pagep = NULL;
unsigned subctxt = subctxt_fp(fp);
if (!dd->pageshadow) {
ret = -ENOMEM;
goto done;
}
cnt = ti->tidcnt;
if (!cnt) {
ret = -EFAULT;
goto done;
}
ctxttid = rcd->ctxt * dd->rcvtidcnt;
if (!rcd->subctxt_cnt) {
tidcnt = dd->rcvtidcnt;
tid = rcd->tidcursor;
tidoff = 0;
} else if (!subctxt) {
tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
(dd->rcvtidcnt % rcd->subctxt_cnt);
tidoff = dd->rcvtidcnt - tidcnt;
ctxttid += tidoff;
tid = tidcursor_fp(fp);
} else {
tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
tidoff = tidcnt * (subctxt - 1);
ctxttid += tidoff;
tid = tidcursor_fp(fp);
}
if (cnt > tidcnt) {
/* make sure it all fits in tid_pg_list */
qib_devinfo(dd->pcidev,
"Process tried to allocate %u TIDs, only trying max (%u)\n",
cnt, tidcnt);
cnt = tidcnt;
}
pagep = (struct page **) rcd->tid_pg_list;
tidlist = (u16 *) &pagep[dd->rcvtidcnt];
pagep += tidoff;
tidlist += tidoff;
memset(tidmap, 0, sizeof(tidmap));
/* before decrement; chip actual # */
ntids = tidcnt;
tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
dd->rcvtidbase +
ctxttid * sizeof(*tidbase));
/* virtual address of first page in transfer */
vaddr = ti->tidvaddr;
if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
cnt * PAGE_SIZE)) {
ret = -EFAULT;
goto done;
}
ret = qib_get_user_pages(vaddr, cnt, pagep);
if (ret) {
/*
* if (ret == -EBUSY)
* We can't continue because the pagep array won't be
* initialized. This should never happen,
* unless perhaps the user has mpin'ed the pages
* themselves.
*/
qib_devinfo(
dd->pcidev,
"Failed to lock addr %p, %u pages: errno %d\n",
(void *) vaddr, cnt, -ret);
goto done;
}
for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
for (; ntids--; tid++) {
if (tid == tidcnt)
tid = 0;
if (!dd->pageshadow[ctxttid + tid])
break;
}
if (ntids < 0) {
/*
* Oops, wrapped all the way through their TIDs,
* and didn't have enough free; see comments at
* start of routine
*/
i--; /* last tidlist[i] not filled in */
ret = -ENOMEM;
break;
}
tidlist[i] = tid + tidoff;
/* we "know" system pages and TID pages are same size */
dd->pageshadow[ctxttid + tid] = pagep[i];
dd->physshadow[ctxttid + tid] =
qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
/*
* don't need atomic or it's overhead
*/
__set_bit(tid, tidmap);
physaddr = dd->physshadow[ctxttid + tid];
/* PERFORMANCE: below should almost certainly be cached */
dd->f_put_tid(dd, &tidbase[tid],
RCVHQ_RCV_TYPE_EXPECTED, physaddr);
/*
* don't check this tid in qib_ctxtshadow, since we
* just filled it in; start with the next one.
*/
tid++;
}
if (ret) {
u32 limit;
cleanup:
/* jump here if copy out of updated info failed... */
/* same code that's in qib_free_tid() */
limit = sizeof(tidmap) * BITS_PER_BYTE;
if (limit > tidcnt)
/* just in case size changes in future */
limit = tidcnt;
tid = find_first_bit((const unsigned long *)tidmap, limit);
for (; tid < limit; tid++) {
if (!test_bit(tid, tidmap))
continue;
if (dd->pageshadow[ctxttid + tid]) {
dma_addr_t phys;
phys = dd->physshadow[ctxttid + tid];
dd->physshadow[ctxttid + tid] = dd->tidinvalid;
/* PERFORMANCE: below should almost certainly
* be cached
*/
dd->f_put_tid(dd, &tidbase[tid],
RCVHQ_RCV_TYPE_EXPECTED,
dd->tidinvalid);
pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
dd->pageshadow[ctxttid + tid] = NULL;
}
}
qib_release_user_pages(pagep, cnt);
} else {
/*
* Copy the updated array, with qib_tid's filled in, back
* to user. Since we did the copy in already, this "should
 * never fail". If it does, we have to clean up...
*/
if (copy_to_user((void __user *)
(unsigned long) ti->tidlist,
tidlist, cnt * sizeof(*tidlist))) {
ret = -EFAULT;
goto cleanup;
}
if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
tidmap, sizeof(tidmap))) {
ret = -EFAULT;
goto cleanup;
}
if (tid == tidcnt)
tid = 0;
if (!rcd->subctxt_cnt)
rcd->tidcursor = tid;
else
tidcursor_fp(fp) = tid;
}
done:
return ret;
}
/**
* qib_tid_free - free a context TID
* @rcd: the context
* @subctxt: the subcontext
* @ti: the TID info
*
* right now we are unlocking one page at a time, but since
* the intended use of this routine is for a single group of
* virtually contiguous pages, that should change to improve
* performance. We check that the TID is in range for this context
* but otherwise don't check validity; if user has an error and
* frees the wrong tid, it's only their own data that can thereby
 * be corrupted. We do check that the TID was in use, for sanity.
* We always use our idea of the saved address, not the address that
* they pass in to us.
*/
static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
const struct qib_tid_info *ti)
{
int ret = 0;
u32 tid, ctxttid, cnt, limit, tidcnt;
struct qib_devdata *dd = rcd->dd;
u64 __iomem *tidbase;
unsigned long tidmap[8];
if (!dd->pageshadow) {
ret = -ENOMEM;
goto done;
}
if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
sizeof(tidmap))) {
ret = -EFAULT;
goto done;
}
ctxttid = rcd->ctxt * dd->rcvtidcnt;
if (!rcd->subctxt_cnt)
tidcnt = dd->rcvtidcnt;
else if (!subctxt) {
tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
(dd->rcvtidcnt % rcd->subctxt_cnt);
ctxttid += dd->rcvtidcnt - tidcnt;
} else {
tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
ctxttid += tidcnt * (subctxt - 1);
}
tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
dd->rcvtidbase +
ctxttid * sizeof(*tidbase));
limit = sizeof(tidmap) * BITS_PER_BYTE;
if (limit > tidcnt)
/* just in case size changes in future */
limit = tidcnt;
tid = find_first_bit(tidmap, limit);
for (cnt = 0; tid < limit; tid++) {
/*
* small optimization; if we detect a run of 3 or so without
* any set, use find_first_bit again. That's mainly to
* accelerate the case where we wrapped, so we have some at
* the beginning, and some at the end, and a big gap
* in the middle.
*/
if (!test_bit(tid, tidmap))
continue;
cnt++;
if (dd->pageshadow[ctxttid + tid]) {
struct page *p;
dma_addr_t phys;
p = dd->pageshadow[ctxttid + tid];
dd->pageshadow[ctxttid + tid] = NULL;
phys = dd->physshadow[ctxttid + tid];
dd->physshadow[ctxttid + tid] = dd->tidinvalid;
/* PERFORMANCE: below should almost certainly be
* cached
*/
dd->f_put_tid(dd, &tidbase[tid],
RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
qib_release_user_pages(&p, 1);
}
}
done:
return ret;
}
/**
* qib_set_part_key - set a partition key
* @rcd: the context
* @key: the key
*
* We can have up to 4 active at a time (other than the default, which is
* always allowed). This is somewhat tricky, since multiple contexts may set
* the same key, so we reference count them, and clean up at exit. All 4
* partition keys are packed into a single qlogic_ib register. It's an
* error for a process to set the same pkey multiple times. We provide no
* mechanism to de-allocate a pkey at this time, we may eventually need to
* do that. I've used the atomic operations, and no locking, and only make
* a single pass through what's available. This should be more than
* adequate for some time. I'll think about spinlocks or the like if and as
* it's necessary.
*/
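/*
 * Illustrative scenario (not from the original source): if two contexts
 * on the same port each set pkey 0x8001, the first call claims an empty
 * ppd->pkeys[] slot with pkeyrefs == 1; the second finds the matching
 * key and just bumps pkeyrefs to 2. qib_clean_part_key() drops a
 * reference at close, and the slot is zeroed only when the count hits
 * zero.
 */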
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
struct qib_pportdata *ppd = rcd->ppd;
int i, any = 0, pidx = -1;
u16 lkey = key & 0x7FFF;
int ret;
if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
/* nothing to do; this key always valid */
ret = 0;
goto bail;
}
if (!lkey) {
ret = -EINVAL;
goto bail;
}
/*
* Set the full membership bit, because it has to be
* set in the register or the packet, and it seems
* cleaner to set in the register than to force all
* callers to set it.
*/
key |= 0x8000;
for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
if (!rcd->pkeys[i] && pidx == -1)
pidx = i;
if (rcd->pkeys[i] == key) {
ret = -EEXIST;
goto bail;
}
}
if (pidx == -1) {
ret = -EBUSY;
goto bail;
}
for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i]) {
any++;
continue;
}
if (ppd->pkeys[i] == key) {
atomic_t *pkrefs = &ppd->pkeyrefs[i];
if (atomic_inc_return(pkrefs) > 1) {
rcd->pkeys[pidx] = key;
ret = 0;
goto bail;
} else {
/*
* lost race, decrement count, catch below
*/
atomic_dec(pkrefs);
any++;
}
}
if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
/*
* It makes no sense to have both the limited and
* full membership PKEY set at the same time since
* the unlimited one will disable the limited one.
*/
ret = -EEXIST;
goto bail;
}
}
if (!any) {
ret = -EBUSY;
goto bail;
}
for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i] &&
atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
rcd->pkeys[pidx] = key;
ppd->pkeys[i] = key;
(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
ret = 0;
goto bail;
}
}
ret = -EBUSY;
bail:
return ret;
}
/**
* qib_manage_rcvq - manage a context's receive queue
* @rcd: the context
* @subctxt: the subcontext
* @start_stop: action to carry out
*
* start_stop == 0 disables receive on the context, for use in queue
* overflow conditions. start_stop==1 re-enables, to be used to
* re-init the software copy of the head register
*/
static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
int start_stop)
{
struct qib_devdata *dd = rcd->dd;
unsigned int rcvctrl_op;
if (subctxt)
goto bail;
/* atomically clear receive enable ctxt. */
if (start_stop) {
/*
* On enable, force in-memory copy of the tail register to
* 0, so that protocol code doesn't have to worry about
* whether or not the chip has yet updated the in-memory
* copy or not on return from the system call. The chip
 * always resets its tail register back to 0 on a
* transition from disabled to enabled.
*/
if (rcd->rcvhdrtail_kvaddr)
qib_clear_rcvhdrtail(rcd);
rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
} else
rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
/* always; new head should be equal to new tail; see above */
bail:
return 0;
}
static void qib_clean_part_key(struct qib_ctxtdata *rcd,
struct qib_devdata *dd)
{
int i, j, pchanged = 0;
u64 oldpkey;
struct qib_pportdata *ppd = rcd->ppd;
/* for debugging only */
oldpkey = (u64) ppd->pkeys[0] |
((u64) ppd->pkeys[1] << 16) |
((u64) ppd->pkeys[2] << 32) |
((u64) ppd->pkeys[3] << 48);
for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
if (!rcd->pkeys[i])
continue;
for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
/* check for match independent of the global bit */
if ((ppd->pkeys[j] & 0x7fff) !=
(rcd->pkeys[i] & 0x7fff))
continue;
if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
ppd->pkeys[j] = 0;
pchanged++;
}
break;
}
rcd->pkeys[i] = 0;
}
if (pchanged)
(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
}
/* common code for the mappings on dma_alloc_coherent mem */
static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
unsigned len, void *kvaddr, u32 write_ok, char *what)
{
struct qib_devdata *dd = rcd->dd;
unsigned long pfn;
int ret;
if ((vma->vm_end - vma->vm_start) > len) {
qib_devinfo(dd->pcidev,
"FAIL on %s: len %lx > %x\n", what,
vma->vm_end - vma->vm_start, len);
ret = -EFAULT;
goto bail;
}
/*
 * shared context user code requires rcvhdrq mapped r/w; others
 * are only allowed a readonly mapping.
*/
if (!write_ok) {
if (vma->vm_flags & VM_WRITE) {
qib_devinfo(dd->pcidev,
"%s must be mapped readonly\n", what);
ret = -EPERM;
goto bail;
}
/* don't allow them to later change with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
}
pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
ret = remap_pfn_range(vma, vma->vm_start, pfn,
len, vma->vm_page_prot);
if (ret)
qib_devinfo(dd->pcidev,
"%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
what, rcd->ctxt, pfn, len, ret);
bail:
return ret;
}
static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
u64 ureg)
{
unsigned long phys;
unsigned long sz;
int ret;
/*
* This is real hardware, so use io_remap. This is the mechanism
* for the user process to update the head registers for their ctxt
* in the chip.
*/
sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
if ((vma->vm_end - vma->vm_start) > sz) {
qib_devinfo(dd->pcidev,
"FAIL mmap userreg: reqlen %lx > PAGE\n",
vma->vm_end - vma->vm_start);
ret = -EFAULT;
} else {
phys = dd->physaddr + ureg;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
ret = io_remap_pfn_range(vma, vma->vm_start,
phys >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
return ret;
}
static int mmap_piobufs(struct vm_area_struct *vma,
struct qib_devdata *dd,
struct qib_ctxtdata *rcd,
unsigned piobufs, unsigned piocnt)
{
unsigned long phys;
int ret;
/*
* When we map the PIO buffers in the chip, we want to map them as
* writeonly, no read possible; unfortunately, x86 doesn't allow
* for this in hardware, but we still prevent users from asking
* for it.
*/
if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
qib_devinfo(dd->pcidev,
"FAIL mmap piobufs: reqlen %lx > PAGE\n",
vma->vm_end - vma->vm_start);
ret = -EINVAL;
goto bail;
}
phys = dd->physaddr + piobufs;
#if defined(__powerpc__)
/* There isn't a generic way to specify writethrough mappings */
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif
/*
* don't allow them to later change to readable with mprotect (for when
* not initially mapped readable, as is normally the case)
*/
vma->vm_flags &= ~VM_MAYREAD;
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
/* We used PAT if wc_cookie == 0 */
if (!dd->wc_cookie)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
bail:
return ret;
}
static int mmap_rcvegrbufs(struct vm_area_struct *vma,
struct qib_ctxtdata *rcd)
{
struct qib_devdata *dd = rcd->dd;
unsigned long start, size;
size_t total_size, i;
unsigned long pfn;
int ret;
size = rcd->rcvegrbuf_size;
total_size = rcd->rcvegrbuf_chunks * size;
if ((vma->vm_end - vma->vm_start) > total_size) {
qib_devinfo(dd->pcidev,
"FAIL on egr bufs: reqlen %lx > actual %lx\n",
vma->vm_end - vma->vm_start,
(unsigned long) total_size);
ret = -EINVAL;
goto bail;
}
if (vma->vm_flags & VM_WRITE) {
qib_devinfo(dd->pcidev,
"Can't map eager buffers as writable (flags=%lx)\n",
vma->vm_flags);
ret = -EPERM;
goto bail;
}
/* don't allow them to later change to writeable with mprotect */
vma->vm_flags &= ~VM_MAYWRITE;
start = vma->vm_start;
for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
ret = remap_pfn_range(vma, start, pfn, size,
vma->vm_page_prot);
if (ret < 0)
goto bail;
}
ret = 0;
bail:
return ret;
}
/*
* qib_file_vma_fault - handle a VMA page fault.
*/
static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page;
page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
if (!page)
return VM_FAULT_SIGBUS;
get_page(page);
vmf->page = page;
return 0;
}
static const struct vm_operations_struct qib_file_vm_ops = {
.fault = qib_file_vma_fault,
};
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
struct qib_ctxtdata *rcd, unsigned subctxt)
{
struct qib_devdata *dd = rcd->dd;
unsigned subctxt_cnt;
unsigned long len;
void *addr;
size_t size;
int ret = 0;
subctxt_cnt = rcd->subctxt_cnt;
size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
/*
* Each process has all the subctxt uregbase, rcvhdrq, and
* rcvegrbufs mmapped - as an array for all the processes,
* and also separately for this process.
*/
if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
addr = rcd->subctxt_uregbase;
size = PAGE_SIZE * subctxt_cnt;
} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
addr = rcd->subctxt_rcvhdr_base;
size = rcd->rcvhdrq_size * subctxt_cnt;
} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
addr = rcd->subctxt_rcvegrbuf;
size *= subctxt_cnt;
} else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
PAGE_SIZE * subctxt)) {
addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
size = PAGE_SIZE;
} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
rcd->rcvhdrq_size * subctxt)) {
addr = rcd->subctxt_rcvhdr_base +
rcd->rcvhdrq_size * subctxt;
size = rcd->rcvhdrq_size;
} else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
addr = rcd->user_event_mask;
size = PAGE_SIZE;
} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
size * subctxt)) {
addr = rcd->subctxt_rcvegrbuf + size * subctxt;
/* rcvegrbufs are read-only on the slave */
if (vma->vm_flags & VM_WRITE) {
qib_devinfo(dd->pcidev,
"Can't map eager buffers as writable (flags=%lx)\n",
vma->vm_flags);
ret = -EPERM;
goto bail;
}
/*
* Don't allow permission to later change to writeable
* with mprotect.
*/
vma->vm_flags &= ~VM_MAYWRITE;
} else
goto bail;
len = vma->vm_end - vma->vm_start;
if (len > size) {
ret = -EINVAL;
goto bail;
}
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &qib_file_vm_ops;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
ret = 1;
bail:
return ret;
}
/**
* qib_mmapf - mmap various structures into user space
* @fp: the file pointer
* @vma: the VM area
*
* We use this to have a shared buffer between the kernel and the user code
* for the rcvhdr queue, egr buffers, and the per-context user regs and pio
* buffers in the chip. We have the open and close entries so we can bump
* the ref count and keep the driver from being unloaded while still mapped.
*/
static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
{
struct qib_ctxtdata *rcd;
struct qib_devdata *dd;
u64 pgaddr, ureg;
unsigned piobufs, piocnt;
int ret, match = 1;
rcd = ctxt_fp(fp);
if (!rcd || !(vma->vm_flags & VM_SHARED)) {
ret = -EINVAL;
goto bail;
}
dd = rcd->dd;
/*
* This is the qib_do_user_init() code, mapping the shared buffers
* and per-context user registers into the user process. The address
* referred to by vm_pgoff is the file offset passed via mmap().
* For shared contexts, this is the kernel vmalloc() address of the
* pages to share with the master.
* For non-shared or master ctxts, this is a physical address.
* We only do one mmap for each space mapped.
*/
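/*
 * Illustrative sketch (hypothetical userspace, not part of this driver):
 * a user process would pass one of the advertised offsets straight
 * through mmap(), e.g.
 *
 *   buf = mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd,
 *              (off_t) base_info_offset);
 *
 * where base_info_offset is one of the offsets (such as the pio buffer
 * base) exchanged via the base info at init time; the exact field names
 * are per the user ABI headers and are illustrative here.
 */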
pgaddr = vma->vm_pgoff << PAGE_SHIFT;
/*
* Check for 0 in case one of the allocations failed, but user
* called mmap anyway.
*/
if (!pgaddr) {
ret = -EINVAL;
goto bail;
}
/*
* Physical addresses must fit in 40 bits for our hardware.
* Check for kernel virtual addresses first, anything else must
* match a HW or memory address.
*/
ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
if (ret) {
if (ret > 0)
ret = 0;
goto bail;
}
ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
if (!rcd->subctxt_cnt) {
/* ctxt is not shared */
piocnt = rcd->piocnt;
piobufs = rcd->piobufs;
} else if (!subctxt_fp(fp)) {
/* caller is the master */
piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
(rcd->piocnt % rcd->subctxt_cnt);
piobufs = rcd->piobufs +
dd->palign * (rcd->piocnt - piocnt);
} else {
unsigned slave = subctxt_fp(fp) - 1;
/* caller is a slave */
piocnt = rcd->piocnt / rcd->subctxt_cnt;
piobufs = rcd->piobufs + dd->palign * piocnt * slave;
}
if (pgaddr == ureg)
ret = mmap_ureg(vma, dd, ureg);
else if (pgaddr == piobufs)
ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
else if (pgaddr == dd->pioavailregs_phys)
/* in-memory copy of pioavail registers */
ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
(void *) dd->pioavailregs_dma, 0,
"pioavail registers");
else if (pgaddr == rcd->rcvegr_phys)
ret = mmap_rcvegrbufs(vma, rcd);
else if (pgaddr == (u64) rcd->rcvhdrq_phys)
/*
* The rcvhdrq itself; multiple pages, contiguous
* from an i/o perspective. Shared contexts need
* to map r/w, so we allow writing.
*/
ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
rcd->rcvhdrq, 1, "rcvhdrq");
else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
/* in-memory copy of rcvhdrq tail register */
ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
rcd->rcvhdrtail_kvaddr, 0,
"rcvhdrq tail");
else
match = 0;
if (!match)
ret = -EINVAL;
vma->vm_private_data = NULL;
if (ret < 0)
qib_devinfo(dd->pcidev,
"mmap Failure %d: off %llx len %lx\n",
-ret, (unsigned long long)pgaddr,
vma->vm_end - vma->vm_start);
bail:
return ret;
}
static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
unsigned pollflag;
poll_wait(fp, &rcd->wait, pt);
spin_lock_irq(&dd->uctxt_lock);
if (rcd->urgent != rcd->urgent_poll) {
pollflag = POLLIN | POLLRDNORM;
rcd->urgent_poll = rcd->urgent;
} else {
pollflag = 0;
set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
}
spin_unlock_irq(&dd->uctxt_lock);
return pollflag;
}
static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
unsigned pollflag;
poll_wait(fp, &rcd->wait, pt);
spin_lock_irq(&dd->uctxt_lock);
if (dd->f_hdrqempty(rcd)) {
set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
pollflag = 0;
} else
pollflag = POLLIN | POLLRDNORM;
spin_unlock_irq(&dd->uctxt_lock);
return pollflag;
}
static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
{
struct qib_ctxtdata *rcd;
unsigned pollflag;
rcd = ctxt_fp(fp);
if (!rcd)
pollflag = POLLERR;
else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
pollflag = qib_poll_urgent(rcd, fp, pt);
else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
pollflag = qib_poll_next(rcd, fp, pt);
else /* invalid */
pollflag = POLLERR;
return pollflag;
}
static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{
struct qib_filedata *fd = fp->private_data;
const unsigned int weight = cpumask_weight(¤t->cpus_allowed);
const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
int local_cpu;
/*
 * If the process has NOT already set its affinity, select and
* reserve a processor for it on the local NUMA node.
*/
if ((weight >= qib_cpulist_count) &&
(cpumask_weight(local_mask) <= qib_cpulist_count)) {
for_each_cpu(local_cpu, local_mask)
if (!test_and_set_bit(local_cpu, qib_cpulist)) {
fd->rec_cpu_num = local_cpu;
return;
}
}
/*
 * If the process has NOT already set its affinity, select and
 * reserve a processor for it, as a rendezvous for all
 * users of the driver. If they don't actually later
 * set affinity to this cpu, or set it to some other cpu,
 * it just means that sooner or later we don't recommend
 * a cpu, and let the scheduler do its best.
*/
if (weight >= qib_cpulist_count) {
int cpu;
cpu = find_first_zero_bit(qib_cpulist,
qib_cpulist_count);
if (cpu == qib_cpulist_count)
qib_dev_err(dd,
"no cpus avail for affinity PID %u\n",
current->pid);
else {
__set_bit(cpu, qib_cpulist);
fd->rec_cpu_num = cpu;
}
}
}
/*
* Check that userland and driver are compatible for subcontexts.
*/
static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
{
/* this code is written long-hand for clarity */
if (QIB_USER_SWMAJOR != user_swmajor) {
/* no promise of compatibility if major mismatch */
return 0;
}
if (QIB_USER_SWMAJOR == 1) {
switch (QIB_USER_SWMINOR) {
case 0:
case 1:
case 2:
/* no subctxt implementation so cannot be compatible */
return 0;
case 3:
/* 3 is only compatible with itself */
return user_swminor == 3;
default:
/* >= 4 are compatible (or are expected to be) */
return user_swminor <= QIB_USER_SWMINOR;
}
}
/* make no promises yet for future major versions */
return 0;
}
static int init_subctxts(struct qib_devdata *dd,
struct qib_ctxtdata *rcd,
const struct qib_user_info *uinfo)
{
int ret = 0;
unsigned num_subctxts;
size_t size;
/*
* If the user is requesting zero subctxts,
* skip the subctxt allocation.
*/
if (uinfo->spu_subctxt_cnt <= 0)
goto bail;
num_subctxts = uinfo->spu_subctxt_cnt;
/* Check for subctxt compatibility */
if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
uinfo->spu_userversion & 0xffff)) {
qib_devinfo(dd->pcidev,
"Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
(int) (uinfo->spu_userversion >> 16),
(int) (uinfo->spu_userversion & 0xffff),
QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
goto bail;
}
if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
ret = -EINVAL;
goto bail;
}
rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
if (!rcd->subctxt_uregbase) {
ret = -ENOMEM;
goto bail;
}
/* Note: rcd->rcvhdrq_size isn't initialized yet. */
size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
sizeof(u32), PAGE_SIZE) * num_subctxts;
rcd->subctxt_rcvhdr_base = vmalloc_user(size);
if (!rcd->subctxt_rcvhdr_base) {
ret = -ENOMEM;
goto bail_ureg;
}
rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
rcd->rcvegrbuf_size *
num_subctxts);
if (!rcd->subctxt_rcvegrbuf) {
ret = -ENOMEM;
goto bail_rhdr;
}
rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
rcd->subctxt_id = uinfo->spu_subctxt_id;
rcd->active_slaves = 1;
rcd->redirect_seq_cnt = 1;
set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
goto bail;
bail_rhdr:
vfree(rcd->subctxt_rcvhdr_base);
bail_ureg:
vfree(rcd->subctxt_uregbase);
rcd->subctxt_uregbase = NULL;
bail:
return ret;
}
static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
struct file *fp, const struct qib_user_info *uinfo)
{
struct qib_filedata *fd = fp->private_data;
struct qib_devdata *dd = ppd->dd;
struct qib_ctxtdata *rcd;
void *ptmp = NULL;
int ret;
int numa_id;
assign_ctxt_affinity(fp, dd);
numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
cpu_to_node(fd->rec_cpu_num) :
numa_node_id()) : dd->assigned_node_id;
rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
/*
* Allocate memory for use in qib_tid_update() at open to
* reduce cost of expected send setup per message segment
*/
if (rcd)
ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
dd->rcvtidcnt * sizeof(struct page **),
GFP_KERNEL);
if (!rcd || !ptmp) {
qib_dev_err(dd,
"Unable to allocate ctxtdata memory, failing open\n");
ret = -ENOMEM;
goto bailerr;
}
rcd->userversion = uinfo->spu_userversion;
ret = init_subctxts(dd, rcd, uinfo);
if (ret)
goto bailerr;
rcd->tid_pg_list = ptmp;
rcd->pid = current->pid;
init_waitqueue_head(&dd->rcd[ctxt]->wait);
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
dd->freectxts--;
ret = 0;
goto bail;
bailerr:
if (fd->rec_cpu_num != -1)
__clear_bit(fd->rec_cpu_num, qib_cpulist);
dd->rcd[ctxt] = NULL;
kfree(rcd);
kfree(ptmp);
bail:
return ret;
}
static inline int usable(struct qib_pportdata *ppd)
{
struct qib_devdata *dd = ppd->dd;
return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
(ppd->lflags & QIBL_LINKACTIVE);
}
/*
* Select a context on the given device, either using a requested port
* or the port based on the context number.
*/
static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
const struct qib_user_info *uinfo)
{
struct qib_pportdata *ppd = NULL;
int ret, ctxt;
if (port) {
if (!usable(dd->pport + port - 1)) {
ret = -ENETDOWN;
goto done;
} else
ppd = dd->pport + port - 1;
}
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
ctxt++)
;
if (ctxt == dd->cfgctxts) {
ret = -EBUSY;
goto done;
}
if (!ppd) {
u32 pidx = ctxt % dd->num_pports;
if (usable(dd->pport + pidx))
ppd = dd->pport + pidx;
else {
for (pidx = 0; pidx < dd->num_pports && !ppd;
pidx++)
if (usable(dd->pport + pidx))
ppd = dd->pport + pidx;
}
}
ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
done:
return ret;
}
static int find_free_ctxt(int unit, struct file *fp,
const struct qib_user_info *uinfo)
{
struct qib_devdata *dd = qib_lookup(unit);
int ret;
if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
ret = -ENODEV;
else
ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
return ret;
}
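/*
 * Pick a context on any unit. With QIB_PORT_ALG_ACROSS we spread users
 * out, choosing the device with ACTIVE ports that has the fewest
 * contexts in use; otherwise we fill devices in unit order.
 */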
static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
unsigned alg)
{
struct qib_devdata *udd = NULL;
int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
u32 port = uinfo->spu_port, ctxt;
devmax = qib_count_units(&npresent, &nup);
if (!npresent) {
ret = -ENXIO;
goto done;
}
if (nup == 0) {
ret = -ENETDOWN;
goto done;
}
if (alg == QIB_PORT_ALG_ACROSS) {
unsigned inuse = ~0U;
/* find device (with ACTIVE ports) with fewest ctxts in use */
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
unsigned cused = 0, cfree = 0, pusable = 0;
if (!dd)
continue;
if (port && port <= dd->num_pports &&
usable(dd->pport + port - 1))
pusable = 1;
else
for (i = 0; i < dd->num_pports; i++)
if (usable(dd->pport + i))
pusable++;
if (!pusable)
continue;
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
ctxt++)
if (dd->rcd[ctxt])
cused++;
else
cfree++;
if (cfree && cused < inuse) {
udd = dd;
inuse = cused;
}
}
if (udd) {
ret = choose_port_ctxt(fp, udd, port, uinfo);
goto done;
}
} else {
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
if (dd) {
ret = choose_port_ctxt(fp, dd, port, uinfo);
if (!ret)
goto done;
if (ret == -EBUSY)
dusable++;
}
}
}
ret = dusable ? -EBUSY : -ENETDOWN;
done:
return ret;
}
static int find_shared_ctxt(struct file *fp,
const struct qib_user_info *uinfo)
{
int devmax, ndev, i;
int ret = 0;
devmax = qib_count_units(NULL, NULL);
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
/* device portion of usable() */
if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
continue;
for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
struct qib_ctxtdata *rcd = dd->rcd[i];
/* Skip ctxts which are not yet open */
if (!rcd || !rcd->cnt)
continue;
/* Skip ctxt if it doesn't match the requested one */
if (rcd->subctxt_id != uinfo->spu_subctxt_id)
continue;
/* Verify the sharing process matches the master */
if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
rcd->userversion != uinfo->spu_userversion ||
rcd->cnt >= rcd->subctxt_cnt) {
ret = -EINVAL;
goto done;
}
ctxt_fp(fp) = rcd;
subctxt_fp(fp) = rcd->cnt++;
rcd->subpid[subctxt_fp(fp)] = current->pid;
tidcursor_fp(fp) = 0;
rcd->active_slaves |= 1 << subctxt_fp(fp);
ret = 1;
goto done;
}
}
done:
return ret;
}
static int qib_open(struct inode *in, struct file *fp)
{
/* The real work is performed later in qib_assign_ctxt() */
fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
if (fp->private_data) /* no cpu affinity by default */
((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
return fp->private_data ? 0 : -ENOMEM;
}
static int find_hca(unsigned int cpu, int *unit)
{
int ret = 0, devmax, npresent, nup, ndev;
*unit = -1;
devmax = qib_count_units(&npresent, &nup);
if (!npresent) {
ret = -ENXIO;
goto done;
}
if (!nup) {
ret = -ENETDOWN;
goto done;
}
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
if (dd) {
if (pcibus_to_node(dd->pcidev->bus) < 0) {
ret = -EINVAL;
goto done;
}
if (cpu_to_node(cpu) ==
pcibus_to_node(dd->pcidev->bus)) {
*unit = ndev;
goto done;
}
}
}
done:
return ret;
}
static int do_qib_user_sdma_queue_create(struct file *fp)
{
struct qib_filedata *fd = fp->private_data;
struct qib_ctxtdata *rcd = fd->rcd;
struct qib_devdata *dd = rcd->dd;
if (dd->flags & QIB_HAS_SEND_DMA) {
fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
dd->unit,
rcd->ctxt,
fd->subctxt);
if (!fd->pq)
return -ENOMEM;
}
return 0;
}
/*
 * Get the ctxt early, so we can set affinity prior to memory allocation.
*/
static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
int ret;
int i_minor;
unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
/* Check to be sure we haven't already initialized this file */
if (ctxt_fp(fp)) {
ret = -EINVAL;
goto done;
}
/* for now, if major version is different, bail */
swmajor = uinfo->spu_userversion >> 16;
if (swmajor != QIB_USER_SWMAJOR) {
ret = -ENODEV;
goto done;
}
swminor = uinfo->spu_userversion & 0xffff;
if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
alg = uinfo->spu_port_alg;
mutex_lock(&qib_mutex);
if (qib_compatible_subctxts(swmajor, swminor) &&
uinfo->spu_subctxt_cnt) {
ret = find_shared_ctxt(fp, uinfo);
if (ret > 0) {
ret = do_qib_user_sdma_queue_create(fp);
if (!ret)
assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
goto done_ok;
}
}
i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
if (i_minor)
ret = find_free_ctxt(i_minor - 1, fp, uinfo);
else {
int unit;
const unsigned int cpu = cpumask_first(¤t->cpus_allowed);
const unsigned int weight =
cpumask_weight(¤t->cpus_allowed);
if (weight == 1 && !test_bit(cpu, qib_cpulist))
if (!find_hca(cpu, &unit) && unit >= 0)
if (!find_free_ctxt(unit, fp, uinfo)) {
ret = 0;
goto done_chk_sdma;
}
ret = get_a_ctxt(fp, uinfo, alg);
}
done_chk_sdma:
if (!ret)
ret = do_qib_user_sdma_queue_create(fp);
done_ok:
mutex_unlock(&qib_mutex);
done:
return ret;
}
static int qib_do_user_init(struct file *fp,
const struct qib_user_info *uinfo)
{
int ret;
struct qib_ctxtdata *rcd = ctxt_fp(fp);
struct qib_devdata *dd;
unsigned uctxt;
/* Subctxts don't need to initialize anything since master did it. */
if (subctxt_fp(fp)) {
ret = wait_event_interruptible(rcd->wait,
!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
goto bail;
}
dd = rcd->dd;
/* some ctxts may get extra buffers, calculate that here */
uctxt = rcd->ctxt - dd->first_user_ctxt;
if (uctxt < dd->ctxts_extrabuf) {
rcd->piocnt = dd->pbufsctxt + 1;
rcd->pio_base = rcd->piocnt * uctxt;
} else {
rcd->piocnt = dd->pbufsctxt;
rcd->pio_base = rcd->piocnt * uctxt +
dd->ctxts_extrabuf;
}
/*
* All user buffers are 2KB buffers. If we ever support
* giving 4KB buffers to user processes, this will need some
* work. Can't use piobufbase directly, because it has
* both 2K and 4K buffer base values. So check and handle.
*/
if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
if (rcd->pio_base >= dd->piobcnt2k) {
qib_dev_err(dd,
"%u:ctxt%u: no 2KB buffers available\n",
dd->unit, rcd->ctxt);
ret = -ENOBUFS;
goto bail;
}
rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
rcd->ctxt, rcd->piocnt);
}
rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
TXCHK_CHG_TYPE_USER, rcd);
/*
 * Try to ensure that processes start up with a consistent avail update
 * for their own range, at least. If the system is very quiet, the
 * in-memory copy may be out of date at startup for this range of
 * buffers, when a context gets re-used. Do this after the chg_pioavail
 * and before the rest of setup, so it's "almost certain" the DMA
 * will have occurred (we can't guarantee 100%, but it should be many
 * nines of probability, with this ordering), given how much else happens
 * after this.
*/
dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
/*
 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
 * array for the time being. If rcd->ctxt > chip-supported,
 * we would need extra logic here to handle it by overflowing
 * through ctxt 0, someday.
*/
ret = qib_create_rcvhdrq(dd, rcd);
if (!ret)
ret = qib_setup_eagerbufs(rcd);
if (ret)
goto bail_pio;
rcd->tidcursor = 0; /* start at beginning after open */
/* initialize poll variables... */
rcd->urgent = 0;
rcd->urgent_poll = 0;
/*
* Now enable the ctxt for receive.
* For chips that are set to DMA the tail register to memory
 * when it changes (and when the update bit transitions from
 * 0 to 1), we turn it off and then back on.
* This will (very briefly) affect any other open ctxts, but the
* duration is very short, and therefore isn't an issue. We
* explicitly set the in-memory tail copy to 0 beforehand, so we
* don't have to wait to be sure the DMA update has happened
* (chip resets head/tail to 0 on transition to enable).
*/
if (rcd->rcvhdrtail_kvaddr)
qib_clear_rcvhdrtail(rcd);
dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
rcd->ctxt);
/* Notify any waiting slaves */
if (rcd->subctxt_cnt) {
clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
wake_up(&rcd->wait);
}
return 0;
bail_pio:
qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
TXCHK_CHG_TYPE_KERN, rcd);
bail:
return ret;
}
/**
 * unlock_expected_tids - unlock any expected TID entries the context still had in use
* @rcd: ctxt
*
* We don't actually update the chip here, because we do a bulk update
* below, using f_clear_tids.
*/
static void unlock_expected_tids(struct qib_ctxtdata *rcd)
{
struct qib_devdata *dd = rcd->dd;
int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
for (i = ctxt_tidbase; i < maxtid; i++) {
struct page *p = dd->pageshadow[i];
dma_addr_t phys;
if (!p)
continue;
phys = dd->physshadow[i];
dd->physshadow[i] = dd->tidinvalid;
dd->pageshadow[i] = NULL;
pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
qib_release_user_pages(&p, 1);
cnt++;
}
}
static int qib_close(struct inode *in, struct file *fp)
{
int ret = 0;
struct qib_filedata *fd;
struct qib_ctxtdata *rcd;
struct qib_devdata *dd;
unsigned long flags;
unsigned ctxt;
pid_t pid;
mutex_lock(&qib_mutex);
fd = fp->private_data;
fp->private_data = NULL;
rcd = fd->rcd;
if (!rcd) {
mutex_unlock(&qib_mutex);
goto bail;
}
dd = rcd->dd;
/* ensure all pio buffer writes in progress are flushed */
qib_flush_wc();
/* drain user sdma queue */
if (fd->pq) {
qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
qib_user_sdma_queue_destroy(fd->pq);
}
if (fd->rec_cpu_num != -1)
__clear_bit(fd->rec_cpu_num, qib_cpulist);
if (--rcd->cnt) {
/*
* XXX If the master closes the context before the slave(s),
* revoke the mmap for the eager receive queue so
* the slave(s) don't wait for receive data forever.
*/
rcd->active_slaves &= ~(1 << fd->subctxt);
rcd->subpid[fd->subctxt] = 0;
mutex_unlock(&qib_mutex);
goto bail;
}
/* early; no interrupt users after this */
spin_lock_irqsave(&dd->uctxt_lock, flags);
ctxt = rcd->ctxt;
dd->rcd[ctxt] = NULL;
pid = rcd->pid;
rcd->pid = 0;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
if (rcd->rcvwait_to || rcd->piowait_to ||
rcd->rcvnowait || rcd->pionowait) {
rcd->rcvwait_to = 0;
rcd->piowait_to = 0;
rcd->rcvnowait = 0;
rcd->pionowait = 0;
}
if (rcd->flag)
rcd->flag = 0;
if (dd->kregbase) {
/* atomically clear receive enable ctxt and intr avail. */
dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
/* clean up the pkeys for this ctxt user */
qib_clean_part_key(rcd, dd);
qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
qib_chg_pioavailkernel(dd, rcd->pio_base,
rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
dd->f_clear_tids(dd, rcd);
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
dd->freectxts++;
}
mutex_unlock(&qib_mutex);
qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
bail:
kfree(fd);
return ret;
}
static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
{
struct qib_ctxt_info info;
int ret;
size_t sz;
struct qib_ctxtdata *rcd = ctxt_fp(fp);
struct qib_filedata *fd;
fd = fp->private_data;
info.num_active = qib_count_active_units();
info.unit = rcd->dd->unit;
info.port = rcd->ppd->port;
info.ctxt = rcd->ctxt;
info.subctxt = subctxt_fp(fp);
/* Number of user ctxts available for this device. */
info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
info.num_subctxts = rcd->subctxt_cnt;
info.rec_cpu = fd->rec_cpu_num;
sz = sizeof(info);
if (copy_to_user(uinfo, &info, sz)) {
ret = -EFAULT;
goto bail;
}
ret = 0;
bail:
return ret;
}
static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
u32 __user *inflightp)
{
const u32 val = qib_user_sdma_inflight_counter(pq);
if (put_user(val, inflightp))
return -EFAULT;
return 0;
}
static int qib_sdma_get_complete(struct qib_pportdata *ppd,
struct qib_user_sdma_queue *pq,
u32 __user *completep)
{
u32 val;
int err;
if (!pq)
return -EINVAL;
err = qib_user_sdma_make_progress(ppd, pq);
if (err < 0)
return err;
val = qib_user_sdma_complete_counter(pq);
if (put_user(val, completep))
return -EFAULT;
return 0;
}
static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
int ret = 0;
if (!usable(rcd->ppd)) {
int i;
/*
* if link is down, or otherwise not usable, delay
* the caller up to 30 seconds, so we don't thrash
* in trying to get the chip back to ACTIVE, and
* set flag so they make the call again.
*/
if (rcd->user_event_mask) {
/*
* subctxt_cnt is 0 if not shared, so do base
* separately, first, then remaining subctxt, if any
*/
set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
&rcd->user_event_mask[0]);
for (i = 1; i < rcd->subctxt_cnt; i++)
set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
&rcd->user_event_mask[i]);
}
for (i = 0; !usable(rcd->ppd) && i < 300; i++)
msleep(100);
ret = -ENETDOWN;
}
return ret;
}
/*
* Find all user contexts in use, and set the specified bit in their
* event mask.
 * See also find_ctxt() for a similar use that is specific to send buffers.
*/
int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
{
struct qib_ctxtdata *rcd;
unsigned ctxt;
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
ctxt++) {
rcd = ppd->dd->rcd[ctxt];
if (!rcd)
continue;
if (rcd->user_event_mask) {
int i;
/*
* subctxt_cnt is 0 if not shared, so do base
* separately, first, then remaining subctxt, if any
*/
set_bit(evtbit, &rcd->user_event_mask[0]);
for (i = 1; i < rcd->subctxt_cnt; i++)
set_bit(evtbit, &rcd->user_event_mask[i]);
}
ret = 1;
break;
}
spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
return ret;
}
/*
 * Clear the event notifier events for this context.
 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
 * Other bits don't currently require actions; just atomically clear them.
 * The user process then performs actions appropriate to the bit having
 * been set, if desired, and checks again in the future.
*/
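/*
 * Illustrative sketch (hypothetical userspace, not part of the driver):
 *
 *   struct qib_cmd c = { .type = QIB_CMD_ACK_EVENT };
 *   c.cmd.event_mask = 1UL << _QIB_EVENT_DISARM_BUFS_BIT;
 *   write(fd, &c, sizeof(c.type) + sizeof(c.cmd.event_mask));
 *
 * The write length shown assumes the type/payload layout of struct
 * qib_cmd as checked in qib_write() below.
 */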
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
unsigned long events)
{
int ret = 0, i;
for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
if (!test_bit(i, &events))
continue;
if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
(void)qib_disarm_piobufs_ifneeded(rcd);
ret = disarm_req_delay(rcd);
} else
clear_bit(i, &rcd->user_event_mask[subctxt]);
}
return ret;
}
static ssize_t qib_write(struct file *fp, const char __user *data,
size_t count, loff_t *off)
{
const struct qib_cmd __user *ucmd;
struct qib_ctxtdata *rcd;
const void __user *src;
size_t consumed, copy = 0;
struct qib_cmd cmd;
ssize_t ret = 0;
void *dest;
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
}
ucmd = (const struct qib_cmd __user *) data;
if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
ret = -EFAULT;
goto bail;
}
consumed = sizeof(cmd.type);
switch (cmd.type) {
case QIB_CMD_ASSIGN_CTXT:
case QIB_CMD_USER_INIT:
copy = sizeof(cmd.cmd.user_info);
dest = &cmd.cmd.user_info;
src = &ucmd->cmd.user_info;
break;
case QIB_CMD_RECV_CTRL:
copy = sizeof(cmd.cmd.recv_ctrl);
dest = &cmd.cmd.recv_ctrl;
src = &ucmd->cmd.recv_ctrl;
break;
case QIB_CMD_CTXT_INFO:
copy = sizeof(cmd.cmd.ctxt_info);
dest = &cmd.cmd.ctxt_info;
src = &ucmd->cmd.ctxt_info;
break;
case QIB_CMD_TID_UPDATE:
case QIB_CMD_TID_FREE:
copy = sizeof(cmd.cmd.tid_info);
dest = &cmd.cmd.tid_info;
src = &ucmd->cmd.tid_info;
break;
case QIB_CMD_SET_PART_KEY:
copy = sizeof(cmd.cmd.part_key);
dest = &cmd.cmd.part_key;
src = &ucmd->cmd.part_key;
break;
case QIB_CMD_DISARM_BUFS:
case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
copy = 0;
src = NULL;
dest = NULL;
break;
case QIB_CMD_POLL_TYPE:
copy = sizeof(cmd.cmd.poll_type);
dest = &cmd.cmd.poll_type;
src = &ucmd->cmd.poll_type;
break;
case QIB_CMD_ARMLAUNCH_CTRL:
copy = sizeof(cmd.cmd.armlaunch_ctrl);
dest = &cmd.cmd.armlaunch_ctrl;
src = &ucmd->cmd.armlaunch_ctrl;
break;
case QIB_CMD_SDMA_INFLIGHT:
copy = sizeof(cmd.cmd.sdma_inflight);
dest = &cmd.cmd.sdma_inflight;
src = &ucmd->cmd.sdma_inflight;
break;
case QIB_CMD_SDMA_COMPLETE:
copy = sizeof(cmd.cmd.sdma_complete);
dest = &cmd.cmd.sdma_complete;
src = &ucmd->cmd.sdma_complete;
break;
case QIB_CMD_ACK_EVENT:
copy = sizeof(cmd.cmd.event_mask);
dest = &cmd.cmd.event_mask;
src = &ucmd->cmd.event_mask;
break;
default:
ret = -EINVAL;
goto bail;
}
if (copy) {
if ((count - consumed) < copy) {
ret = -EINVAL;
goto bail;
}
if (copy_from_user(dest, src, copy)) {
ret = -EFAULT;
goto bail;
}
consumed += copy;
}
rcd = ctxt_fp(fp);
if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
ret = -EINVAL;
goto bail;
}
switch (cmd.type) {
case QIB_CMD_ASSIGN_CTXT:
ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
if (ret)
goto bail;
break;
case QIB_CMD_USER_INIT:
ret = qib_do_user_init(fp, &cmd.cmd.user_info);
if (ret)
goto bail;
ret = qib_get_base_info(fp, (void __user *) (unsigned long)
cmd.cmd.user_info.spu_base_info,
cmd.cmd.user_info.spu_base_info_size);
break;
case QIB_CMD_RECV_CTRL:
ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
break;
case QIB_CMD_CTXT_INFO:
ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
(unsigned long) cmd.cmd.ctxt_info);
break;
case QIB_CMD_TID_UPDATE:
ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
break;
case QIB_CMD_TID_FREE:
ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
break;
case QIB_CMD_SET_PART_KEY:
ret = qib_set_part_key(rcd, cmd.cmd.part_key);
break;
case QIB_CMD_DISARM_BUFS:
(void)qib_disarm_piobufs_ifneeded(rcd);
ret = disarm_req_delay(rcd);
break;
case QIB_CMD_PIOAVAILUPD:
qib_force_pio_avail_update(rcd->dd);
break;
case QIB_CMD_POLL_TYPE:
rcd->poll_type = cmd.cmd.poll_type;
break;
case QIB_CMD_ARMLAUNCH_CTRL:
rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
break;
case QIB_CMD_SDMA_INFLIGHT:
ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
(u32 __user *) (unsigned long)
cmd.cmd.sdma_inflight);
break;
case QIB_CMD_SDMA_COMPLETE:
ret = qib_sdma_get_complete(rcd->ppd,
user_sdma_queue_fp(fp),
(u32 __user *) (unsigned long)
cmd.cmd.sdma_complete);
break;
case QIB_CMD_ACK_EVENT:
ret = qib_user_event_ack(rcd, subctxt_fp(fp),
cmd.cmd.event_mask);
break;
}
if (ret >= 0)
ret = consumed;
bail:
return ret;
}
static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct qib_filedata *fp = iocb->ki_filp->private_data;
struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
struct qib_user_sdma_queue *pq = fp->pq;
if (!iter_is_iovec(from) || !from->nr_segs || !pq)
return -EINVAL;
return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
}
static struct class *qib_class;
static dev_t qib_dev;
int qib_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev **cdevp, struct device **devp)
{
const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
struct cdev *cdev;
struct device *device = NULL;
int ret;
cdev = cdev_alloc();
if (!cdev) {
pr_err("Could not allocate cdev for minor %d, %s\n",
minor, name);
ret = -ENOMEM;
goto done;
}
cdev->owner = THIS_MODULE;
cdev->ops = fops;
kobject_set_name(&cdev->kobj, name);
ret = cdev_add(cdev, dev, 1);
if (ret < 0) {
pr_err("Could not add cdev for minor %d, %s (err %d)\n",
minor, name, -ret);
goto err_cdev;
}
device = device_create(qib_class, NULL, dev, NULL, "%s", name);
if (!IS_ERR(device))
goto done;
ret = PTR_ERR(device);
device = NULL;
pr_err("Could not create device for minor %d, %s (err %d)\n",
minor, name, -ret);
err_cdev:
cdev_del(cdev);
cdev = NULL;
done:
*cdevp = cdev;
*devp = device;
return ret;
}
void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
{
struct device *device = *devp;
if (device) {
device_unregister(device);
*devp = NULL;
}
if (*cdevp) {
cdev_del(*cdevp);
*cdevp = NULL;
}
}
static struct cdev *wildcard_cdev;
static struct device *wildcard_device;
int __init qib_dev_init(void)
{
int ret;
ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
if (ret < 0) {
pr_err("Could not allocate chrdev region (err %d)\n", -ret);
goto done;
}
qib_class = class_create(THIS_MODULE, "ipath");
if (IS_ERR(qib_class)) {
ret = PTR_ERR(qib_class);
pr_err("Could not create device class (err %d)\n", -ret);
unregister_chrdev_region(qib_dev, QIB_NMINORS);
}
done:
return ret;
}
void qib_dev_cleanup(void)
{
if (qib_class) {
class_destroy(qib_class);
qib_class = NULL;
}
unregister_chrdev_region(qib_dev, QIB_NMINORS);
}
static atomic_t user_count = ATOMIC_INIT(0);
static void qib_user_remove(struct qib_devdata *dd)
{
if (atomic_dec_return(&user_count) == 0)
qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}
static int qib_user_add(struct qib_devdata *dd)
{
char name[10];
int ret;
if (atomic_inc_return(&user_count) == 1) {
ret = qib_cdev_init(0, "ipath", &qib_file_ops,
&wildcard_cdev, &wildcard_device);
if (ret)
goto done;
}
snprintf(name, sizeof(name), "ipath%d", dd->unit);
ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
&dd->user_cdev, &dd->user_device);
if (ret)
qib_user_remove(dd);
done:
return ret;
}
/*
* Create per-unit files in /dev
*/
int qib_device_create(struct qib_devdata *dd)
{
int r, ret;
r = qib_user_add(dd);
ret = qib_diag_add(dd);
if (r && !ret)
ret = r;
return ret;
}
/*
* Remove per-unit files in /dev
 * Returns void; the core kernel provides no way to report errors for this.
*/
void qib_device_remove(struct qib_devdata *dd)
{
qib_user_remove(dd);
qib_diag_remove(dd);
}
| gpl-2.0 |
waddedMeat/ember-proxy-example | test-app/node_modules/ember-cli/bin/generate-docs.js | 119 | #!/usr/bin/env node
var DocGenerator = require('../lib/utilities/doc-generator.js');
(new DocGenerator()).generate();
| mit |
Observer-Wu/phantomjs | src/qt/qtbase/src/corelib/plugin/quuid.cpp | 27549 | /****************************************************************************
**
** Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
** Contact: http://www.qt-project.org/legal
**
** This file is part of the QtCore module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:LGPL21$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and Digia. For licensing terms and
** conditions see http://qt.digia.com/licensing. For further information
** use the contact form at http://qt.digia.com/contact-us.
**
** GNU Lesser General Public License Usage
** Alternatively, this file may be used under the terms of the GNU Lesser
** General Public License version 2.1 or version 3 as published by the Free
** Software Foundation and appearing in the file LICENSE.LGPLv21 and
** LICENSE.LGPLv3 included in the packaging of this file. Please review the
** following information to ensure the GNU Lesser General Public License
** requirements will be met: https://www.gnu.org/licenses/lgpl.html and
** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
**
** In addition, as a special exception, Digia gives you certain additional
** rights. These rights are described in the Digia Qt LGPL Exception
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
**
** $QT_END_LICENSE$
**
****************************************************************************/
#include "quuid.h"
#include "qdatastream.h"
#include "qendian.h"
#include "qdebug.h"
#ifndef QT_BOOTSTRAPPED
#include "qcryptographichash.h"
#endif
QT_BEGIN_NAMESPACE
static const char digits[] = "0123456789abcdef";
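/*
 * Small hex (de)serialization helpers shared by the string and byte
 * array conversions below; each advances its destination or source
 * pointer past the characters it produced or consumed.
 */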
template <class Char, class Integral>
void _q_toHex(Char *&dst, Integral value)
{
value = qToBigEndian(value);
const char* p = reinterpret_cast<const char*>(&value);
for (uint i = 0; i < sizeof(Integral); ++i, dst += 2) {
uint j = (p[i] >> 4) & 0xf;
dst[0] = Char(digits[j]);
j = p[i] & 0xf;
dst[1] = Char(digits[j]);
}
}
template <class Char, class Integral>
bool _q_fromHex(const Char *&src, Integral &value)
{
value = 0;
for (uint i = 0; i < sizeof(Integral) * 2; ++i) {
int ch = *src++;
int tmp;
if (ch >= '0' && ch <= '9')
tmp = ch - '0';
else if (ch >= 'a' && ch <= 'f')
tmp = ch - 'a' + 10;
else if (ch >= 'A' && ch <= 'F')
tmp = ch - 'A' + 10;
else
return false;
value = value * 16 + tmp;
}
return true;
}
template <class Char>
void _q_uuidToHex(Char *&dst, const uint &d1, const ushort &d2, const ushort &d3, const uchar (&d4)[8])
{
*dst++ = Char('{');
_q_toHex(dst, d1);
*dst++ = Char('-');
_q_toHex(dst, d2);
*dst++ = Char('-');
_q_toHex(dst, d3);
*dst++ = Char('-');
for (int i = 0; i < 2; i++)
_q_toHex(dst, d4[i]);
*dst++ = Char('-');
for (int i = 2; i < 8; i++)
_q_toHex(dst, d4[i]);
*dst = Char('}');
}
template <class Char>
bool _q_uuidFromHex(const Char *&src, uint &d1, ushort &d2, ushort &d3, uchar (&d4)[8])
{
if (*src == Char('{'))
src++;
if (!_q_fromHex(src, d1)
|| *src++ != Char('-')
|| !_q_fromHex(src, d2)
|| *src++ != Char('-')
|| !_q_fromHex(src, d3)
|| *src++ != Char('-')
|| !_q_fromHex(src, d4[0])
|| !_q_fromHex(src, d4[1])
|| *src++ != Char('-')
|| !_q_fromHex(src, d4[2])
|| !_q_fromHex(src, d4[3])
|| !_q_fromHex(src, d4[4])
|| !_q_fromHex(src, d4[5])
|| !_q_fromHex(src, d4[6])
|| !_q_fromHex(src, d4[7])) {
return false;
}
return true;
}
#ifndef QT_BOOTSTRAPPED
static QUuid createFromName(const QUuid &ns, const QByteArray &baseData, QCryptographicHash::Algorithm algorithm, int version)
{
QByteArray hashResult;
// create a scope so later resize won't reallocate
{
QCryptographicHash hash(algorithm);
hash.addData(ns.toRfc4122());
hash.addData(baseData);
hashResult = hash.result();
}
hashResult.resize(16); // Sha1 will be too long
QUuid result = QUuid::fromRfc4122(hashResult);
result.data3 &= 0x0FFF;
result.data3 |= (version << 12);
result.data4[0] &= 0x3F;
result.data4[0] |= 0x80;
return result;
}
#endif
/*!
\class QUuid
\inmodule QtCore
\brief The QUuid class stores a Universally Unique Identifier (UUID).
\reentrant
Using \e{U}niversally \e{U}nique \e{ID}entifiers (UUID) is a
standard way to uniquely identify entities in a distributed
computing environment. A UUID is a 16-byte (128-bit) number
generated by some algorithm that is meant to guarantee that the
UUID will be unique in the distributed computing environment where
it is used. The acronym GUID is often used instead, \e{G}lobally
\e{U}nique \e{ID}entifiers, but it refers to the same thing.
\target Variant field
Actually, the GUID is one \e{variant} of UUID. Multiple variants
are in use. Each UUID contains a bit field that specifies which
type (variant) of UUID it is. Call variant() to discover which
type of UUID an instance of QUuid contains. It extracts the three
most significant bits of byte 8 of the 16 bytes. In QUuid, byte 8
is \c{QUuid::data4[0]}. If you create instances of QUuid using the
constructor that accepts all the numeric values as parameters, use
the following table to set the three most significant bits of
parameter \c{b1}, which becomes \c{QUuid::data4[0]} and contains
the variant field in its three most significant bits. In the
table, 'x' means \e {don't care}.
\table
\header
\li msb0
\li msb1
\li msb2
\li Variant
\row
\li 0
\li x
\li x
\li NCS (Network Computing System)
\row
\li 1
\li 0
\li x
\li DCE (Distributed Computing Environment)
\row
\li 1
\li 1
\li 0
\li Microsoft (GUID)
\row
\li 1
\li 1
\li 1
\li Reserved for future expansion
\endtable
\target Version field
If variant() returns QUuid::DCE, the UUID also contains a
\e{version} field in the four most significant bits of
\c{QUuid::data3}, and you can call version() to discover which
version your QUuid contains. If you create instances of QUuid
using the constructor that accepts all the numeric values as
parameters, use the following table to set the four most
significant bits of parameter \c{w2}, which becomes
\c{QUuid::data3} and contains the version field in its four most
significant bits.
\table
\header
\li msb0
\li msb1
\li msb2
\li msb3
\li Version
\row
\li 0
\li 0
\li 0
\li 1
\li Time
\row
\li 0
\li 0
\li 1
\li 0
\li Embedded POSIX
\row
\li 0
\li 0
\li 1
\li 1
\li Md5(Name)
\row
\li 0
\li 1
\li 0
\li 0
\li Random
\row
\li 0
\li 1
\li 0
\li 1
\li Sha1
\endtable
The field layouts for the DCE versions listed in the table above
are specified in the \l{http://www.ietf.org/rfc/rfc4122.txt}
{Network Working Group UUID Specification}.
Most platforms provide a tool for generating new UUIDs, e.g. \c
uuidgen and \c guidgen. You can also use createUuid(). UUIDs
generated by createUuid() are of the random type. Their
QUuid::Version bits are set to QUuid::Random, and their
QUuid::Variant bits are set to QUuid::DCE. The rest of the UUID is
composed of random numbers. Theoretically, this means there is a
small chance that a UUID generated by createUuid() will not be
unique. But it is
\l{http://en.wikipedia.org/wiki/Universally_Unique_Identifier#Random_UUID_probability_of_duplicates}
{a \e{very} small chance}.
UUIDs can be constructed from numeric values or from strings, or
using the static createUuid() function. They can be converted to a
string with toString(). UUIDs have a variant() and a version(),
and null UUIDs return true from isNull().
*/
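/*
    Illustrative sketch (not part of the Qt sources): how the variant and
    version bit fields documented above can be checked on a freshly
    generated UUID. Kept as a comment so it is not compiled in.

    QUuid u = QUuid::createUuid();
    Q_ASSERT(u.variant() == QUuid::DCE);    // top bits of data4[0] are 10
    Q_ASSERT(u.version() == QUuid::Random); // top nibble of data3 is 4
*/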
/*!
\fn QUuid::QUuid(const GUID &guid)
Casts a Windows \a guid to a Qt QUuid.
\warning This function is only for Windows platforms.
*/
/*!
\fn QUuid &QUuid::operator=(const GUID &guid)
Assigns a Windows \a guid to a Qt QUuid.
\warning This function is only for Windows platforms.
*/
/*!
\fn QUuid::operator GUID() const
Returns a Windows GUID from a QUuid.
\warning This function is only for Windows platforms.
*/
/*!
\fn QUuid::QUuid()
Creates the null UUID. toString() will output the null UUID
as "{00000000-0000-0000-0000-000000000000}".
*/
/*!
\fn QUuid::QUuid(uint l, ushort w1, ushort w2, uchar b1, uchar b2, uchar b3, uchar b4, uchar b5, uchar b6, uchar b7, uchar b8)
Creates a UUID with the value specified by the parameters, \a l,
\a w1, \a w2, \a b1, \a b2, \a b3, \a b4, \a b5, \a b6, \a b7, \a
b8.
Example:
\snippet code/src_corelib_plugin_quuid.cpp 0
*/
/*!
Creates a QUuid object from the string \a text, which must be
formatted as five hex fields separated by '-', e.g.,
"{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" where 'x' is a hex
digit. The curly braces shown here are optional, but it is normal to
include them. If the conversion fails, a null UUID is created. See
toString() for an explanation of how the five hex fields map to the
public data members in QUuid.
\sa toString(), QUuid()
*/
QUuid::QUuid(const QString &text)
{
if (text.length() < 36) {
*this = QUuid();
return;
}
const ushort *data = reinterpret_cast<const ushort *>(text.unicode());
if (*data == '{' && text.length() < 37) {
*this = QUuid();
return;
}
if (!_q_uuidFromHex(data, data1, data2, data3, data4)) {
*this = QUuid();
return;
}
}
/*!
\internal
*/
QUuid::QUuid(const char *text)
{
if (!text) {
*this = QUuid();
return;
}
if (!_q_uuidFromHex(text, data1, data2, data3, data4)) {
*this = QUuid();
return;
}
}
/*!
Creates a QUuid object from the QByteArray \a text, which must be
formatted as five hex fields separated by '-', e.g.,
"{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" where 'x' is a hex
digit. The curly braces shown here are optional, but it is normal to
include them. If the conversion fails, a null UUID is created. See
toByteArray() for an explanation of how the five hex fields map to the
public data members in QUuid.
\since 4.8
\sa toByteArray(), QUuid()
*/
QUuid::QUuid(const QByteArray &text)
{
if (text.length() < 36) {
*this = QUuid();
return;
}
const char *data = text.constData();
if (*data == '{' && text.length() < 37) {
*this = QUuid();
return;
}
if (!_q_uuidFromHex(data, data1, data2, data3, data4)) {
*this = QUuid();
return;
}
}
/*!
\since 5.0
\fn QUuid QUuid::createUuidV3(const QUuid &ns, const QByteArray &baseData);
This function returns a new UUID with variant QUuid::DCE and version QUuid::Md5.
\a ns is the namespace and \a baseData is the basic data as described by RFC 4122.
\sa variant(), version(), createUuidV5()
*/
/*!
\since 5.0
\fn QUuid QUuid::createUuidV3(const QUuid &ns, const QString &baseData);
This function returns a new UUID with variant QUuid::DCE and version QUuid::Md5.
\a ns is the namespace and \a baseData is the basic data as described by RFC 4122.
\sa variant(), version(), createUuidV5()
*/
/*!
\since 5.0
\fn QUuid QUuid::createUuidV5(const QUuid &ns, const QByteArray &baseData);
This function returns a new UUID with variant QUuid::DCE and version QUuid::Sha1.
\a ns is the namespace and \a baseData is the basic data as described by RFC 4122.
\sa variant(), version(), createUuidV3()
*/
/*!
\since 5.0
\fn QUuid QUuid::createUuidV5(const QUuid &ns, const QString &baseData);
This function returns a new UUID with variant QUuid::DCE and version QUuid::Sha1.
\a ns is the namespace and \a baseData is the basic data as described by RFC 4122.
\sa variant(), version(), createUuidV3()
*/
#ifndef QT_BOOTSTRAPPED
QUuid QUuid::createUuidV3(const QUuid &ns, const QByteArray &baseData)
{
return createFromName(ns, baseData, QCryptographicHash::Md5, 3);
}
QUuid QUuid::createUuidV5(const QUuid &ns, const QByteArray &baseData)
{
return createFromName(ns, baseData, QCryptographicHash::Sha1, 5);
}
#endif
/*!
Creates a QUuid object from the binary representation of the UUID, as
specified by RFC 4122 section 4.1.2. See toRfc4122() for a further
explanation of the order of \a bytes required.
    The accepted byte array is NOT in a human-readable format.
If the conversion fails, a null UUID is created.
\since 4.8
\sa toRfc4122(), QUuid()
*/
QUuid QUuid::fromRfc4122(const QByteArray &bytes)
{
if (bytes.isEmpty() || bytes.length() != 16)
return QUuid();
uint d1;
ushort d2, d3;
uchar d4[8];
const uchar *data = reinterpret_cast<const uchar *>(bytes.constData());
d1 = qFromBigEndian<quint32>(data);
data += sizeof(quint32);
d2 = qFromBigEndian<quint16>(data);
data += sizeof(quint16);
d3 = qFromBigEndian<quint16>(data);
data += sizeof(quint16);
for (int i = 0; i < 8; ++i) {
d4[i] = *(data);
data++;
}
return QUuid(d1, d2, d3, d4[0], d4[1], d4[2], d4[3], d4[4], d4[5], d4[6], d4[7]);
}
/*!
\fn bool QUuid::operator==(const QUuid &other) const
Returns \c true if this QUuid and the \a other QUuid are identical;
otherwise returns \c false.
*/
/*!
\fn bool QUuid::operator!=(const QUuid &other) const
Returns \c true if this QUuid and the \a other QUuid are different;
otherwise returns \c false.
*/
/*!
Returns the string representation of this QUuid. The string is
formatted as five hex fields separated by '-' and enclosed in
curly braces, i.e., "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" where
'x' is a hex digit. From left to right, the five hex fields are
obtained from the four public data members in QUuid as follows:
\table
\header
\li Field #
\li Source
\row
\li 1
\li data1
\row
\li 2
\li data2
\row
\li 3
\li data3
\row
\li 4
\li data4[0] .. data4[1]
\row
\li 5
\li data4[2] .. data4[7]
\endtable
*/
QString QUuid::toString() const
{
QString result(38, Qt::Uninitialized);
ushort *data = (ushort *)result.unicode();
_q_uuidToHex(data, data1, data2, data3, data4);
return result;
}
/*!
Returns the binary representation of this QUuid. The byte array is
formatted as five hex fields separated by '-' and enclosed in
curly braces, i.e., "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" where
'x' is a hex digit. From left to right, the five hex fields are
obtained from the four public data members in QUuid as follows:
\table
\header
\li Field #
\li Source
\row
\li 1
\li data1
\row
\li 2
\li data2
\row
\li 3
\li data3
\row
\li 4
\li data4[0] .. data4[1]
\row
\li 5
\li data4[2] .. data4[7]
\endtable
\since 4.8
*/
QByteArray QUuid::toByteArray() const
{
QByteArray result(38, Qt::Uninitialized);
char *data = result.data();
_q_uuidToHex(data, data1, data2, data3, data4);
return result;
}
/*!
Returns the binary representation of this QUuid. The byte array is in big
endian format, and formatted according to RFC 4122, section 4.1.2 -
"Layout and byte order".
The order is as follows:
\table
\header
\li Field #
\li Source
\row
\li 1
\li data1
\row
\li 2
\li data2
\row
\li 3
\li data3
\row
\li 4
\li data4[0] .. data4[7]
\endtable
\since 4.8
*/
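/*
    Illustrative sketch (not part of the Qt sources): the RFC 4122 byte
    layout round-trips losslessly through fromRfc4122().

    QUuid u = QUuid::createUuid();
    QByteArray raw = u.toRfc4122();         // 16 bytes, big endian
    Q_ASSERT(QUuid::fromRfc4122(raw) == u);
*/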
QByteArray QUuid::toRfc4122() const
{
// we know how many bytes a UUID has, I hope :)
QByteArray bytes(16, Qt::Uninitialized);
uchar *data = reinterpret_cast<uchar*>(bytes.data());
qToBigEndian(data1, data);
data += sizeof(quint32);
qToBigEndian(data2, data);
data += sizeof(quint16);
qToBigEndian(data3, data);
data += sizeof(quint16);
for (int i = 0; i < 8; ++i) {
*(data) = data4[i];
data++;
}
return bytes;
}
#ifndef QT_NO_DATASTREAM
/*!
\relates QUuid
Writes the UUID \a id to the data stream \a s.
*/
QDataStream &operator<<(QDataStream &s, const QUuid &id)
{
QByteArray bytes;
if (s.byteOrder() == QDataStream::BigEndian) {
bytes = id.toRfc4122();
} else {
// we know how many bytes a UUID has, I hope :)
bytes = QByteArray(16, Qt::Uninitialized);
uchar *data = reinterpret_cast<uchar*>(bytes.data());
qToLittleEndian(id.data1, data);
data += sizeof(quint32);
qToLittleEndian(id.data2, data);
data += sizeof(quint16);
qToLittleEndian(id.data3, data);
data += sizeof(quint16);
for (int i = 0; i < 8; ++i) {
*(data) = id.data4[i];
data++;
}
}
if (s.writeRawData(bytes.data(), 16) != 16) {
s.setStatus(QDataStream::WriteFailed);
}
return s;
}
/*!
\relates QUuid
Reads a UUID from the stream \a s into \a id.
*/
QDataStream &operator>>(QDataStream &s, QUuid &id)
{
QByteArray bytes(16, Qt::Uninitialized);
if (s.readRawData(bytes.data(), 16) != 16) {
s.setStatus(QDataStream::ReadPastEnd);
return s;
}
if (s.byteOrder() == QDataStream::BigEndian) {
id = QUuid::fromRfc4122(bytes);
} else {
const uchar *data = reinterpret_cast<const uchar *>(bytes.constData());
id.data1 = qFromLittleEndian<quint32>(data);
data += sizeof(quint32);
id.data2 = qFromLittleEndian<quint16>(data);
data += sizeof(quint16);
id.data3 = qFromLittleEndian<quint16>(data);
data += sizeof(quint16);
for (int i = 0; i < 8; ++i) {
id.data4[i] = *(data);
data++;
}
}
return s;
}
#endif // QT_NO_DATASTREAM
/*!
Returns \c true if this is the null UUID
{00000000-0000-0000-0000-000000000000}; otherwise returns \c false.
*/
bool QUuid::isNull() const
{
return data4[0] == 0 && data4[1] == 0 && data4[2] == 0 && data4[3] == 0 &&
data4[4] == 0 && data4[5] == 0 && data4[6] == 0 && data4[7] == 0 &&
data1 == 0 && data2 == 0 && data3 == 0;
}
/*!
\enum QUuid::Variant
This enum defines the values used in the \l{Variant field}
{variant field} of the UUID. The value in the variant field
determines the layout of the 128-bit value.
\value VarUnknown Variant is unknown
\value NCS Reserved for NCS (Network Computing System) backward compatibility
\value DCE Distributed Computing Environment, the scheme used by QUuid
\value Microsoft Reserved for Microsoft backward compatibility (GUID)
\value Reserved Reserved for future definition
*/
/*!
\enum QUuid::Version
This enum defines the values used in the \l{Version field}
{version field} of the UUID. The version field is meaningful
only if the value in the \l{Variant field} {variant field}
is QUuid::DCE.
\value VerUnknown Version is unknown
\value Time Time-based, by using timestamp, clock sequence, and
MAC network card address (if available) for the node sections
\value EmbeddedPOSIX DCE Security version, with embedded POSIX UUIDs
\value Name Name-based, by using values from a name for all sections
\value Md5 Alias for Name
\value Random Random-based, by using random numbers for all sections
\value Sha1
*/
/*!
\fn QUuid::Variant QUuid::variant() const
Returns the value in the \l{Variant field} {variant field} of the
UUID. If the return value is QUuid::DCE, call version() to see
which layout it uses. The null UUID is considered to be of an
unknown variant.
\sa version()
*/
QUuid::Variant QUuid::variant() const
{
if (isNull())
return VarUnknown;
// Check the 3 MSB of data4[0]
if ((data4[0] & 0x80) == 0x00) return NCS;
else if ((data4[0] & 0xC0) == 0x80) return DCE;
else if ((data4[0] & 0xE0) == 0xC0) return Microsoft;
else if ((data4[0] & 0xE0) == 0xE0) return Reserved;
return VarUnknown;
}
/*!
\fn QUuid::Version QUuid::version() const
Returns the \l{Version field} {version field} of the UUID, if the
UUID's \l{Variant field} {variant field} is QUuid::DCE. Otherwise
it returns QUuid::VerUnknown.
\sa variant()
*/
QUuid::Version QUuid::version() const
{
// Check the 4 MSB of data3
Version ver = (Version)(data3>>12);
if (isNull()
|| (variant() != DCE)
|| ver < Time
|| ver > Sha1)
return VerUnknown;
return ver;
}
/*!
\fn bool QUuid::operator<(const QUuid &other) const
Returns \c true if this QUuid has the same \l{Variant field}
{variant field} as the \a other QUuid and is lexicographically
\e{before} the \a other QUuid. If the \a other QUuid has a
different variant field, the return value is determined by
comparing the two \l{QUuid::Variant} {variants}.
\sa variant()
*/
#define ISLESS(f1, f2) if (f1!=f2) return (f1<f2);
bool QUuid::operator<(const QUuid &other) const
{
if (variant() != other.variant())
return variant() < other.variant();
ISLESS(data1, other.data1);
ISLESS(data2, other.data2);
ISLESS(data3, other.data3);
for (int n = 0; n < 8; n++) {
ISLESS(data4[n], other.data4[n]);
}
return false;
}
/*!
\fn bool QUuid::operator>(const QUuid &other) const
Returns \c true if this QUuid has the same \l{Variant field}
{variant field} as the \a other QUuid and is lexicographically
\e{after} the \a other QUuid. If the \a other QUuid has a
different variant field, the return value is determined by
comparing the two \l{QUuid::Variant} {variants}.
\sa variant()
*/
#define ISMORE(f1, f2) if (f1!=f2) return (f1>f2);
bool QUuid::operator>(const QUuid &other) const
{
if (variant() != other.variant())
return variant() > other.variant();
ISMORE(data1, other.data1);
ISMORE(data2, other.data2);
ISMORE(data3, other.data3);
for (int n = 0; n < 8; n++) {
ISMORE(data4[n], other.data4[n]);
}
return false;
}
/*!
\fn QUuid QUuid::createUuid()
On any platform other than Windows, this function returns a new
UUID with variant QUuid::DCE and version QUuid::Random. If
the /dev/urandom device exists, then the numbers used to construct
the UUID will be of cryptographic quality, which will make the UUID
unique. Otherwise, the numbers of the UUID will be obtained from
the local pseudo-random number generator (qrand(), which is seeded
    by qsrand()), which is usually not of cryptographic quality, which
means that the UUID can't be guaranteed to be unique.
On a Windows platform, a GUID is generated, which almost certainly
\e{will} be unique, on this or any other system, networked or not.
\sa variant(), version()
*/
#if defined(Q_OS_WIN32)
QT_BEGIN_INCLUDE_NAMESPACE
#include <objbase.h> // For CoCreateGuid
QT_END_INCLUDE_NAMESPACE
QUuid QUuid::createUuid()
{
GUID guid;
CoCreateGuid(&guid);
QUuid result = guid;
return result;
}
#else // !Q_OS_WIN32
QT_BEGIN_INCLUDE_NAMESPACE
#include "qdatetime.h"
#include "qfile.h"
#include "qthreadstorage.h"
#include <stdlib.h> // for RAND_MAX
QT_END_INCLUDE_NAMESPACE
#if !defined(QT_BOOTSTRAPPED) && defined(Q_OS_UNIX)
Q_GLOBAL_STATIC(QThreadStorage<QFile *>, devUrandomStorage);
#endif
QUuid QUuid::createUuid()
{
QUuid result;
uint *data = &(result.data1);
#if defined(Q_OS_UNIX)
QFile *devUrandom;
# if !defined(QT_BOOTSTRAPPED)
devUrandom = devUrandomStorage()->localData();
if (!devUrandom) {
devUrandom = new QFile(QLatin1String("/dev/urandom"));
devUrandom->open(QIODevice::ReadOnly | QIODevice::Unbuffered);
devUrandomStorage()->setLocalData(devUrandom);
}
# else
QFile file(QLatin1String("/dev/urandom"));
devUrandom = &file;
devUrandom->open(QIODevice::ReadOnly | QIODevice::Unbuffered);
# endif
enum { AmountToRead = 4 * sizeof(uint) };
if (devUrandom->isOpen()
&& devUrandom->read((char *) data, AmountToRead) == AmountToRead) {
// we got what we wanted, nothing more to do
;
} else
#endif
{
static const int intbits = sizeof(int)*8;
static int randbits = 0;
if (!randbits) {
int r = 0;
int max = RAND_MAX;
do { ++r; } while ((max=max>>1));
randbits = r;
}
// Seed the PRNG once per thread with a combination of current time, a
// stack address and a serial counter (since thread stack addresses are
// re-used).
#ifndef QT_BOOTSTRAPPED
static QThreadStorage<int *> uuidseed;
if (!uuidseed.hasLocalData())
{
int *pseed = new int;
static QBasicAtomicInt serial = Q_BASIC_ATOMIC_INITIALIZER(2);
qsrand(*pseed = QDateTime::currentDateTime().toTime_t()
+ quintptr(&pseed)
+ serial.fetchAndAddRelaxed(1));
uuidseed.setLocalData(pseed);
}
#else
static bool seeded = false;
if (!seeded)
qsrand(QDateTime::currentDateTime().toTime_t()
+ quintptr(&seeded));
#endif
int chunks = 16 / sizeof(uint);
while (chunks--) {
uint randNumber = 0;
for (int filled = 0; filled < intbits; filled += randbits)
randNumber |= qrand()<<filled;
*(data+chunks) = randNumber;
}
}
result.data4[0] = (result.data4[0] & 0x3F) | 0x80; // UV_DCE
result.data3 = (result.data3 & 0x0FFF) | 0x4000; // UV_Random
return result;
}
#endif // !Q_OS_WIN32
/*!
\fn bool QUuid::operator==(const GUID &guid) const
Returns \c true if this UUID is equal to the Windows GUID \a guid;
otherwise returns \c false.
*/
/*!
\fn bool QUuid::operator!=(const GUID &guid) const
Returns \c true if this UUID is not equal to the Windows GUID \a
guid; otherwise returns \c false.
*/
#ifndef QT_NO_DEBUG_STREAM
/*!
\relates QUuid
Writes the UUID \a id to the output stream for debugging information \a dbg.
*/
QDebug operator<<(QDebug dbg, const QUuid &id)
{
dbg.nospace() << "QUuid(" << id.toString() << ')';
return dbg.space();
}
#endif
/*!
\since 5.0
\relates QUuid
Returns a hash of the UUID \a uuid, using \a seed to seed the calculation.
*/
uint qHash(const QUuid &uuid, uint seed) Q_DECL_NOTHROW
{
return uuid.data1 ^ uuid.data2 ^ (uuid.data3 << 16)
^ ((uuid.data4[0] << 24) | (uuid.data4[1] << 16) | (uuid.data4[2] << 8) | uuid.data4[3])
^ ((uuid.data4[4] << 24) | (uuid.data4[5] << 16) | (uuid.data4[6] << 8) | uuid.data4[7])
^ seed;
}
QT_END_NAMESPACE
| bsd-3-clause |
vousk/jsdelivr | files/jquery.lazyloadxt/1.0.4/jquery.lazyloadxt.spinner.min.css | 104 | /* Lazy Load XT 1.0.4 | MIT License */
.lazy-hidden{background:#eee url(loading.gif) no-repeat 50% 50%} | mit |
aaronryden/DefinitelyTyped | types/asynciterator/asynciterator-tests.ts | 7358 | import { ArrayIterator, AsyncIterator, BufferedIterator, ClonedIterator, EmptyIterator, IntegerIterator,
MultiTransformIterator, SingletonIterator, SimpleTransformIterator, TransformIterator } from "asynciterator";
function test_asynciterator() {
// We can't instantiate an abstract class.
const it1: AsyncIterator<number> = <any> {};
const read1: number = it1.read();
it1.each((data: number) => console.log(data));
it1.each((data: number) => console.log(data), {});
it1.close();
const it2: AsyncIterator<string> = <any> {};
const read2: string = it2.read();
it2.each((data: string) => console.log(data));
it2.each((data: string) => console.log(data), {});
it2.close();
const it3: AsyncIterator<AsyncIterator<string>> = <any> {};
const read3: AsyncIterator<string> = it3.read();
it3.each((data: AsyncIterator<string>) => data.each((data: string) => console.log(data)));
it3.each((data: AsyncIterator<string>) => data.each((data: string) => console.log(data), {}), {});
it3.close();
const readable2: boolean = it1.readable;
const closed2: boolean = it1.closed;
const ended2: boolean = it1.ended;
it1.setProperty('name1', 123);
it2.setProperty('name2', 'someValue');
const p1: number = it1.getProperty('name1');
const p2: string = it1.getProperty('name2');
it1.getProperty('name1', (value: number) => console.log(value));
it1.getProperty('name2', (value: string) => console.log(value));
const ps1: {[id: string]: any} = it1.getProperties();
it1.setProperties({ name1: 1234, name2: 'someOtherValue' });
it1.copyProperties(it2, [ 'name1', 'name2' ]);
const str: string = it1.toString();
const stit1: SimpleTransformIterator<number, string> = it1.transform();
const stit2: SimpleTransformIterator<number, string> = it1.map((number: number) => 'i' + number);
const stit3: AsyncIterator<string> = it1.map((number: number) => 'i' + number);
const stit4: AsyncIterator<number> = it2.map(parseInt);
const stit5: AsyncIterator<number> = it1.map((number: number) => number + 1);
const stit6: AsyncIterator<number> = it1.filter((number: number) => number < 10);
const stit7: AsyncIterator<number> = it1.prepend([0, 1, 2]);
const stit8: AsyncIterator<number> = it1.append([0, 1, 2]);
const stit9: AsyncIterator<number> = it1.surround([0, 1, 2], [0, 1, 2]);
const stit10: AsyncIterator<number> = it1.skip(2);
const stit11: AsyncIterator<number> = it1.take(2);
const stit12: AsyncIterator<number> = it1.range(2, 20);
const stit13: AsyncIterator<number> = it1.clone();
const intit1: IntegerIterator = AsyncIterator.range(10, 100, 1);
const intit2: IntegerIterator = AsyncIterator.range(10, 100);
const intit3: IntegerIterator = AsyncIterator.range(10);
const intit4: IntegerIterator = AsyncIterator.range();
}
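// Illustrative usage sketch (not part of the published tests): the typed
// helpers compose, e.g. new ArrayIterator([1, 2, 3]).map(n => n * 2) is an
// AsyncIterator<number> that can be drained with each(console.log).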
function test_emptyiterator() {
const it1: AsyncIterator<number> = new EmptyIterator();
const it2: AsyncIterator<string> = new EmptyIterator();
}
function test_singletoniterator() {
const it1: AsyncIterator<number> = new SingletonIterator(3);
const it2: AsyncIterator<string> = new SingletonIterator('a');
}
function test_arrayiterator() {
const it1: AsyncIterator<number> = new ArrayIterator([1, 2, 3]);
const it2: AsyncIterator<string> = new ArrayIterator(['a', 'b', 'c']);
}
function test_integeriterator() {
const it1: IntegerIterator = new IntegerIterator();
const it2: AsyncIterator<number> = new IntegerIterator({});
const it3: AsyncIterator<number> = new IntegerIterator({ start: 0 });
const it4: AsyncIterator<number> = new IntegerIterator({ end: 100 });
const it5: AsyncIterator<number> = new IntegerIterator({ step: 10 });
}
function test_bufferediterator() {
const it1: BufferedIterator<number> = new BufferedIterator();
const it2: AsyncIterator<number> = new BufferedIterator({});
const it3: AsyncIterator<number> = new BufferedIterator({ maxBufferSize: 10 });
const it4: AsyncIterator<number> = new BufferedIterator({ autoStart: true });
}
function test_transformiterator() {
const it1: TransformIterator<number, string> = new TransformIterator<number, string>();
const it2: AsyncIterator<string> = new TransformIterator<number, string>();
const it3: AsyncIterator<number> = new TransformIterator<string, number>(it1);
const it4: AsyncIterator<number> = new TransformIterator<string, number>(it1, {});
const it5: AsyncIterator<number> = new TransformIterator<string, number>(it1, { optional: true });
const it6: AsyncIterator<number> = new TransformIterator<string, number>({ source: it1 });
const source: AsyncIterator<number> = it1.source;
}
function test_simpletransformiterator() {
const it1: SimpleTransformIterator<number, string> = new SimpleTransformIterator<number, string>();
const it2: TransformIterator<number, string> = new SimpleTransformIterator<number, string>();
const it3: AsyncIterator<string> = new SimpleTransformIterator<number, string>();
const it4: AsyncIterator<number> = new SimpleTransformIterator<string, number>(it1);
const it5: AsyncIterator<number> = new SimpleTransformIterator<string, number>(it1, {});
const it6: AsyncIterator<number> = new SimpleTransformIterator<string, number>({});
const it7: AsyncIterator<number> = new SimpleTransformIterator<string, number>({ optional: true });
const it8: AsyncIterator<number> = new SimpleTransformIterator<string, number>({ source: it1 });
const it9: AsyncIterator<number> = new SimpleTransformIterator<string, number>({ offset: 2 });
const it10: AsyncIterator<number> = new SimpleTransformIterator<string, number>({ limit: 2 });
const it11: AsyncIterator<number> = new SimpleTransformIterator<string, number>({ prepend: [0, 1, 2] });
const it12: AsyncIterator<number> = new SimpleTransformIterator<string, number>({ append: [0, 1, 2] });
const it13: AsyncIterator<number> = new SimpleTransformIterator<number, number>(
{ filter: (val: number) => val > 10 });
const it14: AsyncIterator<number> = new SimpleTransformIterator<number, number>({ map: (val: number) => val + 1 });
const it15: AsyncIterator<number> = new SimpleTransformIterator<number, number>(
{ transform: (val: number, cb: (result: number) => void) => cb(val + 1) });
}
function test_multitransformiterator() {
const it1: MultiTransformIterator<number, string> = new MultiTransformIterator<number, string>();
const it2: TransformIterator<number, string> = new MultiTransformIterator<number, string>();
const it3: AsyncIterator<string> = new MultiTransformIterator<number, string>();
const it4: AsyncIterator<number> = new MultiTransformIterator<string, number>(it1);
const it5: AsyncIterator<number> = new MultiTransformIterator<string, number>(it1, {});
const it6: AsyncIterator<number> = new MultiTransformIterator<string, number>({});
const it7: AsyncIterator<number> = new MultiTransformIterator<string, number>({ optional: true });
const it8: AsyncIterator<number> = new MultiTransformIterator<string, number>({ source: it1 });
}
function test_clonediterator() {
const it1: ClonedIterator<number> = new ClonedIterator<number>();
const it2: ClonedIterator<number> = new ClonedIterator<number>(it1);
}
| mit |
qtekfun/htcDesire820Kernel | kernel/drivers/platform/msm/msm_bus/msm_bus_adhoc.h | 3960 | /* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/msm-bus-board.h>
#include <linux/msm-bus.h>
#include <linux/msm_bus_rules.h>
#include "msm_bus_core.h"
struct msm_bus_node_device_type;
struct link_node {
uint64_t lnode_ib[NUM_CTX];
uint64_t lnode_ab[NUM_CTX];
int next;
struct device *next_dev;
struct list_head link;
uint32_t in_use;
};
/* New types introduced for adhoc topology */
struct msm_bus_noc_ops {
int (*qos_init)(struct msm_bus_node_device_type *dev,
void __iomem *qos_base, uint32_t qos_off,
uint32_t qos_delta, uint32_t qos_freq);
int (*set_bw)(struct msm_bus_node_device_type *dev,
void __iomem *qos_base, uint32_t qos_off,
uint32_t qos_delta, uint32_t qos_freq);
int (*limit_mport)(struct msm_bus_node_device_type *dev,
void __iomem *qos_base, uint32_t qos_off,
uint32_t qos_delta, uint32_t qos_freq, bool enable_lim,
uint64_t lim_bw);
};
struct nodebw {
uint64_t ab[NUM_CTX];
bool dirty;
};
struct msm_bus_fab_device_type {
void __iomem *qos_base;
phys_addr_t pqos_base;
size_t qos_range;
uint32_t base_offset;
uint32_t qos_freq;
uint32_t qos_off;
struct msm_bus_noc_ops noc_ops;
enum msm_bus_hw_sel bus_type;
bool bypass_qos_prg;
};
struct qos_params_type {
int mode;
unsigned int prio_lvl;
unsigned int prio_rd;
unsigned int prio_wr;
unsigned int prio1;
unsigned int prio0;
unsigned int gp;
unsigned int thmp;
unsigned int ws;
int cur_mode;
};
struct msm_bus_node_info_type {
const char *name;
unsigned int id;
int mas_rpm_id;
int slv_rpm_id;
int num_ports;
int num_qports;
int *qport;
struct qos_params_type qos_params;
unsigned int num_connections;
bool is_fab_dev;
bool virt_dev;
unsigned int *connections;
struct device **dev_connections;
unsigned int bus_device_id;
struct device *bus_device;
unsigned int buswidth;
struct rule_update_path_info rule;
uint64_t lim_bw;
};
struct msm_bus_node_device_type {
struct msm_bus_node_info_type *node_info;
struct msm_bus_fab_device_type *fabdev;
int num_lnodes;
struct link_node *lnode_list;
uint64_t cur_clk_hz[NUM_CTX];
struct nodebw node_ab;
struct list_head link;
unsigned int ap_owned;
struct nodeclk clk[NUM_CTX];
struct nodeclk qos_clk;
};
int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
bool throttle_en, uint64_t lim_bw);
int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev,
int ctx, int **dirty_nodes, int *num_dirty);
int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty);
int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx,
int64_t add_bw, int **dirty_nodes, int *num_dirty);
void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
size_t new_size, gfp_t flags);
extern struct msm_bus_device_node_registration
*msm_bus_of_to_pdata(struct platform_device *pdev);
extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
struct bus_rule_type **static_rule);
extern int msm_rules_update_path(struct list_head *input_list,
struct list_head *output_list);
extern void print_all_rules(void);
#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
| gpl-2.0 |
marsorp/blog | presto166/presto-benchmark/src/main/java/com/facebook/presto/benchmark/AverageBenchmarkResults.java | 1837 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.benchmark;
import com.google.common.collect.Maps;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import static java.util.Objects.requireNonNull;
public class AverageBenchmarkResults
implements BenchmarkResultHook
{
private final Map<String, Long> resultsSum = new LinkedHashMap<>();
private int resultsCount;
@Override
public BenchmarkResultHook addResults(Map<String, Long> results)
{
requireNonNull(results, "results is null");
for (Entry<String, Long> entry : results.entrySet()) {
Long currentSum = resultsSum.get(entry.getKey());
if (currentSum == null) {
currentSum = 0L;
}
resultsSum.put(entry.getKey(), currentSum + entry.getValue());
}
resultsCount++;
return this;
}
public Map<String, Double> getAverageResultsValues()
{
return Maps.transformValues(resultsSum, input -> 1.0 * input / resultsCount);
}
public Map<String, String> getAverageResultsStrings()
{
return Maps.transformValues(resultsSum, input -> String.format("%,3.2f", 1.0 * input / resultsCount));
}
@Override
public void finished()
{
}
}
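// Usage sketch (illustrative): after addResults with {"q1": 100} and then
// {"q1": 200}, getAverageResultsStrings() returns {"q1": "150.00"} given
// the "%,3.2f" format above.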
| apache-2.0 |
dperezde/little-penguin | linux-eudyptula/drivers/net/wireless/mwifiex/sdio.c | 58635 | /*
* Marvell Wireless LAN device driver: SDIO specific handling
*
* Copyright (C) 2011-2014, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include <linux/firmware.h>
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "sdio.h"
#define SDIO_VERSION "1.0"
/* The mwifiex_sdio_remove() callback function is called when the
 * user removes this module from kernel space or ejects
 * the card from the slot. The driver handles these two cases
 * differently.
 * If the user is removing the module, a few commands (FUNC_SHUTDOWN,
 * HS_CANCEL etc.) are sent to the firmware.
 * If the card is removed, there is no need to send these commands.
 *
 * The variable 'user_rmmod' is used to distinguish these two
 * scenarios. This flag is initialized as FALSE in case the card
 * is removed, and is set to TRUE for module removal when the
 * module_exit function is called.
 */
static u8 user_rmmod;
static struct mwifiex_if_ops sdio_ops;
static struct semaphore add_remove_card_sem;
static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"ITCM", NULL, 0, 0xF0},
{"DTCM", NULL, 0, 0xF1},
{"SQRAM", NULL, 0, 0xF2},
{"APU", NULL, 0, 0xF3},
{"CIU", NULL, 0, 0xF4},
{"ICU", NULL, 0, 0xF5},
{"MAC", NULL, 0, 0xF6},
{"EXT7", NULL, 0, 0xF7},
{"EXT8", NULL, 0, 0xF8},
{"EXT9", NULL, 0, 0xF9},
{"EXT10", NULL, 0, 0xFA},
{"EXT11", NULL, 0, 0xFB},
{"EXT12", NULL, 0, 0xFC},
{"EXT13", NULL, 0, 0xFD},
{"EXTLAST", NULL, 0, 0xFE},
};
/*
* SDIO probe.
*
* This function probes an mwifiex device and registers it. It allocates
* the card structure, enables SDIO function number and initiates the
* device registration and initialization procedure by adding a logical
* interface.
*/
static int
mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
int ret;
struct sdio_mmc_card *card = NULL;
pr_debug("info: vendor=0x%4.04X device=0x%4.04X class=%d function=%d\n",
func->vendor, func->device, func->class, func->num);
card = kzalloc(sizeof(struct sdio_mmc_card), GFP_KERNEL);
if (!card)
return -ENOMEM;
card->func = func;
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
if (id->driver_data) {
struct mwifiex_sdio_device *data = (void *)id->driver_data;
card->firmware = data->firmware;
card->reg = data->reg;
card->max_ports = data->max_ports;
card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
card->supports_sdio_new_mode = data->supports_sdio_new_mode;
card->has_control_mask = data->has_control_mask;
card->tx_buf_size = data->tx_buf_size;
card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
card->supports_fw_dump = data->supports_fw_dump;
}
sdio_claim_host(func);
ret = sdio_enable_func(func);
sdio_release_host(func);
if (ret) {
pr_err("%s: failed to enable function\n", __func__);
kfree(card);
return -EIO;
}
if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
MWIFIEX_SDIO)) {
pr_err("%s: add card failed\n", __func__);
kfree(card);
sdio_claim_host(func);
ret = sdio_disable_func(func);
sdio_release_host(func);
ret = -1;
}
return ret;
}
/*
* SDIO resume.
*
 * The kernel needs to suspend all functions separately. Therefore all
 * registered functions must have drivers with suspend and resume
 * methods. Failing that, the kernel simply removes the whole card.
*
* If already not resumed, this function turns on the traffic and
* sends a host sleep cancel request to the firmware.
*/
static int mwifiex_sdio_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
if (func) {
pm_flag = sdio_get_host_pm_caps(func);
card = sdio_get_drvdata(func);
if (!card || !card->adapter) {
pr_err("resume: invalid card or adapter\n");
return 0;
}
} else {
pr_err("resume: sdio_func is not specified\n");
return 0;
}
adapter = card->adapter;
if (!adapter->is_suspended) {
dev_warn(adapter->dev, "device already resumed\n");
return 0;
}
adapter->is_suspended = false;
/* Disable Host Sleep */
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_ASYNC_CMD);
return 0;
}
/*
* SDIO remove.
*
* This function removes the interface and frees up the card structure.
*/
static void
mwifiex_sdio_remove(struct sdio_func *func)
{
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
pr_debug("info: SDIO func num=%d\n", func->num);
card = sdio_get_drvdata(func);
if (!card)
return;
adapter = card->adapter;
if (!adapter || !adapter->priv_num)
return;
cancel_work_sync(&adapter->iface_work);
if (user_rmmod) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_disable_auto_ds(priv);
mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
}
mwifiex_remove_card(card->adapter, &add_remove_card_sem);
}
/*
* SDIO suspend.
*
 * The kernel needs to suspend all functions separately. Therefore all
 * registered functions must have drivers with suspend and resume
 * methods. Failing that, the kernel simply removes the whole card.
*
* If already not suspended, this function allocates and sends a host
* sleep activate request to the firmware and turns off the traffic.
*/
static int mwifiex_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
mmc_pm_flag_t pm_flag = 0;
int ret = 0;
if (func) {
pm_flag = sdio_get_host_pm_caps(func);
pr_debug("cmd: %s: suspend: PM flag = 0x%x\n",
sdio_func_id(func), pm_flag);
if (!(pm_flag & MMC_PM_KEEP_POWER)) {
pr_err("%s: cannot remain alive while host is"
" suspended\n", sdio_func_id(func));
return -ENOSYS;
}
card = sdio_get_drvdata(func);
if (!card || !card->adapter) {
pr_err("suspend: invalid card or adapter\n");
return 0;
}
} else {
pr_err("suspend: sdio_func is not specified\n");
return 0;
}
adapter = card->adapter;
/* Enable the Host Sleep */
if (!mwifiex_enable_hs(adapter)) {
dev_err(adapter->dev, "cmd: failed to suspend\n");
adapter->hs_enabling = false;
return -EFAULT;
}
dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
/* Indicate device suspended */
adapter->is_suspended = true;
adapter->hs_enabling = false;
return ret;
}
/* Device ID for SD8786 */
#define SDIO_DEVICE_ID_MARVELL_8786 (0x9116)
/* Device ID for SD8787 */
#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
/* Device ID for SD8797 */
#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
/* Device ID for SD8897 */
#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d)
/* WLAN IDs */
static const struct sdio_device_id mwifiex_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786),
.driver_data = (unsigned long) &mwifiex_sdio_sd8786},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787),
.driver_data = (unsigned long) &mwifiex_sdio_sd8787},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797),
.driver_data = (unsigned long) &mwifiex_sdio_sd8797},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897),
.driver_data = (unsigned long) &mwifiex_sdio_sd8897},
{},
};
MODULE_DEVICE_TABLE(sdio, mwifiex_ids);
static const struct dev_pm_ops mwifiex_sdio_pm_ops = {
.suspend = mwifiex_sdio_suspend,
.resume = mwifiex_sdio_resume,
};
static struct sdio_driver mwifiex_sdio = {
.name = "mwifiex_sdio",
.id_table = mwifiex_ids,
.probe = mwifiex_sdio_probe,
.remove = mwifiex_sdio_remove,
.drv = {
.owner = THIS_MODULE,
.pm = &mwifiex_sdio_pm_ops,
}
};
/* Write data into SDIO card register. Caller claims SDIO device. */
static int
mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
{
int ret = -1;
sdio_writeb(func, data, reg, &ret);
return ret;
}
/*
* This function writes data into SDIO card register.
*/
static int
mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
{
struct sdio_mmc_card *card = adapter->card;
int ret;
sdio_claim_host(card->func);
ret = mwifiex_write_reg_locked(card->func, reg, data);
sdio_release_host(card->func);
return ret;
}
/*
* This function reads data from SDIO card register.
*/
static int
mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data)
{
struct sdio_mmc_card *card = adapter->card;
int ret = -1;
u8 val;
sdio_claim_host(card->func);
val = sdio_readb(card->func, reg, &ret);
sdio_release_host(card->func);
*data = val;
return ret;
}
/*
* This function writes multiple data into SDIO card memory.
*
* This does not work in suspended mode.
*/
static int
mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
u8 *buffer, u32 pkt_len, u32 port)
{
struct sdio_mmc_card *card = adapter->card;
int ret;
u8 blk_mode =
(port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
	u32 blk_cnt = (blk_mode == BLOCK_MODE) ?
		      (pkt_len / MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
if (adapter->is_suspended) {
dev_err(adapter->dev,
"%s: not allowed while suspended\n", __func__);
return -1;
}
sdio_claim_host(card->func);
ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size);
sdio_release_host(card->func);
return ret;
}
/*
* This function reads multiple data from SDIO card memory.
*/
static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
u32 len, u32 port, u8 claim)
{
struct sdio_mmc_card *card = adapter->card;
int ret;
u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
: BLOCK_MODE;
u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE)
: len;
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
if (claim)
sdio_claim_host(card->func);
ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size);
if (claim)
sdio_release_host(card->func);
return ret;
}
/*
* This function wakes up the card.
*
* A host power up command is written to the card configuration
* register to wake up the card.
*/
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
dev_dbg(adapter->dev, "event: wakeup device...\n");
return mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP);
}
/*
* This function is called after the card has woken up.
*
* The card configuration register is reset.
*/
static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
{
dev_dbg(adapter->dev, "cmd: wakeup device completed\n");
return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
}
/*
* This function is used to initialize IO ports for the
* chipsets supporting SDIO new mode eg SD8897.
*/
static int mwifiex_init_sdio_new_mode(struct mwifiex_adapter *adapter)
{
u8 reg;
adapter->ioport = MEM_PORT;
/* enable sdio new mode */
if (mwifiex_read_reg(adapter, CARD_CONFIG_2_1_REG, ®))
return -1;
if (mwifiex_write_reg(adapter, CARD_CONFIG_2_1_REG,
reg | CMD53_NEW_MODE))
return -1;
/* Configure cmd port and enable reading rx length from the register */
if (mwifiex_read_reg(adapter, CMD_CONFIG_0, ®))
return -1;
if (mwifiex_write_reg(adapter, CMD_CONFIG_0, reg | CMD_PORT_RD_LEN_EN))
return -1;
/* Enable Dnld/Upld ready auto reset for cmd port after cmd53 is
* completed
*/
if (mwifiex_read_reg(adapter, CMD_CONFIG_1, ®))
return -1;
if (mwifiex_write_reg(adapter, CMD_CONFIG_1, reg | CMD_PORT_AUTO_EN))
return -1;
return 0;
}
/* This function initializes the IO ports.
*
* The following operations are performed -
* - Read the IO ports (0, 1 and 2)
* - Set host interrupt Reset-To-Read to clear
* - Set auto re-enable interrupt
*/
static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
{
u8 reg;
struct sdio_mmc_card *card = adapter->card;
adapter->ioport = 0;
if (card->supports_sdio_new_mode) {
if (mwifiex_init_sdio_new_mode(adapter))
return -1;
goto cont;
}
/* Read the IO port */
if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, ®))
adapter->ioport |= (reg & 0xff);
else
return -1;
if (!mwifiex_read_reg(adapter, IO_PORT_1_REG, ®))
adapter->ioport |= ((reg & 0xff) << 8);
else
return -1;
if (!mwifiex_read_reg(adapter, IO_PORT_2_REG, ®))
adapter->ioport |= ((reg & 0xff) << 16);
else
return -1;
cont:
pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
/* Set Host interrupt reset to read to clear */
if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, ®))
mwifiex_write_reg(adapter, HOST_INT_RSR_REG,
reg | card->reg->sdio_int_mask);
else
return -1;
/* Dnld/Upld ready set to auto reset */
if (!mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, ®))
mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg,
reg | AUTO_RE_ENABLE_INT);
else
return -1;
return 0;
}
/*
* This function sends data to the card.
*/
static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
u8 *payload, u32 pkt_len, u32 port)
{
u32 i = 0;
int ret;
do {
ret = mwifiex_write_data_sync(adapter, payload, pkt_len, port);
if (ret) {
i++;
dev_err(adapter->dev, "host_to_card, write iomem"
" (%d) failed: %d\n", i, ret);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
dev_err(adapter->dev, "write CFG reg failed\n");
ret = -1;
if (i > MAX_WRITE_IOMEM_RETRY)
return ret;
}
} while (ret == -1);
return ret;
}
/*
* This function gets the read port.
*
* If control port bit is set in MP read bitmap, the control port
 * is returned, otherwise the current read port is returned and
 * the value is incremented, wrapping back to the start port once
 * it reaches the maximum limit.
*/
static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
{
struct sdio_mmc_card *card = adapter->card;
const struct mwifiex_sdio_card_reg *reg = card->reg;
u32 rd_bitmap = card->mp_rd_bitmap;
dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
if (card->supports_sdio_new_mode) {
if (!(rd_bitmap & reg->data_port_mask))
return -1;
} else {
if (!(rd_bitmap & (CTRL_PORT_MASK | reg->data_port_mask)))
return -1;
}
if ((card->has_control_mask) &&
(card->mp_rd_bitmap & CTRL_PORT_MASK)) {
card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK);
*port = CTRL_PORT;
dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n",
*port, card->mp_rd_bitmap);
return 0;
}
if (!(card->mp_rd_bitmap & (1 << card->curr_rd_port)))
return -1;
/* We are now handling the SDIO data ports */
card->mp_rd_bitmap &= (u32)(~(1 << card->curr_rd_port));
*port = card->curr_rd_port;
if (++card->curr_rd_port == card->max_ports)
card->curr_rd_port = reg->start_rd_port;
dev_dbg(adapter->dev,
"data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
*port, rd_bitmap, card->mp_rd_bitmap);
return 0;
}
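/*
 * Illustrative walk-through (assuming start_rd_port = 1, max_ports = 16
 * and no control bit set): with mp_rd_bitmap = 0x6 and curr_rd_port = 1,
 * the first call returns port 1 and clears bit 1, the next returns
 * port 2; curr_rd_port wraps back to start_rd_port after max_ports.
 */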
/*
* This function gets the write port for data.
*
 * The current write port is returned if available and the value is
 * incremented, wrapping back to the start port once it reaches the
 * maximum limit.
*/
static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
{
struct sdio_mmc_card *card = adapter->card;
const struct mwifiex_sdio_card_reg *reg = card->reg;
u32 wr_bitmap = card->mp_wr_bitmap;
dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
if (card->supports_sdio_new_mode &&
!(wr_bitmap & reg->data_port_mask)) {
adapter->data_sent = true;
return -EBUSY;
} else if (!card->supports_sdio_new_mode &&
!(wr_bitmap & card->mp_data_port_mask)) {
return -1;
}
if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) {
card->mp_wr_bitmap &= (u32) (~(1 << card->curr_wr_port));
*port = card->curr_wr_port;
if (((card->supports_sdio_new_mode) &&
(++card->curr_wr_port == card->max_ports)) ||
((!card->supports_sdio_new_mode) &&
(++card->curr_wr_port == card->mp_end_port)))
card->curr_wr_port = reg->start_wr_port;
} else {
adapter->data_sent = true;
return -EBUSY;
}
if ((card->has_control_mask) && (*port == CTRL_PORT)) {
dev_err(adapter->dev,
"invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
*port, card->curr_wr_port, wr_bitmap,
card->mp_wr_bitmap);
return -1;
}
dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
*port, wr_bitmap, card->mp_wr_bitmap);
return 0;
}
/*
* This function polls the card status.
*/
static int
mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
{
struct sdio_mmc_card *card = adapter->card;
u32 tries;
u8 cs;
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
if (mwifiex_read_reg(adapter, card->reg->poll_reg, &cs))
break;
else if ((cs & bits) == bits)
return 0;
usleep_range(10, 20);
}
dev_err(adapter->dev, "poll card status failed, tries = %d\n", tries);
return -1;
}
/*
* This function reads the firmware status.
*/
static int
mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
{
struct sdio_mmc_card *card = adapter->card;
const struct mwifiex_sdio_card_reg *reg = card->reg;
u8 fws0, fws1;
if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0))
return -1;
if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1))
return -1;
*dat = (u16) ((fws1 << 8) | fws0);
return 0;
}
/*
* This function disables the host interrupt.
*
* The host interrupt mask is read, the disable bit is reset and
* written back to the card host interrupt mask register.
*/
static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
sdio_claim_host(func);
mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, 0);
sdio_release_irq(func);
sdio_release_host(func);
}
/*
* This function reads the interrupt status from card.
*/
static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
u8 sdio_ireg;
unsigned long flags;
if (mwifiex_read_data_sync(adapter, card->mp_regs,
card->reg->max_mp_regs,
REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
dev_err(adapter->dev, "read mp_regs failed\n");
return;
}
sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
if (sdio_ireg) {
/*
* DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
* For SDIO new mode CMD port interrupts
* DN_LD_CMD_PORT_HOST_INT_STATUS and/or
* UP_LD_CMD_PORT_HOST_INT_STATUS
* Clear the interrupt status register
*/
dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
spin_lock_irqsave(&adapter->int_lock, flags);
adapter->int_status |= sdio_ireg;
spin_unlock_irqrestore(&adapter->int_lock, flags);
}
}
/*
* SDIO interrupt handler.
*
* This function reads the interrupt status from firmware and handles
 * the interrupt in the current thread (ksdioirqd) right away.
*/
static void
mwifiex_sdio_interrupt(struct sdio_func *func)
{
struct mwifiex_adapter *adapter;
struct sdio_mmc_card *card;
card = sdio_get_drvdata(func);
if (!card || !card->adapter) {
pr_debug("int: func=%p card=%p adapter=%p\n",
func, card, card ? card->adapter : NULL);
return;
}
adapter = card->adapter;
if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
adapter->ps_state = PS_STATE_AWAKE;
mwifiex_interrupt_status(adapter);
mwifiex_main_process(adapter);
}
/*
* This function enables the host interrupt.
*
* The host interrupt enable mask is written to the card
* host interrupt mask register.
*/
static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
int ret;
sdio_claim_host(func);
/* Request the SDIO IRQ */
ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
if (ret) {
dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
goto out;
}
/* Simply write the mask to the register */
ret = mwifiex_write_reg_locked(func, HOST_INT_MASK_REG,
card->reg->host_int_enable);
if (ret) {
dev_err(adapter->dev, "enable host interrupt failed\n");
sdio_release_irq(func);
}
out:
sdio_release_host(func);
return ret;
}
/*
 * This function reads a data buffer from the card and extracts the
 * packet type from its interface header.
*/
static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
u32 *type, u8 *buffer,
u32 npayload, u32 ioport)
{
int ret;
u32 nb;
if (!buffer) {
dev_err(adapter->dev, "%s: buffer is NULL\n", __func__);
return -1;
}
ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 1);
if (ret) {
dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__,
ret);
return -1;
}
nb = le16_to_cpu(*(__le16 *) (buffer));
if (nb > npayload) {
dev_err(adapter->dev, "%s: invalid packet, nb=%d npayload=%d\n",
__func__, nb, npayload);
return -1;
}
*type = le16_to_cpu(*(__le16 *) (buffer + 2));
return ret;
}
/*
* This function downloads the firmware to the card.
*
* Firmware is downloaded to the card in blocks. Every block download
* is tested for CRC errors, and retried a number of times before
* returning failure.
*/
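/*
 * Worked example of the retry signalling below (illustrative): a helper
 * length of 0x0101 has bit 0 set, flagging a CRC error, so txlen is set
 * to 0 and the previous chunk is resent from the unchanged offset; only
 * a clean length (bit 0 clear) advances the download.
 */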
static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct mwifiex_fw_image *fw)
{
struct sdio_mmc_card *card = adapter->card;
const struct mwifiex_sdio_card_reg *reg = card->reg;
int ret;
u8 *firmware = fw->fw_buf;
u32 firmware_len = fw->fw_len;
u32 offset = 0;
u8 base0, base1;
u8 *fwbuf;
u16 len = 0;
u32 txlen, tx_blocks = 0, tries;
u32 i = 0;
if (!firmware_len) {
dev_err(adapter->dev,
"firmware image not found! Terminating download\n");
return -1;
}
dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n",
firmware_len);
/* Assume that the allocated buffer is 8-byte aligned */
fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
if (!fwbuf)
return -ENOMEM;
/* Perform firmware data transfer */
do {
/* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
bits */
ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY |
DN_LD_CARD_RDY);
if (ret) {
dev_err(adapter->dev, "FW download with helper:"
" poll status timeout @ %d\n", offset);
goto done;
}
/* More data? */
if (offset >= firmware_len)
break;
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
ret = mwifiex_read_reg(adapter, reg->base_0_reg,
&base0);
if (ret) {
dev_err(adapter->dev,
"dev BASE0 register read failed: "
"base0=%#04X(%d). Terminating dnld\n",
base0, base0);
goto done;
}
ret = mwifiex_read_reg(adapter, reg->base_1_reg,
&base1);
if (ret) {
dev_err(adapter->dev,
"dev BASE1 register read failed: "
"base1=%#04X(%d). Terminating dnld\n",
base1, base1);
goto done;
}
len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff));
if (len)
break;
usleep_range(10, 20);
}
if (!len) {
break;
} else if (len > MWIFIEX_UPLD_SIZE) {
dev_err(adapter->dev,
"FW dnld failed @ %d, invalid length %d\n",
offset, len);
ret = -1;
goto done;
}
txlen = len;
if (len & BIT(0)) {
i++;
if (i > MAX_WRITE_IOMEM_RETRY) {
dev_err(adapter->dev,
"FW dnld failed @ %d, over max retry\n",
offset);
ret = -1;
goto done;
}
dev_err(adapter->dev, "CRC indicated by the helper:"
" len = 0x%04X, txlen = %d\n", len, txlen);
len &= ~BIT(0);
/* Setting this to 0 to resend from same offset */
txlen = 0;
} else {
i = 0;
/* Set blocksize to transfer - checking for last
block */
if (firmware_len - offset < txlen)
txlen = firmware_len - offset;
tx_blocks = (txlen + MWIFIEX_SDIO_BLOCK_SIZE - 1)
/ MWIFIEX_SDIO_BLOCK_SIZE;
/* Copy payload to buffer */
memmove(fwbuf, &firmware[offset], txlen);
}
ret = mwifiex_write_data_sync(adapter, fwbuf, tx_blocks *
MWIFIEX_SDIO_BLOCK_SIZE,
adapter->ioport);
if (ret) {
dev_err(adapter->dev,
"FW download, write iomem (%d) failed @ %d\n",
i, offset);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
dev_err(adapter->dev, "write CFG reg failed\n");
ret = -1;
goto done;
}
offset += txlen;
} while (true);
dev_dbg(adapter->dev, "info: FW download over, size %d bytes\n",
offset);
ret = 0;
done:
kfree(fwbuf);
return ret;
}
/*
* This function checks the firmware status in card.
*
* The winner interface is also determined by this function.
*/
static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
u32 poll_num)
{
struct sdio_mmc_card *card = adapter->card;
int ret = 0;
u16 firmware_stat;
u32 tries;
u8 winner_status;
/* Wait for firmware initialization event */
for (tries = 0; tries < poll_num; tries++) {
ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
if (ret)
continue;
if (firmware_stat == FIRMWARE_READY_SDIO) {
ret = 0;
break;
} else {
msleep(100);
ret = -1;
}
}
if (ret) {
		if (mwifiex_read_reg(adapter, card->reg->status_reg_0,
				     &winner_status))
winner_status = 0;
if (winner_status)
adapter->winner = 0;
else
adapter->winner = 1;
}
return ret;
}
/*
* This function decodes a received packet.
*
* Based on the type, the packet is treated as either a data, or
* a command response, or an event, and the correct handler
* function is invoked.
*/
static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
struct sk_buff *skb, u32 upld_typ)
{
u8 *cmd_buf;
__le16 *curr_ptr = (__le16 *)skb->data;
u16 pkt_len = le16_to_cpu(*curr_ptr);
skb_trim(skb, pkt_len);
skb_pull(skb, INTF_HEADER_LEN);
switch (upld_typ) {
case MWIFIEX_TYPE_DATA:
dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
mwifiex_handle_rx_packet(adapter, skb);
break;
case MWIFIEX_TYPE_CMD:
dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n");
		/* take care of the curr_cmd == NULL case */
if (!adapter->curr_cmd) {
cmd_buf = adapter->upld_buf;
if (adapter->ps_state == PS_STATE_SLEEP_CFM)
mwifiex_process_sleep_confirm_resp(adapter,
skb->data,
skb->len);
memcpy(cmd_buf, skb->data,
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER,
skb->len));
dev_kfree_skb_any(skb);
} else {
adapter->cmd_resp_received = true;
adapter->curr_cmd->resp_skb = skb;
}
break;
case MWIFIEX_TYPE_EVENT:
dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
memcpy(adapter->event_body,
skb->data + MWIFIEX_EVENT_HEADER_LEN,
skb->len);
/* event cause has been saved to adapter->event_cause */
adapter->event_received = true;
adapter->event_skb = skb;
break;
default:
dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ);
dev_kfree_skb_any(skb);
break;
}
return 0;
}
/*
* This function transfers received packets from card to driver, performing
* aggregation if required.
*
* For data received on control port, or if aggregation is disabled, the
* received buffers are uploaded as separate packets. However, if aggregation
* is enabled and required, the buffers are copied onto an aggregation buffer,
* provided there is space left, processed and finally uploaded.
*/
static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
struct sk_buff *skb, u8 port)
{
struct sdio_mmc_card *card = adapter->card;
s32 f_do_rx_aggr = 0;
s32 f_do_rx_cur = 0;
s32 f_aggr_cur = 0;
struct sk_buff *skb_deaggr;
u32 pind;
u32 pkt_len, pkt_type, mport;
u8 *curr_ptr;
u32 rx_len = skb->len;
if ((card->has_control_mask) && (port == CTRL_PORT)) {
/* Read the command Resp without aggr */
dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
"response\n", __func__);
f_do_rx_cur = 1;
goto rx_curr_single;
}
if (!card->mpa_rx.enabled) {
dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
__func__);
f_do_rx_cur = 1;
goto rx_curr_single;
}
if ((!card->has_control_mask && (card->mp_rd_bitmap &
card->reg->data_port_mask)) ||
(card->has_control_mask && (card->mp_rd_bitmap &
(~((u32) CTRL_PORT_MASK))))) {
/* Some more data RX pending */
dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
if (MP_RX_AGGR_IN_PROGRESS(card)) {
if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) {
f_aggr_cur = 1;
} else {
/* No room in Aggr buf, do rx aggr now */
f_do_rx_aggr = 1;
f_do_rx_cur = 1;
}
} else {
/* Rx aggr not in progress */
f_aggr_cur = 1;
}
} else {
/* No more data RX pending */
dev_dbg(adapter->dev, "info: %s: last packet\n", __func__);
if (MP_RX_AGGR_IN_PROGRESS(card)) {
f_do_rx_aggr = 1;
if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len))
f_aggr_cur = 1;
else
/* No room in Aggr buf, do rx aggr now */
f_do_rx_cur = 1;
} else {
f_do_rx_cur = 1;
}
}
if (f_aggr_cur) {
dev_dbg(adapter->dev, "info: current packet aggregation\n");
/* Curr pkt can be aggregated */
mp_rx_aggr_setup(card, skb, port);
if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
mp_rx_aggr_port_limit_reached(card)) {
dev_dbg(adapter->dev, "info: %s: aggregated packet "
"limit reached\n", __func__);
/* No more pkts allowed in Aggr buf, rx it */
f_do_rx_aggr = 1;
}
}
if (f_do_rx_aggr) {
/* do aggr RX now */
dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
card->mpa_rx.pkt_cnt);
if (card->supports_sdio_new_mode) {
int i;
u32 port_count;
for (i = 0, port_count = 0; i < card->max_ports; i++)
if (card->mpa_rx.ports & BIT(i))
port_count++;
/* Reading data from "start_port + 0" to "start_port +
* port_count -1", so decrease the count by 1
*/
port_count--;
mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
(port_count << 8)) + card->mpa_rx.start_port;
} else {
mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
(card->mpa_rx.ports << 4)) +
card->mpa_rx.start_port;
}
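		/*
		 * Illustrative encoding for the legacy branch above: with
		 * ioport = 0x10000, ports = 0xC and start_port = 2, mport
		 * becomes (0x10000 | SDIO_MPA_ADDR_BASE | 0xC0) + 2.
		 */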
if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
card->mpa_rx.buf_len, mport, 1))
goto error;
curr_ptr = card->mpa_rx.buf;
for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
/* get curr PKT len & type */
pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
/* copy pkt to deaggr buf */
skb_deaggr = card->mpa_rx.skb_arr[pind];
if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <=
card->mpa_rx.len_arr[pind])) {
memcpy(skb_deaggr->data, curr_ptr, pkt_len);
skb_trim(skb_deaggr, pkt_len);
/* Process de-aggr packet */
mwifiex_decode_rx_packet(adapter, skb_deaggr,
pkt_type);
} else {
dev_err(adapter->dev, "wrong aggr pkt:"
" type=%d len=%d max_len=%d\n",
pkt_type, pkt_len,
card->mpa_rx.len_arr[pind]);
dev_kfree_skb_any(skb_deaggr);
}
curr_ptr += card->mpa_rx.len_arr[pind];
}
MP_RX_AGGR_BUF_RESET(card);
}
rx_curr_single:
if (f_do_rx_cur) {
dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
port, rx_len);
if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
skb->data, skb->len,
adapter->ioport + port))
goto error;
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
return 0;
error:
if (MP_RX_AGGR_IN_PROGRESS(card)) {
/* Multiport-aggregation transfer failed - cleanup */
for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
/* copy pkt to deaggr buf */
skb_deaggr = card->mpa_rx.skb_arr[pind];
dev_kfree_skb_any(skb_deaggr);
}
MP_RX_AGGR_BUF_RESET(card);
}
if (f_do_rx_cur)
/* Single transfer pending. Free curr buff also */
dev_kfree_skb_any(skb);
return -1;
}
/*
* This function checks the current interrupt status.
*
* The following interrupts are checked and handled by this function -
* - Data sent
* - Command sent
* - Packets received
*
 * Since the firmware does not generate a download ready interrupt when
 * the only port updated is the command port, command sent status must
 * be checked manually on every SDIO interrupt.
*
* In case of Rx packets received, the packets are uploaded from card to
* host and processed accordingly.
*/
static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
const struct mwifiex_sdio_card_reg *reg = card->reg;
int ret = 0;
u8 sdio_ireg;
struct sk_buff *skb;
u8 port = CTRL_PORT;
u32 len_reg_l, len_reg_u;
u32 rx_blocks;
u16 rx_len;
unsigned long flags;
u32 bitmap;
u8 cr;
spin_lock_irqsave(&adapter->int_lock, flags);
sdio_ireg = adapter->int_status;
adapter->int_status = 0;
spin_unlock_irqrestore(&adapter->int_lock, flags);
if (!sdio_ireg)
return ret;
/* Following interrupt is only for SDIO new mode */
if (sdio_ireg & DN_LD_CMD_PORT_HOST_INT_STATUS && adapter->cmd_sent)
adapter->cmd_sent = false;
/* Following interrupt is only for SDIO new mode */
if (sdio_ireg & UP_LD_CMD_PORT_HOST_INT_STATUS) {
u32 pkt_type;
/* read the len of control packet */
rx_len = card->mp_regs[CMD_RD_LEN_1] << 8;
rx_len |= (u16) card->mp_regs[CMD_RD_LEN_0];
rx_blocks = DIV_ROUND_UP(rx_len, MWIFIEX_SDIO_BLOCK_SIZE);
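		/* e.g. rx_len = 300 rounds up to rx_blocks = 2, a 512-byte
		 * read, assuming the usual 256-byte MWIFIEX_SDIO_BLOCK_SIZE */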
if (rx_len <= INTF_HEADER_LEN ||
(rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
MWIFIEX_RX_DATA_BUF_SIZE)
return -1;
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
skb = dev_alloc_skb(rx_len);
if (!skb)
return -1;
skb_put(skb, rx_len);
if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
skb->len, adapter->ioport |
CMD_PORT_SLCT)) {
dev_err(adapter->dev,
"%s: failed to card_to_host", __func__);
dev_kfree_skb_any(skb);
goto term_cmd;
}
if ((pkt_type != MWIFIEX_TYPE_CMD) &&
(pkt_type != MWIFIEX_TYPE_EVENT))
dev_err(adapter->dev,
"%s:Received wrong packet on cmd port",
__func__);
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
if (sdio_ireg & DN_LD_HOST_INT_STATUS) {
bitmap = (u32) card->mp_regs[reg->wr_bitmap_l];
bitmap |= ((u32) card->mp_regs[reg->wr_bitmap_u]) << 8;
if (card->supports_sdio_new_mode) {
bitmap |=
((u32) card->mp_regs[reg->wr_bitmap_1l]) << 16;
bitmap |=
((u32) card->mp_regs[reg->wr_bitmap_1u]) << 24;
}
card->mp_wr_bitmap = bitmap;
dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n",
card->mp_wr_bitmap);
if (adapter->data_sent &&
(card->mp_wr_bitmap & card->mp_data_port_mask)) {
dev_dbg(adapter->dev,
"info: <--- Tx DONE Interrupt --->\n");
adapter->data_sent = false;
}
}
	/* As the firmware will not generate a download ready interrupt if
	   the only port updated is the command port, the cmd_sent flag must
	   be checked on every SDIO interrupt. */
if (card->has_control_mask && adapter->cmd_sent) {
		/* Check if the firmware has attached a buffer at the command
		   port and update just that bit in the write bitmap. */
card->mp_wr_bitmap |=
(u32) card->mp_regs[reg->wr_bitmap_l] & CTRL_PORT_MASK;
if (card->mp_wr_bitmap & CTRL_PORT_MASK)
adapter->cmd_sent = false;
}
dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
adapter->cmd_sent, adapter->data_sent);
if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
bitmap = (u32) card->mp_regs[reg->rd_bitmap_l];
bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8;
if (card->supports_sdio_new_mode) {
bitmap |=
((u32) card->mp_regs[reg->rd_bitmap_1l]) << 16;
bitmap |=
((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24;
}
card->mp_rd_bitmap = bitmap;
dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n",
card->mp_rd_bitmap);
while (true) {
ret = mwifiex_get_rd_port(adapter, &port);
if (ret) {
dev_dbg(adapter->dev,
"info: no more rd_port available\n");
break;
}
len_reg_l = reg->rd_len_p0_l + (port << 1);
len_reg_u = reg->rd_len_p0_u + (port << 1);
rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
rx_len |= (u16) card->mp_regs[len_reg_l];
dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
port, rx_len);
			rx_blocks = DIV_ROUND_UP(rx_len,
						 MWIFIEX_SDIO_BLOCK_SIZE);
if (rx_len <= INTF_HEADER_LEN ||
(rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
MWIFIEX_RX_DATA_BUF_SIZE) {
dev_err(adapter->dev, "invalid rx_len=%d\n",
rx_len);
return -1;
}
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
skb = dev_alloc_skb(rx_len);
if (!skb) {
dev_err(adapter->dev, "%s: failed to alloc skb",
__func__);
return -1;
}
skb_put(skb, rx_len);
dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
rx_len, skb->len);
if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
port)) {
dev_err(adapter->dev, "card_to_host_mpa failed:"
" int status=%#x\n", sdio_ireg);
goto term_cmd;
}
}
}
return 0;
term_cmd:
/* terminate cmd */
if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
dev_err(adapter->dev, "read CFG reg failed\n");
else
dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04)))
dev_err(adapter->dev, "write CFG reg failed\n");
else
dev_dbg(adapter->dev, "info: write success\n");
if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
dev_err(adapter->dev, "read CFG reg failed\n");
else
dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr);
return -1;
}
/*
* This function aggregates transmission buffers in driver and downloads
* the aggregated packet to card.
*
* The individual packets are aggregated by copying into an aggregation
* buffer and then downloaded to the card. Previous unsent packets in the
 * aggregation buffer are pre-copied before new packets are added.
 * Aggregation continues as long as there is space left in the
 * aggregation buffer and more packets are queued.
*
* The function will only download the packet to the card when aggregation
* stops, otherwise it will just aggregate the packet in aggregation buffer
* and return.
*/
static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
u8 *payload, u32 pkt_len, u32 port,
u32 next_pkt_len)
{
struct sdio_mmc_card *card = adapter->card;
int ret = 0;
s32 f_send_aggr_buf = 0;
s32 f_send_cur_buf = 0;
s32 f_precopy_cur_buf = 0;
s32 f_postcopy_cur_buf = 0;
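	/* Summary of the decision flags computed below: precopy appends the
	 * current packet to the aggregation buffer before a possible flush,
	 * postcopy appends it after flushing, send_aggr_buf flushes the
	 * aggregation buffer, and send_cur_buf writes the packet on its own.
	 */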
u32 mport;
if (!card->mpa_tx.enabled ||
(card->has_control_mask && (port == CTRL_PORT)) ||
(card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) {
dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
__func__);
f_send_cur_buf = 1;
goto tx_curr_single;
}
if (next_pkt_len) {
/* More pkt in TX queue */
dev_dbg(adapter->dev, "info: %s: more packets in queue.\n",
__func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
if (!mp_tx_aggr_port_limit_reached(card) &&
MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
f_precopy_cur_buf = 1;
if (!(card->mp_wr_bitmap &
(1 << card->curr_wr_port)) ||
!MP_TX_AGGR_BUF_HAS_ROOM(
card, pkt_len + next_pkt_len))
f_send_aggr_buf = 1;
} else {
/* No room in Aggr buf, send it */
f_send_aggr_buf = 1;
if (mp_tx_aggr_port_limit_reached(card) ||
!(card->mp_wr_bitmap &
(1 << card->curr_wr_port)))
f_send_cur_buf = 1;
else
f_postcopy_cur_buf = 1;
}
} else {
if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len) &&
(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
f_precopy_cur_buf = 1;
else
f_send_cur_buf = 1;
}
} else {
/* Last pkt in TX queue */
dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n",
__func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
/* some packs in Aggr buf already */
f_send_aggr_buf = 1;
if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len))
f_precopy_cur_buf = 1;
else
/* No room in Aggr buf, send it */
f_send_cur_buf = 1;
} else {
f_send_cur_buf = 1;
}
}
if (f_precopy_cur_buf) {
dev_dbg(adapter->dev, "data: %s: precopy current buffer\n",
__func__);
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
mp_tx_aggr_port_limit_reached(card))
/* No more pkts allowed in Aggr buf, send it */
f_send_aggr_buf = 1;
}
if (f_send_aggr_buf) {
dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
__func__,
card->mpa_tx.start_port, card->mpa_tx.ports);
if (card->supports_sdio_new_mode) {
u32 port_count;
int i;
for (i = 0, port_count = 0; i < card->max_ports; i++)
if (card->mpa_tx.ports & BIT(i))
port_count++;
/* Writing data from "start_port + 0" to "start_port +
* port_count -1", so decrease the count by 1
*/
port_count--;
mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
(port_count << 8)) + card->mpa_tx.start_port;
} else {
mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
(card->mpa_tx.ports << 4)) +
card->mpa_tx.start_port;
}
ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
card->mpa_tx.buf_len, mport);
MP_TX_AGGR_BUF_RESET(card);
}
tx_curr_single:
if (f_send_cur_buf) {
dev_dbg(adapter->dev, "data: %s: send current buffer %d\n",
__func__, port);
ret = mwifiex_write_data_to_card(adapter, payload, pkt_len,
adapter->ioport + port);
}
if (f_postcopy_cur_buf) {
dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n",
__func__);
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
}
return ret;
}
/*
* This function downloads data from driver to card.
*
* Both commands and data packets are transferred to the card by this
* function.
*
* This function adds the SDIO specific header to the front of the buffer
* before transferring. The header contains the length of the packet and
* the type. The firmware handles the packets based upon this set type.
*/
static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
u8 type, struct sk_buff *skb,
struct mwifiex_tx_param *tx_param)
{
struct sdio_mmc_card *card = adapter->card;
int ret;
u32 buf_block_len;
u32 blk_size;
u32 port = CTRL_PORT;
u8 *payload = (u8 *)skb->data;
u32 pkt_len = skb->len;
/* Allocate buffer and copy payload */
blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
buf_block_len = (pkt_len + blk_size - 1) / blk_size;
*(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
*(__le16 *)&payload[2] = cpu_to_le16(type);
/*
* This is SDIO specific header
* u16 length,
* u16 type (MWIFIEX_TYPE_DATA = 0, MWIFIEX_TYPE_CMD = 1,
* MWIFIEX_TYPE_EVENT = 3)
*/
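	/* Illustrative header bytes for a 100-byte data packet (little
	 * endian): payload[0..1] = 0x64 0x00, payload[2..3] = 0x00 0x00. */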
if (type == MWIFIEX_TYPE_DATA) {
ret = mwifiex_get_wr_port_data(adapter, &port);
if (ret) {
dev_err(adapter->dev, "%s: no wr_port available\n",
__func__);
return ret;
}
} else {
adapter->cmd_sent = true;
/* Type must be MWIFIEX_TYPE_CMD */
if (pkt_len <= INTF_HEADER_LEN ||
pkt_len > MWIFIEX_UPLD_SIZE)
dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
__func__, payload, pkt_len);
if (card->supports_sdio_new_mode)
port = CMD_PORT_SLCT;
}
/* Transfer data to card */
pkt_len = buf_block_len * blk_size;
if (tx_param)
ret = mwifiex_host_to_card_mp_aggr(adapter, payload, pkt_len,
port, tx_param->next_pkt_len
);
else
ret = mwifiex_host_to_card_mp_aggr(adapter, payload, pkt_len,
port, 0);
if (ret) {
if (type == MWIFIEX_TYPE_CMD)
adapter->cmd_sent = false;
if (type == MWIFIEX_TYPE_DATA) {
adapter->data_sent = false;
/* restore curr_wr_port in error cases */
card->curr_wr_port = port;
card->mp_wr_bitmap |= (u32)(1 << card->curr_wr_port);
}
} else {
if (type == MWIFIEX_TYPE_DATA) {
if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
adapter->data_sent = true;
else
adapter->data_sent = false;
}
}
return ret;
}
/*
* This function allocates the MPA Tx and Rx buffers.
*/
static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
u32 mpa_tx_buf_size, u32 mpa_rx_buf_size)
{
struct sdio_mmc_card *card = adapter->card;
int ret = 0;
card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL);
if (!card->mpa_tx.buf) {
ret = -1;
goto error;
}
card->mpa_tx.buf_size = mpa_tx_buf_size;
card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL);
if (!card->mpa_rx.buf) {
ret = -1;
goto error;
}
card->mpa_rx.buf_size = mpa_rx_buf_size;
error:
if (ret) {
kfree(card->mpa_tx.buf);
kfree(card->mpa_rx.buf);
}
return ret;
}
/*
* This function unregisters the SDIO device.
*
 * The SDIO function is disabled here; the IRQ is released earlier in
 * mwifiex_sdio_disable_host_int() and the driver data is cleared in
 * mwifiex_cleanup_sdio().
*/
static void
mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
if (adapter->card) {
sdio_claim_host(card->func);
sdio_disable_func(card->func);
sdio_release_host(card->func);
}
}
/*
* This function registers the SDIO device.
*
* SDIO IRQ is claimed, block size is set and driver data is initialized.
*/
static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
{
int ret;
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
/* save adapter pointer in card */
card->adapter = adapter;
adapter->tx_buf_size = card->tx_buf_size;
sdio_claim_host(func);
/* Set block size */
ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
sdio_release_host(func);
if (ret) {
pr_err("cannot set SDIO block size\n");
return ret;
}
adapter->dev = &func->dev;
strcpy(adapter->fw_name, card->firmware);
adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
return 0;
}
/*
* This function initializes the SDIO driver.
*
* The following initializations steps are followed -
* - Read the Host interrupt status register to acknowledge
* the first interrupt got from bootloader
* - Disable host interrupt mask register
* - Get SDIO port
* - Initialize SDIO variables in card
* - Allocate MP registers
* - Allocate MPA Tx and Rx buffers
*/
static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
const struct mwifiex_sdio_card_reg *reg = card->reg;
int ret;
u8 sdio_ireg;
sdio_set_drvdata(card->func, card);
/*
* Read the HOST_INT_STATUS_REG to ACK the first interrupt received
* from the bootloader. If we don't do this we get an interrupt
* as soon as we register the IRQ.
*/
mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg);
/* Get SDIO ioport */
mwifiex_init_sdio_ioport(adapter);
/* Initialize SDIO variables in card */
card->mp_rd_bitmap = 0;
card->mp_wr_bitmap = 0;
card->curr_rd_port = reg->start_rd_port;
card->curr_wr_port = reg->start_wr_port;
card->mp_data_port_mask = reg->data_port_mask;
card->mpa_tx.buf_len = 0;
card->mpa_tx.pkt_cnt = 0;
card->mpa_tx.start_port = 0;
card->mpa_tx.enabled = 1;
card->mpa_tx.pkt_aggr_limit = card->mp_agg_pkt_limit;
card->mpa_rx.buf_len = 0;
card->mpa_rx.pkt_cnt = 0;
card->mpa_rx.start_port = 0;
card->mpa_rx.enabled = 1;
card->mpa_rx.pkt_aggr_limit = card->mp_agg_pkt_limit;
/* Allocate buffers for SDIO MP-A */
card->mp_regs = kzalloc(reg->max_mp_regs, GFP_KERNEL);
if (!card->mp_regs)
return -ENOMEM;
/* Allocate skb pointer buffers */
card->mpa_rx.skb_arr = kzalloc((sizeof(void *)) *
card->mp_agg_pkt_limit, GFP_KERNEL);
card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
card->mp_agg_pkt_limit, GFP_KERNEL);
if (!card->mpa_rx.skb_arr || !card->mpa_rx.len_arr) {
/* kfree(NULL) is a no-op, so freeing both unconditionally is safe */
kfree(card->mp_regs);
kfree(card->mpa_rx.skb_arr);
kfree(card->mpa_rx.len_arr);
return -ENOMEM;
}
ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
card->mp_tx_agg_buf_size,
card->mp_rx_agg_buf_size);
if (ret) {
dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
kfree(card->mp_regs);
return -1;
}
return ret;
}
/*
* This function resets the MPA Tx and Rx buffers.
*/
static void mwifiex_cleanup_mpa_buf(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
MP_TX_AGGR_BUF_RESET(card);
MP_RX_AGGR_BUF_RESET(card);
}
/*
* This function cleans up the allocated card buffers.
*
* The following are freed by this function -
* - MP registers
* - MPA Tx buffer
* - MPA Rx buffer
*/
static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
kfree(card->mp_regs);
kfree(card->mpa_rx.skb_arr);
kfree(card->mpa_rx.len_arr);
kfree(card->mpa_tx.buf);
kfree(card->mpa_rx.buf);
sdio_set_drvdata(card->func, NULL);
kfree(card);
}
/*
* This function updates the MP end port in card.
*/
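/*
 * Ports from mp_end_port up to max_ports - 1 are cleared from the data
 * port bitmask. Illustrative example (values are hypothetical): with
 * max_ports = 16 and mp_end_port = 12, the loop below clears bits
 * 15..12, so only ports below 12 may carry data.
 */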
static void
mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
{
struct sdio_mmc_card *card = adapter->card;
const struct mwifiex_sdio_card_reg *reg = card->reg;
int i;
card->mp_end_port = port;
card->mp_data_port_mask = reg->data_port_mask;
if (reg->start_wr_port) {
for (i = 1; i <= card->max_ports - card->mp_end_port; i++)
card->mp_data_port_mask &=
~(1 << (card->max_ports - i));
}
card->curr_wr_port = reg->start_wr_port;
dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
port, card->mp_data_port_mask);
}
static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
struct mmc_host *target = card->func->card->host;
/* The actual reset operation must be run outside of driver thread.
* This is because mmc_remove_host() will cause the device to be
* instantly destroyed, and the driver then needs to end its thread,
* leading to a deadlock.
*
* We run it in a totally independent workqueue.
*/
pr_err("Resetting card...\n");
mmc_remove_host(target);
/* 20ms delay is based on experiment with sdhci controller */
mdelay(20);
target->rescan_entered = 0; /* rescan non-removable cards */
mmc_add_host(target);
}
/* This function runs the read/write handshake against the firmware dump control register */
static enum
rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
u8 doneflag)
{
struct sdio_mmc_card *card = adapter->card;
int ret, tries;
u8 ctrl_data = 0;
sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
&ret);
if (ret) {
dev_err(adapter->dev, "SDIO Write ERR\n");
return RDWR_STATUS_FAILURE;
}
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl,
&ret);
if (ret) {
dev_err(adapter->dev, "SDIO read err\n");
return RDWR_STATUS_FAILURE;
}
if (ctrl_data == FW_DUMP_DONE)
break;
if (doneflag && ctrl_data == doneflag)
return RDWR_STATUS_DONE;
if (ctrl_data != FW_DUMP_HOST_READY) {
dev_info(adapter->dev,
"The ctrl reg was changed, re-try again!\n");
sdio_writeb(card->func, FW_DUMP_HOST_READY,
card->reg->fw_dump_ctrl, &ret);
if (ret) {
dev_err(adapter->dev, "SDIO write err\n");
return RDWR_STATUS_FAILURE;
}
}
usleep_range(100, 200);
}
if (ctrl_data == FW_DUMP_HOST_READY) {
dev_err(adapter->dev, "Fail to pull ctrl_data\n");
return RDWR_STATUS_FAILURE;
}
return RDWR_STATUS_SUCCESS;
}
/* This function dumps the firmware memory regions and notifies user space */
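/*
 * Dump sequence implemented below: handshake through the dump ctrl
 * register, read the number of memory regions, then for each region
 * read its 32-bit length followed by the region contents from the
 * fw_dump_start..fw_dump_end register window.
 */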
static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
{
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, iface_work);
struct sdio_mmc_card *card = adapter->card;
int ret = 0;
unsigned int reg, reg_start, reg_end;
u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
enum rdwr_status stat;
u32 memory_size;
static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
if (!card->supports_fw_dump)
return;
for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
if (entry->mem_ptr) {
vfree(entry->mem_ptr);
entry->mem_ptr = NULL;
}
entry->mem_size = 0;
}
mwifiex_pm_wakeup_card(adapter);
sdio_claim_host(card->func);
dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
goto done;
reg = card->reg->fw_dump_start;
/* Read the number of memory regions to be dumped */
dump_num = sdio_readb(card->func, reg, &ret);
if (ret) {
dev_err(adapter->dev, "SDIO read memory length err\n");
goto done;
}
/* For each region, read its length and then dump its contents */
for (idx = 0; idx < dump_num; idx++) {
struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
goto done;
memory_size = 0;
reg = card->reg->fw_dump_start;
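/* Assemble the 32-bit region length from four consecutive
 * byte-wide registers, LSB first.
 */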
for (i = 0; i < 4; i++) {
read_reg = sdio_readb(card->func, reg, &ret);
if (ret) {
dev_err(adapter->dev, "SDIO read err\n");
goto done;
}
memory_size |= (read_reg << i*8);
reg++;
}
if (memory_size == 0) {
dev_info(adapter->dev, "Firmware dump Finished!\n");
break;
}
dev_info(adapter->dev,
"%s_SIZE=0x%x\n", entry->mem_name, memory_size);
entry->mem_ptr = vmalloc(memory_size + 1);
entry->mem_size = memory_size;
if (!entry->mem_ptr) {
dev_err(adapter->dev, "Vmalloc %s failed\n",
entry->mem_name);
goto done;
}
dbg_ptr = entry->mem_ptr;
end_ptr = dbg_ptr + memory_size;
doneflag = entry->done_flag;
dev_info(adapter->dev, "Start %s output, please wait...\n",
entry->mem_name);
do {
stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
goto done;
reg_start = card->reg->fw_dump_start;
reg_end = card->reg->fw_dump_end;
for (reg = reg_start; reg <= reg_end; reg++) {
*dbg_ptr = sdio_readb(card->func, reg, &ret);
if (ret) {
dev_err(adapter->dev,
"SDIO read err\n");
goto done;
}
if (dbg_ptr < end_ptr)
dbg_ptr++;
else
dev_err(adapter->dev,
"Allocated buf not enough\n");
}
if (stat != RDWR_STATUS_DONE)
continue;
dev_info(adapter->dev, "%s done: size=0x%tx\n",
entry->mem_name, dbg_ptr - entry->mem_ptr);
break;
} while (1);
}
dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
done:
sdio_release_host(card->func);
adapter->curr_mem_idx = 0;
}
static void mwifiex_sdio_work(struct work_struct *work)
{
struct mwifiex_adapter *adapter =
container_of(work, struct mwifiex_adapter, iface_work);
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
&adapter->iface_work_flags))
mwifiex_sdio_card_reset_work(adapter);
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
&adapter->iface_work_flags))
mwifiex_sdio_fw_dump_work(work);
}
/* This function resets the card */
static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
{
if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags))
return;
set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags);
schedule_work(&adapter->iface_work);
}
/* This function dumps FW information */
static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
{
if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
return;
set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
schedule_work(&adapter->iface_work);
}
static struct mwifiex_if_ops sdio_ops = {
.init_if = mwifiex_init_sdio,
.cleanup_if = mwifiex_cleanup_sdio,
.check_fw_status = mwifiex_check_fw_status,
.prog_fw = mwifiex_prog_fw_w_helper,
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
.enable_int = mwifiex_sdio_enable_host_int,
.disable_int = mwifiex_sdio_disable_host_int,
.process_int_status = mwifiex_process_int_status,
.host_to_card = mwifiex_sdio_host_to_card,
.wakeup = mwifiex_pm_wakeup_card,
.wakeup_complete = mwifiex_pm_wakeup_card_complete,
/* SDIO specific */
.update_mp_end_port = mwifiex_update_mp_end_port,
.cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
.cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
.event_complete = mwifiex_sdio_event_complete,
.card_reset = mwifiex_sdio_card_reset,
.iface_work = mwifiex_sdio_work,
.fw_dump = mwifiex_sdio_fw_dump,
};
/*
* This function initializes the SDIO driver.
*
* This initiates the semaphore and registers the device with
* SDIO bus.
*/
static int
mwifiex_sdio_init_module(void)
{
sema_init(&add_remove_card_sem, 1);
/* Clear the flag in case user removes the card. */
user_rmmod = 0;
return sdio_register_driver(&mwifiex_sdio);
}
/*
* This function cleans up the SDIO driver.
*
* The following major steps are followed for cleanup -
* - Resume the device if it is suspended
* - Disconnect the device if connected
* - Shutdown the firmware
* - Unregister the device from SDIO bus.
*/
static void
mwifiex_sdio_cleanup_module(void)
{
if (!down_interruptible(&add_remove_card_sem))
up(&add_remove_card_sem);
/* Set the flag as user is removing this module. */
user_rmmod = 1;
sdio_unregister_driver(&mwifiex_sdio);
}
module_init(mwifiex_sdio_init_module);
module_exit(mwifiex_sdio_cleanup_module);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
MODULE_VERSION(SDIO_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
| gpl-2.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/cython/src/Cython/Utility/ModuleSetupCode.c | 25252 | /////////////// CModulePreamble ///////////////
#include <stddef.h> /* For offsetof */
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600
#define Py_OptimizeFlag 0
#endif
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#define PY_FORMAT_SIZE_T ""
#define CYTHON_FORMAT_SSIZE_T ""
#define PyInt_FromSsize_t(z) PyInt_FromLong(z)
#define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o)
#define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \
(PyErr_Format(PyExc_TypeError, \
"expected index value, got %.200s", Py_TYPE(o)->tp_name), \
(PyObject*)0))
#define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \
!PyComplex_Check(o))
#define PyIndex_Check __Pyx_PyIndex_Check
#define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
#define __PYX_BUILD_PY_SSIZE_T "i"
#else
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#define __Pyx_PyIndex_Check PyIndex_Check
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
#define PyVarObject_HEAD_INIT(type, size) \
PyObject_HEAD_INIT(type) size,
#define PyType_Modified(t)
typedef struct {
void *buf;
PyObject *obj;
Py_ssize_t len;
Py_ssize_t itemsize;
int readonly;
int ndim;
char *format;
Py_ssize_t *shape;
Py_ssize_t *strides;
Py_ssize_t *suboffsets;
void *internal;
} Py_buffer;
#define PyBUF_SIMPLE 0
#define PyBUF_WRITABLE 0x0001
#define PyBUF_FORMAT 0x0004
#define PyBUF_ND 0x0008
#define PyBUF_STRIDES (0x0010 | PyBUF_ND)
#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
#endif
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict")
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_TPFLAGS_HAVE_VERSION_TAG 0
#endif
#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT)
#define Py_TPFLAGS_IS_ABSTRACT 0
#endif
#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
/* new Py3.3 unicode type (PEP 393) */
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
/* (void)(k) => avoid unused variable warning due to macro: */
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyBytesObject PyStringObject
#define PyBytes_Type PyString_Type
#define PyBytes_Check PyString_Check
#define PyBytes_CheckExact PyString_CheckExact
#define PyBytes_FromString PyString_FromString
#define PyBytes_FromStringAndSize PyString_FromStringAndSize
#define PyBytes_FromFormat PyString_FromFormat
#define PyBytes_DecodeEscape PyString_DecodeEscape
#define PyBytes_AsString PyString_AsString
#define PyBytes_AsStringAndSize PyString_AsStringAndSize
#define PyBytes_Size PyString_Size
#define PyBytes_AS_STRING PyString_AS_STRING
#define PyBytes_GET_SIZE PyString_GET_SIZE
#define PyBytes_Repr PyString_Repr
#define PyBytes_Concat PyString_Concat
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \
PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#if PY_VERSION_HEX < 0x02060000
#define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
#define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
#define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
#define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
#define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
#else
#define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
#define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
#define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
#endif
#if PY_MAJOR_VERSION >= 3
#define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
#else
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_NAMESTR(n) ((char *)(n))
#define __Pyx_DOCSTR(n) ((char *)(n))
#else
#define __Pyx_NAMESTR(n) (n)
#define __Pyx_DOCSTR(n) (n)
#endif
/* inline attribute */
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
/* restrict */
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
/* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
a quiet NaN. */
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
// Work around clang bug http://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor
#ifdef __cplusplus
template<typename T>
void __Pyx_call_destructor(T* x) {
x->~T();
}
#endif
/////////////// UtilityFunctionPredeclarations.proto ///////////////
/* unused attribute */
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
/////////////// ForceInitThreads.proto ///////////////
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/////////////// InitThreads.init ///////////////
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
/////////////// CodeObjectCache.proto ///////////////
typedef struct {
int code_line;
PyCodeObject* code_object;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/////////////// CodeObjectCache ///////////////
// Note that errors are simply ignored in the code below.
// This is just a cache; if a lookup or insertion fails, so what?
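// __pyx_bisect_code_objects() returns the index of the entry matching
// code_line, or the insertion position that keeps the array sorted
// (count when code_line is greater than all existing entries).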
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = (start + end) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/////////////// CodeObjectCache.cleanup ///////////////
if (__pyx_code_cache.entries) {
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
int i, count = __pyx_code_cache.count;
__pyx_code_cache.count = 0;
__pyx_code_cache.max_count = 0;
__pyx_code_cache.entries = NULL;
for (i=0; i<count; i++) {
Py_DECREF(entries[i].code_object);
}
PyMem_Free(entries);
}
/////////////// CheckBinaryVersion.proto ///////////////
static int __Pyx_check_binary_version(void);
/////////////// CheckBinaryVersion ///////////////
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
#if PY_VERSION_HEX < 0x02050000
return PyErr_Warn(NULL, message);
#else
return PyErr_WarnEx(NULL, message, 1);
#endif
}
return 0;
}
/////////////// Refnanny.proto ///////////////
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
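// When acquire_gil is nonzero the refnanny context is set up while
// temporarily holding the GIL (presumably for nogil entry points that
// still need to call into the refnanny Python module).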
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
if (acquire_gil) { \
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
PyGILState_Release(__pyx_gilstate_save); \
} else { \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext() \
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif /* CYTHON_REFNANNY */
#define __Pyx_XDECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_XDECREF(tmp); \
} while (0)
#define __Pyx_DECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_DECREF(tmp); \
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/////////////// Refnanny ///////////////
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif /* CYTHON_REFNANNY */
/////////////// RegisterModuleCleanup.proto ///////////////
//@substitute: naming
static void ${cleanup_cname}(PyObject *self); /*proto*/
static int __Pyx_RegisterCleanup(void); /*proto*/
/////////////// RegisterModuleCleanup ///////////////
//@substitute: naming
//@requires: ImportExport.c::ModuleImport
#if PY_MAJOR_VERSION < 3
static PyObject* ${cleanup_cname}_atexit(PyObject *module, CYTHON_UNUSED PyObject *unused) {
${cleanup_cname}(module);
Py_INCREF(Py_None); return Py_None;
}
static int __Pyx_RegisterCleanup(void) {
// Don't use Py_AtExit because that has a 32-call limit and is called
// after python finalization.
// Also, we try to prepend the cleanup function to "atexit._exithandlers"
// in Py2 because CPython runs them last-to-first. Being run last allows
// user exit code to run before us that may depend on the globals
// and cached objects that we are about to clean up.
static PyMethodDef cleanup_def = {
__Pyx_NAMESTR("__cleanup"), (PyCFunction)${cleanup_cname}_atexit, METH_NOARGS, 0};
PyObject *cleanup_func = 0;
PyObject *atexit = 0;
PyObject *reg = 0;
PyObject *args = 0;
PyObject *res = 0;
int ret = -1;
cleanup_func = PyCFunction_New(&cleanup_def, 0);
if (!cleanup_func)
goto bad;
atexit = __Pyx_ImportModule("atexit");
if (!atexit)
goto bad;
reg = __Pyx_GetAttrString(atexit, "_exithandlers");
if (reg && PyList_Check(reg)) {
PyObject *a, *kw;
a = PyTuple_New(0);
kw = PyDict_New();
if (!a || !kw) {
Py_XDECREF(a);
Py_XDECREF(kw);
goto bad;
}
args = PyTuple_Pack(3, cleanup_func, a, kw);
Py_DECREF(a);
Py_DECREF(kw);
if (!args)
goto bad;
ret = PyList_Insert(reg, 0, args);
} else {
if (!reg)
PyErr_Clear();
Py_XDECREF(reg);
reg = __Pyx_GetAttrString(atexit, "register");
if (!reg)
goto bad;
args = PyTuple_Pack(1, cleanup_func);
if (!args)
goto bad;
res = PyObject_CallObject(reg, args);
if (!res)
goto bad;
ret = 0;
}
bad:
Py_XDECREF(cleanup_func);
Py_XDECREF(atexit);
Py_XDECREF(reg);
Py_XDECREF(args);
Py_XDECREF(res);
return ret;
}
#else
// fake call purely to work around "unused function" warning for __Pyx_ImportModule()
static int __Pyx_RegisterCleanup(void) {
if (0) __Pyx_ImportModule(NULL);
return 0;
}
#endif
| mit |
stardog-union/stardog-graviton | vendor/github.com/hashicorp/terraform/vendor/github.com/googleapis/gax-go/invoke.go | 2878 | // Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"time"
"golang.org/x/net/context"
)
// APICall is a user-defined call stub, executed (and possibly retried) by Invoke.
type APICall func(context.Context) error
// Invoke calls the given APICall,
// performing retries as specified by opts, if any.
func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
var settings CallSettings
for _, opt := range opts {
opt.Resolve(&settings)
}
return invoke(ctx, call, settings, Sleep)
}
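// Illustrative usage (client, req and retryer are placeholders; WithRetry
// is assumed to be this package's CallOption constructor):
//
//	err := Invoke(ctx, func(ctx context.Context) error {
//		return client.Call(ctx, req) // any idempotent operation
//	}, WithRetry(func() Retryer { return retryer }))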
// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
// If interrupted, Sleep returns ctx.Err().
func Sleep(ctx context.Context, d time.Duration) error {
t := time.NewTimer(d)
select {
case <-ctx.Done():
t.Stop()
return ctx.Err()
case <-t.C:
return nil
}
}
type sleeper func(ctx context.Context, d time.Duration) error
// invoke implements Invoke, taking an additional sleeper argument for testing.
func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
var retryer Retryer
for {
err := call(ctx)
if err == nil {
return nil
}
if settings.Retry == nil {
return err
}
if retryer == nil {
if r := settings.Retry(); r != nil {
retryer = r
} else {
return err
}
}
if d, ok := retryer.Retry(err); !ok {
return err
} else if err = sp(ctx, d); err != nil {
return err
}
}
}
| apache-2.0 |
lshain-android-source/tools-idea | java/java-tests/testData/refactoring/suggestedTypes/LtLtInt.java | 91 | class Test {
void foo() {
int b = 0;
<selection>int i = b << 1;</selection>
}
} | apache-2.0 |
BPI-SINOVOIP/BPI-Mainline-kernel | linux-5.4/drivers/regulator/arizona-ldo1.c | 10160 | // SPDX-License-Identifier: GPL-2.0+
//
// arizona-ldo1.c -- LDO1 supply for Arizona devices
//
// Copyright 2012 Wolfson Microelectronics PLC.
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
#include <linux/regulator/arizona-ldo1.h>
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
#include <linux/mfd/madera/core.h>
#include <linux/mfd/madera/pdata.h>
#include <linux/mfd/madera/registers.h>
struct arizona_ldo1 {
struct regulator_dev *regulator;
struct regmap *regmap;
struct regulator_consumer_supply supply;
struct regulator_init_data init_data;
struct gpio_desc *ena_gpiod;
};
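/*
 * On the high-current LDO1 variant the top selector (1.8V) is reached
 * via the LDO1_HI_PWR bit rather than the normal VSEL field, so the
 * voltage ops below special-case the last selector.
 */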
static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev,
unsigned sel)
{
struct regmap *regmap = rdev_get_regmap(rdev);
unsigned int val;
int ret;
if (sel == rdev->desc->n_voltages - 1)
val = ARIZONA_LDO1_HI_PWR;
else
val = 0;
ret = regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_2,
ARIZONA_LDO1_HI_PWR, val);
if (ret != 0)
return ret;
if (val)
return 0;
return regulator_set_voltage_sel_regmap(rdev, sel);
}
static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev)
{
struct regmap *regmap = rdev_get_regmap(rdev);
unsigned int val;
int ret;
ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_2, &val);
if (ret != 0)
return ret;
if (val & ARIZONA_LDO1_HI_PWR)
return rdev->desc->n_voltages - 1;
return regulator_get_voltage_sel_regmap(rdev);
}
static const struct regulator_ops arizona_ldo1_hc_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = arizona_ldo1_hc_get_voltage_sel,
.set_voltage_sel = arizona_ldo1_hc_set_voltage_sel,
.get_bypass = regulator_get_bypass_regmap,
.set_bypass = regulator_set_bypass_regmap,
};
static const struct regulator_linear_range arizona_ldo1_hc_ranges[] = {
REGULATOR_LINEAR_RANGE(900000, 0, 0x6, 50000),
REGULATOR_LINEAR_RANGE(1800000, 0x7, 0x7, 0),
};
static const struct regulator_desc arizona_ldo1_hc = {
.name = "LDO1",
.supply_name = "LDOVDD",
.type = REGULATOR_VOLTAGE,
.ops = &arizona_ldo1_hc_ops,
.vsel_reg = ARIZONA_LDO1_CONTROL_1,
.vsel_mask = ARIZONA_LDO1_VSEL_MASK,
.bypass_reg = ARIZONA_LDO1_CONTROL_1,
.bypass_mask = ARIZONA_LDO1_BYPASS,
.linear_ranges = arizona_ldo1_hc_ranges,
.n_linear_ranges = ARRAY_SIZE(arizona_ldo1_hc_ranges),
.n_voltages = 8,
.enable_time = 1500,
.ramp_delay = 24000,
.owner = THIS_MODULE,
};
static const struct regulator_ops arizona_ldo1_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
static const struct regulator_desc arizona_ldo1 = {
.name = "LDO1",
.supply_name = "LDOVDD",
.type = REGULATOR_VOLTAGE,
.ops = &arizona_ldo1_ops,
.vsel_reg = ARIZONA_LDO1_CONTROL_1,
.vsel_mask = ARIZONA_LDO1_VSEL_MASK,
.min_uV = 900000,
.uV_step = 25000,
.n_voltages = 13,
.enable_time = 500,
.ramp_delay = 24000,
.owner = THIS_MODULE,
};
static const struct regulator_init_data arizona_ldo1_dvfs = {
.constraints = {
.min_uV = 1200000,
.max_uV = 1800000,
.valid_ops_mask = REGULATOR_CHANGE_STATUS |
REGULATOR_CHANGE_VOLTAGE,
},
.num_consumer_supplies = 1,
};
static const struct regulator_init_data arizona_ldo1_default = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = 1,
};
static const struct regulator_init_data arizona_ldo1_wm5110 = {
.constraints = {
.min_uV = 1175000,
.max_uV = 1200000,
.valid_ops_mask = REGULATOR_CHANGE_STATUS |
REGULATOR_CHANGE_VOLTAGE,
},
.num_consumer_supplies = 1,
};
static const struct regulator_desc madera_ldo1 = {
.name = "LDO1",
.supply_name = "LDOVDD",
.type = REGULATOR_VOLTAGE,
.ops = &arizona_ldo1_ops,
.vsel_reg = MADERA_LDO1_CONTROL_1,
.vsel_mask = MADERA_LDO1_VSEL_MASK,
.min_uV = 900000,
.uV_step = 25000,
.n_voltages = 13,
.enable_time = 3000,
.owner = THIS_MODULE,
};
static const struct regulator_init_data madera_ldo1_default = {
.constraints = {
.min_uV = 1200000,
.max_uV = 1200000,
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = 1,
};
static int arizona_ldo1_of_get_pdata(struct arizona_ldo1_pdata *pdata,
struct regulator_config *config,
const struct regulator_desc *desc,
bool *external_dcvdd)
{
struct arizona_ldo1 *ldo1 = config->driver_data;
struct device_node *np = config->dev->of_node;
struct device_node *init_node, *dcvdd_node;
struct regulator_init_data *init_data;
init_node = of_get_child_by_name(np, "ldo1");
dcvdd_node = of_parse_phandle(np, "DCVDD-supply", 0);
if (init_node) {
config->of_node = init_node;
init_data = of_get_regulator_init_data(config->dev, init_node,
desc);
if (init_data) {
init_data->consumer_supplies = &ldo1->supply;
init_data->num_consumer_supplies = 1;
if (dcvdd_node && dcvdd_node != init_node)
*external_dcvdd = true;
pdata->init_data = init_data;
}
} else if (dcvdd_node) {
*external_dcvdd = true;
}
of_node_put(dcvdd_node);
return 0;
}
static int arizona_ldo1_common_init(struct platform_device *pdev,
struct arizona_ldo1 *ldo1,
const struct regulator_desc *desc,
struct arizona_ldo1_pdata *pdata,
bool *external_dcvdd)
{
struct device *parent_dev = pdev->dev.parent;
struct regulator_config config = { };
int ret;
*external_dcvdd = false;
ldo1->supply.supply = "DCVDD";
ldo1->init_data.consumer_supplies = &ldo1->supply;
ldo1->supply.dev_name = dev_name(parent_dev);
config.dev = parent_dev;
config.driver_data = ldo1;
config.regmap = ldo1->regmap;
if (IS_ENABLED(CONFIG_OF)) {
if (!dev_get_platdata(parent_dev)) {
ret = arizona_ldo1_of_get_pdata(pdata,
&config, desc,
external_dcvdd);
if (ret < 0)
return ret;
}
}
/* We assume that high output = regulator off.
* Don't use devm here: the GPIO is requested against the parent
* device, so devm-managed cleanup would run at the wrong time.
*/
config.ena_gpiod = gpiod_get_optional(parent_dev, "wlf,ldoena",
GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(config.ena_gpiod))
return PTR_ERR(config.ena_gpiod);
ldo1->ena_gpiod = config.ena_gpiod;
if (pdata->init_data)
config.init_data = pdata->init_data;
else
config.init_data = &ldo1->init_data;
/*
* LDO1 can only be used to supply DCVDD so if it has no
* consumers then DCVDD is supplied externally.
*/
if (config.init_data->num_consumer_supplies == 0)
*external_dcvdd = true;
ldo1->regulator = devm_regulator_register(&pdev->dev, desc, &config);
of_node_put(config.of_node);
if (IS_ERR(ldo1->regulator)) {
ret = PTR_ERR(ldo1->regulator);
dev_err(&pdev->dev, "Failed to register LDO1 supply: %d\n",
ret);
return ret;
}
platform_set_drvdata(pdev, ldo1);
return 0;
}
static int arizona_ldo1_probe(struct platform_device *pdev)
{
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct arizona_ldo1 *ldo1;
const struct regulator_desc *desc;
bool external_dcvdd;
int ret;
ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
if (!ldo1)
return -ENOMEM;
ldo1->regmap = arizona->regmap;
/*
* Since the chip usually supplies itself we provide some
* default init_data for it. This will be overridden with
* platform data if provided.
*/
switch (arizona->type) {
case WM5102:
case WM8997:
case WM8998:
case WM1814:
desc = &arizona_ldo1_hc;
ldo1->init_data = arizona_ldo1_dvfs;
break;
case WM5110:
case WM8280:
desc = &arizona_ldo1;
ldo1->init_data = arizona_ldo1_wm5110;
break;
default:
desc = &arizona_ldo1;
ldo1->init_data = arizona_ldo1_default;
break;
}
ret = arizona_ldo1_common_init(pdev, ldo1, desc,
&arizona->pdata.ldo1,
&external_dcvdd);
if (ret == 0)
arizona->external_dcvdd = external_dcvdd;
return ret;
}
static int arizona_ldo1_remove(struct platform_device *pdev)
{
struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
if (ldo1->ena_gpiod)
gpiod_put(ldo1->ena_gpiod);
return 0;
}
static int madera_ldo1_probe(struct platform_device *pdev)
{
struct madera *madera = dev_get_drvdata(pdev->dev.parent);
struct arizona_ldo1 *ldo1;
bool external_dcvdd;
int ret;
ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
if (!ldo1)
return -ENOMEM;
ldo1->regmap = madera->regmap;
ldo1->init_data = madera_ldo1_default;
ret = arizona_ldo1_common_init(pdev, ldo1, &madera_ldo1,
&madera->pdata.ldo1,
&external_dcvdd);
if (ret)
return ret;
madera->internal_dcvdd = !external_dcvdd;
return 0;
}
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
.remove = arizona_ldo1_remove,
.driver = {
.name = "arizona-ldo1",
},
};
static struct platform_driver madera_ldo1_driver = {
.probe = madera_ldo1_probe,
.remove = arizona_ldo1_remove,
.driver = {
.name = "madera-ldo1",
},
};
static struct platform_driver * const madera_ldo1_drivers[] = {
&arizona_ldo1_driver,
&madera_ldo1_driver,
};
static int __init arizona_ldo1_init(void)
{
return platform_register_drivers(madera_ldo1_drivers,
ARRAY_SIZE(madera_ldo1_drivers));
}
module_init(arizona_ldo1_init);
static void __exit madera_ldo1_exit(void)
{
platform_unregister_drivers(madera_ldo1_drivers,
ARRAY_SIZE(madera_ldo1_drivers));
}
module_exit(madera_ldo1_exit);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Arizona LDO1 driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:arizona-ldo1");
MODULE_ALIAS("platform:madera-ldo1");
| gpl-2.0 |
kmtoki/qmk_firmware | keyboards/mechlovin/kanu/keymaps/via/keymap.c | 3806 | /* Copyright 2020 Team Mechlovin'
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include QMK_KEYBOARD_H
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
[0] = LAYOUT_all( /* Base */
KC_ESC, KC_1, KC_2, KC_3, KC_4, KC_5, KC_6, KC_7, KC_8, KC_9, KC_0, KC_MINS, KC_EQL, KC_BSPC, KC_DEL, KC_PSCR,
KC_TAB, KC_Q, KC_W, KC_E, KC_R, KC_T, KC_Y, KC_U, KC_I, KC_O, KC_P, KC_LBRC, KC_RBRC, KC_BSLS, KC_INS,
LT(1,KC_CAPS), KC_A, KC_S, KC_D, KC_F, KC_G, KC_H, KC_J, KC_K, KC_L, KC_SCLN, KC_QUOT, KC_NUHS, KC_ENT, KC_HOME,
KC_LSFT, KC_NUBS, KC_Z, KC_X, KC_C, KC_V, KC_B, KC_N, KC_M, KC_COMM, KC_DOT, KC_SLSH, KC_RSFT, KC_UP, KC_END,
KC_LCTL, KC_LGUI, KC_LALT, KC_SPC, KC_SPC, KC_SPC, KC_RALT, KC_RCTL, KC_LEFT, KC_DOWN, KC_RGHT
),
[1] = LAYOUT_all(
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS
),
[2] = LAYOUT_all(
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS
),
[3] = LAYOUT_all(
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS,
KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS
),
}; | gpl-2.0 |
selmentdev/selment-toolchain | source/gcc-latest/gcc/testsuite/gcc.target/i386/avx512f-vpunpckhqdq-1.c | 653 | /* { dg-do compile } */
/* { dg-options "-mavx512f -O2" } */
/* { dg-final { scan-assembler-times "vpunpckhqdq\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vpunpckhqdq\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
/* { dg-final { scan-assembler-times "vpunpckhqdq\[ \\t\]+\[^\{\n\]*%zmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)" 1 } } */
#include <immintrin.h>
volatile __m512i x, y, z;
volatile __mmask8 m;
void extern
avx512f_test (void)
{
x = _mm512_unpackhi_epi64 (y, z);
x = _mm512_mask_unpackhi_epi64 (x, m, y, z);
x = _mm512_maskz_unpackhi_epi64 (m, y, z);
}
| gpl-3.0 |
yoki/phantomjs | src/qt/qtbase/src/3rdparty/angle/src/libGLESv2/renderer/d3d/d3d11/formatutils11.h | 1877 | //
// Copyright (c) 2013-2014 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// formatutils11.h: Queries for GL image formats and their translations to D3D11
// formats.
#ifndef LIBGLESV2_RENDERER_FORMATUTILS11_H_
#define LIBGLESV2_RENDERER_FORMATUTILS11_H_
#include "libGLESv2/formatutils.h"
#include <map>
namespace rx
{
namespace d3d11
{
typedef std::map<std::pair<GLenum, GLenum>, ColorCopyFunction> FastCopyFunctionMap;
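// Keyed by (destination GL format, destination GL type); DXGIFormat::
// getFastCopyFunction() presumably returns a null function pointer when
// no specialized copy path exists for the requested conversion.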
struct DXGIFormat
{
DXGIFormat();
GLuint pixelBytes;
GLuint blockWidth;
GLuint blockHeight;
GLuint depthBits;
GLuint depthOffset;
GLuint stencilBits;
GLuint stencilOffset;
GLenum internalFormat;
GLenum componentType;
MipGenerationFunction mipGenerationFunction;
ColorReadFunction colorReadFunction;
FastCopyFunctionMap fastCopyFunctions;
ColorCopyFunction getFastCopyFunction(GLenum format, GLenum type) const;
};
const DXGIFormat &GetDXGIFormatInfo(DXGI_FORMAT format);
struct TextureFormat
{
TextureFormat();
DXGI_FORMAT texFormat;
DXGI_FORMAT srvFormat;
DXGI_FORMAT rtvFormat;
DXGI_FORMAT dsvFormat;
DXGI_FORMAT renderFormat;
DXGI_FORMAT swizzleTexFormat;
DXGI_FORMAT swizzleSRVFormat;
DXGI_FORMAT swizzleRTVFormat;
InitializeTextureDataFunction dataInitializerFunction;
typedef std::map<GLenum, LoadImageFunction> LoadFunctionMap;
LoadFunctionMap loadFunctions;
};
const TextureFormat &GetTextureFormatInfo(GLenum internalFormat);
struct VertexFormat
{
VertexFormat();
VertexConversionType conversionType;
DXGI_FORMAT nativeFormat;
VertexCopyFunction copyFunction;
};
const VertexFormat &GetVertexFormatInfo(const gl::VertexFormat &vertexFormat);
}
}
#endif // LIBGLESV2_RENDERER_FORMATUTILS11_H_
| bsd-3-clause |
iamjasonp/corefx | src/System.Composition.Convention/src/Microsoft/Internal/AttributeServices.cs | 2881 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
namespace Microsoft.Internal
{
internal static class AttributeServices
{
// MemberInfo Attribute helpers
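// Illustrative usage (Foo and MyAttribute are placeholders):
//   var attr = typeof(Foo).GetTypeInfo().GetFirstAttribute<MyAttribute>();
//   bool defined = typeof(Foo).GetTypeInfo().IsAttributeDefined<MyAttribute>(true);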
public static IEnumerable<T> GetAttributes<T>(this MemberInfo memberInfo) where T : System.Attribute
{
return memberInfo.GetCustomAttributes<T>(false);
}
public static IEnumerable<T> GetAttributes<T>(this MemberInfo memberInfo, bool inherit) where T : System.Attribute
{
return memberInfo.GetCustomAttributes<T>(inherit);
}
public static T GetFirstAttribute<T>(this MemberInfo memberInfo) where T : System.Attribute
{
return GetAttributes<T>(memberInfo).FirstOrDefault();
}
public static T GetFirstAttribute<T>(this MemberInfo memberInfo, bool inherit) where T : System.Attribute
{
return GetAttributes<T>(memberInfo, inherit).FirstOrDefault();
}
public static bool IsAttributeDefined<T>(this MemberInfo memberInfo) where T : System.Attribute
{
return memberInfo.IsDefined(typeof(T), false);
}
public static bool IsAttributeDefined<T>(this MemberInfo memberInfo, bool inherit) where T : System.Attribute
{
return memberInfo.IsDefined(typeof(T), inherit);
}
// ParameterInfo Attribute helpers
public static IEnumerable<T> GetAttributes<T>(this ParameterInfo parameterInfo) where T : System.Attribute
{
return parameterInfo.GetCustomAttributes<T>(false);
}
public static IEnumerable<T> GetAttributes<T>(this ParameterInfo parameterInfo, bool inherit) where T : System.Attribute
{
return parameterInfo.GetCustomAttributes<T>(inherit);
}
public static T GetFirstAttribute<T>(this ParameterInfo parameterInfo) where T : System.Attribute
{
return GetAttributes<T>(parameterInfo).FirstOrDefault();
}
public static T GetFirstAttribute<T>(this ParameterInfo parameterInfo, bool inherit) where T : System.Attribute
{
return GetAttributes<T>(parameterInfo, inherit).FirstOrDefault();
}
public static bool IsAttributeDefined<T>(this ParameterInfo parameterInfo) where T : System.Attribute
{
return parameterInfo.IsDefined(typeof(T), false);
}
public static bool IsAttributeDefined<T>(this ParameterInfo parameterInfo, bool inherit) where T : System.Attribute
{
return parameterInfo.IsDefined(typeof(T), inherit);
}
}
}
| mit |
ShinySide/SM-A700F | drivers/usb/serial/qcserial.c | 12433 | /*
* Qualcomm Serial USB driver
*
* Copyright (c) 2008, 2012 The Linux Foundation. All rights reserved.
* Copyright (c) 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2009 Novell Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
*/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/slab.h>
#include "usb-wwan.h"
#define DRIVER_AUTHOR "Qualcomm Inc"
#define DRIVER_DESC "Qualcomm USB Serial driver"
#define DEVICE_G1K(v, p) \
USB_DEVICE(v, p), .driver_info = 1
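/* driver_info = 1 flags Gobi 1000 devices, which the probe code is
 * expected to treat differently from later Gobi generations.
 */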
static const struct usb_device_id id_table[] = {
/* Gobi 1000 devices */
{DEVICE_G1K(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{DEVICE_G1K(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{DEVICE_G1K(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */
{DEVICE_G1K(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
{DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
{DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
{DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
{DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
{DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
{DEVICE_G1K(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
{DEVICE_G1K(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */
{DEVICE_G1K(0x1557, 0x0a80)}, /* OQO Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
{DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
/* Gobi 2000 devices */
{USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
{USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */
{USB_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
{USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
{USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
{USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
{USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
{USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
{USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
{USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
{USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
{USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
{USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
{USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
{USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
{USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
{USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
{USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
{USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
/* Gobi 3000 devices */
{USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Gobi 3000 QDL */
{USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */
{USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */
{USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
{USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
{USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
{USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
{USB_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */
{USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
{USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
{USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
{USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
{USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
{USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
{USB_DEVICE(0x05c6, 0x9048)}, /* MDM9x15 device */
{USB_DEVICE(0x05c6, 0x904C)}, /* MDM9x15 device */
/* non Gobi Qualcomm serial devices */
{USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)}, /* Sierra Wireless MC7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 2)}, /* Sierra Wireless MC7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 3)}, /* Sierra Wireless MC7700 Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x114f, 0x68a2, 0)}, /* Sierra Wireless MC7750 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x114f, 0x68a2, 2)}, /* Sierra Wireless MC7750 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x114f, 0x68a2, 3)}, /* Sierra Wireless MC7750 Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
{
struct usb_host_interface *intf = serial->interface->cur_altsetting;
struct device *dev = &serial->dev->dev;
int retval = -ENODEV;
__u8 nintf;
__u8 ifnum;
bool is_gobi1k = id->driver_info ? true : false;
int altsetting = -1;
dev_dbg(dev, "Is Gobi 1000 = %d\n", is_gobi1k);
nintf = serial->dev->actconfig->desc.bNumInterfaces;
dev_dbg(dev, "Num Interfaces = %d\n", nintf);
ifnum = intf->desc.bInterfaceNumber;
dev_dbg(dev, "This Interface = %d\n", ifnum);
if (nintf == 1) {
/* QDL mode */
/* Gobi 2000 has a single altsetting, older ones have two */
if (serial->interface->num_altsetting == 2)
intf = &serial->interface->altsetting[1];
else if (serial->interface->num_altsetting > 2)
goto done;
if (intf->desc.bNumEndpoints == 2 &&
usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
dev_dbg(dev, "QDL port found\n");
if (serial->interface->num_altsetting == 1)
retval = 0; /* Success */
else
altsetting = 1;
}
goto done;
}
/* allow any number of interfaces when doing direct interface match */
if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER) {
dev_dbg(dev, "Generic Qualcomm serial interface found\n");
altsetting = 0;
goto done;
}
if (nintf < 3 || (nintf > 4 && nintf != 9)) {
dev_err(dev, "unknown number of interfaces: %d\n", nintf);
goto done;
}
/* default to enabling interface */
altsetting = 0;
/* Composite mode; don't bind to the QMI/net interface as that
* gets handled by other drivers.
*/
if (is_gobi1k) {
/* Gobi 1K USB layout:
* 0: DM/DIAG (use libqcdm from ModemManager for communication)
* 1: serial port (doesn't respond)
* 2: AT-capable modem port
* 3: QMI/net
*/
if (ifnum == 0) {
dev_dbg(dev, "Gobi 1K DM/DIAG interface found\n");
altsetting = 1;
} else if (ifnum == 2)
dev_dbg(dev, "Modem port found\n");
else
altsetting = -1;
} else {
/* Gobi 2K+ USB layout:
* 0: QMI/net
* 1: DM/DIAG (use libqcdm from ModemManager for communication)
* 2: AT-capable modem port
* 3: NMEA
*/
switch (ifnum) {
case 0:
/* Don't claim the QMI/net interface */
altsetting = -1;
break;
case 1:
dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n");
break;
case 2:
dev_dbg(dev, "Modem port found\n");
break;
case 3:
/*
* NMEA (serial line 9600 8N1)
* # echo "\$GPS_START" > /dev/ttyUSBx
* # echo "\$GPS_STOP" > /dev/ttyUSBx
*/
dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n");
break;
}
}
done:
if (altsetting >= 0) {
retval = usb_set_interface(serial->dev, ifnum, altsetting);
if (retval < 0) {
dev_err(dev,
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
}
}
return retval;
}
static int qc_attach(struct usb_serial *serial)
{
struct usb_wwan_intf_private *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
return 0;
}
static void qc_release(struct usb_serial *serial)
{
struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
usb_set_serial_data(serial, NULL);
kfree(priv);
}
static struct usb_serial_driver qcdevice = {
.driver = {
.owner = THIS_MODULE,
.name = "qcserial",
},
.description = "Qualcomm USB modem",
.id_table = id_table,
.num_ports = 1,
.probe = qcprobe,
.open = usb_wwan_open,
.close = usb_wwan_close,
.write = usb_wwan_write,
.write_room = usb_wwan_write_room,
.chars_in_buffer = usb_wwan_chars_in_buffer,
.throttle = usb_wwan_throttle,
.unthrottle = usb_wwan_unthrottle,
.attach = qc_attach,
.release = qc_release,
.port_probe = usb_wwan_port_probe,
.port_remove = usb_wwan_port_remove,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
.resume = usb_wwan_resume,
#endif
};
static struct usb_serial_driver * const serial_drivers[] = {
&qcdevice, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
shashi95/kafkaES | core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java | 3999 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.repositories.verify;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.RepositoryVerificationException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
/**
* Transport action for verifying repository operation
*/
public class TransportVerifyRepositoryAction extends TransportMasterNodeAction<VerifyRepositoryRequest, VerifyRepositoryResponse> {
private final RepositoriesService repositoriesService;
protected final ClusterName clusterName;
@Inject
public TransportVerifyRepositoryAction(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService,
RepositoriesService repositoriesService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, VerifyRepositoryAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, VerifyRepositoryRequest.class);
this.repositoriesService = repositoriesService;
this.clusterName = clusterName;
}
@Override
protected String executor() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
protected VerifyRepositoryResponse newResponse() {
return new VerifyRepositoryResponse();
}
@Override
protected ClusterBlockException checkBlock(VerifyRepositoryRequest request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
}
@Override
protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener<VerifyRepositoryResponse> listener) {
repositoriesService.verifyRepository(request.name(), new ActionListener<RepositoriesService.VerifyResponse>() {
@Override
public void onResponse(RepositoriesService.VerifyResponse verifyResponse) {
if (verifyResponse.failed()) {
listener.onFailure(new RepositoryVerificationException(request.name(), verifyResponse.failureDescription()));
} else {
listener.onResponse(new VerifyRepositoryResponse(clusterName, verifyResponse.nodes()));
}
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
}
}
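// Usage sketch (illustrative; method names follow the 2.x-era admin-cluster
// client API and may differ in other versions):
//
//   VerifyRepositoryResponse response = client.admin().cluster()
//       .prepareVerifyRepository("my_backup")
//       .get();
//   for (DiscoveryNode node : response.getNodes()) {
//       logger.info("repository verified on node [{}]", node.getName());
//   }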
| apache-2.0 |
cnfire/hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java | 12556 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/**
* Helper class to determine hardware related characteristics such as the
* number of processors and the amount of memory on the node.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NodeManagerHardwareUtils {
private static final Log LOG = LogFactory
.getLog(NodeManagerHardwareUtils.class);
/**
*
* Returns the number of CPUs on the node. This value depends on the
* configuration setting which decides whether to count logical processors
* (such as hyperthreads) as cores or not.
*
* @param conf
* - Configuration object
* @return Number of CPUs
*/
public static int getNodeCPUs(Configuration conf) {
ResourceCalculatorPlugin plugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
return NodeManagerHardwareUtils.getNodeCPUs(plugin, conf);
}
/**
*
* Returns the number of CPUs on the node. This value depends on the
* configuration setting which decides whether to count logical processors
* (such as hyperthreads) as cores or not.
*
* @param plugin
* - ResourceCalculatorPlugin object to determine hardware specs
* @param conf
* - Configuration object
* @return Number of CPU cores on the node.
*/
public static int getNodeCPUs(ResourceCalculatorPlugin plugin,
Configuration conf) {
int numProcessors = plugin.getNumProcessors();
boolean countLogicalCores =
conf.getBoolean(YarnConfiguration.NM_COUNT_LOGICAL_PROCESSORS_AS_CORES,
YarnConfiguration.DEFAULT_NM_COUNT_LOGICAL_PROCESSORS_AS_CORES);
if (!countLogicalCores) {
numProcessors = plugin.getNumCores();
}
return numProcessors;
}
/**
*
* Returns the fraction of CPUs that should be used for YARN containers.
* The number is derived based on various configuration params such as
* YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
*
* @param conf
* - Configuration object
* @return Fraction of CPUs to be used for YARN containers
*/
public static float getContainersCPUs(Configuration conf) {
ResourceCalculatorPlugin plugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
return NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
}
/**
*
* Returns the fraction of CPUs that should be used for YARN containers.
* The number is derived based on various configuration params such as
* YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
*
* @param plugin
* - ResourceCalculatorPlugin object to determine hardware specs
* @param conf
* - Configuration object
* @return Fraction of CPUs to be used for YARN containers
*/
public static float getContainersCPUs(ResourceCalculatorPlugin plugin,
Configuration conf) {
int numProcessors = getNodeCPUs(plugin, conf);
int nodeCpuPercentage = getNodeCpuPercentage(conf);
return (nodeCpuPercentage * numProcessors) / 100.0f;
}
/**
* Gets the percentage of physical CPU that is configured for YARN containers.
* This is percent {@literal >} 0 and {@literal <=} 100 based on
* {@link YarnConfiguration#NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT}
* @param conf Configuration object
* @return percent {@literal >} 0 and {@literal <=} 100
*/
public static int getNodeCpuPercentage(Configuration conf) {
int nodeCpuPercentage =
Math.min(conf.getInt(
YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
YarnConfiguration.DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT),
100);
nodeCpuPercentage = Math.max(0, nodeCpuPercentage);
if (nodeCpuPercentage == 0) {
String message =
"Illegal value for "
+ YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
+ ". Value cannot be less than or equal to 0.";
throw new IllegalArgumentException(message);
}
return nodeCpuPercentage;
}
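  /*
   * Worked example (illustrative): with the physical-CPU-limit percentage set
   * to 80 on a 16-processor node, getContainersCPUs() returns
   * (80 * 16) / 100.0f = 12.8 CPUs for containers.
   */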
/**
* Function to return the number of vcores on the system that can be used for
* YARN containers. If a number is specified in the configuration file, then
* that number is returned. If nothing is specified - 1. If the OS is an
* "unknown" OS(one for which we don't have ResourceCalculatorPlugin
* implemented), return the default NodeManager cores. 2. If the config
* variable yarn.nodemanager.cpu.use_logical_processors is set to true, it
* returns the logical processor count(count hyperthreads as cores), else it
* returns the physical cores count.
*
* @param conf
* - the configuration for the NodeManager
* @return the number of cores to be used for YARN containers
*
*/
public static int getVCores(Configuration conf) {
// is this os for which we can determine cores?
ResourceCalculatorPlugin plugin =
ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
return NodeManagerHardwareUtils.getVCores(plugin, conf);
}
/**
* Function to return the number of vcores on the system that can be used for
* YARN containers. If a number is specified in the configuration file, then
* that number is returned. If nothing is specified - 1. If the OS is an
* "unknown" OS(one for which we don't have ResourceCalculatorPlugin
* implemented), return the default NodeManager cores. 2. If the config
* variable yarn.nodemanager.cpu.use_logical_processors is set to true, it
* returns the logical processor count(count hyperthreads as cores), else it
* returns the physical cores count.
*
* @param plugin
* - ResourceCalculatorPlugin object to determine hardware specs
* @param conf
* - the configuration for the NodeManager
* @return the number of cores to be used for YARN containers
*
*/
public static int getVCores(ResourceCalculatorPlugin plugin,
Configuration conf) {
int cores;
boolean hardwareDetectionEnabled =
conf.getBoolean(
YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
YarnConfiguration.DEFAULT_NM_ENABLE_HARDWARE_CAPABILITY_DETECTION);
String message;
if (!hardwareDetectionEnabled || plugin == null) {
cores =
conf.getInt(YarnConfiguration.NM_VCORES,
YarnConfiguration.DEFAULT_NM_VCORES);
if (cores == -1) {
cores = YarnConfiguration.DEFAULT_NM_VCORES;
}
} else {
cores = conf.getInt(YarnConfiguration.NM_VCORES, -1);
if (cores == -1) {
float physicalCores =
NodeManagerHardwareUtils.getContainersCPUs(plugin, conf);
float multiplier =
conf.getFloat(YarnConfiguration.NM_PCORES_VCORES_MULTIPLIER,
YarnConfiguration.DEFAULT_NM_PCORES_VCORES_MULTIPLIER);
if (multiplier > 0) {
float tmp = physicalCores * multiplier;
if (tmp > 0 && tmp < 1) {
// on a single core machine - tmp can be between 0 and 1
cores = 1;
} else {
cores = (int) tmp;
}
} else {
message = "Illegal value for "
+ YarnConfiguration.NM_PCORES_VCORES_MULTIPLIER
+ ". Value must be greater than 0.";
throw new IllegalArgumentException(message);
}
}
}
if(cores <= 0) {
message = "Illegal value for " + YarnConfiguration.NM_VCORES
+ ". Value must be greater than 0.";
throw new IllegalArgumentException(message);
}
return cores;
}
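  /*
   * Worked example (illustrative): with hardware detection enabled, no
   * explicit vcore count configured, 12.8 container CPUs (as in the example
   * above) and an assumed pcores-to-vcores multiplier of 1.0, tmp = 12.8 and
   * getVCores() returns (int) 12.8 = 12 vcores.
   */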
/**
* Function to return how much memory we should set aside for YARN containers.
* If a number is specified in the configuration file, then that number is
* returned. If nothing is specified - 1. If the OS is an "unknown" OS(one for
* which we don't have ResourceCalculatorPlugin implemented), return the
* default NodeManager physical memory. 2. If the OS has a
* ResourceCalculatorPlugin implemented, the calculation is 0.8 * (RAM - 2 *
* JVM-memory) i.e. use 80% of the memory after accounting for memory used by
* the DataNode and the NodeManager. If the number is less than 1GB, log a
* warning message.
*
* @param conf
* - the configuration for the NodeManager
* @return the amount of memory that will be used for YARN containers in MB.
*/
public static int getContainerMemoryMB(Configuration conf) {
return NodeManagerHardwareUtils.getContainerMemoryMB(
ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf), conf);
}
/**
* Function to return how much memory we should set aside for YARN containers.
* If a number is specified in the configuration file, then that number is
* returned. If nothing is specified - 1. If the OS is an "unknown" OS(one for
* which we don't have ResourceCalculatorPlugin implemented), return the
* default NodeManager physical memory. 2. If the OS has a
* ResourceCalculatorPlugin implemented, the calculation is 0.8 * (RAM - 2 *
* JVM-memory) i.e. use 80% of the memory after accounting for memory used by
* the DataNode and the NodeManager. If the number is less than 1GB, log a
* warning message.
*
* @param plugin
* - ResourceCalculatorPlugin object to determine hardware specs
* @param conf
* - the configuration for the NodeManager
* @return the amount of memory that will be used for YARN containers in MB.
*/
public static int getContainerMemoryMB(ResourceCalculatorPlugin plugin,
Configuration conf) {
int memoryMb;
boolean hardwareDetectionEnabled = conf.getBoolean(
YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
YarnConfiguration.DEFAULT_NM_ENABLE_HARDWARE_CAPABILITY_DETECTION);
if (!hardwareDetectionEnabled || plugin == null) {
memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB,
YarnConfiguration.DEFAULT_NM_PMEM_MB);
if (memoryMb == -1) {
memoryMb = YarnConfiguration.DEFAULT_NM_PMEM_MB;
}
} else {
memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, -1);
if (memoryMb == -1) {
int physicalMemoryMB =
(int) (plugin.getPhysicalMemorySize() / (1024 * 1024));
int hadoopHeapSizeMB =
(int) (Runtime.getRuntime().maxMemory() / (1024 * 1024));
int containerPhysicalMemoryMB =
(int) (0.8f * (physicalMemoryMB - (2 * hadoopHeapSizeMB)));
int reservedMemoryMB =
conf.getInt(YarnConfiguration.NM_SYSTEM_RESERVED_PMEM_MB, -1);
if (reservedMemoryMB != -1) {
containerPhysicalMemoryMB = physicalMemoryMB - reservedMemoryMB;
}
if(containerPhysicalMemoryMB <= 0) {
LOG.error("Calculated memory for YARN containers is too low."
+ " Node memory is " + physicalMemoryMB
+ " MB, system reserved memory is "
+ reservedMemoryMB + " MB.");
}
containerPhysicalMemoryMB = Math.max(containerPhysicalMemoryMB, 0);
memoryMb = containerPhysicalMemoryMB;
}
}
if(memoryMb <= 0) {
String message = "Illegal value for " + YarnConfiguration.NM_PMEM_MB
+ ". Value must be greater than 0.";
throw new IllegalArgumentException(message);
}
return memoryMb;
}
}
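/*
 * Worked example (illustrative): on a 65536 MB node with an 8192 MB
 * NodeManager JVM heap, hardware detection enabled and no explicit memory
 * setting, getContainerMemoryMB() computes 0.8f * (65536 - 2 * 8192) =
 * 39321 MB for containers after the int cast; a configured system-reserved
 * value replaces the 0.8 formula entirely.
 */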
| apache-2.0 |
shines77/go | src/strconv/isprint.go | 9915 | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// DO NOT EDIT. GENERATED BY
// go run makeisprint.go -output isprint.go
package strconv
// (470+136+73)*2 + (342)*4 = 2726 bytes
var isPrint16 = []uint16{
0x0020, 0x007e,
0x00a1, 0x0377,
0x037a, 0x037f,
0x0384, 0x0556,
0x0559, 0x058a,
0x058d, 0x05c7,
0x05d0, 0x05ea,
0x05f0, 0x05f4,
0x0606, 0x061b,
0x061e, 0x070d,
0x0710, 0x074a,
0x074d, 0x07b1,
0x07c0, 0x07fa,
0x0800, 0x082d,
0x0830, 0x085b,
0x085e, 0x085e,
0x08a0, 0x08b4,
0x08e3, 0x098c,
0x098f, 0x0990,
0x0993, 0x09b2,
0x09b6, 0x09b9,
0x09bc, 0x09c4,
0x09c7, 0x09c8,
0x09cb, 0x09ce,
0x09d7, 0x09d7,
0x09dc, 0x09e3,
0x09e6, 0x09fb,
0x0a01, 0x0a0a,
0x0a0f, 0x0a10,
0x0a13, 0x0a39,
0x0a3c, 0x0a42,
0x0a47, 0x0a48,
0x0a4b, 0x0a4d,
0x0a51, 0x0a51,
0x0a59, 0x0a5e,
0x0a66, 0x0a75,
0x0a81, 0x0ab9,
0x0abc, 0x0acd,
0x0ad0, 0x0ad0,
0x0ae0, 0x0ae3,
0x0ae6, 0x0af1,
0x0af9, 0x0af9,
0x0b01, 0x0b0c,
0x0b0f, 0x0b10,
0x0b13, 0x0b39,
0x0b3c, 0x0b44,
0x0b47, 0x0b48,
0x0b4b, 0x0b4d,
0x0b56, 0x0b57,
0x0b5c, 0x0b63,
0x0b66, 0x0b77,
0x0b82, 0x0b8a,
0x0b8e, 0x0b95,
0x0b99, 0x0b9f,
0x0ba3, 0x0ba4,
0x0ba8, 0x0baa,
0x0bae, 0x0bb9,
0x0bbe, 0x0bc2,
0x0bc6, 0x0bcd,
0x0bd0, 0x0bd0,
0x0bd7, 0x0bd7,
0x0be6, 0x0bfa,
0x0c00, 0x0c39,
0x0c3d, 0x0c4d,
0x0c55, 0x0c5a,
0x0c60, 0x0c63,
0x0c66, 0x0c6f,
0x0c78, 0x0cb9,
0x0cbc, 0x0ccd,
0x0cd5, 0x0cd6,
0x0cde, 0x0ce3,
0x0ce6, 0x0cf2,
0x0d01, 0x0d3a,
0x0d3d, 0x0d4e,
0x0d57, 0x0d57,
0x0d5f, 0x0d63,
0x0d66, 0x0d75,
0x0d79, 0x0d7f,
0x0d82, 0x0d96,
0x0d9a, 0x0dbd,
0x0dc0, 0x0dc6,
0x0dca, 0x0dca,
0x0dcf, 0x0ddf,
0x0de6, 0x0def,
0x0df2, 0x0df4,
0x0e01, 0x0e3a,
0x0e3f, 0x0e5b,
0x0e81, 0x0e84,
0x0e87, 0x0e8a,
0x0e8d, 0x0e8d,
0x0e94, 0x0ea7,
0x0eaa, 0x0ebd,
0x0ec0, 0x0ecd,
0x0ed0, 0x0ed9,
0x0edc, 0x0edf,
0x0f00, 0x0f6c,
0x0f71, 0x0fda,
0x1000, 0x10c7,
0x10cd, 0x10cd,
0x10d0, 0x124d,
0x1250, 0x125d,
0x1260, 0x128d,
0x1290, 0x12b5,
0x12b8, 0x12c5,
0x12c8, 0x1315,
0x1318, 0x135a,
0x135d, 0x137c,
0x1380, 0x1399,
0x13a0, 0x13f5,
0x13f8, 0x13fd,
0x1400, 0x169c,
0x16a0, 0x16f8,
0x1700, 0x1714,
0x1720, 0x1736,
0x1740, 0x1753,
0x1760, 0x1773,
0x1780, 0x17dd,
0x17e0, 0x17e9,
0x17f0, 0x17f9,
0x1800, 0x180d,
0x1810, 0x1819,
0x1820, 0x1877,
0x1880, 0x18aa,
0x18b0, 0x18f5,
0x1900, 0x192b,
0x1930, 0x193b,
0x1940, 0x1940,
0x1944, 0x196d,
0x1970, 0x1974,
0x1980, 0x19ab,
0x19b0, 0x19c9,
0x19d0, 0x19da,
0x19de, 0x1a1b,
0x1a1e, 0x1a7c,
0x1a7f, 0x1a89,
0x1a90, 0x1a99,
0x1aa0, 0x1aad,
0x1ab0, 0x1abe,
0x1b00, 0x1b4b,
0x1b50, 0x1b7c,
0x1b80, 0x1bf3,
0x1bfc, 0x1c37,
0x1c3b, 0x1c49,
0x1c4d, 0x1c7f,
0x1cc0, 0x1cc7,
0x1cd0, 0x1cf9,
0x1d00, 0x1df5,
0x1dfc, 0x1f15,
0x1f18, 0x1f1d,
0x1f20, 0x1f45,
0x1f48, 0x1f4d,
0x1f50, 0x1f7d,
0x1f80, 0x1fd3,
0x1fd6, 0x1fef,
0x1ff2, 0x1ffe,
0x2010, 0x2027,
0x2030, 0x205e,
0x2070, 0x2071,
0x2074, 0x209c,
0x20a0, 0x20be,
0x20d0, 0x20f0,
0x2100, 0x218b,
0x2190, 0x23fa,
0x2400, 0x2426,
0x2440, 0x244a,
0x2460, 0x2b73,
0x2b76, 0x2b95,
0x2b98, 0x2bb9,
0x2bbd, 0x2bd1,
0x2bec, 0x2bef,
0x2c00, 0x2cf3,
0x2cf9, 0x2d27,
0x2d2d, 0x2d2d,
0x2d30, 0x2d67,
0x2d6f, 0x2d70,
0x2d7f, 0x2d96,
0x2da0, 0x2e42,
0x2e80, 0x2ef3,
0x2f00, 0x2fd5,
0x2ff0, 0x2ffb,
0x3001, 0x3096,
0x3099, 0x30ff,
0x3105, 0x312d,
0x3131, 0x31ba,
0x31c0, 0x31e3,
0x31f0, 0x4db5,
0x4dc0, 0x9fd5,
0xa000, 0xa48c,
0xa490, 0xa4c6,
0xa4d0, 0xa62b,
0xa640, 0xa6f7,
0xa700, 0xa7ad,
0xa7b0, 0xa7b7,
0xa7f7, 0xa82b,
0xa830, 0xa839,
0xa840, 0xa877,
0xa880, 0xa8c4,
0xa8ce, 0xa8d9,
0xa8e0, 0xa8fd,
0xa900, 0xa953,
0xa95f, 0xa97c,
0xa980, 0xa9d9,
0xa9de, 0xaa36,
0xaa40, 0xaa4d,
0xaa50, 0xaa59,
0xaa5c, 0xaac2,
0xaadb, 0xaaf6,
0xab01, 0xab06,
0xab09, 0xab0e,
0xab11, 0xab16,
0xab20, 0xab65,
0xab70, 0xabed,
0xabf0, 0xabf9,
0xac00, 0xd7a3,
0xd7b0, 0xd7c6,
0xd7cb, 0xd7fb,
0xf900, 0xfa6d,
0xfa70, 0xfad9,
0xfb00, 0xfb06,
0xfb13, 0xfb17,
0xfb1d, 0xfbc1,
0xfbd3, 0xfd3f,
0xfd50, 0xfd8f,
0xfd92, 0xfdc7,
0xfdf0, 0xfdfd,
0xfe00, 0xfe19,
0xfe20, 0xfe6b,
0xfe70, 0xfefc,
0xff01, 0xffbe,
0xffc2, 0xffc7,
0xffca, 0xffcf,
0xffd2, 0xffd7,
0xffda, 0xffdc,
0xffe0, 0xffee,
0xfffc, 0xfffd,
}
var isNotPrint16 = []uint16{
0x00ad,
0x038b,
0x038d,
0x03a2,
0x0530,
0x0560,
0x0588,
0x0590,
0x06dd,
0x083f,
0x0984,
0x09a9,
0x09b1,
0x09de,
0x0a04,
0x0a29,
0x0a31,
0x0a34,
0x0a37,
0x0a3d,
0x0a5d,
0x0a84,
0x0a8e,
0x0a92,
0x0aa9,
0x0ab1,
0x0ab4,
0x0ac6,
0x0aca,
0x0b04,
0x0b29,
0x0b31,
0x0b34,
0x0b5e,
0x0b84,
0x0b91,
0x0b9b,
0x0b9d,
0x0bc9,
0x0c04,
0x0c0d,
0x0c11,
0x0c29,
0x0c45,
0x0c49,
0x0c57,
0x0c80,
0x0c84,
0x0c8d,
0x0c91,
0x0ca9,
0x0cb4,
0x0cc5,
0x0cc9,
0x0cdf,
0x0cf0,
0x0d04,
0x0d0d,
0x0d11,
0x0d45,
0x0d49,
0x0d84,
0x0db2,
0x0dbc,
0x0dd5,
0x0dd7,
0x0e83,
0x0e89,
0x0e98,
0x0ea0,
0x0ea4,
0x0ea6,
0x0eac,
0x0eba,
0x0ec5,
0x0ec7,
0x0f48,
0x0f98,
0x0fbd,
0x0fcd,
0x10c6,
0x1249,
0x1257,
0x1259,
0x1289,
0x12b1,
0x12bf,
0x12c1,
0x12d7,
0x1311,
0x1680,
0x170d,
0x176d,
0x1771,
0x191f,
0x1a5f,
0x1cf7,
0x1f58,
0x1f5a,
0x1f5c,
0x1f5e,
0x1fb5,
0x1fc5,
0x1fdc,
0x1ff5,
0x208f,
0x2bc9,
0x2c2f,
0x2c5f,
0x2d26,
0x2da7,
0x2daf,
0x2db7,
0x2dbf,
0x2dc7,
0x2dcf,
0x2dd7,
0x2ddf,
0x2e9a,
0x3040,
0x318f,
0x321f,
0x32ff,
0xa9ce,
0xa9ff,
0xab27,
0xab2f,
0xfb37,
0xfb3d,
0xfb3f,
0xfb42,
0xfb45,
0xfe53,
0xfe67,
0xfe75,
0xffe7,
}
var isPrint32 = []uint32{
0x010000, 0x01004d,
0x010050, 0x01005d,
0x010080, 0x0100fa,
0x010100, 0x010102,
0x010107, 0x010133,
0x010137, 0x01018c,
0x010190, 0x01019b,
0x0101a0, 0x0101a0,
0x0101d0, 0x0101fd,
0x010280, 0x01029c,
0x0102a0, 0x0102d0,
0x0102e0, 0x0102fb,
0x010300, 0x010323,
0x010330, 0x01034a,
0x010350, 0x01037a,
0x010380, 0x0103c3,
0x0103c8, 0x0103d5,
0x010400, 0x01049d,
0x0104a0, 0x0104a9,
0x010500, 0x010527,
0x010530, 0x010563,
0x01056f, 0x01056f,
0x010600, 0x010736,
0x010740, 0x010755,
0x010760, 0x010767,
0x010800, 0x010805,
0x010808, 0x010838,
0x01083c, 0x01083c,
0x01083f, 0x01089e,
0x0108a7, 0x0108af,
0x0108e0, 0x0108f5,
0x0108fb, 0x01091b,
0x01091f, 0x010939,
0x01093f, 0x01093f,
0x010980, 0x0109b7,
0x0109bc, 0x0109cf,
0x0109d2, 0x010a06,
0x010a0c, 0x010a33,
0x010a38, 0x010a3a,
0x010a3f, 0x010a47,
0x010a50, 0x010a58,
0x010a60, 0x010a9f,
0x010ac0, 0x010ae6,
0x010aeb, 0x010af6,
0x010b00, 0x010b35,
0x010b39, 0x010b55,
0x010b58, 0x010b72,
0x010b78, 0x010b91,
0x010b99, 0x010b9c,
0x010ba9, 0x010baf,
0x010c00, 0x010c48,
0x010c80, 0x010cb2,
0x010cc0, 0x010cf2,
0x010cfa, 0x010cff,
0x010e60, 0x010e7e,
0x011000, 0x01104d,
0x011052, 0x01106f,
0x01107f, 0x0110c1,
0x0110d0, 0x0110e8,
0x0110f0, 0x0110f9,
0x011100, 0x011143,
0x011150, 0x011176,
0x011180, 0x0111cd,
0x0111d0, 0x0111f4,
0x011200, 0x01123d,
0x011280, 0x0112a9,
0x0112b0, 0x0112ea,
0x0112f0, 0x0112f9,
0x011300, 0x01130c,
0x01130f, 0x011310,
0x011313, 0x011339,
0x01133c, 0x011344,
0x011347, 0x011348,
0x01134b, 0x01134d,
0x011350, 0x011350,
0x011357, 0x011357,
0x01135d, 0x011363,
0x011366, 0x01136c,
0x011370, 0x011374,
0x011480, 0x0114c7,
0x0114d0, 0x0114d9,
0x011580, 0x0115b5,
0x0115b8, 0x0115dd,
0x011600, 0x011644,
0x011650, 0x011659,
0x011680, 0x0116b7,
0x0116c0, 0x0116c9,
0x011700, 0x011719,
0x01171d, 0x01172b,
0x011730, 0x01173f,
0x0118a0, 0x0118f2,
0x0118ff, 0x0118ff,
0x011ac0, 0x011af8,
0x012000, 0x012399,
0x012400, 0x012474,
0x012480, 0x012543,
0x013000, 0x01342e,
0x014400, 0x014646,
0x016800, 0x016a38,
0x016a40, 0x016a69,
0x016a6e, 0x016a6f,
0x016ad0, 0x016aed,
0x016af0, 0x016af5,
0x016b00, 0x016b45,
0x016b50, 0x016b77,
0x016b7d, 0x016b8f,
0x016f00, 0x016f44,
0x016f50, 0x016f7e,
0x016f8f, 0x016f9f,
0x01b000, 0x01b001,
0x01bc00, 0x01bc6a,
0x01bc70, 0x01bc7c,
0x01bc80, 0x01bc88,
0x01bc90, 0x01bc99,
0x01bc9c, 0x01bc9f,
0x01d000, 0x01d0f5,
0x01d100, 0x01d126,
0x01d129, 0x01d172,
0x01d17b, 0x01d1e8,
0x01d200, 0x01d245,
0x01d300, 0x01d356,
0x01d360, 0x01d371,
0x01d400, 0x01d49f,
0x01d4a2, 0x01d4a2,
0x01d4a5, 0x01d4a6,
0x01d4a9, 0x01d50a,
0x01d50d, 0x01d546,
0x01d54a, 0x01d6a5,
0x01d6a8, 0x01d7cb,
0x01d7ce, 0x01da8b,
0x01da9b, 0x01daaf,
0x01e800, 0x01e8c4,
0x01e8c7, 0x01e8d6,
0x01ee00, 0x01ee24,
0x01ee27, 0x01ee3b,
0x01ee42, 0x01ee42,
0x01ee47, 0x01ee54,
0x01ee57, 0x01ee64,
0x01ee67, 0x01ee9b,
0x01eea1, 0x01eebb,
0x01eef0, 0x01eef1,
0x01f000, 0x01f02b,
0x01f030, 0x01f093,
0x01f0a0, 0x01f0ae,
0x01f0b1, 0x01f0f5,
0x01f100, 0x01f10c,
0x01f110, 0x01f16b,
0x01f170, 0x01f19a,
0x01f1e6, 0x01f202,
0x01f210, 0x01f23a,
0x01f240, 0x01f248,
0x01f250, 0x01f251,
0x01f300, 0x01f6d0,
0x01f6e0, 0x01f6ec,
0x01f6f0, 0x01f6f3,
0x01f700, 0x01f773,
0x01f780, 0x01f7d4,
0x01f800, 0x01f80b,
0x01f810, 0x01f847,
0x01f850, 0x01f859,
0x01f860, 0x01f887,
0x01f890, 0x01f8ad,
0x01f910, 0x01f918,
0x01f980, 0x01f984,
0x01f9c0, 0x01f9c0,
0x020000, 0x02a6d6,
0x02a700, 0x02b734,
0x02b740, 0x02b81d,
0x02b820, 0x02cea1,
0x02f800, 0x02fa1d,
0x0e0100, 0x0e01ef,
}
var isNotPrint32 = []uint16{ // add 0x10000 to each entry
0x000c,
0x0027,
0x003b,
0x003e,
0x039e,
0x0809,
0x0836,
0x0856,
0x08f3,
0x0a04,
0x0a14,
0x0a18,
0x10bd,
0x1135,
0x11e0,
0x1212,
0x1287,
0x1289,
0x128e,
0x129e,
0x1304,
0x1329,
0x1331,
0x1334,
0x246f,
0x6a5f,
0x6b5a,
0x6b62,
0xd455,
0xd49d,
0xd4ad,
0xd4ba,
0xd4bc,
0xd4c4,
0xd506,
0xd515,
0xd51d,
0xd53a,
0xd53f,
0xd545,
0xd551,
0xdaa0,
0xee04,
0xee20,
0xee23,
0xee28,
0xee33,
0xee38,
0xee3a,
0xee48,
0xee4a,
0xee4c,
0xee50,
0xee53,
0xee58,
0xee5a,
0xee5c,
0xee5e,
0xee60,
0xee63,
0xee6b,
0xee73,
0xee78,
0xee7d,
0xee7f,
0xee8a,
0xeea4,
0xeeaa,
0xf0c0,
0xf0d0,
0xf12f,
0xf57a,
0xf5a4,
}
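// Lookup sketch (illustrative, not part of the generated output): strconv's
// IsPrint does a binary search over these inclusive [lo, hi] range pairs and
// then rules out the isNotPrint exceptions. A minimal equivalent for the
// 16-bit tables, using sort.Search in place of strconv's internal bsearch16:
//
//	func isPrint16Lookup(r uint16) bool {
//		i := sort.Search(len(isPrint16)/2, func(k int) bool {
//			return r <= isPrint16[2*k+1]
//		})
//		if 2*i >= len(isPrint16) || r < isPrint16[2*i] {
//			return false
//		}
//		j := sort.Search(len(isNotPrint16), func(k int) bool {
//			return r <= isNotPrint16[k]
//		})
//		return j >= len(isNotPrint16) || isNotPrint16[j] != r
//	}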
| bsd-3-clause |
AppConcur/islacart | sites/all/themes/marketplace/vendor/uikit/js/components/notify.js | 4965 | /*! UIkit 2.13.1 | http://www.getuikit.com | (c) 2014 YOOtheme | MIT License */
(function(addon) {
var component;
if (jQuery && UIkit) {
component = addon(jQuery, UIkit);
}
if (typeof define == "function" && define.amd) {
define("uikit-notify", ["uikit"], function(){
return component || addon(jQuery, UIkit);
});
}
})(function($, UI){
"use strict";
var containers = {},
messages = {},
notify = function(options){
if ($.type(options) == 'string') {
options = { message: options };
}
if (arguments[1]) {
options = $.extend(options, $.type(arguments[1]) == 'string' ? {status:arguments[1]} : arguments[1]);
}
return (new Message(options)).show();
},
closeAll = function(group, instantly){
var id;
if (group) {
for(id in messages) { if(group===messages[id].group) messages[id].close(instantly); }
} else {
for(id in messages) { messages[id].close(instantly); }
}
};
var Message = function(options){
var $this = this;
this.options = $.extend({}, Message.defaults, options);
this.uuid = UI.Utils.uid("notifymsg");
this.element = UI.$([
'<div class="@-notify-message">',
'<a class="@-close"></a>',
'<div></div>',
'</div>'
].join('')).data("notifyMessage", this);
this.content(this.options.message);
// status
if (this.options.status) {
this.element.addClass('@-notify-message-'+this.options.status);
this.currentstatus = this.options.status;
}
this.group = this.options.group;
messages[this.uuid] = this;
if(!containers[this.options.pos]) {
containers[this.options.pos] = UI.$('<div class="@-notify @-notify-'+this.options.pos+'"></div>').appendTo('body').on("click", UI.prefix(".@-notify-message"), function(){
UI.$(this).data("notifyMessage").close();
});
}
};
$.extend(Message.prototype, {
uuid: false,
element: false,
        timeout: false,
currentstatus: "",
group: false,
show: function() {
if (this.element.is(":visible")) return;
var $this = this;
containers[this.options.pos].show().prepend(this.element);
var marginbottom = parseInt(this.element.css("margin-bottom"), 10);
this.element.css({"opacity":0, "margin-top": -1*this.element.outerHeight(), "margin-bottom":0}).animate({"opacity":1, "margin-top": 0, "margin-bottom":marginbottom}, function(){
if ($this.options.timeout) {
var closefn = function(){ $this.close(); };
$this.timeout = setTimeout(closefn, $this.options.timeout);
$this.element.hover(
function() { clearTimeout($this.timeout); },
function() { $this.timeout = setTimeout(closefn, $this.options.timeout); }
);
}
});
return this;
},
close: function(instantly) {
var $this = this,
finalize = function(){
$this.element.remove();
if(!containers[$this.options.pos].children().length) {
containers[$this.options.pos].hide();
}
$this.options.onClose.apply($this, []);
delete messages[$this.uuid];
};
if (this.timeout) clearTimeout(this.timeout);
if (instantly) {
finalize();
} else {
this.element.animate({"opacity":0, "margin-top": -1* this.element.outerHeight(), "margin-bottom":0}, function(){
finalize();
});
}
},
content: function(html){
var container = this.element.find(">div");
if(!html) {
return container.html();
}
container.html(html);
return this;
},
status: function(status) {
if (!status) {
return this.currentstatus;
}
this.element.removeClass('@-notify-message-'+this.currentstatus).addClass('@-notify-message-'+status);
this.currentstatus = status;
return this;
}
});
Message.defaults = {
message: "",
status: "",
timeout: 5000,
group: null,
pos: 'top-center',
onClose: function() {}
};
UI.notify = notify;
UI.notify.message = Message;
UI.notify.closeAll = closeAll;
return notify;
});
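/*
 * Usage sketch (the API as registered above; exposed as UIkit.notify on a
 * page that loads this addon):
 *
 *   UIkit.notify("Saved!", {status: 'success', timeout: 3000, pos: 'top-right'});
 *   UIkit.notify("Something went wrong", 'danger'); // status shorthand
 *   UIkit.notify.closeAll('uploads', true);         // close a group instantly
 */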
| gpl-2.0 |
effortlesssites/template | tmp/com_akeeba-3.9.2-core/fof/form/header/published.php | 1276 | <?php
/**
* @package FrameworkOnFramework
* @subpackage form
* @copyright Copyright (C) 2010 - 2012 Akeeba Ltd. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE.txt
*/
// Protect from unauthorized access
defined('_JEXEC') or die;
/**
* Field header for Published (enabled) columns
*
* @package FrameworkOnFramework
* @since 2.0
*/
class FOFFormHeaderPublished extends FOFFormHeaderFieldselectable
{
/**
* Create objects for the options
*
* @return array The array of option objects
*/
protected function getOptions()
{
$config = array(
'published' => 1,
'unpublished' => 1,
'archived' => 0,
'trash' => 0,
'all' => 0,
);
if ($this->element['show_published'] == 'false')
{
$config['published'] = 0;
}
if ($this->element['show_unpublished'] == 'false')
{
$config['unpublished'] = 0;
}
if ($this->element['show_archived'] == 'true')
{
$config['archived'] = 1;
}
if ($this->element['show_trash'] == 'true')
{
$config['trash'] = 1;
}
if ($this->element['show_all'] == 'true')
{
$config['all'] = 1;
}
$options = JHtml::_('jgrid.publishedOptions', $config);
reset($options);
return $options;
}
}
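/*
 * Usage sketch (illustrative): in an FOF form XML definition this header type
 * is selected by its type attribute, with the show_* flags parsed above, e.g.
 *
 * <header name="enabled" type="published" show_archived="true" show_trash="true" />
 */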
| gpl-2.0 |
africallshop/africallshop-iphone | submodules/externals/polarssl/include/polarssl/openssl.h | 5039 | /**
* \file openssl.h
*
* \brief OpenSSL wrapper (definitions, inline functions).
*
* Copyright (C) 2006-2010, Brainspark B.V.
*
* This file is part of PolarSSL (http://www.polarssl.org)
* Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org>
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
* OpenSSL wrapper contributed by David Barett
*/
#ifndef POLARSSL_OPENSSL_H
#define POLARSSL_OPENSSL_H
#include "aes.h"
#include "md5.h"
#include "rsa.h"
#include "sha1.h"
#define AES_SIZE 16
#define AES_BLOCK_SIZE 16
#define AES_KEY aes_context
#define MD5_CTX md5_context
#define SHA_CTX sha1_context
#define SHA1_Init( CTX ) \
sha1_starts( (CTX) )
#define SHA1_Update( CTX, BUF, LEN ) \
sha1_update( (CTX), (unsigned char *)(BUF), (LEN) )
#define SHA1_Final( OUT, CTX ) \
sha1_finish( (CTX), (OUT) )
#define MD5_Init( CTX ) \
md5_starts( (CTX) )
#define MD5_Update( CTX, BUF, LEN ) \
md5_update( (CTX), (unsigned char *)(BUF), (LEN) )
#define MD5_Final( OUT, CTX ) \
md5_finish( (CTX), (OUT) )
#define AES_set_encrypt_key( KEY, KEYSIZE, CTX ) \
aes_setkey_enc( (CTX), (KEY), (KEYSIZE) )
#define AES_set_decrypt_key( KEY, KEYSIZE, CTX ) \
aes_setkey_dec( (CTX), (KEY), (KEYSIZE) )
#define AES_cbc_encrypt( INPUT, OUTPUT, LEN, CTX, IV, MODE ) \
aes_crypt_cbc( (CTX), (MODE), (LEN), (IV), (INPUT), (OUTPUT) )
/*
* RSA stuff follows. TODO: needs cleanup
*/
inline int __RSA_Passthrough( void *output, void *input, int size )
{
memcpy( output, input, size );
return size;
}
inline rsa_context* d2i_RSA_PUBKEY( void *ignore, unsigned char **bufptr,
int len )
{
unsigned char *buffer = *(unsigned char **) bufptr;
rsa_context *rsa;
/*
* Not a general-purpose parser: only parses public key from *exactly*
* openssl genrsa -out privkey.pem 512 (or 1024)
* openssl rsa -in privkey.pem -out privatekey.der -outform der
* openssl rsa -in privkey.pem -out pubkey.der -outform der -pubout
*
     * TODO: make a general-purpose parser
*/
if( ignore != 0 || ( len != 94 && len != 162 ) )
return( 0 );
    rsa = (rsa_context *) malloc( sizeof( rsa_context ) );
if( rsa == NULL )
return( 0 );
memset( rsa, 0, sizeof( rsa_context ) );
if( ( len == 94 &&
mpi_read_binary( &rsa->N, &buffer[ 25], 64 ) == 0 &&
mpi_read_binary( &rsa->E, &buffer[ 91], 3 ) == 0 ) ||
( len == 162 &&
mpi_read_binary( &rsa->N, &buffer[ 29], 128 ) == 0 ) &&
mpi_read_binary( &rsa->E, &buffer[159], 3 ) == 0 )
{
/*
* key read successfully
*/
rsa->len = ( mpi_msb( &rsa->N ) + 7 ) >> 3;
return( rsa );
}
else
{
memset( rsa, 0, sizeof( rsa_context ) );
free( rsa );
return( 0 );
}
}
#define RSA rsa_context
#define RSA_PKCS1_PADDING 1 /* ignored; always encrypt with this */
#define RSA_size( CTX ) (CTX)->len
#define RSA_free( CTX ) rsa_free( CTX )
#define ERR_get_error( ) "ERR_get_error() not supported"
#define RSA_blinding_off( IGNORE )
#define d2i_RSAPrivateKey( a, b, c ) new rsa_context /* TODO: C++ bleh */
inline int RSA_public_decrypt( int size, unsigned char* input, unsigned char* output, RSA* key, int ignore )
{
    int outsize = size;
    if( !rsa_pkcs1_decrypt( key, RSA_PUBLIC, &outsize, input, output ) )
        return outsize;
    else
        return -1;
}
inline int RSA_private_decrypt( int size, unsigned char* input, unsigned char* output, RSA* key, int ignore )
{
    int outsize = size;
    if( !rsa_pkcs1_decrypt( key, RSA_PRIVATE, &outsize, input, output ) )
        return outsize;
    else
        return -1;
}
inline int RSA_public_encrypt( int size, unsigned char* input, unsigned char* output, RSA* key, int ignore )
{
    if( !rsa_pkcs1_encrypt( key, RSA_PUBLIC, size, input, output ) )
        return RSA_size( key );
    else
        return -1;
}
inline int RSA_private_encrypt( int size, unsigned char* input, unsigned char* output, RSA* key, int ignore )
{
    if( !rsa_pkcs1_encrypt( key, RSA_PRIVATE, size, input, output ) )
        return RSA_size( key );
    else
        return -1;
}
#ifdef __cplusplus
}
#endif
#endif /* openssl.h */
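/*
 * Usage sketch (illustrative): code written against the OpenSSL digest API
 * compiles unchanged against the wrappers above, e.g.
 *
 *   SHA_CTX ctx;
 *   unsigned char digest[20];
 *
 *   SHA1_Init( &ctx );
 *   SHA1_Update( &ctx, "abc", 3 );
 *   SHA1_Final( digest, &ctx );
 */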
| gpl-2.0 |
zarboz/XBMC-PVR-mac | tools/darwin/depends/samba/samba-3.6.6/source4/ntvfs/posix/pvfs_seek.c | 1663 | /*
Unix SMB/CIFS implementation.
POSIX NTVFS backend - seek
Copyright (C) Andrew Tridgell 2004
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "vfs_posix.h"
/*
seek in a file
*/
NTSTATUS pvfs_seek(struct ntvfs_module_context *ntvfs,
struct ntvfs_request *req,
union smb_seek *io)
{
struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
struct pvfs_state);
struct pvfs_file *f;
struct pvfs_file_handle *h;
NTSTATUS status;
f = pvfs_find_fd(pvfs, req, io->lseek.in.file.ntvfs);
if (!f) {
return NT_STATUS_INVALID_HANDLE;
}
h = f->handle;
status = NT_STATUS_OK;
switch (io->lseek.in.mode) {
case SEEK_MODE_START:
h->seek_offset = io->lseek.in.offset;
break;
case SEEK_MODE_CURRENT:
h->seek_offset += io->lseek.in.offset;
break;
case SEEK_MODE_END:
status = pvfs_resolve_name_fd(pvfs, h->fd, h->name, PVFS_RESOLVE_NO_OPENDB);
h->seek_offset = h->name->st.st_size + io->lseek.in.offset;
break;
}
io->lseek.out.offset = h->seek_offset;
return status;
}
| gpl-2.0 |
rperier/linux-rockchip | arch/x86/tools/Makefile | 1929 | # SPDX-License-Identifier: GPL-2.0
PHONY += posttest
ifeq ($(KBUILD_VERBOSE),1)
posttest_verbose = -v
else
posttest_verbose =
endif
ifeq ($(CONFIG_64BIT),y)
posttest_64bit = -y
else
posttest_64bit = -n
endif
reformatter = $(srctree)/arch/x86/tools/objdump_reformat.awk
chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
quiet_cmd_posttest = TEST $@
cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(reformatter) | $(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
quiet_cmd_sanitytest = TEST $@
cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000
posttest: $(obj)/insn_decoder_test vmlinux $(obj)/insn_sanity
$(call cmd,posttest)
$(call cmd,sanitytest)
hostprogs += insn_decoder_test insn_sanity
# -I needed for generated C source and C source which in the kernel tree.
HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(srctree)/tools/arch/x86/lib/ -I$(srctree)/tools/arch/x86/include/ -I$(objtree)/arch/x86/lib/
HOSTCFLAGS_insn_sanity.o := -Wall -I$(srctree)/tools/arch/x86/lib/ -I$(srctree)/tools/arch/x86/include/ -I$(objtree)/arch/x86/lib/
# Dependencies are also needed.
$(obj)/insn_decoder_test.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tools/arch/x86/lib/inat.c $(srctree)/tools/arch/x86/include/asm/inat_types.h $(srctree)/tools/arch/x86/include/asm/inat.h $(srctree)/tools/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
$(obj)/insn_sanity.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tools/arch/x86/lib/inat.c $(srctree)/tools/arch/x86/include/asm/inat_types.h $(srctree)/tools/arch/x86/include/asm/inat.h $(srctree)/tools/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs += relocs
relocs-objs := relocs_32.o relocs_64.o relocs_common.o
PHONY += relocs
relocs: $(obj)/relocs
@:
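# Usage sketch (illustrative): per the arch help text, the decoder selftest is
# run from the top of an x86 kernel tree once vmlinux has been built, e.g.:
#
#   make posttest V=1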
| gpl-2.0 |
BrennanConroy/corefx | src/System.Globalization/tests/CompareInfo/CompareInfoTests.IsSuffix.cs | 6686 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Collections.Generic;
using Xunit;
namespace System.Globalization.Tests
{
public class CompareInfoIsSuffixTests
{
private static CompareInfo s_invariantCompare = CultureInfo.InvariantCulture.CompareInfo;
private static CompareInfo s_hungarianCompare = new CultureInfo("hu-HU").CompareInfo;
private static CompareInfo s_turkishCompare = new CultureInfo("tr-TR").CompareInfo;
public static IEnumerable<object[]> IsSuffix_TestData()
{
// Empty strings
yield return new object[] { s_invariantCompare, "foo", "", CompareOptions.None, true };
yield return new object[] { s_invariantCompare, "", "", CompareOptions.None, true };
// Long strings
yield return new object[] { s_invariantCompare, new string('a', 5555), "aaaaaaaaaaaaaaa", CompareOptions.None, true };
yield return new object[] { s_invariantCompare, new string('a', 5555), new string('a', 5000), CompareOptions.None, true };
yield return new object[] { s_invariantCompare, new string('a', 5555), new string('a', 5000) + "b", CompareOptions.None, false };
// Hungarian
yield return new object[] { s_hungarianCompare, "foobardzsdzs", "rddzs", CompareOptions.Ordinal, false };
yield return new object[] { s_invariantCompare, "foobardzsdzs", "rddzs", CompareOptions.None, false };
yield return new object[] { s_invariantCompare, "foobardzsdzs", "rddzs", CompareOptions.Ordinal, false };
// Turkish
yield return new object[] { s_turkishCompare, "Hi", "I", CompareOptions.None, false };
yield return new object[] { s_turkishCompare, "Hi", "I", CompareOptions.IgnoreCase, false };
yield return new object[] { s_turkishCompare, "Hi", "\u0130", CompareOptions.None, false };
yield return new object[] { s_turkishCompare, "Hi", "\u0130", CompareOptions.IgnoreCase, true };
yield return new object[] { s_invariantCompare, "Hi", "I", CompareOptions.None, false };
yield return new object[] { s_invariantCompare, "Hi", "I", CompareOptions.IgnoreCase, true };
yield return new object[] { s_invariantCompare, "Hi", "\u0130", CompareOptions.None, false };
yield return new object[] { s_invariantCompare, "Hi", "\u0130", CompareOptions.IgnoreCase, false };
// Unicode
yield return new object[] { s_invariantCompare, "Exhibit \u00C0", "A\u0300", CompareOptions.None, true };
yield return new object[] { s_invariantCompare, "Exhibit \u00C0", "A\u0300", CompareOptions.Ordinal, false };
yield return new object[] { s_invariantCompare, "Exhibit \u00C0", "a\u0300", CompareOptions.None, false };
yield return new object[] { s_invariantCompare, "Exhibit \u00C0", "a\u0300", CompareOptions.IgnoreCase, true };
yield return new object[] { s_invariantCompare, "Exhibit \u00C0", "a\u0300", CompareOptions.Ordinal, false };
yield return new object[] { s_invariantCompare, "Exhibit \u00C0", "a\u0300", CompareOptions.OrdinalIgnoreCase, false };
yield return new object[] { s_invariantCompare, "FooBar", "Foo\u0400Bar", CompareOptions.Ordinal, false };
yield return new object[] { s_invariantCompare, "FooBA\u0300R", "FooB\u00C0R", CompareOptions.IgnoreNonSpace, true };
// Ignore symbols
yield return new object[] { s_invariantCompare, "More Test's", "Tests", CompareOptions.IgnoreSymbols, true };
yield return new object[] { s_invariantCompare, "More Test's", "Tests", CompareOptions.None, false };
// Platform differences
yield return new object[] { s_hungarianCompare, "foobardzsdzs", "rddzs", CompareOptions.None, PlatformDetection.IsWindows ? true : false };
}
[Theory]
[MemberData(nameof(IsSuffix_TestData))]
public void IsSuffix(CompareInfo compareInfo, string source, string value, CompareOptions options, bool expected)
{
if (options == CompareOptions.None)
{
Assert.Equal(expected, compareInfo.IsSuffix(source, value));
}
Assert.Equal(expected, compareInfo.IsSuffix(source, value, options));
}
[Fact]
public void IsSuffix_UnassignedUnicode()
{
bool result = PlatformDetection.IsWindows ? true : false;
IsSuffix(s_invariantCompare, "FooBar", "Foo\uFFFFBar", CompareOptions.None, result);
IsSuffix(s_invariantCompare, "FooBar", "Foo\uFFFFBar", CompareOptions.IgnoreNonSpace, result);
}
[Fact]
public void IsSuffix_Invalid()
{
// Source is null
AssertExtensions.Throws<ArgumentNullException>("source", () => s_invariantCompare.IsSuffix(null, ""));
AssertExtensions.Throws<ArgumentNullException>("source", () => s_invariantCompare.IsSuffix(null, "", CompareOptions.None));
            // Suffix is null
AssertExtensions.Throws<ArgumentNullException>("suffix", () => s_invariantCompare.IsSuffix("", null));
AssertExtensions.Throws<ArgumentNullException>("suffix", () => s_invariantCompare.IsSuffix("", null, CompareOptions.None));
            // Source and suffix are null
AssertExtensions.Throws<ArgumentNullException>("source", () => s_invariantCompare.IsSuffix(null, null));
AssertExtensions.Throws<ArgumentNullException>("source", () => s_invariantCompare.IsSuffix(null, null, CompareOptions.None));
// Options are invalid
AssertExtensions.Throws<ArgumentException>("options", () => s_invariantCompare.IsSuffix("Test's", "Tests", CompareOptions.StringSort));
AssertExtensions.Throws<ArgumentException>("options", () => s_invariantCompare.IsSuffix("Test's", "Tests", CompareOptions.Ordinal | CompareOptions.IgnoreWidth));
AssertExtensions.Throws<ArgumentException>("options", () => s_invariantCompare.IsSuffix("Test's", "Tests", CompareOptions.OrdinalIgnoreCase | CompareOptions.IgnoreWidth));
AssertExtensions.Throws<ArgumentException>("options", () => s_invariantCompare.IsSuffix("Test's", "Tests", (CompareOptions)(-1)));
AssertExtensions.Throws<ArgumentException>("options", () => s_invariantCompare.IsSuffix("Test's", "Tests", (CompareOptions)0x11111111));
}
}
}
| mit |
fwmiller/Conserver-Freescale-Linux-U-boot | rpm/BUILD/linux-3.0.35/drivers/net/wireless/ath6kl/os/linux/include/config_linux.h | 1177 | /*
* Copyright (c) 2004-2007 Atheros Communications Inc.
* All rights reserved.
*
*
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation;
//
// Software distributed under the License is distributed on an "AS
// IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
// implied. See the License for the specific language governing
// rights and limitations under the License.
//
//
*
*/
#ifndef _CONFIG_LINUX_H_
#define _CONFIG_LINUX_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <linux/version.h>
/*
* Host-side GPIO support is optional.
* If run-time access to GPIO pins is not required, then
* this should be changed to #undef.
*/
#define CONFIG_HOST_GPIO_SUPPORT
/*
* Host side Test Command support
*/
#define CONFIG_HOST_TCMD_SUPPORT
#define USE_4BYTE_REGISTER_ACCESS
/* Host-side support for Target-side profiling */
#undef CONFIG_TARGET_PROFILE_SUPPORT
/* IP/TCP checksum offload */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#define CONFIG_CHECKSUM_OFFLOAD
#endif
#ifdef __cplusplus
}
#endif
#endif
| gpl-2.0 |
TRex22/xbmc | xbmc/visualizations/Vortex/angelscript/docs/manual/functions_func_0x70.html | 2389 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
<title>AngelScript: Class Members - Functions</title>
<link href="tabs.css" rel="stylesheet" type="text/css">
<link href="doxygen.css" rel="stylesheet" type="text/css">
</head><body>
<!-- Generated by Doxygen 1.5.9 -->
<div class="tabs">
<ul>
<li><a href="functions.html"><span>All</span></a></li>
<li class="current"><a href="functions_func.html"><span>Functions</span></a></li>
<li><a href="functions_vars.html"><span>Variables</span></a></li>
</ul>
</div>
<div class="tabs">
<ul>
<li><a href="functions_func.html#index_a"><span>a</span></a></li>
<li><a href="functions_func_0x62.html#index_b"><span>b</span></a></li>
<li><a href="functions_func_0x63.html#index_c"><span>c</span></a></li>
<li><a href="functions_func_0x64.html#index_d"><span>d</span></a></li>
<li><a href="functions_func_0x65.html#index_e"><span>e</span></a></li>
<li><a href="functions_func_0x67.html#index_g"><span>g</span></a></li>
<li><a href="functions_func_0x69.html#index_i"><span>i</span></a></li>
<li><a href="functions_func_0x6c.html#index_l"><span>l</span></a></li>
<li><a href="functions_func_0x6e.html#index_n"><span>n</span></a></li>
<li class="current"><a href="functions_func_0x70.html#index_p"><span>p</span></a></li>
<li><a href="functions_func_0x72.html#index_r"><span>r</span></a></li>
<li><a href="functions_func_0x73.html#index_s"><span>s</span></a></li>
<li><a href="functions_func_0x75.html#index_u"><span>u</span></a></li>
<li><a href="functions_func_0x77.html#index_w"><span>w</span></a></li>
</ul>
</div>
<div class="contents">
<p>
<h3><a class="anchor" name="index_p">- p -</a></h3><ul>
<li>ParseToken()
: <a class="el" href="classas_i_script_engine.html#b0926f43ddecf1ceb0788102e099186a">asIScriptEngine</a>
<li>Prepare()
: <a class="el" href="classas_i_script_context.html#d048f847854cc8b95fde0380a8b39d7e">asIScriptContext</a>
</ul>
</div>
<hr size="1"><address style="text-align: right;"><small>Generated on Wed Dec 16 19:34:51 2009 for AngelScript by
<a href="http://www.doxygen.org/index.html">
<img src="doxygen.png" alt="doxygen" align="middle" border="0"></a> 1.5.9 </small></address>
</body>
</html>
| gpl-2.0 |
BPI-SINOVOIP/BPI-Mainline-kernel | linux-4.14/drivers/scsi/aic94xx/aic94xx_init.c | 27747 | /*
* Aic94xx SAS/SATA driver initialization.
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
*
* This file is licensed under GPLv2.
*
* This file is part of the aic94xx driver.
*
* The aic94xx driver is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* The aic94xx driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with the aic94xx driver; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include "aic94xx.h"
#include "aic94xx_reg.h"
#include "aic94xx_hwi.h"
#include "aic94xx_seq.h"
#include "aic94xx_sds.h"
/* The format is "version.release.patchlevel" */
#define ASD_DRIVER_VERSION "1.0.3"
static int use_msi = 0;
module_param_named(use_msi, use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "\n"
"\tEnable(1) or disable(0) using PCI MSI.\n"
"\tDefault: 0");
static struct scsi_transport_template *aic94xx_transport_template;
static int asd_scan_finished(struct Scsi_Host *, unsigned long);
static void asd_scan_start(struct Scsi_Host *);
static struct scsi_host_template aic94xx_sht = {
.module = THIS_MODULE,
/* .name is initialized */
.name = "aic94xx",
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = asd_scan_finished,
.scan_start = asd_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.track_queue_depth = 1,
};
static int asd_map_memio(struct asd_ha_struct *asd_ha)
{
int err, i;
struct asd_ha_addrspace *io_handle;
asd_ha->iospace = 0;
for (i = 0; i < 3; i += 2) {
io_handle = &asd_ha->io_handle[i==0?0:1];
io_handle->start = pci_resource_start(asd_ha->pcidev, i);
io_handle->len = pci_resource_len(asd_ha->pcidev, i);
io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
err = -ENODEV;
if (!io_handle->start || !io_handle->len) {
asd_printk("MBAR%d start or length for %s is 0.\n",
i==0?0:1, pci_name(asd_ha->pcidev));
goto Err;
}
err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
if (err) {
asd_printk("couldn't reserve memory region for %s\n",
pci_name(asd_ha->pcidev));
goto Err;
}
io_handle->addr = ioremap(io_handle->start, io_handle->len);
if (!io_handle->addr) {
asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
pci_name(asd_ha->pcidev));
err = -ENOMEM;
goto Err_unreq;
}
}
return 0;
Err_unreq:
pci_release_region(asd_ha->pcidev, i);
Err:
if (i > 0) {
io_handle = &asd_ha->io_handle[0];
iounmap(io_handle->addr);
pci_release_region(asd_ha->pcidev, 0);
}
return err;
}
static void asd_unmap_memio(struct asd_ha_struct *asd_ha)
{
struct asd_ha_addrspace *io_handle;
io_handle = &asd_ha->io_handle[1];
iounmap(io_handle->addr);
pci_release_region(asd_ha->pcidev, 2);
io_handle = &asd_ha->io_handle[0];
iounmap(io_handle->addr);
pci_release_region(asd_ha->pcidev, 0);
}
static int asd_map_ioport(struct asd_ha_struct *asd_ha)
{
int i = PCI_IOBAR_OFFSET, err;
struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];
asd_ha->iospace = 1;
io_handle->start = pci_resource_start(asd_ha->pcidev, i);
io_handle->len = pci_resource_len(asd_ha->pcidev, i);
io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
io_handle->addr = (void __iomem *) io_handle->start;
if (!io_handle->start || !io_handle->len) {
asd_printk("couldn't get IO ports for %s\n",
pci_name(asd_ha->pcidev));
return -ENODEV;
}
err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
if (err) {
asd_printk("couldn't reserve io space for %s\n",
pci_name(asd_ha->pcidev));
}
return err;
}
static void asd_unmap_ioport(struct asd_ha_struct *asd_ha)
{
pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
}
static int asd_map_ha(struct asd_ha_struct *asd_ha)
{
int err;
u16 cmd_reg;
err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg);
if (err) {
asd_printk("couldn't read command register of %s\n",
pci_name(asd_ha->pcidev));
goto Err;
}
err = -ENODEV;
if (cmd_reg & PCI_COMMAND_MEMORY) {
if ((err = asd_map_memio(asd_ha)))
goto Err;
} else if (cmd_reg & PCI_COMMAND_IO) {
if ((err = asd_map_ioport(asd_ha)))
goto Err;
asd_printk("%s ioport mapped -- upgrade your hardware\n",
pci_name(asd_ha->pcidev));
} else {
asd_printk("no proper device access to %s\n",
pci_name(asd_ha->pcidev));
goto Err;
}
return 0;
Err:
return err;
}
static void asd_unmap_ha(struct asd_ha_struct *asd_ha)
{
if (asd_ha->iospace)
asd_unmap_ioport(asd_ha);
else
asd_unmap_memio(asd_ha);
}
static const char *asd_dev_rev[30] = {
[0] = "A0",
[1] = "A1",
[8] = "B0",
};
static int asd_common_setup(struct asd_ha_struct *asd_ha)
{
int err, i;
asd_ha->revision_id = asd_ha->pcidev->revision;
err = -ENODEV;
if (asd_ha->revision_id < AIC9410_DEV_REV_B0) {
asd_printk("%s is revision %s (%X), which is not supported\n",
pci_name(asd_ha->pcidev),
asd_dev_rev[asd_ha->revision_id],
asd_ha->revision_id);
goto Err;
}
/* Provide some sane default values. */
asd_ha->hw_prof.max_scbs = 512;
asd_ha->hw_prof.max_ddbs = ASD_MAX_DDBS;
asd_ha->hw_prof.num_phys = ASD_MAX_PHYS;
	/* All phys are enabled by default. */
asd_ha->hw_prof.enabled_phys = 0xFF;
for (i = 0; i < ASD_MAX_PHYS; i++) {
asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
SAS_LINK_RATE_3_0_GBPS;
asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
SAS_LINK_RATE_1_5_GBPS;
asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
SAS_LINK_RATE_1_5_GBPS;
asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
SAS_LINK_RATE_1_5_GBPS;
}
return 0;
Err:
return err;
}
static int asd_aic9410_setup(struct asd_ha_struct *asd_ha)
{
int err = asd_common_setup(asd_ha);
if (err)
return err;
asd_ha->hw_prof.addr_range = 8;
asd_ha->hw_prof.port_name_base = 0;
asd_ha->hw_prof.dev_name_base = 8;
asd_ha->hw_prof.sata_name_base = 16;
return 0;
}
static int asd_aic9405_setup(struct asd_ha_struct *asd_ha)
{
int err = asd_common_setup(asd_ha);
if (err)
return err;
asd_ha->hw_prof.addr_range = 4;
asd_ha->hw_prof.port_name_base = 0;
asd_ha->hw_prof.dev_name_base = 4;
asd_ha->hw_prof.sata_name_base = 8;
return 0;
}
static ssize_t asd_show_dev_rev(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
return snprintf(buf, PAGE_SIZE, "%s\n",
asd_dev_rev[asd_ha->revision_id]);
}
static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
static ssize_t asd_show_dev_bios_build(struct device *dev,
struct device_attribute *attr,char *buf)
{
struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld);
}
static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL);
static ssize_t asd_show_dev_pcba_sn(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn);
}
static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
#define FLASH_CMD_NONE 0x00
#define FLASH_CMD_UPDATE 0x01
#define FLASH_CMD_VERIFY 0x02
struct flash_command {
u8 command[8];
int code;
};
static struct flash_command flash_command_table[] =
{
{"verify", FLASH_CMD_VERIFY},
{"update", FLASH_CMD_UPDATE},
{"", FLASH_CMD_NONE} /* Last entry should be NULL. */
};
struct error_bios {
char *reason;
int err_code;
};
static struct error_bios flash_error_table[] =
{
{"Failed to open bios image file", FAIL_OPEN_BIOS_FILE},
{"PCI ID mismatch", FAIL_CHECK_PCI_ID},
{"Checksum mismatch", FAIL_CHECK_SUM},
{"Unknown Error", FAIL_UNKNOWN},
{"Failed to verify.", FAIL_VERIFY},
{"Failed to reset flash chip.", FAIL_RESET_FLASH},
{"Failed to find flash chip type.", FAIL_FIND_FLASH_ID},
{"Failed to erash flash chip.", FAIL_ERASE_FLASH},
{"Failed to program flash chip.", FAIL_WRITE_FLASH},
{"Flash in progress", FLASH_IN_PROGRESS},
{"Image file size Error", FAIL_FILE_SIZE},
{"Input parameter error", FAIL_PARAMETERS},
{"Out of memory", FAIL_OUT_MEMORY},
{"OK", 0} /* Last entry err_code = 0. */
};
static ssize_t asd_store_update_bios(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
char *cmd_ptr, *filename_ptr;
struct bios_file_header header, *hdr_ptr;
int res, i;
u32 csum = 0;
int flash_command = FLASH_CMD_NONE;
int err = 0;
cmd_ptr = kzalloc(count*2, GFP_KERNEL);
if (!cmd_ptr) {
err = FAIL_OUT_MEMORY;
goto out;
}
filename_ptr = cmd_ptr + count;
res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
if (res != 2) {
err = FAIL_PARAMETERS;
goto out1;
}
for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
if (!memcmp(flash_command_table[i].command,
cmd_ptr, strlen(cmd_ptr))) {
flash_command = flash_command_table[i].code;
break;
}
}
if (flash_command == FLASH_CMD_NONE) {
err = FAIL_PARAMETERS;
goto out1;
}
if (asd_ha->bios_status == FLASH_IN_PROGRESS) {
err = FLASH_IN_PROGRESS;
goto out1;
}
err = request_firmware(&asd_ha->bios_image,
filename_ptr,
&asd_ha->pcidev->dev);
if (err) {
asd_printk("Failed to load bios image file %s, error %d\n",
filename_ptr, err);
err = FAIL_OPEN_BIOS_FILE;
goto out1;
}
hdr_ptr = (struct bios_file_header *)asd_ha->bios_image->data;
if ((hdr_ptr->contrl_id.vendor != asd_ha->pcidev->vendor ||
hdr_ptr->contrl_id.device != asd_ha->pcidev->device) &&
(hdr_ptr->contrl_id.sub_vendor != asd_ha->pcidev->vendor ||
hdr_ptr->contrl_id.sub_device != asd_ha->pcidev->device)) {
ASD_DPRINTK("The PCI vendor or device id does not match\n");
ASD_DPRINTK("vendor=%x dev=%x sub_vendor=%x sub_dev=%x"
" pci vendor=%x pci dev=%x\n",
hdr_ptr->contrl_id.vendor,
hdr_ptr->contrl_id.device,
hdr_ptr->contrl_id.sub_vendor,
hdr_ptr->contrl_id.sub_device,
asd_ha->pcidev->vendor,
asd_ha->pcidev->device);
err = FAIL_CHECK_PCI_ID;
goto out2;
}
if (hdr_ptr->filelen != asd_ha->bios_image->size) {
err = FAIL_FILE_SIZE;
goto out2;
}
/* calculate checksum */
for (i = 0; i < hdr_ptr->filelen; i++)
csum += asd_ha->bios_image->data[i];
if ((csum & 0x0000ffff) != hdr_ptr->checksum) {
ASD_DPRINTK("BIOS file checksum mismatch\n");
err = FAIL_CHECK_SUM;
goto out2;
}
if (flash_command == FLASH_CMD_UPDATE) {
asd_ha->bios_status = FLASH_IN_PROGRESS;
err = asd_write_flash_seg(asd_ha,
&asd_ha->bios_image->data[sizeof(*hdr_ptr)],
0, hdr_ptr->filelen-sizeof(*hdr_ptr));
if (!err)
err = asd_verify_flash_seg(asd_ha,
&asd_ha->bios_image->data[sizeof(*hdr_ptr)],
0, hdr_ptr->filelen-sizeof(*hdr_ptr));
} else {
asd_ha->bios_status = FLASH_IN_PROGRESS;
err = asd_verify_flash_seg(asd_ha,
&asd_ha->bios_image->data[sizeof(header)],
0, hdr_ptr->filelen-sizeof(header));
}
out2:
release_firmware(asd_ha->bios_image);
out1:
kfree(cmd_ptr);
out:
asd_ha->bios_status = err;
if (!err)
return count;
else
return -err;
}
static ssize_t asd_show_update_bios(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i;
struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
for (i = 0; flash_error_table[i].err_code != 0; i++) {
if (flash_error_table[i].err_code == asd_ha->bios_status)
break;
}
if (asd_ha->bios_status != FLASH_IN_PROGRESS)
asd_ha->bios_status = FLASH_OK;
return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
flash_error_table[i].err_code,
flash_error_table[i].reason);
}
static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
asd_show_update_bios, asd_store_update_bios);
static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
{
int err;
err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
if (err)
return err;
err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
if (err)
goto err_rev;
err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
if (err)
goto err_biosb;
err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
if (err)
goto err_update_bios;
return 0;
err_update_bios:
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
err_biosb:
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
err_rev:
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
return err;
}
static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
{
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
}
/* The first entry, 0, is used for dynamic ids, the rest for devices
* we know about.
*/
static const struct asd_pcidev_struct {
const char * name;
int (*setup)(struct asd_ha_struct *asd_ha);
} asd_pcidev_data[] = {
/* Id 0 is used for dynamic ids. */
{ .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
.setup = asd_aic9410_setup
},
{ .name = "Adaptec AIC-9410W SAS/SATA Host Adapter",
.setup = asd_aic9410_setup
},
{ .name = "Adaptec AIC-9405W SAS/SATA Host Adapter",
.setup = asd_aic9405_setup
},
};
static int asd_create_ha_caches(struct asd_ha_struct *asd_ha)
{
asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool",
&asd_ha->pcidev->dev,
sizeof(struct scb),
8, 0);
if (!asd_ha->scb_pool) {
asd_printk("couldn't create scb pool\n");
return -ENOMEM;
}
return 0;
}
/**
 * asd_free_edbs - free empty data buffers
 * @asd_ha: pointer to host adapter structure
*/
static void asd_free_edbs(struct asd_ha_struct *asd_ha)
{
struct asd_seq_data *seq = &asd_ha->seq;
int i;
for (i = 0; i < seq->num_edbs; i++)
asd_free_coherent(asd_ha, seq->edb_arr[i]);
kfree(seq->edb_arr);
seq->edb_arr = NULL;
}
static void asd_free_escbs(struct asd_ha_struct *asd_ha)
{
struct asd_seq_data *seq = &asd_ha->seq;
int i;
for (i = 0; i < seq->num_escbs; i++) {
if (!list_empty(&seq->escb_arr[i]->list))
list_del_init(&seq->escb_arr[i]->list);
asd_ascb_free(seq->escb_arr[i]);
}
kfree(seq->escb_arr);
seq->escb_arr = NULL;
}
static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
{
int i;
if (asd_ha->hw_prof.ddb_ext)
asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext);
if (asd_ha->hw_prof.scb_ext)
asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
if (asd_ha->hw_prof.ddb_bitmap)
kfree(asd_ha->hw_prof.ddb_bitmap);
asd_ha->hw_prof.ddb_bitmap = NULL;
for (i = 0; i < ASD_MAX_PHYS; i++) {
struct asd_phy *phy = &asd_ha->phys[i];
asd_free_coherent(asd_ha, phy->id_frm_tok);
}
if (asd_ha->seq.escb_arr)
asd_free_escbs(asd_ha);
if (asd_ha->seq.edb_arr)
asd_free_edbs(asd_ha);
if (asd_ha->hw_prof.ue.area) {
kfree(asd_ha->hw_prof.ue.area);
asd_ha->hw_prof.ue.area = NULL;
}
if (asd_ha->seq.tc_index_array) {
kfree(asd_ha->seq.tc_index_array);
kfree(asd_ha->seq.tc_index_bitmap);
asd_ha->seq.tc_index_array = NULL;
asd_ha->seq.tc_index_bitmap = NULL;
}
if (asd_ha->seq.actual_dl) {
asd_free_coherent(asd_ha, asd_ha->seq.actual_dl);
asd_ha->seq.actual_dl = NULL;
asd_ha->seq.dl = NULL;
}
if (asd_ha->seq.next_scb.vaddr) {
dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr,
asd_ha->seq.next_scb.dma_handle);
asd_ha->seq.next_scb.vaddr = NULL;
}
dma_pool_destroy(asd_ha->scb_pool);
asd_ha->scb_pool = NULL;
}
struct kmem_cache *asd_dma_token_cache;
struct kmem_cache *asd_ascb_cache;
static int asd_create_global_caches(void)
{
if (!asd_dma_token_cache) {
asd_dma_token_cache
= kmem_cache_create(ASD_DRIVER_NAME "_dma_token",
sizeof(struct asd_dma_tok),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!asd_dma_token_cache) {
asd_printk("couldn't create dma token cache\n");
return -ENOMEM;
}
}
if (!asd_ascb_cache) {
asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb",
sizeof(struct asd_ascb),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!asd_ascb_cache) {
asd_printk("couldn't create ascb cache\n");
goto Err;
}
}
return 0;
Err:
kmem_cache_destroy(asd_dma_token_cache);
asd_dma_token_cache = NULL;
return -ENOMEM;
}
static void asd_destroy_global_caches(void)
{
if (asd_dma_token_cache)
kmem_cache_destroy(asd_dma_token_cache);
asd_dma_token_cache = NULL;
if (asd_ascb_cache)
kmem_cache_destroy(asd_ascb_cache);
asd_ascb_cache = NULL;
}
static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
{
int i;
struct asd_sas_phy **sas_phys =
kcalloc(ASD_MAX_PHYS, sizeof(*sas_phys), GFP_KERNEL);
struct asd_sas_port **sas_ports =
kcalloc(ASD_MAX_PHYS, sizeof(*sas_ports), GFP_KERNEL);
if (!sas_phys || !sas_ports) {
kfree(sas_phys);
kfree(sas_ports);
return -ENOMEM;
}
asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name;
asd_ha->sas_ha.lldd_module = THIS_MODULE;
asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0];
for (i = 0; i < ASD_MAX_PHYS; i++) {
sas_phys[i] = &asd_ha->phys[i].sas_phy;
sas_ports[i] = &asd_ha->ports[i];
}
asd_ha->sas_ha.sas_phy = sas_phys;
asd_ha->sas_ha.sas_port= sas_ports;
asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
return sas_register_ha(&asd_ha->sas_ha);
}
static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
{
int err;
err = sas_unregister_ha(&asd_ha->sas_ha);
sas_remove_host(asd_ha->sas_ha.core.shost);
scsi_host_put(asd_ha->sas_ha.core.shost);
kfree(asd_ha->sas_ha.sas_phy);
kfree(asd_ha->sas_ha.sas_port);
return err;
}
static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct asd_pcidev_struct *asd_dev;
unsigned asd_id = (unsigned) id->driver_data;
struct asd_ha_struct *asd_ha;
struct Scsi_Host *shost;
int err;
if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) {
asd_printk("wrong driver_data in PCI table\n");
return -ENODEV;
}
if ((err = pci_enable_device(dev))) {
asd_printk("couldn't enable device %s\n", pci_name(dev));
return err;
}
pci_set_master(dev);
err = -ENOMEM;
shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *));
if (!shost)
goto Err;
asd_dev = &asd_pcidev_data[asd_id];
asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL);
if (!asd_ha) {
asd_printk("out of memory\n");
goto Err_put;
}
asd_ha->pcidev = dev;
asd_ha->sas_ha.dev = &asd_ha->pcidev->dev;
asd_ha->sas_ha.lldd_ha = asd_ha;
asd_ha->bios_status = FLASH_OK;
asd_ha->name = asd_dev->name;
asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha;
asd_ha->sas_ha.core.shost = shost;
shost->transportt = aic94xx_transport_template;
shost->max_id = ~0;
shost->max_lun = ~0;
shost->max_cmd_len = 16;
err = scsi_add_host(shost, &dev->dev);
if (err)
goto Err_free;
err = asd_dev->setup(asd_ha);
if (err)
goto Err_remove;
err = -ENODEV;
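	/* Prefer a 64-bit DMA mask; fall back to 32-bit if unsupported. */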
if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
&& !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
;
else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
;
else {
asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
goto Err_remove;
}
pci_set_drvdata(dev, asd_ha);
err = asd_map_ha(asd_ha);
if (err)
goto Err_remove;
err = asd_create_ha_caches(asd_ha);
if (err)
goto Err_unmap;
err = asd_init_hw(asd_ha);
if (err)
goto Err_free_cache;
asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled "
"phys, flash %s, BIOS %s%d\n",
pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr),
asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys,
asd_ha->hw_prof.num_phys,
asd_ha->hw_prof.flash.present ? "present" : "not present",
asd_ha->hw_prof.bios.present ? "build " : "not present",
asd_ha->hw_prof.bios.bld);
shost->can_queue = asd_ha->seq.can_queue;
if (use_msi)
pci_enable_msi(asd_ha->pcidev);
err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, IRQF_SHARED,
ASD_DRIVER_NAME, asd_ha);
if (err) {
asd_printk("couldn't get irq %d for %s\n",
asd_ha->pcidev->irq, pci_name(asd_ha->pcidev));
goto Err_irq;
}
asd_enable_ints(asd_ha);
err = asd_init_post_escbs(asd_ha);
if (err) {
asd_printk("couldn't post escbs for %s\n",
pci_name(asd_ha->pcidev));
goto Err_escbs;
}
ASD_DPRINTK("escbs posted\n");
err = asd_create_dev_attrs(asd_ha);
if (err)
goto Err_dev_attrs;
err = asd_register_sas_ha(asd_ha);
if (err)
goto Err_reg_sas;
scsi_scan_host(shost);
return 0;
Err_reg_sas:
asd_remove_dev_attrs(asd_ha);
Err_dev_attrs:
Err_escbs:
asd_disable_ints(asd_ha);
free_irq(dev->irq, asd_ha);
Err_irq:
if (use_msi)
pci_disable_msi(dev);
asd_chip_hardrst(asd_ha);
Err_free_cache:
asd_destroy_ha_caches(asd_ha);
Err_unmap:
asd_unmap_ha(asd_ha);
Err_remove:
scsi_remove_host(shost);
Err_free:
kfree(asd_ha);
Err_put:
scsi_host_put(shost);
Err:
pci_disable_device(dev);
return err;
}
static void asd_free_queues(struct asd_ha_struct *asd_ha)
{
unsigned long flags;
LIST_HEAD(pending);
struct list_head *n, *pos;
spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
asd_ha->seq.pending = 0;
list_splice_init(&asd_ha->seq.pend_q, &pending);
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
if (!list_empty(&pending))
ASD_DPRINTK("Uh-oh! Pending is not empty!\n");
list_for_each_safe(pos, n, &pending) {
struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
/*
* Delete unexpired ascb timers. This may happen if we issue
* a CONTROL PHY scb to an adapter and rmmod before the scb
* times out. Apparently we don't wait for the CONTROL PHY
* to complete, so it doesn't matter if we kill the timer.
*/
del_timer_sync(&ascb->timer);
WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
list_del_init(pos);
ASD_DPRINTK("freeing from pending\n");
asd_ascb_free(ascb);
}
}
static void asd_turn_off_leds(struct asd_ha_struct *asd_ha)
{
u8 phy_mask = asd_ha->hw_prof.enabled_phys;
u8 i;
for_each_phy(phy_mask, phy_mask, i) {
asd_turn_led(asd_ha, i, 0);
asd_control_led(asd_ha, i, 0);
}
}
static void asd_pci_remove(struct pci_dev *dev)
{
struct asd_ha_struct *asd_ha = pci_get_drvdata(dev);
if (!asd_ha)
return;
asd_unregister_sas_ha(asd_ha);
asd_disable_ints(asd_ha);
asd_remove_dev_attrs(asd_ha);
/* XXX more here as needed */
free_irq(dev->irq, asd_ha);
if (use_msi)
pci_disable_msi(asd_ha->pcidev);
asd_turn_off_leds(asd_ha);
asd_chip_hardrst(asd_ha);
asd_free_queues(asd_ha);
asd_destroy_ha_caches(asd_ha);
asd_unmap_ha(asd_ha);
kfree(asd_ha);
pci_disable_device(dev);
return;
}
static void asd_scan_start(struct Scsi_Host *shost)
{
struct asd_ha_struct *asd_ha;
int err;
asd_ha = SHOST_TO_SAS_HA(shost)->lldd_ha;
err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys);
if (err)
asd_printk("Couldn't enable phys, err:%d\n", err);
}
static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
/* give the phy enabling interrupt event time to come in (1s
* is empirically about all it takes) */
if (time < HZ)
return 0;
/* Wait for discovery to finish */
sas_drain_work(SHOST_TO_SAS_HA(shost));
return 1;
}
static ssize_t version_show(struct device_driver *driver, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
}
static DRIVER_ATTR_RO(version);
static int asd_create_driver_attrs(struct device_driver *driver)
{
return driver_create_file(driver, &driver_attr_version);
}
static void asd_remove_driver_attrs(struct device_driver *driver)
{
driver_remove_file(driver, &driver_attr_version);
}
static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_dev_found = asd_dev_found,
.lldd_dev_gone = asd_dev_gone,
.lldd_execute_task = asd_execute_task,
.lldd_abort_task = asd_abort_task,
.lldd_abort_task_set = asd_abort_task_set,
.lldd_clear_aca = asd_clear_aca,
.lldd_clear_task_set = asd_clear_task_set,
.lldd_I_T_nexus_reset = asd_I_T_nexus_reset,
.lldd_lu_reset = asd_lu_reset,
.lldd_query_task = asd_query_task,
.lldd_clear_nexus_port = asd_clear_nexus_port,
.lldd_clear_nexus_ha = asd_clear_nexus_ha,
.lldd_control_phy = asd_control_phy,
.lldd_ata_set_dmamode = asd_set_dmamode,
};
static const struct pci_device_id aic94xx_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x41E),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x41F),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x430),0, 0, 2},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x432),0, 0, 2},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x43E),0, 0, 2},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x43F),0, 0, 2},
{}
};
MODULE_DEVICE_TABLE(pci, aic94xx_pci_table);
static struct pci_driver aic94xx_pci_driver = {
.name = ASD_DRIVER_NAME,
.id_table = aic94xx_pci_table,
.probe = asd_pci_probe,
.remove = asd_pci_remove,
};
static int __init aic94xx_init(void)
{
int err;
asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION,
ASD_DRIVER_VERSION);
err = asd_create_global_caches();
if (err)
return err;
aic94xx_transport_template =
sas_domain_attach_transport(&aic94xx_transport_functions);
if (!aic94xx_transport_template)
goto out_destroy_caches;
err = pci_register_driver(&aic94xx_pci_driver);
if (err)
goto out_release_transport;
err = asd_create_driver_attrs(&aic94xx_pci_driver.driver);
if (err)
goto out_unregister_pcidrv;
return err;
out_unregister_pcidrv:
pci_unregister_driver(&aic94xx_pci_driver);
out_release_transport:
sas_release_transport(aic94xx_transport_template);
out_destroy_caches:
asd_destroy_global_caches();
return err;
}
static void __exit aic94xx_exit(void)
{
asd_remove_driver_attrs(&aic94xx_pci_driver.driver);
pci_unregister_driver(&aic94xx_pci_driver);
sas_release_transport(aic94xx_transport_template);
asd_release_firmware();
asd_destroy_global_caches();
asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION,
ASD_DRIVER_VERSION);
}
module_init(aic94xx_init);
module_exit(aic94xx_exit);
MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(ASD_DRIVER_VERSION);
| gpl-2.0 |
fmoliveira/docker | graph/pull_v1.go | 10610 | package graph
import (
"errors"
"fmt"
"net"
"net/url"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
)
type v1Puller struct {
*TagStore
endpoint registry.APIEndpoint
config *ImagePullConfig
sf *streamformatter.StreamFormatter
repoInfo *registry.RepositoryInfo
session *registry.Session
}
func (p *v1Puller) Pull(tag string) (fallback bool, err error) {
if utils.DigestReference(tag) {
// Allowing fallback, because HTTPS v1 is before HTTP v2
return true, registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}
}
tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name)
if err != nil {
return false, err
}
// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
tr := transport.NewTransport(
// TODO(tiborvass): was ReceiveTimeout
registry.NewTransport(tlsConfig),
registry.DockerHeaders(p.config.MetaHeaders)...,
)
client := registry.HTTPClient(tr)
v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders)
if err != nil {
logrus.Debugf("Could not get v1 endpoint: %v", err)
return true, err
}
p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
if err != nil {
// TODO(dmcgowan): Check if should fallback
logrus.Debugf("Fallback from error: %s", err)
return true, err
}
if err := p.pullRepository(tag); err != nil {
// TODO(dmcgowan): Check if should fallback
return false, err
}
return false, nil
}
func (p *v1Puller) pullRepository(askedTag string) error {
out := p.config.OutStream
out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName))
repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName)
if err != nil {
if strings.Contains(err.Error(), "HTTP code: 404") {
return fmt.Errorf("Error: image %s not found", utils.ImageReference(p.repoInfo.RemoteName, askedTag))
}
// Unexpected HTTP error
return err
}
logrus.Debugf("Retrieving the tag list")
tagsList := make(map[string]string)
if askedTag == "" {
tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName)
} else {
var tagID string
tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, askedTag)
tagsList[askedTag] = tagID
}
if err != nil {
if err == registry.ErrRepoNotFound && askedTag != "" {
return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
}
logrus.Errorf("unable to get remote tags: %s", err)
return err
}
for tag, id := range tagsList {
repoData.ImgList[id] = ®istry.ImgData{
ID: id,
Tag: tag,
Checksum: "",
}
}
logrus.Debugf("Registering tags")
// If no tag has been specified, pull them all
if askedTag == "" {
for tag, id := range tagsList {
repoData.ImgList[id].Tag = tag
}
} else {
// Otherwise, check that the tag exists and use only that one
id, exists := tagsList[askedTag]
if !exists {
return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName)
}
repoData.ImgList[id].Tag = askedTag
}
errors := make(chan error)
layersDownloaded := false
imgIDs := []string{}
sessionID := p.session.ID()
defer func() {
p.graph.Release(sessionID, imgIDs...)
}()
for _, image := range repoData.ImgList {
downloadImage := func(img *registry.ImgData) {
if askedTag != "" && img.Tag != askedTag {
errors <- nil
return
}
if img.Tag == "" {
logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
errors <- nil
return
}
// ensure no two downloads of the same image happen at the same time
if c, err := p.poolAdd("pull", "img:"+img.ID); err != nil {
if c != nil {
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
<-c
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
} else {
logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
}
errors <- nil
return
}
defer p.poolRemove("pull", "img:"+img.ID)
// we need to retain it until tagging
p.graph.Retain(sessionID, img.ID)
imgIDs = append(imgIDs, img.ID)
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil))
success := false
var lastErr, err error
var isDownloaded bool
for _, ep := range p.repoInfo.Index.Mirrors {
ep += "v1/"
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil {
// Don't report errors when pulling from mirrors.
logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err)
continue
}
layersDownloaded = layersDownloaded || isDownloaded
success = true
break
}
if !success {
for _, ep := range repoData.Endpoints {
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil))
if isDownloaded, err = p.pullImage(img.ID, ep, repoData.Tokens); err != nil {
				// It's not ideal that only the last error is returned; it would be better to concatenate the errors.
				// As the error is also given to the output stream, the user will see the error.
lastErr = err
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil))
continue
}
layersDownloaded = layersDownloaded || isDownloaded
success = true
break
}
}
if !success {
err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr)
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
errors <- err
return
}
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
errors <- nil
}
go downloadImage(image)
}
var lastError error
for i := 0; i < len(repoData.ImgList); i++ {
if err := <-errors; err != nil {
lastError = err
}
}
if lastError != nil {
return lastError
}
for tag, id := range tagsList {
if askedTag != "" && tag != askedTag {
continue
}
if err := p.Tag(p.repoInfo.LocalName, tag, id, true); err != nil {
return err
}
}
requestedTag := p.repoInfo.LocalName
if len(askedTag) > 0 {
requestedTag = utils.ImageReference(p.repoInfo.LocalName, askedTag)
}
writeStatus(requestedTag, out, p.sf, layersDownloaded)
return nil
}
func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, error) {
history, err := p.session.GetRemoteHistory(imgID, endpoint)
if err != nil {
return false, err
}
out := p.config.OutStream
out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
// FIXME: Try to stream the images?
// FIXME: Launch the getRemoteImage() in goroutines
sessionID := p.session.ID()
// As imgID has been retained in pullRepository, no need to retain again
p.graph.Retain(sessionID, history[1:]...)
defer p.graph.Release(sessionID, history[1:]...)
layersDownloaded := false
for i := len(history) - 1; i >= 0; i-- {
id := history[i]
// ensure no two downloads of the same layer happen at the same time
if c, err := p.poolAdd("pull", "layer:"+id); err != nil {
logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
<-c
}
defer p.poolRemove("pull", "layer:"+id)
if !p.graph.Exists(id) {
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
var (
imgJSON []byte
imgSize int64
err error
img *image.Image
)
retries := 5
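			// Fetch the image metadata, retrying up to 'retries' times with linear backoff.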
for j := 1; j <= retries; j++ {
imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
if err != nil && j == retries {
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layersDownloaded, err
} else if err != nil {
time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue
}
img, err = image.NewImgJSON(imgJSON)
layersDownloaded = true
if err != nil && j == retries {
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
} else if err != nil {
time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue
} else {
break
}
}
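			// Fetch and register the layer itself, using the same retry and backoff policy.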
for j := 1; j <= retries; j++ {
// Get the layer
status := "Pulling fs layer"
if j > 1 {
status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
}
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize)
if uerr, ok := err.(*url.Error); ok {
err = uerr.Err
}
if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue
} else if err != nil {
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layersDownloaded, err
}
layersDownloaded = true
defer layer.Close()
err = p.graph.Register(img,
progressreader.New(progressreader.Config{
In: layer,
Out: out,
Formatter: p.sf,
Size: imgSize,
NewLines: false,
ID: stringid.TruncateID(id),
Action: "Downloading",
}))
if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue
} else if err != nil {
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
return layersDownloaded, err
} else {
break
}
}
}
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
}
return layersDownloaded, nil
}
| apache-2.0 |
stardog-union/stardog-graviton | vendor/github.com/hashicorp/terraform/builtin/providers/rancher/import_rancher_environment_test.go | 605 | package rancher
import (
"testing"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccRancherEnvironment_importBasic(t *testing.T) {
resourceName := "rancher_environment.foo"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckRancherEnvironmentDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccRancherEnvironmentConfig,
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
| apache-2.0 |
youtube/cobalt | third_party/web_platform_tests/fetch/api/redirect/redirect-to-dataurl.html | 380 | <!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Fetch: data URL loading after redirections</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
</head>
<body>
<script src="/common/get-host-info.sub.js"></script>
<script src="redirect-to-dataurl.js"></script>
</body>
</html>
| bsd-3-clause |
AriZuu/micropython | tests/basics/subclass_native3.py | 376 | class MyExc(Exception):
pass
e = MyExc(100, "Some error")
print(e)
print(repr(e))
print(e.args)
try:
raise MyExc("Some error")
except MyExc as e:
print("Caught exception:", repr(e))
try:
raise MyExc("Some error2")
except Exception as e:
print("Caught exception:", repr(e))
try:
raise MyExc("Some error2")
except:
print("Caught user exception")
| mit |
eabatalov/au-linux-kernel-autumn-2017 | linux/drivers/acpi/ac.c | 11821 | /*
* acpi_ac.c - ACPI AC Adapter Driver ($Revision: 27 $)
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
#include "battery.h"
#define PREFIX "ACPI: "
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_AC_DEVICE_NAME "AC Adapter"
#define ACPI_AC_FILE_STATE "state"
#define ACPI_AC_NOTIFY_STATUS 0x80
#define ACPI_AC_STATUS_OFFLINE 0x00
#define ACPI_AC_STATUS_ONLINE 0x01
#define ACPI_AC_STATUS_UNKNOWN 0xFF
#define _COMPONENT ACPI_AC_COMPONENT
ACPI_MODULE_NAME("ac");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
struct acpi_ac_bl {
const char *hid;
int hrv;
};
static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, ac_device_ids);
/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
static const struct acpi_ac_bl acpi_ac_blacklist[] = {
{ "INT33F4", -1 }, /* X-Powers AXP288 PMIC */
{ "INT34D3", 3 }, /* Intel Cherrytrail Whiskey Cove PMIC */
};
#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev);
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
#ifdef CONFIG_ACPI_PROCFS_POWER
extern struct proc_dir_entry *acpi_lock_ac_dir(void);
extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
static int acpi_ac_open_fs(struct inode *inode, struct file *file);
#endif
static int ac_sleep_before_get_state_ms;
static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
.ids = ac_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
.notify = acpi_ac_notify,
},
.drv.pm = &acpi_ac_pm,
};
struct acpi_ac {
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct acpi_device * device;
unsigned long long state;
struct notifier_block battery_nb;
};
#define to_acpi_ac(x) power_supply_get_drvdata(x)
#ifdef CONFIG_ACPI_PROCFS_POWER
static const struct file_operations acpi_ac_fops = {
.owner = THIS_MODULE,
.open = acpi_ac_open_fs,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
/* --------------------------------------------------------------------------
AC Adapter Management
-------------------------------------------------------------------------- */
static int acpi_ac_get_state(struct acpi_ac *ac)
{
acpi_status status = AE_OK;
if (!ac)
return -EINVAL;
status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
&ac->state);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Error reading AC Adapter state"));
ac->state = ACPI_AC_STATUS_UNKNOWN;
return -ENODEV;
}
return 0;
}
/* --------------------------------------------------------------------------
sysfs I/F
-------------------------------------------------------------------------- */
static int get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct acpi_ac *ac = to_acpi_ac(psy);
if (!ac)
return -ENODEV;
if (acpi_ac_get_state(ac))
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = ac->state;
break;
default:
return -EINVAL;
}
return 0;
}
static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
#ifdef CONFIG_ACPI_PROCFS_POWER
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
static struct proc_dir_entry *acpi_ac_dir;
static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_ac *ac = seq->private;
if (!ac)
return 0;
if (acpi_ac_get_state(ac)) {
seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
return 0;
}
seq_puts(seq, "state: ");
switch (ac->state) {
case ACPI_AC_STATUS_OFFLINE:
seq_puts(seq, "off-line\n");
break;
case ACPI_AC_STATUS_ONLINE:
seq_puts(seq, "on-line\n");
break;
default:
seq_puts(seq, "unknown\n");
break;
}
return 0;
}
static int acpi_ac_open_fs(struct inode *inode, struct file *file)
{
return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
}
static int acpi_ac_add_fs(struct acpi_ac *ac)
{
struct proc_dir_entry *entry = NULL;
printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(ac->device)) {
acpi_device_dir(ac->device) =
proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir);
if (!acpi_device_dir(ac->device))
return -ENODEV;
}
/* 'state' [R] */
entry = proc_create_data(ACPI_AC_FILE_STATE,
S_IRUGO, acpi_device_dir(ac->device),
&acpi_ac_fops, ac);
if (!entry)
return -ENODEV;
return 0;
}
static int acpi_ac_remove_fs(struct acpi_ac *ac)
{
if (acpi_device_dir(ac->device)) {
remove_proc_entry(ACPI_AC_FILE_STATE,
acpi_device_dir(ac->device));
remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir);
acpi_device_dir(ac->device) = NULL;
}
return 0;
}
#endif
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
static void acpi_ac_notify(struct acpi_device *device, u32 event)
{
struct acpi_ac *ac = acpi_driver_data(device);
if (!ac)
return;
switch (event) {
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
case ACPI_AC_NOTIFY_STATUS:
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
/*
* A buggy BIOS may notify AC first and then sleep for
* a specific time before doing actual operations in the
* EC event handler (_Qxx). This will cause the AC state
* reported by the ACPI event to be incorrect, so wait for a
* specific time for the EC event handler to make progress.
*/
if (ac_sleep_before_get_state_ms > 0)
msleep(ac_sleep_before_get_state_ms);
acpi_ac_get_state(ac);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(device, event, (u32) ac->state);
kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
}
return;
}
static int acpi_ac_battery_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct acpi_ac *ac = container_of(nb, struct acpi_ac, battery_nb);
struct acpi_bus_event *event = (struct acpi_bus_event *)data;
/*
* On HP Pavilion dv6-6179er AC status notifications aren't triggered
* when adapter is plugged/unplugged. However, battery status
	 * notifications are triggered when the battery starts charging or
	 * discharging. Re-reading the AC status re-triggers any lost AC
	 * notifications if the AC status has changed.
*/
if (strcmp(event->device_class, ACPI_BATTERY_CLASS) == 0 &&
event->type == ACPI_BATTERY_NOTIFY_STATUS)
acpi_ac_get_state(ac);
return NOTIFY_OK;
}
static int thinkpad_e530_quirk(const struct dmi_system_id *d)
{
ac_sleep_before_get_state_ms = 1000;
return 0;
}
static const struct dmi_system_id ac_dmi_table[] = {
{
.callback = thinkpad_e530_quirk,
.ident = "thinkpad e530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
},
},
{},
};
static int acpi_ac_add(struct acpi_device *device)
{
struct power_supply_config psy_cfg = {};
int result = 0;
struct acpi_ac *ac = NULL;
if (!device)
return -EINVAL;
ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
if (!ac)
return -ENOMEM;
ac->device = device;
strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_AC_CLASS);
device->driver_data = ac;
result = acpi_ac_get_state(ac);
if (result)
goto end;
psy_cfg.drv_data = ac;
ac->charger_desc.name = acpi_device_bid(device);
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_ac_add_fs(ac);
if (result)
goto end;
#endif
ac->charger_desc.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger_desc.properties = ac_props;
ac->charger_desc.num_properties = ARRAY_SIZE(ac_props);
ac->charger_desc.get_property = get_ac_property;
ac->charger = power_supply_register(&ac->device->dev,
&ac->charger_desc, &psy_cfg);
if (IS_ERR(ac->charger)) {
result = PTR_ERR(ac->charger);
goto end;
}
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
ac->state ? "on-line" : "off-line");
ac->battery_nb.notifier_call = acpi_ac_battery_notify;
register_acpi_notifier(&ac->battery_nb);
end:
if (result) {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_ac_remove_fs(ac);
#endif
kfree(ac);
}
dmi_check_system(ac_dmi_table);
return result;
}
#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev)
{
struct acpi_ac *ac;
unsigned old_state;
if (!dev)
return -EINVAL;
ac = acpi_driver_data(to_acpi_device(dev));
if (!ac)
return -EINVAL;
old_state = ac->state;
if (acpi_ac_get_state(ac))
return 0;
if (old_state != ac->state)
kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE);
return 0;
}
#else
#define acpi_ac_resume NULL
#endif
static int acpi_ac_remove(struct acpi_device *device)
{
struct acpi_ac *ac = NULL;
if (!device || !acpi_driver_data(device))
return -EINVAL;
ac = acpi_driver_data(device);
power_supply_unregister(ac->charger);
unregister_acpi_notifier(&ac->battery_nb);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_ac_remove_fs(ac);
#endif
kfree(ac);
return 0;
}
static int __init acpi_ac_init(void)
{
unsigned int i;
int result;
if (acpi_disabled)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++)
if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1",
acpi_ac_blacklist[i].hrv)) {
pr_info(PREFIX "AC: found native %s PMIC, not loading\n",
acpi_ac_blacklist[i].hid);
return -ENODEV;
}
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_ac_dir = acpi_lock_ac_dir();
if (!acpi_ac_dir)
return -ENODEV;
#endif
result = acpi_bus_register_driver(&acpi_ac_driver);
if (result < 0) {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_unlock_ac_dir(acpi_ac_dir);
#endif
return -ENODEV;
}
return 0;
}
static void __exit acpi_ac_exit(void)
{
acpi_bus_unregister_driver(&acpi_ac_driver);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_unlock_ac_dir(acpi_ac_dir);
#endif
}
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
| gpl-3.0 |
tomwscott/GoCD | server/db/migrate/h2deltas/23_add_index_for_stages.sql | 1157 | --*************************GO-LICENSE-START*********************************
-- Copyright 2014 ThoughtWorks, Inc.
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--*************************GO-LICENSE-END***********************************
ALTER TABLE stages ADD COLUMN counter INTEGER;
CREATE INDEX idx_stages_counter_index ON stages(counter);
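-- Backfill counter with each stage's 1-based run ordinal within its (pipeline, stage name) group.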
update stages SET counter = (SELECT Count(*) FROM stages s2 where s2.pipelineid = stages.pipelineid AND s2.name = stages.name AND s2.id<=stages.id);
ALTER TABLE stages ALTER COLUMN counter SET NOT NULL;
--//@UNDO
DROP INDEX idx_stages_counter_index IF EXISTS;
ALTER TABLE stages DROP COLUMN counter;
| apache-2.0 |
dominic/cdnjs | ajax/libs/bootstrap-select/1.7.0/js/i18n/defaults-ro_RO.js | 597 | /*!
* Bootstrap-select v1.7.0 (http://silviomoreto.github.io/bootstrap-select)
*
* Copyright 2013-2015 bootstrap-select
* Licensed under MIT (https://github.com/silviomoreto/bootstrap-select/blob/master/LICENSE)
*/
(function ($) {
$.fn.selectpicker.defaults = {
noneSelectedText: 'Nu a fost selectat nimic',
noneResultsText: 'Nu exista niciun rezultat {0}',
countSelectedText: '{0} din {1} selectat(e)',
maxOptionsText: ['Limita a fost atinsa ({n} {var} max)', 'Limita de grup a fost atinsa ({n} {var} max)', ['iteme', 'item']],
multipleSeparator: ', '
};
})(jQuery);
| mit |
enigmamarketing/csf-allow-domains | usr/local/csf/bin/csf-allow-domains/dns/rdtypes/mxbase.py | 3968 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""MX-like base classes."""
import cStringIO
import struct
import dns.exception
import dns.rdata
import dns.name
class MXBase(dns.rdata.Rdata):
"""Base class for rdata that is like an MX record.
@ivar preference: the preference value
@type preference: int
@ivar exchange: the exchange name
@type exchange: dns.name.Name object"""
__slots__ = ['preference', 'exchange']
def __init__(self, rdclass, rdtype, preference, exchange):
super(MXBase, self).__init__(rdclass, rdtype)
self.preference = preference
self.exchange = exchange
def to_text(self, origin=None, relativize=True, **kw):
exchange = self.exchange.choose_relativity(origin, relativize)
return '%d %s' % (self.preference, exchange)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
preference = tok.get_uint16()
exchange = tok.get_name()
exchange = exchange.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, preference, exchange)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
pref = struct.pack("!H", self.preference)
file.write(pref)
self.exchange.to_wire(file, compress, origin)
def to_digestable(self, origin = None):
return struct.pack("!H", self.preference) + \
self.exchange.to_digestable(origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(preference, ) = struct.unpack('!H', wire[current : current + 2])
current += 2
rdlen -= 2
(exchange, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
if not origin is None:
exchange = exchange.relativize(origin)
return cls(rdclass, rdtype, preference, exchange)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.exchange = self.exchange.choose_relativity(origin, relativize)
def _cmp(self, other):
sp = struct.pack("!H", self.preference)
op = struct.pack("!H", other.preference)
v = cmp(sp, op)
if v == 0:
v = cmp(self.exchange, other.exchange)
return v
class UncompressedMX(MXBase):
"""Base class for rdata that is like an MX record, but whose name
is not compressed when converted to DNS wire format, and whose
digestable form is not downcased."""
def to_wire(self, file, compress = None, origin = None):
super(UncompressedMX, self).to_wire(file, None, origin)
def to_digestable(self, origin = None):
f = cStringIO.StringIO()
self.to_wire(f, None, origin)
return f.getvalue()
class UncompressedDowncasingMX(MXBase):
"""Base class for rdata that is like an MX record, but whose name
    is not compressed when converted to DNS wire format."""
def to_wire(self, file, compress = None, origin = None):
super(UncompressedDowncasingMX, self).to_wire(file, None, origin)
| mit |
Leonog/trabalho-les | web/vendor/zendframework/zend-authentication/doc/book/zend.authentication.adapter.ldap.md | 11270 | # LDAP Authentication
## Introduction
`Zend\Authentication\Adapter\Ldap` supports web application authentication with *LDAP* services. Its
features include username and domain name canonicalization, multi-domain authentication, and
failover capabilities. It has been tested to work with [Microsoft Active
Directory](http://www.microsoft.com/windowsserver2003/technologies/directory/activedirectory/) and
[OpenLDAP](http://www.openldap.org/), but it should also work with other *LDAP* service providers.
This documentation includes a guide on using `Zend\Authentication\Adapter\Ldap`, an exploration of
its *API*, an outline of the various available options, diagnostic information for troubleshooting
authentication problems, and example options for both Active Directory and OpenLDAP servers.
## Usage
To incorporate `Zend\Authentication\Adapter\Ldap` authentication into your application quickly, even
if you're not using `Zend\Mvc`, the meat of your code should look something like the following:
```php
<?php
use Zend\Authentication\AuthenticationService;
use Zend\Authentication\Adapter\Ldap as AuthAdapter;
use Zend\Config\Reader\Ini as ConfigReader;
use Zend\Config\Config;
use Zend\Log\Logger;
use Zend\Log\Writer\Stream as LogWriter;
use Zend\Log\Filter\Priority as LogFilter;
$username = $this->getRequest()->getPost('username');
$password = $this->getRequest()->getPost('password');
$auth = new AuthenticationService();
$configReader = new ConfigReader();
$configData = $configReader->fromFile('./ldap-config.ini');
$config = new Config($configData, true);
$log_path = $config->production->ldap->log_path;
$options = $config->production->ldap->toArray();
unset($options['log_path']);
$adapter = new AuthAdapter($options,
$username,
$password);
$result = $auth->authenticate($adapter);
if ($log_path) {
$messages = $result->getMessages();
$logger = new Logger;
$writer = new LogWriter($log_path);
$logger->addWriter($writer);
$filter = new LogFilter(Logger::DEBUG);
$writer->addFilter($filter);
foreach ($messages as $i => $message) {
if ($i-- > 1) { // $messages[2] and up are log messages
$message = str_replace("\n", "\n ", $message);
$logger->debug("Ldap: $i: $message");
}
}
}
```
Of course, the logging code is optional, but it is highly recommended that you use a logger.
`Zend\Authentication\Adapter\Ldap` will record just about every bit of information anyone could want
in `$messages` (more below), which is a nice feature in itself for something that has a history of
being notoriously difficult to debug.
The `Zend\Config\Reader\Ini` code is used above to load the adapter options. It is also optional. A
regular array would work equally well. The following is an example `ldap-config.ini` file that has
options for two separate servers. With multiple sets of server options the adapter will try each, in
order, until the credentials are successfully authenticated. The names of the servers (e.g.,
'server1' and 'server2') are largely arbitrary. For details regarding the options array, see the
**Server Options** section below. Note that `Zend\Config\Reader\Ini` requires that any values containing
"equals" characters (**=**) be quoted (like the DNs shown below).
```ini
[production]
ldap.log_path = /tmp/ldap.log
; Typical options for OpenLDAP
ldap.server1.host = s0.foo.net
ldap.server1.accountDomainName = foo.net
ldap.server1.accountDomainNameShort = FOO
ldap.server1.accountCanonicalForm = 3
ldap.server1.username = "CN=user1,DC=foo,DC=net"
ldap.server1.password = pass1
ldap.server1.baseDn = "OU=Sales,DC=foo,DC=net"
ldap.server1.bindRequiresDn = true
; Typical options for Active Directory
ldap.server2.host = dc1.w.net
ldap.server2.useStartTls = true
ldap.server2.accountDomainName = w.net
ldap.server2.accountDomainNameShort = W
ldap.server2.accountCanonicalForm = 3
ldap.server2.baseDn = "CN=Users,DC=w,DC=net"
```
The above configuration will instruct `Zend\Authentication\Adapter\Ldap` to attempt to authenticate
users with the OpenLDAP server `s0.foo.net` first. If the authentication fails for any reason, the
AD server `dc1.w.net` will be tried.
With servers in different domains, this configuration illustrates multi-domain authentication. You
can also have multiple servers in the same domain to provide redundancy.
Note that in this case, even though OpenLDAP has no need for the short NetBIOS style domain name
used by Windows, we provide it here for name canonicalization purposes (described in the **Username
Canonicalization** section below).
## The API
The `Zend\Authentication\Adapter\Ldap` constructor accepts three parameters.
The `$options` parameter is required and must be an array containing one or more sets of options.
Note that it is **an array of arrays** of [Zend\\Ldap\\Ldap](zend.ldap.introduction) options. Even
if you will be using only one *LDAP* server, the options must still be within another array.
Below is [print\_r()](http://php.net/print_r) output of an example options parameter containing two
sets of server options for *LDAP* servers `s0.foo.net` and `dc1.w.net` (the same options as the
above *INI* representation):
```console
Array
(
[server2] => Array
(
[host] => dc1.w.net
[useStartTls] => 1
[accountDomainName] => w.net
[accountDomainNameShort] => W
[accountCanonicalForm] => 3
[baseDn] => CN=Users,DC=w,DC=net
)
[server1] => Array
(
[host] => s0.foo.net
[accountDomainName] => foo.net
[accountDomainNameShort] => FOO
[accountCanonicalForm] => 3
[username] => CN=user1,DC=foo,DC=net
[password] => pass1
[baseDn] => OU=Sales,DC=foo,DC=net
[bindRequiresDn] => 1
)
)
```
The information provided in each set of options above is different mainly because AD does not
require that a username be in DN form when binding (see the `bindRequiresDn` option in the **Server
Options** section below), which means we can omit a number of options associated with retrieving the
DN for a username being authenticated.
> ## Note
#### What is a Distinguished Name?
A DN or "distinguished name" is a string that represents the path to an object within the *LDAP*
directory. Each comma-separated component is an attribute and value representing a node. The
components are evaluated in reverse. For example, the user account **CN=Bob
Carter,CN=Users,DC=w,DC=net** is located directly within the **CN=Users,DC=w,DC=net** container.
This structure is best explored with an *LDAP* browser like the *ADSI* Edit *MMC* snap-in for Active
Directory or phpLDAPadmin.
The names of servers (e.g. 'server1' and 'server2' shown above) are largely arbitrary, but for the
sake of using `Zend\Config\Reader\Ini`, the identifiers should be present (as opposed to being
numeric indexes) and should not contain any special characters used by the associated file formats
(e.g. the '**.**' *INI* property separator, '**&**' for *XML* entity references, etc.).
With multiple sets of server options, the adapter can authenticate users in multiple domains and
provide failover so that if one server is not available, another will be queried.
> ## Note
#### The Gory Details: What Happens in the Authenticate Method?
When the `authenticate()` method is called, the adapter iterates over each set of server options,
sets them on the internal `Zend\Ldap\Ldap` instance, and calls the `Zend\Ldap\Ldap::bind()` method
with the username and password being authenticated. The `Zend\Ldap\Ldap` class checks to see if the
username is qualified with a domain (e.g., has a domain component like `alice@foo.net` or
`FOO\alice`). If a domain is present, but does not match either of the server's domain names
(`foo.net` or *FOO*), a special exception is thrown and caught by `Zend\Authentication\Adapter\Ldap`
that causes that server to be ignored and the next set of server options is selected. If a domain
**does** match, or if the user did not supply a qualified username, `Zend\Ldap\Ldap` proceeds to try
to bind with the supplied credentials. If the bind is not successful, `Zend\Ldap\Ldap` throws a
`Zend\Ldap\Exception\LdapException` which is caught by `Zend\Authentication\Adapter\Ldap` and the
next set of server options is tried. If the bind is successful, the iteration stops, and the
adapter's `authenticate()` method returns a successful result. If all server options have been tried
without success, the authentication fails, and `authenticate()` returns a failure result with error
messages from the last iteration.
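The flow just described can be condensed into a short sketch. This is conceptual pseudocode of the adapter's behaviour, not its actual source; the function name and the message handling are illustrative only:
```php
<?php
use Zend\Authentication\Result;
use Zend\Ldap\Exception\LdapException;
use Zend\Ldap\Ldap;
// Conceptual sketch of the failover loop inside authenticate().
function tryEachServer(Ldap $ldap, array $serverSets, $username, $password)
{
    $messages = array('', ''); // indexes 0 and 1 are reserved (see below)
    foreach ($serverSets as $name => $options) {
        $ldap->setOptions($options);
        try {
            // Throws on a failed bind or on a domain mismatch.
            $ldap->bind($username, $password);
            return new Result(Result::SUCCESS, $username, $messages);
        } catch (LdapException $e) {
            // Record why this server failed, then try the next one.
            $messages[] = $name . ': ' . $e->getMessage();
        }
    }
    return new Result(Result::FAILURE, $username, $messages);
}
```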
The username and password parameters of the `Zend\Authentication\Adapter\Ldap` constructor represent
the credentials being authenticated (i.e., the credentials supplied by the user through your *HTML*
login form). Alternatively, they may also be set with the `setUsername()` and `setPassword()`
methods.
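For example, this is equivalent to passing the credentials to the constructor:
```php
$adapter = new AuthAdapter($options);
$adapter->setUsername($username);
$adapter->setPassword($password);
```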
## Server Options
Each set of server options **in the context of Zend\\Authentication\\Adapter\\Ldap** consists of the
following options, which are passed, largely unmodified, to `Zend\Ldap\Ldap::setOptions()`:
> ## Note
If you enable **useStartTls = TRUE** or **useSsl = TRUE** you may find that the *LDAP* client
generates an error claiming that it cannot validate the server's certificate. Assuming the *PHP*
*LDAP* extension is ultimately linked to the OpenLDAP client libraries, to resolve this issue you
can set "`TLS_REQCERT never`" in the OpenLDAP client `ldap.conf` (and restart the web server) to
indicate to the OpenLDAP client library that you trust the server. Alternatively, if you are
concerned that the server could be spoofed, you can export the *LDAP* server's root certificate and
put it on the web server so that the OpenLDAP client can validate the server's identity.
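For the first, less strict approach, the client-side `ldap.conf` entry is a single line (this relaxes certificate validation; see the caveat above):
```
TLS_REQCERT never
```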
## Collecting Debugging Messages
`Zend\Authentication\Adapter\Ldap` collects debugging information within its `authenticate()`
method. This information is stored in the `Zend\Authentication\Result` object as messages. In the
array returned by `Zend\Authentication\Result::getMessages()`, index 0 holds a short, generic
message suitable for end users, index 1 holds a more detailed failure reason, and indexes 2 and
higher hold the raw debugging log. In practice, index 0 should be displayed to the user (e.g., using
the FlashMessenger helper), index 1 should be logged and, if debugging information is being
collected, indexes 2 and higher could be logged as well (although the final message always includes
the string from index 1).
## Common Options for Specific Servers
### Options for Active Directory
For *ADS*, the following options are noteworthy:
> ## Note
Technically there should be no danger of accidental cross-domain authentication with the current
`Zend\Authentication\Adapter\Ldap` implementation, since server domains are explicitly checked, but
this may not be true of a future implementation that discovers the domain at runtime, or if an
alternative adapter is used (e.g., Kerberos). In general, account name ambiguity is known to be the
source of security issues, so always try to use qualified account names.
### Options for OpenLDAP
For OpenLDAP or a generic *LDAP* server using a typical posixAccount style schema, the following
options are noteworthy:
| gpl-3.0 |
alebianco/ANE-Android-Expansion | source/android/play_apk_expansion/downloader_library/src/com/google/android/vending/expansion/downloader/impl/DownloadNotification.java | 9068 | /*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.android.vending.expansion.downloader.impl;
import com.android.vending.expansion.downloader.R;
import com.google.android.vending.expansion.downloader.DownloadProgressInfo;
import com.google.android.vending.expansion.downloader.DownloaderClientMarshaller;
import com.google.android.vending.expansion.downloader.Helpers;
import com.google.android.vending.expansion.downloader.IDownloaderClient;
import android.app.Notification;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.content.Context;
import android.os.Messenger;
/**
* This class handles displaying the notification associated with the download
* queue going on in the download manager. It handles multiple status types;
* Some require user interaction and some do not. Some of the user interactions
* may be transient. (for example: the user is queried to continue the download
* on 3G when it started on WiFi, but then the phone locks onto WiFi again so
* the prompt automatically goes away)
* <p/>
* The application interface for the downloader also needs to understand and
* handle these transient states.
*/
public class DownloadNotification implements IDownloaderClient {
private int mState;
private final Context mContext;
private final NotificationManager mNotificationManager;
private String mCurrentTitle;
private IDownloaderClient mClientProxy;
final ICustomNotification mCustomNotification;
private Notification mNotification;
private Notification mCurrentNotification;
private CharSequence mLabel;
private String mCurrentText;
private PendingIntent mContentIntent;
private DownloadProgressInfo mProgressInfo;
static final String LOGTAG = "DownloadNotification";
static final int NOTIFICATION_ID = LOGTAG.hashCode();
public PendingIntent getClientIntent() {
return mContentIntent;
}
public void setClientIntent(PendingIntent mClientIntent) {
this.mContentIntent = mClientIntent;
}
public void resendState() {
if (null != mClientProxy) {
mClientProxy.onDownloadStateChanged(mState);
}
}
@Override
public void onDownloadStateChanged(int newState) {
if (null != mClientProxy) {
mClientProxy.onDownloadStateChanged(newState);
}
if (newState != mState) {
mState = newState;
if (newState == IDownloaderClient.STATE_IDLE || null == mContentIntent) {
return;
}
int stringDownloadID;
int iconResource;
boolean ongoingEvent;
// get the new title string and paused text
switch (newState) {
case 0:
iconResource = android.R.drawable.stat_sys_warning;
stringDownloadID = R.string.state_unknown;
ongoingEvent = false;
break;
case IDownloaderClient.STATE_DOWNLOADING:
iconResource = android.R.drawable.stat_sys_download;
stringDownloadID = Helpers.getDownloaderStringResourceIDFromState(newState);
ongoingEvent = true;
break;
case IDownloaderClient.STATE_FETCHING_URL:
case IDownloaderClient.STATE_CONNECTING:
iconResource = android.R.drawable.stat_sys_download_done;
stringDownloadID = Helpers.getDownloaderStringResourceIDFromState(newState);
ongoingEvent = true;
break;
case IDownloaderClient.STATE_COMPLETED:
case IDownloaderClient.STATE_PAUSED_BY_REQUEST:
iconResource = android.R.drawable.stat_sys_download_done;
stringDownloadID = Helpers.getDownloaderStringResourceIDFromState(newState);
ongoingEvent = false;
break;
case IDownloaderClient.STATE_FAILED:
case IDownloaderClient.STATE_FAILED_CANCELED:
case IDownloaderClient.STATE_FAILED_FETCHING_URL:
case IDownloaderClient.STATE_FAILED_SDCARD_FULL:
case IDownloaderClient.STATE_FAILED_UNLICENSED:
iconResource = android.R.drawable.stat_sys_warning;
stringDownloadID = Helpers.getDownloaderStringResourceIDFromState(newState);
ongoingEvent = false;
break;
default:
iconResource = android.R.drawable.stat_sys_warning;
stringDownloadID = Helpers.getDownloaderStringResourceIDFromState(newState);
ongoingEvent = true;
break;
}
mCurrentText = mContext.getString(stringDownloadID);
mCurrentTitle = mLabel.toString();
mCurrentNotification.tickerText = mLabel + ": " + mCurrentText;
mCurrentNotification.icon = iconResource;
mCurrentNotification.setLatestEventInfo(mContext, mCurrentTitle, mCurrentText,
mContentIntent);
if (ongoingEvent) {
mCurrentNotification.flags |= Notification.FLAG_ONGOING_EVENT;
} else {
mCurrentNotification.flags &= ~Notification.FLAG_ONGOING_EVENT;
mCurrentNotification.flags |= Notification.FLAG_AUTO_CANCEL;
}
mNotificationManager.notify(NOTIFICATION_ID, mCurrentNotification);
}
}
@Override
public void onDownloadProgress(DownloadProgressInfo progress) {
mProgressInfo = progress;
if (null != mClientProxy) {
mClientProxy.onDownloadProgress(progress);
}
if (progress.mOverallTotal <= 0) {
// we just show the text
mNotification.tickerText = mCurrentTitle;
mNotification.icon = android.R.drawable.stat_sys_download;
mNotification.setLatestEventInfo(mContext, mLabel, mCurrentText, mContentIntent);
mCurrentNotification = mNotification;
} else {
mCustomNotification.setCurrentBytes(progress.mOverallProgress);
mCustomNotification.setTotalBytes(progress.mOverallTotal);
mCustomNotification.setIcon(android.R.drawable.stat_sys_download);
mCustomNotification.setPendingIntent(mContentIntent);
mCustomNotification.setTicker(mLabel + ": " + mCurrentText);
mCustomNotification.setTitle(mLabel);
mCustomNotification.setTimeRemaining(progress.mTimeRemaining);
mCurrentNotification = mCustomNotification.updateNotification(mContext);
}
mNotificationManager.notify(NOTIFICATION_ID, mCurrentNotification);
}
public interface ICustomNotification {
void setTitle(CharSequence title);
void setTicker(CharSequence ticker);
void setPendingIntent(PendingIntent mContentIntent);
void setTotalBytes(long totalBytes);
void setCurrentBytes(long currentBytes);
void setIcon(int iconResource);
void setTimeRemaining(long timeRemaining);
Notification updateNotification(Context c);
}
/**
* Called in response to onClientUpdated. Creates a new proxy and notifies
* it of the current state.
*
* @param msg the client Messenger to notify
*/
public void setMessenger(Messenger msg) {
mClientProxy = DownloaderClientMarshaller.CreateProxy(msg);
if (null != mProgressInfo) {
mClientProxy.onDownloadProgress(mProgressInfo);
}
if (mState != -1) {
mClientProxy.onDownloadStateChanged(mState);
}
}
/**
* Constructor
*
* @param ctx The context to use to obtain access to the Notification
* Service
*/
DownloadNotification(Context ctx, CharSequence applicationLabel) {
mState = -1;
mContext = ctx;
mLabel = applicationLabel;
mNotificationManager = (NotificationManager)
mContext.getSystemService(Context.NOTIFICATION_SERVICE);
mCustomNotification = CustomNotificationFactory
.createCustomNotification();
mNotification = new Notification();
mCurrentNotification = mNotification;
}
@Override
public void onServiceConnected(Messenger m) {
}
}
| mit |
webflo/d8-core | vendor/sdboyer/gliph/src/Gliph/Visitor/StatefulVisitorInterface.php | 594 | <?php
namespace Gliph\Visitor;
/**
* Interface for stateful algorithm visitors.
*/
interface StatefulVisitorInterface {
const NOT_STARTED = 0;
const IN_PROGRESS = 1;
const COMPLETE = 2;
/**
* Returns an integer indicating the current state of the visitor.
*
* @return int
* State should be one of the three StatefulVisitorInterface constants:
* - 0: indicates the algorithm has not yet started.
* - 1: indicates the algorithm is in progress.
* - 2: indicates the algorithm is complete.
*/
public function getState();
} | gpl-2.0 |
wastrachan/homebrew-cask | Casks/pocket-tanks.rb | 208 | cask 'pocket-tanks' do
version :latest
sha256 :no_check
url 'http://www.blitwise.com/ptanks_mac.dmg'
name 'Pocket Tanks'
homepage 'http://www.blitwise.com/index.html'
app 'Pocket Tanks.app'
end
| bsd-2-clause |
beealone/build-web-application-with-golang | tr/01.3.md | 7100 | # 1.3 Go commands
## Go commands
The Go language comes with a comprehensive set of tools. You can see them by running the `go` command in your terminal:
![](images/1.3.go.png?raw=true)
Figure 1.3 Detailed view of the Go commands
All of them are very useful tools. Let's look at how to use them.
## go build
This command is used to compile Go packages. When needed, it also compiles the dependent packages automatically.
- If the package is not `main` but, say, `mymath` from section 1.2, `go build` produces nothing. If you want the compiled `.a` file in `$GOPATH/pkg`, use `go install` instead.
- If the package is `main`, an executable is created in the same directory. If you want the resulting file to land in `$GOPATH/bin`, run `go install` or `go build -o ${DIRECTORY_PATH}/a.exe`.
- If there is more than one Go file in the directory and you only want to compile one of them, pass the file name as an argument to `go build`. For example, `go build a.go`. Without arguments, `go build` compiles all the files in the directory.
- You can also pick the name of the output file in advance. For example, in the `mathapp` project (from section 1.2), `go build -o astaxie.exe` produces a file named `astaxie.exe` instead of `mathapp.exe`. The default name is either the directory name or the name of the first source file.
(According to [The Go Programming Language Specification](https://golang.org/ref/spec), package names must be written on the first line of a source file, after the `package` keyword. They do not have to match the directory name, but the executable will be named after the directory.)
- `go build` ignores files whose names start with `_` or `.`.
- If you want to keep separate files for each operating system, you can use the operating system name as a suffix. Suppose you are writing `array.go` for several systems; the file names would look like this:
	array_linux.go | array_darwin.go | array_windows.go | array_freebsd.go
`go build` compiles for your operating system. For example, if you are on Linux, it compiles only array_linux.go and ignores all the others.
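A few typical invocations, gathered in one place for reference:
	go build                 # compile every file in the current directory
	go build a.go            # compile just one file
	go build -o astaxie.exe  # choose the output file name yourself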
## go clean
This command removes the following files, which are generated by the compiler:
	_obj/            // old object directory, created by Makefiles
	_test/           // old test directory, created by Makefiles
	_testmain.go     // old gotest file, created by Makefiles
	test.out         // old test file, created by Makefiles
	build.out        // old test file, created by Makefiles
	*.[568ao]        // object files, created by Makefiles
	DIR(.exe)        // created by go build
	DIR.test(.exe)   // created by go test -c
	MAINFILE(.exe)   // created by go build MAINFILE.go
I usually run this command before pushing my projects to Github. It matters for local tests, but those files are useless for version control.
## go fmt
Anyone who has worked with C/C++ knows the debates about which coding style is better: K&R-style or ANSI-style. In Go, there is just one coding style that everyone has to use. For example, opening braces must be written at the end of the line; they cannot go on a new line, and if you break this rule you will get a compile error! Fortunately, you don't have to memorize these rules: `go fmt` does the job for you. Just run `go fmt <file_name>.go` in your terminal. Many IDEs run this command automatically when you save a file. I will cover IDEs in more detail in the next section.
It is better to use `go fmt -w` instead of plain `gofmt`. The `-w` parameter writes the changes back to the file after formatting. `gofmt -w src` formats all the files under src.
## go get
This command fetches third-party packages. It currently supports BitBucket, Github, Google Code and Launchpad. When we run this command, two things happen: first it downloads the source code of the package, then it runs `go install`. Before running this command, make sure you have installed the necessary tools.
	BitBucket (Mercurial Git)
	Github (git)
	Google Code (Git, Mercurial, Subversion)
	Launchpad (Bazaar)
To use this command, you need to have the tools above installed. Don't forget to set the `$PATH` variable. By the way, it also supports custom domain names. Use `go help remote` for more details.
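For example, fetching a package from Github looks like this (the import path is just an illustration; any valid one works):
	go get github.com/astaxie/beedb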
## go install
This command compiles all the packages and moves the generated files to `$GOPATH/pkg` or `$GOPATH/bin`.
## go test
This command compiles and runs all the files ending in `*_test.go`, and prints the relevant information to the screen.
	ok   archive/tar   0.011s
	FAIL archive/zip   0.022s
	ok   compress/gzip 0.033s
	...
It runs all the test files by default. Use the `go help testflag` command for more details.
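If you have never written one, a minimal test file looks something like this (it assumes the `mymath` package from section 1.2 exports a `Sqrt` function; adjust the names to your own code):
	// mymath_test.go, placed next to the package it tests
	package mymath
	import "testing"
	// go test picks this up automatically: the name starts with Test
	// and the function takes a *testing.T parameter.
	func TestSqrt(t *testing.T) {
		if got := Sqrt(4); got != 2 {
			t.Errorf("Sqrt(4) = %v, want 2", got)
		}
	}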
## go doc
Many people think there is no need for third-party documentation tools for Go (as a matter of fact, I have already written one: [CHM](https://github.com/astaxie/godoc)). Go has a very powerful built-in tool for managing documentation.
So how do we look at the documentation of a package? For example, if you want more information about the `builtin` package, the `go doc builtin` command will do the job. Similarly, you can get information about the `http` package with `go doc net/http`. If you want more detail about functions, `godoc fmt Printf` and `godoc -src fmt Printf` will do it (`-src` shows the source code of the function).
Run `godoc -http=:8080` and go to `127.0.0.1:8080`. You should see a local version of `golang.org`. It has information not only about the standard packages, but also about all the packages under `$GOPATH/pkg`. It is great for those who cannot reach the documentation because of the Great Firewall of China.
## Other commands
Go provides more commands than the ones we have talked about above.
	go fix     // upgrade code from an old version
	go version // information about the version of Go you are using
	go env     // environment variables related to Go
	go list    // list of installed packages
	go run     // compile the package temporarily and run it
You can find more details in the Go commands' own documentation. Run `go help <command_name>` to access it.
## Links
- [Directory](preface.md)
- Previous section: [$GOPATH and workspace](01.2.md)
- Next section: [Go development tools](01.4.md)
| bsd-3-clause |
danlrobertson/servo | tests/wpt/web-platform-tests/referrer-policy/strict-origin/meta-referrer/same-origin/http-http/iframe-tag/swap-origin-redirect/insecure-protocol.http.html | 2047 | <!DOCTYPE html>
<!-- DO NOT EDIT! Generated by referrer-policy/generic/tools/generate.py using referrer-policy/generic/template/test.release.html.template. -->
<html>
<head>
<title>Referrer-Policy: Referrer Policy is set to 'strict-origin'</title>
<meta name="description" content="Check that non a priori insecure subresource gets only the origin portion of the referrer URL. A priori insecure subresource gets no referrer information.">
<meta name="referrer" content="strict-origin">
<link rel="author" title="Kristijan Burnik" href="burnik@chromium.org">
<link rel="help" href="https://w3c.github.io/webappsec-referrer-policy/#referrer-policy-strict-origin">
<meta name="assert" content="The referrer URL is origin when a
document served over http requires an http
sub-resource via iframe-tag using the meta-referrer
delivery method with swap-origin-redirect and when
the target request is same-origin.">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<!-- TODO(kristijanburnik): Minify and merge both: -->
<script src="/referrer-policy/generic/common.js"></script>
<script src="/referrer-policy/generic/referrer-policy-test-case.js?pipe=sub"></script>
</head>
<body>
<script>
ReferrerPolicyTestCase(
{
"referrer_policy": "strict-origin",
"delivery_method": "meta-referrer",
"redirection": "swap-origin-redirect",
"origin": "same-origin",
"source_protocol": "http",
"target_protocol": "http",
"subresource": "iframe-tag",
"subresource_path": "/referrer-policy/generic/subresource/document.py",
"referrer_url": "origin"
},
document.querySelector("meta[name=assert]").content,
new SanityChecker()
).start();
</script>
<div id="log"></div>
</body>
</html>
| mpl-2.0 |
SanDisk-Open-Source/SSD_Dashboard | uefi/linux-source-3.8.0/include/linux/inetdevice.h | 8144 | #ifndef _LINUX_INETDEVICE_H
#define _LINUX_INETDEVICE_H
#ifdef __KERNEL__
#include <linux/bitmap.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
enum
{
IPV4_DEVCONF_FORWARDING=1,
IPV4_DEVCONF_MC_FORWARDING,
IPV4_DEVCONF_PROXY_ARP,
IPV4_DEVCONF_ACCEPT_REDIRECTS,
IPV4_DEVCONF_SECURE_REDIRECTS,
IPV4_DEVCONF_SEND_REDIRECTS,
IPV4_DEVCONF_SHARED_MEDIA,
IPV4_DEVCONF_RP_FILTER,
IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
IPV4_DEVCONF_BOOTP_RELAY,
IPV4_DEVCONF_LOG_MARTIANS,
IPV4_DEVCONF_TAG,
IPV4_DEVCONF_ARPFILTER,
IPV4_DEVCONF_MEDIUM_ID,
IPV4_DEVCONF_NOXFRM,
IPV4_DEVCONF_NOPOLICY,
IPV4_DEVCONF_FORCE_IGMP_VERSION,
IPV4_DEVCONF_ARP_ANNOUNCE,
IPV4_DEVCONF_ARP_IGNORE,
IPV4_DEVCONF_PROMOTE_SECONDARIES,
IPV4_DEVCONF_ARP_ACCEPT,
IPV4_DEVCONF_ARP_NOTIFY,
IPV4_DEVCONF_ACCEPT_LOCAL,
IPV4_DEVCONF_SRC_VMARK,
IPV4_DEVCONF_PROXY_ARP_PVLAN,
IPV4_DEVCONF_ROUTE_LOCALNET,
__IPV4_DEVCONF_MAX
};
#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
struct ipv4_devconf {
void *sysctl;
int data[IPV4_DEVCONF_MAX];
DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
};
struct in_device {
struct net_device *dev;
atomic_t refcnt;
int dead;
struct in_ifaddr *ifa_list; /* IP ifaddr chain */
struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */
int mc_count; /* Number of installed mcasts */
spinlock_t mc_tomb_lock;
struct ip_mc_list *mc_tomb;
unsigned long mr_v1_seen;
unsigned long mr_v2_seen;
unsigned long mr_maxdelay;
unsigned char mr_qrv;
unsigned char mr_gq_running;
unsigned char mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
struct neigh_parms *arp_parms;
struct ipv4_devconf cnf;
struct rcu_head rcu_head;
};
#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
#define IPV4_DEVCONF_ALL(net, attr) \
IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
static inline int ipv4_devconf_get(struct in_device *in_dev, int index)
{
index--;
return in_dev->cnf.data[index];
}
static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
int val)
{
index--;
set_bit(index, in_dev->cnf.state);
in_dev->cnf.data[index] = val;
}
static inline void ipv4_devconf_setall(struct in_device *in_dev)
{
bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX);
}
#define IN_DEV_CONF_GET(in_dev, attr) \
ipv4_devconf_get((in_dev), IPV4_DEVCONF_ ## attr)
#define IN_DEV_CONF_SET(in_dev, attr, val) \
ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))
#define IN_DEV_ANDCONF(in_dev, attr) \
(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
(IPV4_DEVCONF_ALL(net, attr) || \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_ORCONF(in_dev, attr) \
IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
#define IN_DEV_MAXCONF(in_dev, attr) \
(max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
IN_DEV_CONF_GET((in_dev), attr)))
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)
#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
SECURE_REDIRECTS)
#define IN_DEV_IDTAG(in_dev) IN_DEV_CONF_GET(in_dev, TAG)
#define IN_DEV_MEDIUM_ID(in_dev) IN_DEV_CONF_GET(in_dev, MEDIUM_ID)
#define IN_DEV_PROMOTE_SECONDARIES(in_dev) \
IN_DEV_ORCONF((in_dev), \
PROMOTE_SECONDARIES)
#define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
#define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \
IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET)
#define IN_DEV_RX_REDIRECTS(in_dev) \
((IN_DEV_FORWARD(in_dev) && \
IN_DEV_ANDCONF((in_dev), ACCEPT_REDIRECTS)) \
|| (!IN_DEV_FORWARD(in_dev) && \
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
struct in_ifaddr {
struct hlist_node hash;
struct in_ifaddr *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;
__be32 ifa_local;
__be32 ifa_address;
__be32 ifa_mask;
__be32 ifa_broadcast;
unsigned char ifa_scope;
unsigned char ifa_flags;
unsigned char ifa_prefixlen;
char ifa_label[IFNAMSIZ];
};
extern int register_inetaddr_notifier(struct notifier_block *nb);
extern int unregister_inetaddr_notifier(struct notifier_block *nb);
extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
struct ipv4_devconf *devconf);
extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
{
return __ip_dev_find(net, addr, true);
}
extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
extern void devinet_init(void);
extern struct in_device *inetdev_by_index(struct net *, int);
extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
extern __be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope);
extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask);
static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
{
return !((addr^ifa->ifa_address)&ifa->ifa_mask);
}
/*
* Check if a mask is acceptable.
*/
static __inline__ int bad_mask(__be32 mask, __be32 addr)
{
__u32 hmask;
if (addr & (mask = ~mask))
return 1;
hmask = ntohl(mask);
if (hmask & (hmask+1))
return 1;
return 0;
}
#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \
for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next)
#define for_ifa(in_dev) { struct in_ifaddr *ifa; \
for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next)
#define endfor_ifa(in_dev) }
static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
{
return rcu_dereference(dev->ip_ptr);
}
static inline struct in_device *in_dev_get(const struct net_device *dev)
{
struct in_device *in_dev;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
atomic_inc(&in_dev->refcnt);
rcu_read_unlock();
return in_dev;
}
static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
{
return rtnl_dereference(dev->ip_ptr);
}
extern void in_dev_finish_destroy(struct in_device *idev);
static inline void in_dev_put(struct in_device *idev)
{
if (atomic_dec_and_test(&idev->refcnt))
in_dev_finish_destroy(idev);
}
#define __in_dev_put(idev) atomic_dec(&(idev)->refcnt)
#define in_dev_hold(idev) atomic_inc(&(idev)->refcnt)
#endif /* __KERNEL__ */
static __inline__ __be32 inet_make_mask(int logmask)
{
if (logmask)
return htonl(~((1<<(32-logmask))-1));
return 0;
}
static __inline__ int inet_mask_len(__be32 mask)
{
__u32 hmask = ntohl(mask);
if (!hmask)
return 0;
return 32 - ffz(~hmask);
}
#endif /* _LINUX_INETDEVICE_H */
| gpl-2.0 |
edureis95/xbmc | xbmc/storage/windows/Win32StorageProvider.h | 1587 | #pragma once
/*
* Copyright (C) 2005-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "storage/IStorageProvider.h"
#include "utils/Job.h"
class CWin32StorageProvider : public IStorageProvider
{
public:
virtual ~CWin32StorageProvider() { }
virtual void Initialize();
virtual void Stop() { }
virtual void GetLocalDrives(VECSOURCES &localDrives);
virtual void GetRemovableDrives(VECSOURCES &removableDrives);
virtual std::string GetFirstOpticalDeviceFileName();
virtual bool Eject(const std::string& mountpath);
virtual std::vector<std::string> GetDiskUsage();
virtual bool PumpDriveChangeEvents(IStorageEventsCallback *callback);
static void SetEvent() { xbevent = true; }
static bool xbevent;
};
class CDetectDisc : public CJob
{
public:
CDetectDisc(const std::string &strPath, const bool bautorun);
bool DoWork();
private:
std::string m_strPath;
bool m_bautorun;
};
| gpl-2.0 |