text
stringlengths
2
99.9k
meta
dict
Kyra is a simple, fully featured Sprite engine written in C++. The Kyra engine is suited to 2D, isometric, and quasi-3D games. It is built on top of SDL for cross platform use. It supports tiles, sprites, and user drawn surfaces. It has full support for alpha blending, scaling, color transformation, pixel perfect collision detection, OpenGL acceleration, and mouse testing. It comes with tools to define sprites and import images into the system. WWW: http://www.grinninglizard.com/kyra/
{ "pile_set_name": "Github" }
""" Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...) unless there exists a 'from future_builtins import zip' statement in the top-level namespace. We avoid the transformation if the zip() call is directly contained in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. """ # Local imports from .. import fixer_base from ..fixer_util import Name, Call, in_special_context class FixZip(fixer_base.ConditionalFix): BM_compatible = True PATTERN = """ power< 'zip' args=trailer< '(' [any] ')' > > """ skip_on = "future_builtins.zip" def transform(self, node, results): if self.should_skip(node): return if in_special_context(node): return None new = node.clone() new.prefix = "" new = Call(Name("list"), [new]) new.prefix = node.prefix return new
{ "pile_set_name": "Github" }
// ==++==
//
//   Copyright (c) Microsoft Corporation.  All rights reserved.
//
// ==--==
/*============================================================
**
** Class:  StringReader
**
** <OWNER>Microsoft</OWNER>
**
** Purpose: For reading text from strings
**
===========================================================*/
/*
 * https://github.com/Microsoft/referencesource/blob/master/mscorlib/system/io/streamreader.cs
 */

using System;
using System.Runtime.InteropServices;
using System.Diagnostics.Contracts;

namespace System.IO
{
    // This class implements a text reader that reads from a string.
    public class StringReader : TextReader
    {
        private String _s;      // backing string; null after the reader is closed
        private int _pos;       // index of the next character to be read
        private int _length;    // cached _s.Length

        public StringReader(String s)
        {
            if (s == null)
                throw new ArgumentNullException("s");
            Contract.EndContractBlock();
            _s = s;
            // s is known non-null here; the conditional is kept for parity
            // with the original reference source.
            _length = s == null ? 0 : s.Length;
        }

        // Closes this StringReader. Following a call to this method, the String
        // Reader will throw an ObjectDisposedException.
        public override void Close()
        {
            Dispose(true);
        }

        protected override void Dispose(bool disposing)
        {
            // Nulling _s is what makes subsequent reads fail via __Error.ReaderClosed().
            _s = null;
            _pos = 0;
            _length = 0;
            base.Dispose(disposing);
        }

        // Returns the next available character without actually reading it from
        // the underlying string. The current position of the StringReader is not
        // changed by this operation. The returned value is -1 if no further
        // characters are available.
        [Pure]
        public override int Peek()
        {
            if (_s == null)
                __Error.ReaderClosed();
            if (_pos == _length)
                return -1;
            return _s[_pos];
        }

        // Reads the next character from the underlying string. The returned value
        // is -1 if no further characters are available.
        public override int Read()
        {
            if (_s == null)
                __Error.ReaderClosed();
            if (_pos == _length)
                return -1;
            return _s[_pos++];
        }

        // Reads a block of characters. This method will read up to count
        // characters from this StringReader into the buffer character
        // array starting at position index. Returns the actual number of
        // characters read, or zero if the end of the string is reached.
        public override int Read([In, Out] char[] buffer, int index, int count)
        {
            if (buffer == null)
                throw new ArgumentNullException("buffer");
            if (index < 0)
                throw new ArgumentOutOfRangeException("index");
            if (count < 0)
                throw new ArgumentOutOfRangeException("count");
            if (buffer.Length - index < count)
                throw new ArgumentException();
            Contract.EndContractBlock();
            if (_s == null)
                __Error.ReaderClosed();

            int n = _length - _pos;  // characters remaining
            if (n > 0)
            {
                if (n > count)
                    n = count;
                _s.CopyTo(_pos, buffer, index, n);
                _pos += n;
            }
            return n;
        }

        public override String ReadToEnd()
        {
            if (_s == null)
                __Error.ReaderClosed();
            String s;
            // Avoid an allocation when nothing has been consumed yet.
            if (_pos == 0)
                s = _s;
            else
                s = _s.Substring(_pos, _length - _pos);
            _pos = _length;
            return s;
        }

        // Reads a line. A line is defined as a sequence of characters followed by
        // a carriage return ('\r'), a line feed ('\n'), or a carriage return
        // immediately followed by a line feed. The resulting string does not
        // contain the terminating carriage return and/or line feed. The returned
        // value is null if the end of the underlying string has been reached.
        public override String ReadLine()
        {
            if (_s == null)
                __Error.ReaderClosed();
            int i = _pos;
            while (i < _length)
            {
                char ch = _s[i];
                if (ch == '\r' || ch == '\n')
                {
                    String result = _s.Substring(_pos, i - _pos);
                    _pos = i + 1;
                    // Swallow the '\n' of a "\r\n" pair so it is not returned
                    // as an extra empty line.
                    if (ch == '\r' && _pos < _length && _s[_pos] == '\n')
                        _pos++;
                    return result;
                }
                i++;
            }
            // Trailing text without a terminator still forms a final line.
            if (i > _pos)
            {
                String result = _s.Substring(_pos, i - _pos);
                _pos = i;
                return result;
            }
            return null;
        }
    }
}
{ "pile_set_name": "Github" }
// Copyright 2009 the Sputnik authors.  All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.

/**
 * The MV of StringNumericLiteral ::: StrWhiteSpaceopt StrNumericLiteral StrWhiteSpaceopt is the MV of StrNumericLiteral, no matter whether white space is present or not
 *
 * @path ch09/9.3/9.3.1/S9.3.1_A3_T1.js
 * @description static string
 */

// CHECK#1
if (Number("\u0009\u000C\u0020\u00A0\u000B\u000A\u000D\u2028\u2029\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000") !== Number("")) {
  $ERROR('#1: Number("\\u0009\\u000C\\u0020\\u00A0\\u000B\\u000A\\u000D\\u2028\\u2029\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u202F\\u205F\\u3000") === Number("")');
}

// CHECK#2
if (Number("\u0009\u000C\u0020\u00A0\u000A\u000D\u2028\u2029\u000B\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u30001234567890\u0009\u000C\u0020\u00A0\u000B\u000A\u000D\u2028\u2029\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000") !== Number("1234567890")) {
  $ERROR('#2: Number("\\u0009\\u000C\\u0020\\u00A0\\u000A\\u000D\\u2028\\u2029\\u000B\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u202F\\u205F\\u30001234567890\\u0009\\u000C\\u0020\\u00A0\\u000B\\u000A\\u000D\\u2028\\u2029\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u202F\\u205F\\u3000") === Number("1234567890")');
}

// CHECK#3
if (!(+("\u0009\u000C\u0020\u00A0\u000B\u000A\u000D\u2028\u2029\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000Infinity\u0009\u000C\u0020\u00A0\u000B\u000A\u000D\u2028\u2029\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000") == Number("Infinity"))) {
  $ERROR('#3: +("\\u0009\\u000C\\u0020\\u00A0\\u000B\\u000A\\u000D\\u2028\\u2029\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u202F\\u205F\\u3000Infinity\\u0009\\u000C\\u0020\\u00A0\\u000B\\u000A\\u000D\\u2028\\u2029\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u202F\\u205F\\u3000") == Number("Infinity")');
}

// CHECK#4
// Fixed: the original compared against Number(-"Infinity"), i.e. Number(NaN),
// which is NaN and therefore never equal to anything -- the check could not
// pass. The intended comparison (per the $ERROR text) is Number("-Infinity").
if (!(Number("\u0009\u000C\u0020\u00A0\u000B\u000A\u000D\u2028\u2029\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000-Infinity\u0009\u000C\u0020\u00A0\u000B\u000A\u000D\u2028\u2029\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u202F\u205F\u3000") == Number("-Infinity"))) {
  $ERROR('#4: Number("\\u0009\\u000C\\u0020\\u00A0\\u000B\\u000A\\u000D\\u2028\\u2029\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u202F\\u205F\\u3000-Infinity\\u0009\\u000C\\u0020\\u00A0\\u000B\\u000A\\u000D\\u2028\\u2029\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u202F\\u205F\\u3000") == Number("-Infinity")');
}
{ "pile_set_name": "Github" }
# 双向性 一个良好设计的应用可以很容易本地化,只要使用例如英语从左到右读写的语言脚本,或者双向语言脚本。 在双向脚本中,文本从右到左写和读,除了数字和来自 LTR 语言的词汇,例如非本地化的名称,是从左到右写的。双向脚本包括 Arabic,Hebrew 和 Persian。 双向性不仅影响文本布局和 UI 元素,还影响图标图表。本节提供关于如何在设计中考虑双向性的基本高级信息。 > UI 镜像概览 > 从右到左(RTL)镜像指南 > 其他本地化考虑 ## UI 镜像概览 从左到右(LTR) 界面和从右到左(RTL)界面的主要区别在于时间的流逝如何计算。使用从左到右(LTR)脚本的语言从左到右描绘时间,使用从右到左(RTL)脚本的语言从右到左描绘时间。 ![](../images/18_1_0.png) 镜像指把 UI 由从左到右(LTR)切换成从右到左(RTL)(反之亦然),影响应用布局和图形元素两者。注意,文本字符串、数字和来自从左到右(LTR)脚本的嵌入词汇不会被镜像;他们仍然是从左到右(LTR)。(文本字符串内容永远不会被镜像;在语言中总是正确的方向。) 从右到左(RTL)布局是从左到右(LTR)布局的镜像。 主要布局变化: - 图标在文本框的右边。 - 导航按钮是反向顺序。 - 指示方向的图标,例如扬声器图标,被镜像。 - 不指示方向的图标,例如照相机和复选标记,不会被镜像。 - 文本被镜像,但是时间和电话号码不会。从右到左(RTL)脚本中数字从左到右(LTR)显示。 ![](../images/18_1.png) 使用从左到右(LTR)UI 的例子 ![](../images/18_2.png) Hebrew 中使用从右到左(RTL)的例子。数字从左到右(LTR)显示。 ## 从右到左(RTL)镜像指南 镜像文本、布局以及图标图表时,遵循这些指南,支持从右到左的 UI。 从右到左(RTL)界面的指导准则是时间从右到左。前进指向左,后退指向右。 镜像时最重要的图标是前进和后退按钮。 ### 何时镜像 后退和前进导航按钮被翻转。 ![](../images/18_3.png) 从左到右(LTR)后退按钮 ![](../images/18_4.png) 从右到左(RTL)后退按钮 ![](../images/18_5.png) 从左到右(LTR)前进按钮 ![](../images/18_6.png) 从右到左(RTL)前进按钮 一个指示前进动作的图标应该被镜像。 ![](../images/18_7.png) 一个从左到右(LTR)的自行车前进指向右边。 ![](../images/18_8.png) 一个从右到左(RTL)自行车前进指向左边。 其他东西更加微小。例如,一个代表设置的图标使用一个斜线穿过图标来表明不可用状态。在一个从左到右(LTR)界面中,斜线从左上到右下。在一个从右到左的界面中,斜线从右上到左下。 ![](../images/18_9.png) 一个从左到右(LTR)飞行模式的离线状态。 ![](../images/18_10.png) 一个从右到左(RTL)飞机模式的离线状态。 图片中,斜线被镜像。飞机本身直指向上。不需要特别对待。 右边有一个滑块的音量图标应该被镜像。滑块应该从右到左(RTL)进行,声波应该从右边出现。 ![](../images/18_11.png) 带有滑块的从左到右(LTR)音量 ![](../images/18_12.png) 带有扬声器图标和滑块的从从右到左的音量被镜像 人形图标,头部和脸部应该被特别镜像,尤其是如果他们显得靠近文本。这是人形如何正向面向文本,而不是反向背对文本。 这有时可能是非常细微的,处理一个带角度的或者轻微扭转的人脸,或者一组人脸。 ![](../images/18_13.png) 从左到右(LTR)群组图标 ![](../images/18_14.png) 从右到左(RTL)群组图标 有时,时间的水平和环形方向都在图标中有指示。例如,谷歌文档中的重做和取消重做按钮既有水平方向又有圆形方向。 在从左到右(LTR)设计中,时间的圆形和水平表示都指向同一个方向。在从右到左(RTL)设计中,选择是否要显示圆形或者水平方向。 ![](../images/18_15.png) 谷歌文档中的从左到右(LTR)重做(redo)和撤销重做(undo)按钮。 含有文本代表的图标需要特别的镜像。 在从右到左(RTL)设计中,文本是居右对齐的。如果段落的开头有缩进,段落结尾有未完成的行或者右边不对齐,这些情况图标都需要被镜像。 ![](../images/18_16.png) 从左到右(LTR)聊天图标 ![](../images/18_17.png) 
从右到左(RTL)聊天图标 ### 何时不要镜像 当时间的线性表示在从右到左(RTL)中被镜像,圆形的方向不需要被镜像。从右到左(RTL)语言中时钟仍然是顺时针旋转的。一个带有顺时针指向箭头的钟表图标、圆形刷新以及进度指示不需要被镜像。 ![](../images/18_18.png) 显示时间前进的刷新按钮;方向是顺时针的。图标不被镜像。 ![](../images/18_19.png) 历史图标指向时间的反方向;方向是逆时针的。图标不被镜像。 一些代表实体物体的图标在从右到左的设计中不被镜像。 例如,物理键盘在现实世界中到处一样,所以不被镜像。 ![](../images/18_20.png) 键盘图标 ![](../images/18_21.png) 耳机图标 看起来带有方向,但是事实上代表用右手握住一个实体的特定图标。 例如,搜索图标典型的是把手柄放在右侧底部,因为大部分用户是右撇子。 从右到左(RTL)世界中的大部分也是右撇子,所以这样的图标不应该被镜像。 ![](../images/18_22.png) 搜索图标 ![](../images/18_23.png) 咖啡杯图标 媒体回放按钮以及进度指示不被镜像。这些元素的从左到右(LTR)方向代表磁带的方向,不是时间的方向。 ![](../images/18_24.png) 既然媒体回放按钮以及进度指示反映磁带的方向,他们不被镜像。 ![](../images/18_25.png) 多媒体的回放控制永远是从左到右(LTR)的。 ![](../images/18_26.png) 不要镜像多媒体回放按钮或者进度条。这些元素的方向代表磁带的方向,不是时间的方向。 ## 其他本地化考虑 ### 图形中的文本 由于图形元素中的文本总是需要本地化,最好用不使用文本的方式传达理念。 ### 数字 数字也是文本。包含数字的图标也需要为使用不同数字的语言进行本地化。例如,Bengali,Marathi,Nepali,以及大部分阿拉伯土著使用不同形式的数字。使用包含这些数字的图标将会需要根据他们的形状进行重绘。 即使在从左到右(LTR)环境中也有可能需要镜像。例如,如果某人正在谷歌文档中编辑一段从右到左(RTL)的段落,缩进以及列表按钮也需要是从右到左(RTL)的,即使主要的 UI 方向是从左到右(LTR)。
{ "pile_set_name": "Github" }
def slice_hypercube(data, header, dimensions=(0, 1), slices=()):
    """
    Extract a 2-d slice from an n-dimensional HDU data/header pair and
    return the new data (the header is not modified).

    Parameters
    ----------
    data : ndarray
        Array from the HDU; must have at least two dimensions.
    header : mapping
        FITS header. Only CTYPE<i>/NAXIS<i> are read, and only when building
        the error message for an under-specified cube.
    dimensions : sequence of two ints, optional
        Indices of the two axes to keep, mapped to 'x' and 'y'.
    slices : int or sequence, optional
        Index (or one index per remaining axis) selecting the position along
        every axis not listed in ``dimensions``.

    Returns
    -------
    data : ndarray
        The 2-d slice (transposed when ``dimensions`` are in reverse order).
    wcsaxes_slices : tuple
        Per-axis description ('x', 'y', or the index used).
    """
    # Normalize `slices` to a fresh mutable list. The previous code turned an
    # int into a *tuple* (slices,), which later crashed on .insert() with
    # AttributeError; a list fixes that and also avoids mutating the caller's
    # sequence. Defaults are immutable now (mutable-default-argument hygiene).
    if isinstance(slices, int):
        slices = [slices]
    else:
        slices = list(slices)

    shape = data.shape

    if len(shape) < 2:
        raise Exception("FITS file does not have enough dimensions")
    elif len(shape) == 2:
        # Plain image: nothing to slice, only (possibly) swap the two axes.
        wcsaxes_slices = ('x', 'y')
        if dimensions[1] < dimensions[0]:
            data = data.transpose()
            wcsaxes_slices = ('y', 'x')
        return data, wcsaxes_slices
    else:
        if slices:
            wcsaxes_slices = slices[:]
            # Insert the two full slices lowest-index first so the second
            # insertion position is still valid after the first insert.
            if dimensions[0] < dimensions[1]:
                slices.insert(dimensions[0], slice(None, None, None))
                slices.insert(dimensions[1], slice(None, None, None))
                wcsaxes_slices.insert(dimensions[0], 'x')
                wcsaxes_slices.insert(dimensions[1], 'y')
            else:
                slices.insert(dimensions[1], slice(None, None, None))
                slices.insert(dimensions[0], slice(None, None, None))
                wcsaxes_slices.insert(dimensions[1], 'y')
                wcsaxes_slices.insert(dimensions[0], 'x')
            slices = tuple(slices)
            wcsaxes_slices = tuple(wcsaxes_slices)
            # FITS axis order is reversed relative to the numpy axis order.
            data = data[slices[::-1]]
            if dimensions[1] < dimensions[0]:
                data = data.transpose()
        else:
            message = """
    Attempted to read in %i-dimensional FITS cube, but
    dimensions and slices were not specified. Please specify these
    using the dimensions= and slices= argument. The cube dimensions
    are:\n\n""" % len(shape)
            for i in range(1, len(shape) + 1):
                message += " " * 10
                message += " %i %s %i\n" % (i - 1, header["CTYPE%i" % i],
                                            header["NAXIS%i" % i])
            raise Exception(message)

    return data, wcsaxes_slices
{ "pile_set_name": "Github" }
/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef AR9003_RTT_H
#define AR9003_RTT_H

/*
 * AR9003 RTT interface. All operations act on the hardware state in *ah.
 * NOTE(review): "RTT" here presumably refers to the chip's calibration
 * retention table -- confirm against the ath9k ar9003 implementation.
 */

void ar9003_hw_rtt_enable(struct ath_hw *ah);
void ar9003_hw_rtt_disable(struct ath_hw *ah);

/* Restrict RTT operation to the entries selected by rtt_mask. */
void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask);

bool ar9003_hw_rtt_force_restore(struct ath_hw *ah);

/* Per-chain history table transfer; table points at the caller's buffer. */
void ar9003_hw_rtt_load_hist(struct ath_hw *ah, u8 chain, u32 *table);
void ar9003_hw_rtt_fill_hist(struct ath_hw *ah, u8 chain, u32 *table);
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah);

#endif
{ "pile_set_name": "Github" }
[Desktop Entry] Name=Tor Browser Name[hu]=Tor-böngésző Name[pt_BR]=Navegador Tor GenericName=Tor browser GenericName[hu]=Tor böngésző indító Comment=Launch Tor Browser Comment[hu]=Tor böngésző indító Comment[pt_BR]=Navegador Tor Exec=torbrowser-launcher %u Terminal=false Type=Application Icon=torbrowser Categories=Network;WebBrowser; StartupWMClass=Tor Browser
{ "pile_set_name": "Github" }
import test from 'ava';

import { fileExtension } from '../src';

// URL input: the extension comes from the path portion of the URL.
test('if the path is an url, fileExtension should return the right extension', (t) => {
    const expected = 'js';

    t.is(fileExtension('https://example.com/script.js'), expected);
});

// Windows-style path with backslash separators and a drive letter.
test('if the path is a local file, fileExtension should return the right extension', (t) => {
    const expected = 'txt';

    t.is(fileExtension('c:\\test\\text.txt'), expected);
});

// POSIX-style path with forward-slash separators.
test('if the path is a local file (linux), fileExtension should return the right extension', (t) => {
    const expected = 'png';

    t.is(fileExtension('/mnt/test/image.png'), expected);
});
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android"> <background android:drawable="@color/launcher_background" /> <foreground android:drawable="@mipmap/launcher_foreground" /> </adaptive-icon>
{ "pile_set_name": "Github" }
/*
 * Copyright (C) 2016 Alexey Verein
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.antonioleiva.bandhookkotlin.ui.entity.mapper

import com.antonioleiva.bandhookkotlin.domain.entity.Album
import com.antonioleiva.bandhookkotlin.domain.entity.Artist
import org.junit.Assert.*
import org.junit.Before
import org.junit.Test

/** Unit tests for [AlbumDetailDataMapper]. */
class AlbumDetailDataMapperTest {

    lateinit var album: Album
    lateinit var albumDetailDataMapper: AlbumDetailDataMapper

    @Before
    fun setUp() {
        // Fixture: a fully populated domain album with an empty track list.
        album = Album("album id", "album name", Artist("artist id", "artist name"), "album url", emptyList())
        albumDetailDataMapper = AlbumDetailDataMapper()
    }

    @Test
    fun testTransform() {
        // When
        val transformedAlbum = albumDetailDataMapper.transform(album)

        // Then: every mapped field mirrors the source album.
        assertNotNull(transformedAlbum)
        assertEquals(album.id, transformedAlbum?.id)
        assertEquals(album.name, transformedAlbum?.name)
        assertEquals(album.url, transformedAlbum?.url)
        assertEquals(album.tracks, transformedAlbum?.tracks)
    }

    @Test
    fun testTransform_null() {
        // When
        val transformedAlbum = albumDetailDataMapper.transform(null)

        // Then: null input maps to null output.
        assertNull(transformedAlbum)
    }
}
{ "pile_set_name": "Github" }
// Copyright 2018 Superblocks AB // // This file is part of Superblocks Lab. // // Superblocks Lab is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation version 3 of the License. // // Superblocks Lab is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with Superblocks Lab. If not, see <http://www.gnu.org/licenses/>. import { connect } from 'react-redux'; import { getNetworkPreferences } from '../../../../selectors/settings'; import NetworkPreferences from './NetworkPreferences'; const mapStateToProps = state => ({ networkPreferences: getNetworkPreferences(state), }); export default connect(mapStateToProps, null)(NetworkPreferences);
{ "pile_set_name": "Github" }
#!/bin/bash
# description: analyze all perf samples
# Forward all wrapper arguments to `perf script` before attaching the
# analysis script. "$@" must be quoted: the original unquoted $@ re-split
# any argument containing whitespace (e.g. a perf.data path with spaces).
perf script "$@" -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py
{ "pile_set_name": "Github" }
# OrientDB Module !!! note This module is INCUBATING. While it is ready for use and operational in the current version of Testcontainers, it is possible that it may receive breaking changes in the future. See [our contributing guidelines](/contributing/#incubating-modules) for more information on our incubating modules policy. This module helps running [OrientDB](https://orientdb.org/download) using Testcontainers. Note that it's based on the [official Docker image](https://hub.docker.com/_/orientdb/) provided by OrientDB. ## Usage example Declare your Testcontainer as a `@ClassRule` or `@Rule` in a JUnit 4 test or as static or member attribute of a JUnit 5 test annotated with `@Container` as you would with other Testcontainers. You can call `getDbUrl()` OrientDB container and build the `ODatabaseSession` by your own, but a more useful `getSession()` method is provided. On the JVM you would most likely use the [Java driver](https://github.com/). The following example uses the JUnit 5 extension `@Testcontainers` and demonstrates both the usage of the Java Client: ```java tab="JUnit 5 example" @Testcontainers public class ExampleTest { @Container private static OrientDBContainer container = new OrientDBContainer(); @Test void testDbCreation() { final ODatabaseSession session = container.getSession(); session.command("CREATE CLASS Person EXTENDS V"); session.command("INSERT INTO Person set name='john'"); session.command("INSERT INTO Person set name='jane'"); assertThat(session.query("SELECT FROM Person").stream()).hasSize(2); } } ``` You are not limited to Unit tests and can of course use an instance of the OrientDB Testcontainer in vanilla Java code as well. 
## Adding this module to your project dependencies Add the following dependency to your `pom.xml`/`build.gradle` file: ```groovy tab='Gradle' testCompile "org.testcontainers:orientdb:{{latest_version}}" ``` ```xml tab='Maven' <dependency> <groupId>org.testcontainers</groupId> <artifactId>orientdb</artifactId> <version>{{latest_version}}</version> <scope>test</scope> </dependency> ``` !!! hint Add the OrientDB Java client if you plan to access the Testcontainer: ```groovy tab='Gradle' compile "com.orientechnologies:orientdb-client:3.0.24" ``` ```xml tab='Maven' <dependency> <groupId>com.orientechnologies</groupId> <artifactId>orientdb-client</artifactId> <version>3.0.24</version> </dependency> ```
{ "pile_set_name": "Github" }
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RRlang(RPackage): """A toolbox for working with base types, core R features like the condition system, and core 'Tidyverse' features like tidy evaluation.""" homepage = "https://cloud.r-project.org/package=rlang" url = "https://cloud.r-project.org/src/contrib/rlang_0.2.2.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/rlang" version('0.4.6', sha256='3a81b107765fd6ac0ad716c428d01878775ded9208ba125d43c890c73d2533ca') version('0.4.0', sha256='9748a4a217548bbe5631c18fd88c94811950446f798ff21fb327703aebaa150d') version('0.3.4', sha256='4e467f7b0dcbde91b60c292137d2c69cecaa713a6e4c9b7157ef6fd5453b7ade') version('0.3.1', sha256='30427b2be2288e88acd30c4ea348ee06043a649fd73623a63148b1ad96317151') version('0.3.0.1', sha256='29451db0a3cabd75761d32df47a5d43ccadbde07ecb693ffdd73f122a0b9f348') version('0.3.0', sha256='9ab10ea3e19b2d60a289602ebbefa83509f430db1c8161e523896c374241b893') version('0.2.2', sha256='c9119420ff0caeb6b0fcee8800e2fb1ec072e291e0e53b8acea3c4cf49420d33') version('0.1.4', sha256='8d9b6c962ae81b96c96ada9614c6a1ffb9eda12dd407e2aff634f7d335e7b9f4') version('0.1.2', sha256='90cfcd88cae6fff044fca64b24a8e6bdc09fc276163b518ff2d90268b0c785f9') version('0.1.1', sha256='5901f95d68728a7d9bb1c2373a20ce6e4ad222f66e397e7735e9eff987c73c3f') depends_on('r@3.1.0:', when='@:0.3.4', type=('build', 'run')) depends_on('r@3.2.0:', when='@0.4.0:', type=('build', 'run'))
{ "pile_set_name": "Github" }
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Mvc;
using AnimalsMvc.Models;

namespace AnimalsMvc.Controllers
{
    /// <summary>
    /// MVC controller serving the animal list page and per-animal details.
    /// </summary>
    public class AnimalsController : Controller
    {
        // Data-access abstraction, supplied by the DI container.
        private IData _tempData;

        public AnimalsController(IData tempData)
        {
            _tempData = tempData;
        }

        /// <summary>Renders the list of all animals.</summary>
        public IActionResult Index()
        {
            List<Animal> animals = _tempData.AnimalsInitializeData();
            IndexViewModel indexViewModel = new IndexViewModel();
            indexViewModel.Animals = animals;
            return View(indexViewModel);
        }

        /// <summary>
        /// Renders details for one animal; returns 404 when the id is
        /// missing or unknown (GetAnimalById yields null).
        /// </summary>
        public IActionResult Details(int? id)
        {
            var model = _tempData.GetAnimalById(id);
            if (model == null)
            {
                return NotFound();
            }
            return View(model);
        }
    }
}
{ "pile_set_name": "Github" }
# Set of Irish contractions for ElisionFilter # TODO: load this as a resource from the analyzer and sync it in build.xml d m b
{ "pile_set_name": "Github" }
import numpy, os, sys, glob, time, caffe import tornado.ioloop import tornado.httpclient import tornado.web from tornado.httpclient import AsyncHTTPClient from tornado import gen from PIL import Image from StringIO import StringIO class ImageHandler(tornago.web.RequestHandler): def initialize(self, layers, network, transformer): self.layers = layers self.network = network self.transformer = transformer def post(self): path = self.get_body_argument('path') def resize(self, data): image = Image.open(StringIO(str(data))) if image.mode != 'RGB': image = image.convert('RGB') resized = image.resize((256, 256), resample=Image.BILINEAR) output = StringIO() resized.save(output, format='JPEG') output.seek(0) return output def compute(self, image): img = caffe.io.load_image(image) H, W, _ = img.shape _, _, h, w = self.network.blobs['data'].data.shape h_off = max((H - h) / 2, 0) w_off = max((W - w) / 2, 0) crop = img[h_off:h_off + h, w_off:w_off + w, :] timg = self.transformer.preprocess('data', crop) timg.shape = (1,) + timg.shape all_outputs = self.network.formward_all(blobs=self.layers, self.network.inputs[0]: timg) return all_outputs[self.layers[0]][0].astype(float) def make_app(): output_layers = ['prob'] network = caffe.Net('model_definitions.prototxt', 'nsfw_pretraining.caffemodel', caffe.TEST) transformer = caffe.io.Transformer({ 'data': network.blobs['data'].data.shape }) transformer.set_transpose('data', (2, 0, 1)) transformer.set_mean('data', numpy.array([104, 117, 123])) transformer.set_raw_scale('data', 255) transformer.set_channel_swap('data', (2, 1, 0)) app = tornado.web.Application([ (r'/', ImageHandler, dict(layers=output_layers, network=network, transformer=transformer)), ]) if __name__ == '__main__': app = make_app() app.listen(8888) tornado.ioloop.IOLoop.current().start()
{ "pile_set_name": "Github" }
/* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2014 Intel Corporation */ #include <string.h> #include <stdint.h> #include <rte_mbuf.h> #include <rte_ethdev.h> #include <rte_malloc.h> #include "rte_port_ethdev.h" /* * Port ETHDEV Reader */ #ifdef RTE_PORT_STATS_COLLECT #define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val) \ port->stats.n_pkts_in += val #define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val) \ port->stats.n_pkts_drop += val #else #define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val) #define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val) #endif struct rte_port_ethdev_reader { struct rte_port_in_stats stats; uint16_t queue_id; uint16_t port_id; }; static void * rte_port_ethdev_reader_create(void *params, int socket_id) { struct rte_port_ethdev_reader_params *conf = params; struct rte_port_ethdev_reader *port; /* Check input parameters */ if (conf == NULL) { RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__); return NULL; } /* Memory allocation */ port = rte_zmalloc_socket("PORT", sizeof(*port), RTE_CACHE_LINE_SIZE, socket_id); if (port == NULL) { RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__); return NULL; } /* Initialization */ port->port_id = conf->port_id; port->queue_id = conf->queue_id; return port; } static int rte_port_ethdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts) { struct rte_port_ethdev_reader *p = port; uint16_t rx_pkt_cnt; rx_pkt_cnt = rte_eth_rx_burst(p->port_id, p->queue_id, pkts, n_pkts); RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt); return rx_pkt_cnt; } static int rte_port_ethdev_reader_free(void *port) { if (port == NULL) { RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__); return -EINVAL; } rte_free(port); return 0; } static int rte_port_ethdev_reader_stats_read(void *port, struct rte_port_in_stats *stats, int clear) { struct rte_port_ethdev_reader *p = port; if (stats != NULL) memcpy(stats, &p->stats, sizeof(p->stats)); if (clear) 
memset(&p->stats, 0, sizeof(p->stats)); return 0; } /* * Port ETHDEV Writer */ #ifdef RTE_PORT_STATS_COLLECT #define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val) \ port->stats.n_pkts_in += val #define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val) \ port->stats.n_pkts_drop += val #else #define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val) #define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val) #endif struct rte_port_ethdev_writer { struct rte_port_out_stats stats; struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX]; uint32_t tx_burst_sz; uint16_t tx_buf_count; uint64_t bsz_mask; uint16_t queue_id; uint16_t port_id; }; static void * rte_port_ethdev_writer_create(void *params, int socket_id) { struct rte_port_ethdev_writer_params *conf = params; struct rte_port_ethdev_writer *port; /* Check input parameters */ if ((conf == NULL) || (conf->tx_burst_sz == 0) || (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) || (!rte_is_power_of_2(conf->tx_burst_sz))) { RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__); return NULL; } /* Memory allocation */ port = rte_zmalloc_socket("PORT", sizeof(*port), RTE_CACHE_LINE_SIZE, socket_id); if (port == NULL) { RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__); return NULL; } /* Initialization */ port->port_id = conf->port_id; port->queue_id = conf->queue_id; port->tx_burst_sz = conf->tx_burst_sz; port->tx_buf_count = 0; port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1); return port; } static inline void send_burst(struct rte_port_ethdev_writer *p) { uint32_t nb_tx; nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf, p->tx_buf_count); RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx); for ( ; nb_tx < p->tx_buf_count; nb_tx++) rte_pktmbuf_free(p->tx_buf[nb_tx]); p->tx_buf_count = 0; } static int rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt) { struct rte_port_ethdev_writer *p = port; p->tx_buf[p->tx_buf_count++] = pkt; 
RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1); if (p->tx_buf_count >= p->tx_burst_sz) send_burst(p); return 0; } static int rte_port_ethdev_writer_tx_bulk(void *port, struct rte_mbuf **pkts, uint64_t pkts_mask) { struct rte_port_ethdev_writer *p = port; uint64_t bsz_mask = p->bsz_mask; uint32_t tx_buf_count = p->tx_buf_count; uint64_t expr = (pkts_mask & (pkts_mask + 1)) | ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { uint64_t n_pkts = __builtin_popcountll(pkts_mask); uint32_t n_pkts_ok; if (tx_buf_count) send_burst(p); RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, n_pkts); n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts, n_pkts); RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok); for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) { struct rte_mbuf *pkt = pkts[n_pkts_ok]; rte_pktmbuf_free(pkt); } } else { for ( ; pkts_mask; ) { uint32_t pkt_index = __builtin_ctzll(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; p->tx_buf[tx_buf_count++] = pkt; RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1); pkts_mask &= ~pkt_mask; } p->tx_buf_count = tx_buf_count; if (tx_buf_count >= p->tx_burst_sz) send_burst(p); } return 0; } static int rte_port_ethdev_writer_flush(void *port) { struct rte_port_ethdev_writer *p = port; if (p->tx_buf_count > 0) send_burst(p); return 0; } static int rte_port_ethdev_writer_free(void *port) { if (port == NULL) { RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__); return -EINVAL; } rte_port_ethdev_writer_flush(port); rte_free(port); return 0; } static int rte_port_ethdev_writer_stats_read(void *port, struct rte_port_out_stats *stats, int clear) { struct rte_port_ethdev_writer *p = port; if (stats != NULL) memcpy(stats, &p->stats, sizeof(p->stats)); if (clear) memset(&p->stats, 0, sizeof(p->stats)); return 0; } /* * Port ETHDEV Writer Nodrop */ #ifdef RTE_PORT_STATS_COLLECT #define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \ port->stats.n_pkts_in += val #define 
RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \ port->stats.n_pkts_drop += val #else #define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) #define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) #endif struct rte_port_ethdev_writer_nodrop { struct rte_port_out_stats stats; struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX]; uint32_t tx_burst_sz; uint16_t tx_buf_count; uint64_t bsz_mask; uint64_t n_retries; uint16_t queue_id; uint16_t port_id; }; static void * rte_port_ethdev_writer_nodrop_create(void *params, int socket_id) { struct rte_port_ethdev_writer_nodrop_params *conf = params; struct rte_port_ethdev_writer_nodrop *port; /* Check input parameters */ if ((conf == NULL) || (conf->tx_burst_sz == 0) || (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) || (!rte_is_power_of_2(conf->tx_burst_sz))) { RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__); return NULL; } /* Memory allocation */ port = rte_zmalloc_socket("PORT", sizeof(*port), RTE_CACHE_LINE_SIZE, socket_id); if (port == NULL) { RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__); return NULL; } /* Initialization */ port->port_id = conf->port_id; port->queue_id = conf->queue_id; port->tx_burst_sz = conf->tx_burst_sz; port->tx_buf_count = 0; port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1); /* * When n_retries is 0 it means that we should wait for every packet to * send no matter how many retries should it take. To limit number of * branches in fast path, we use UINT64_MAX instead of branching. */ port->n_retries = (conf->n_retries == 0) ? 
UINT64_MAX : conf->n_retries; return port; } static inline void send_burst_nodrop(struct rte_port_ethdev_writer_nodrop *p) { uint32_t nb_tx = 0, i; nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf, p->tx_buf_count); /* We sent all the packets in a first try */ if (nb_tx >= p->tx_buf_count) { p->tx_buf_count = 0; return; } for (i = 0; i < p->n_retries; i++) { nb_tx += rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf + nb_tx, p->tx_buf_count - nb_tx); /* We sent all the packets in more than one try */ if (nb_tx >= p->tx_buf_count) { p->tx_buf_count = 0; return; } } /* We didn't send the packets in maximum allowed attempts */ RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx); for ( ; nb_tx < p->tx_buf_count; nb_tx++) rte_pktmbuf_free(p->tx_buf[nb_tx]); p->tx_buf_count = 0; } static int rte_port_ethdev_writer_nodrop_tx(void *port, struct rte_mbuf *pkt) { struct rte_port_ethdev_writer_nodrop *p = port; p->tx_buf[p->tx_buf_count++] = pkt; RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1); if (p->tx_buf_count >= p->tx_burst_sz) send_burst_nodrop(p); return 0; } static int rte_port_ethdev_writer_nodrop_tx_bulk(void *port, struct rte_mbuf **pkts, uint64_t pkts_mask) { struct rte_port_ethdev_writer_nodrop *p = port; uint64_t bsz_mask = p->bsz_mask; uint32_t tx_buf_count = p->tx_buf_count; uint64_t expr = (pkts_mask & (pkts_mask + 1)) | ((pkts_mask & bsz_mask) ^ bsz_mask); if (expr == 0) { uint64_t n_pkts = __builtin_popcountll(pkts_mask); uint32_t n_pkts_ok; if (tx_buf_count) send_burst_nodrop(p); RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts); n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts, n_pkts); if (n_pkts_ok >= n_pkts) return 0; /* * If we did not manage to send all packets in single burst, * move remaining packets to the buffer and call send burst. 
*/ for (; n_pkts_ok < n_pkts; n_pkts_ok++) { struct rte_mbuf *pkt = pkts[n_pkts_ok]; p->tx_buf[p->tx_buf_count++] = pkt; } send_burst_nodrop(p); } else { for ( ; pkts_mask; ) { uint32_t pkt_index = __builtin_ctzll(pkts_mask); uint64_t pkt_mask = 1LLU << pkt_index; struct rte_mbuf *pkt = pkts[pkt_index]; p->tx_buf[tx_buf_count++] = pkt; RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1); pkts_mask &= ~pkt_mask; } p->tx_buf_count = tx_buf_count; if (tx_buf_count >= p->tx_burst_sz) send_burst_nodrop(p); } return 0; } static int rte_port_ethdev_writer_nodrop_flush(void *port) { struct rte_port_ethdev_writer_nodrop *p = port; if (p->tx_buf_count > 0) send_burst_nodrop(p); return 0; } static int rte_port_ethdev_writer_nodrop_free(void *port) { if (port == NULL) { RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__); return -EINVAL; } rte_port_ethdev_writer_nodrop_flush(port); rte_free(port); return 0; } static int rte_port_ethdev_writer_nodrop_stats_read(void *port, struct rte_port_out_stats *stats, int clear) { struct rte_port_ethdev_writer_nodrop *p = port; if (stats != NULL) memcpy(stats, &p->stats, sizeof(p->stats)); if (clear) memset(&p->stats, 0, sizeof(p->stats)); return 0; } /* * Summary of port operations */ struct rte_port_in_ops rte_port_ethdev_reader_ops = { .f_create = rte_port_ethdev_reader_create, .f_free = rte_port_ethdev_reader_free, .f_rx = rte_port_ethdev_reader_rx, .f_stats = rte_port_ethdev_reader_stats_read, }; struct rte_port_out_ops rte_port_ethdev_writer_ops = { .f_create = rte_port_ethdev_writer_create, .f_free = rte_port_ethdev_writer_free, .f_tx = rte_port_ethdev_writer_tx, .f_tx_bulk = rte_port_ethdev_writer_tx_bulk, .f_flush = rte_port_ethdev_writer_flush, .f_stats = rte_port_ethdev_writer_stats_read, }; struct rte_port_out_ops rte_port_ethdev_writer_nodrop_ops = { .f_create = rte_port_ethdev_writer_nodrop_create, .f_free = rte_port_ethdev_writer_nodrop_free, .f_tx = rte_port_ethdev_writer_nodrop_tx, .f_tx_bulk = 
rte_port_ethdev_writer_nodrop_tx_bulk, .f_flush = rte_port_ethdev_writer_nodrop_flush, .f_stats = rte_port_ethdev_writer_nodrop_stats_read, };
{ "pile_set_name": "Github" }
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen 1.8.13"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <title>lwIP: src/include Directory Reference</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="dynsections.js"></script> <link href="navtree.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="resize.js"></script> <script type="text/javascript" src="navtreedata.js"></script> <script type="text/javascript" src="navtree.js"></script> <script type="text/javascript"> $(document).ready(initResizable); </script> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/searchdata.js"></script> <script type="text/javascript" src="search/search.js"></script> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! 
--> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td id="projectalign" style="padding-left: 0.5em;"> <div id="projectname">lwIP &#160;<span id="projectnumber">2.1.2</span> </div> <div id="projectbrief">Lightweight IP stack</div> </td> </tr> </tbody> </table> </div> <!-- end header part --> <!-- Generated by Doxygen 1.8.13 --> <script type="text/javascript"> var searchBox = new SearchBox("searchBox", "search",false,'Search'); </script> <script type="text/javascript" src="menudata.js"></script> <script type="text/javascript" src="menu.js"></script> <script type="text/javascript"> $(function() { initMenu('',true,false,'search.php','Search'); $(document).ready(function() { init_search(); }); }); </script> <div id="main-nav"></div> </div><!-- top --> <div id="side-nav" class="ui-resizable side-nav-resizable"> <div id="nav-tree"> <div id="nav-tree-contents"> <div id="nav-sync" class="sync"></div> </div> </div> <div id="splitbar" style="-moz-user-select:none;" class="ui-resizable-handle"> </div> </div> <script type="text/javascript"> $(document).ready(function(){initNavTree('dir_b0856f6b0d80ccb263b2f415c91f9e17.html','');}); </script> <div id="doc-content"> <!-- window showing the filter options --> <div id="MSearchSelectWindow" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" onkeydown="return searchBox.OnSearchSelectKey(event)"> </div> <!-- iframe showing the search results (closed by default) --> <div id="MSearchResultsWindow"> <iframe src="javascript:void(0)" frameborder="0" name="MSearchResults" id="MSearchResults"> </iframe> </div> <div class="header"> <div class="headertitle"> <div class="title">include Directory Reference</div> </div> </div><!--header--> <div class="contents"> <table class="memberdecls"> <tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="subdirs"></a> Directories</h2></td></tr> <tr 
class="memitem:dir_1e445e767c368c70d58af8a0b7552719"><td class="memItemLeft" align="right" valign="top">directory &#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="dir_1e445e767c368c70d58af8a0b7552719.html">compat</a></td></tr> <tr class="separator:"><td class="memSeparator" colspan="2">&#160;</td></tr> </table> </div><!-- contents --> </div><!-- doc-content --> <!-- start footer part --> <div id="nav-path" class="navpath"><!-- id is needed for treeview function! --> <ul> <li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.html">src</a></li><li class="navelem"><a class="el" href="dir_b0856f6b0d80ccb263b2f415c91f9e17.html">include</a></li> <li class="footer">Generated by <a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.13 </li> </ul> </div> </body> </html>
{ "pile_set_name": "Github" }
'use strict'; angular.module("ngLocale", [], ["$provide", function($provide) { var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"}; function getDecimals(n) { n = n + ''; var i = n.indexOf('.'); return (i == -1) ? 0 : n.length - i - 1; } function getVF(n, opt_precision) { var v = opt_precision; if (undefined === v) { v = Math.min(getDecimals(n), 3); } var base = Math.pow(10, v); var f = ((n * base) | 0) % base; return {v: v, f: f}; } $provide.value("$locale", { "DATETIME_FORMATS": { "AMPMS": [ "I bik\u025b\u0302gl\u00e0", "I \u0253ugaj\u0254p" ], "DAY": [ "\u014bgw\u00e0 n\u0254\u0302y", "\u014bgw\u00e0 nja\u014bgumba", "\u014bgw\u00e0 \u00fbm", "\u014bgw\u00e0 \u014bg\u00ea", "\u014bgw\u00e0 mb\u0254k", "\u014bgw\u00e0 k\u0254\u0254", "\u014bgw\u00e0 j\u00f4n" ], "ERANAMES": [ "bis\u016b bi Yes\u00f9 Kr\u01d0st\u00f2", "i mb\u016bs Yes\u00f9 Kr\u01d0st\u00f2" ], "ERAS": [ "b.Y.K", "m.Y.K" ], "MONTH": [ "K\u0254nd\u0254\u014b", "M\u00e0c\u025b\u0302l", "M\u00e0t\u00f9mb", "M\u00e0top", "M\u0300puy\u025b", "H\u00ecl\u00f2nd\u025b\u0300", "Nj\u00e8b\u00e0", "H\u00ecka\u014b", "D\u00ecp\u0254\u0300s", "B\u00ec\u00f2\u00f4m", "M\u00e0y\u025bs\u00e8p", "L\u00ecbuy li \u0144y\u00e8e" ], "SHORTDAY": [ "n\u0254y", "nja", "uum", "\u014bge", "mb\u0254", "k\u0254\u0254", "jon" ], "SHORTMONTH": [ "k\u0254n", "mac", "mat", "mto", "mpu", "hil", "nje", "hik", "dip", "bio", "may", "li\u0253" ], "fullDate": "EEEE d MMMM y", "longDate": "d MMMM y", "medium": "d MMM, y HH:mm:ss", "mediumDate": "d MMM, y", "mediumTime": "HH:mm:ss", "short": "d/M/y HH:mm", "shortDate": "d/M/y", "shortTime": "HH:mm" }, "NUMBER_FORMATS": { "CURRENCY_SYM": "FCFA", "DECIMAL_SEP": ",", "GROUP_SEP": "\u00a0", "PATTERNS": [ { "gSize": 3, "lgSize": 3, "maxFrac": 3, "minFrac": 0, "minInt": 1, "negPre": "-", "negSuf": "", "posPre": "", "posSuf": "" }, { "gSize": 3, "lgSize": 3, "maxFrac": 2, "minFrac": 2, "minInt": 1, "negPre": "-", "negSuf": "\u00a0\u00a4", 
"posPre": "", "posSuf": "\u00a0\u00a4" } ] }, "id": "bas-cm", "pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;} }); }]);
{ "pile_set_name": "Github" }
// RUN: llvm-objdump -d %p/Inputs/kextbundle.macho-aarch64 | FileCheck %s CHECK: 4008: 03 00 00 94 bl 0x4014 <_bar.stub>
{ "pile_set_name": "Github" }
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package org.meteoinfo.legend; import java.awt.Color; /** * * @author Yaqiang Wang */ public class BarBreak extends PolygonBreak { // <editor-fold desc="Variables"> private Color errorColor; private float errorSize; // </editor-fold> // <editor-fold desc="Constructor"> /** * Constructor */ public BarBreak(){ super(); errorColor = Color.black; errorSize = 1.0f; } // </editor-fold> // <editor-fold desc="Get Set Methods"> /** * Get error color * @return Error color */ public Color getErrorColor(){ return this.errorColor; } /** * Set error color * @param value Error color */ public void setErrorColor(Color value){ this.errorColor = value; } /** * Get error size * @return Error size */ public float getErrorSize(){ return this.errorSize; } /** * Set error size * @param value Error size */ public void setErrorSize(float value){ this.errorSize = value; } // </editor-fold> // <editor-fold desc="Methods"> // </editor-fold> }
{ "pile_set_name": "Github" }
/* Simple DirectMedia Layer Copyright (C) 1997-2018 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../../SDL_internal.h" #if SDL_VIDEO_DRIVER_DUMMY #include "../SDL_sysvideo.h" #include "SDL_nullframebuffer_c.h" #define DUMMY_SURFACE "_SDL_DummySurface" int SDL_DUMMY_CreateWindowFramebuffer(_THIS, SDL_Window * window, Uint32 * format, void ** pixels, int *pitch) { SDL_Surface *surface; const Uint32 surface_format = SDL_PIXELFORMAT_RGB888; int w, h; int bpp; Uint32 Rmask, Gmask, Bmask, Amask; /* Free the old framebuffer surface */ surface = (SDL_Surface *) SDL_GetWindowData(window, DUMMY_SURFACE); SDL_FreeSurface(surface); /* Create a new one */ SDL_PixelFormatEnumToMasks(surface_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask); SDL_GetWindowSizeInside(window, &w, &h); surface = SDL_CreateRGBSurface(0, w, h, bpp, Rmask, Gmask, Bmask, Amask); if (!surface) { return -1; } /* Save the info and return! 
*/ SDL_SetWindowData(window, DUMMY_SURFACE, surface); *format = surface_format; *pixels = surface->pixels; *pitch = surface->pitch; return 0; } int SDL_DUMMY_UpdateWindowFramebuffer(_THIS, SDL_Window * window, const SDL_Rect * rects, int numrects) { static int frame_number; SDL_Surface *surface; surface = (SDL_Surface *) SDL_GetWindowData(window, DUMMY_SURFACE); if (!surface) { return SDL_SetError("Couldn't find dummy surface for window"); } /* Send the data to the display */ if (SDL_getenv("SDL_VIDEO_DUMMY_SAVE_FRAMES")) { char file[128]; SDL_snprintf(file, sizeof(file), "SDL_window%d-%8.8d.bmp", SDL_GetWindowID(window), ++frame_number); SDL_SaveBMP(surface, file); } return 0; } void SDL_DUMMY_DestroyWindowFramebuffer(_THIS, SDL_Window * window) { SDL_Surface *surface; surface = (SDL_Surface *) SDL_SetWindowData(window, DUMMY_SURFACE, NULL); SDL_FreeSurface(surface); } #endif /* SDL_VIDEO_DRIVER_DUMMY */ /* vi: set ts=4 sw=4 expandtab: */
{ "pile_set_name": "Github" }
exports.ERR_OBJECT_NOT_FOUND = 'ERR_OBJECT_NOT_FOUND' exports.ERR_USER_NOT_FOUND = 'ERR_USER_NOT_FOUND' exports.ERR_PASSWORD_ERROR = 'ERR_PASSWORD_ERROR' exports.TOKEN_NOT_FOUND = 'TOKEN_NOT_FOUND' exports.ERR_SERVER_ERROR = 'ERR_SERVER_ERROR' exports.ERR_PERMISSION_DENY = 'ERR_PERMISSION_DENY' exports.ERR_USER_DISABLED = 'ERR_USER_DISABLED' const msgs = { ERR_OBJECT_NOT_FOUND: 'Object not found', ERR_USER_NOT_FOUND: 'User not found', ERR_PASSWORD_ERROR: 'Password error', TOKEN_NOT_FOUND: 'Token not found', ERR_SERVER_ERROR: 'Server Internal Error', ERR_PERMISSION_DENY: 'Permission Deny', ERR_USER_DISABLED: 'User is disabled', } function errmsg(reason) { return msgs[reason] } exports.errmsg = errmsg
{ "pile_set_name": "Github" }
extension Foo { // RUN: %sourcekitd-test -req=format -line=2 -length=1 %s | %FileCheck --strict-whitespace %s // CHECK: key.sourcetext: " "
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8" standalone="no"?> <document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="11134" systemVersion="15F34" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" launchScreen="YES" useTraitCollections="YES" colorMatched="YES" initialViewController="01J-lp-oVM"> <dependencies> <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="11106"/> <capability name="documents saved in the Xcode 8 format" minToolsVersion="8.0"/> </dependencies> <scenes> <!--View Controller--> <scene sceneID="EHf-IW-A2E"> <objects> <viewController id="01J-lp-oVM" sceneMemberID="viewController"> <layoutGuides> <viewControllerLayoutGuide type="top" id="Llm-lL-Icb"/> <viewControllerLayoutGuide type="bottom" id="xb3-aO-Qok"/> </layoutGuides> <view key="view" contentMode="scaleToFill" id="Ze5-6b-2t3"> <rect key="frame" x="0.0" y="0.0" width="375" height="667"/> <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/> <color key="backgroundColor" red="1" green="1" blue="1" alpha="1" colorSpace="custom" customColorSpace="sRGB"/> </view> </viewController> <placeholder placeholderIdentifier="IBFirstResponder" id="iYj-Kq-Ea1" userLabel="First Responder" sceneMemberID="firstResponder"/> </objects> <point key="canvasLocation" x="53" y="375"/> </scene> </scenes> </document>
{ "pile_set_name": "Github" }
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package org.apache.hadoop.hbase.io.crypto; import java.security.Key; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.util.MD5Hash; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** * Crypto context. Encapsulates an encryption algorithm and its key material. */ @InterfaceAudience.Public public class Context implements Configurable { private Configuration conf; private Cipher cipher; private Key key; private String keyHash; Context(Configuration conf) { this.conf = conf; } Context() { this(HBaseConfiguration.create()); } @Override public Configuration getConf() { return conf; } @Override public void setConf(Configuration conf) { this.conf = conf; } @Override public String toString() { return "cipher=" + (cipher != null ? cipher.getName() : "NONE") + " keyHash=" + (keyHash != null ? keyHash.substring(0, 8) + "..." 
: "NONE"); } public Cipher getCipher() { return cipher; } public Context setCipher(Cipher cipher) { this.cipher = cipher; return this; } public byte[] getKeyBytes() { return key.getEncoded(); } public String getKeyBytesHash() { return keyHash; } public String getKeyFormat() { return key.getFormat(); } public Key getKey() { return key; } public Context setKey(Key key) { Preconditions.checkNotNull(cipher, "Context does not have a cipher"); // validate the key length byte[] encoded = key.getEncoded(); if (encoded.length != cipher.getKeyLength()) { throw new RuntimeException("Illegal key length, have=" + encoded.length + ", want=" + cipher.getKeyLength()); } this.key = key; this.keyHash = MD5Hash.getMD5AsHex(encoded); return this; } }
{ "pile_set_name": "Github" }
#!/usr/bin/perl # # Check flock() feature # # This isn't a real test; it just checks to make sure we can call the method. # It doesn't even check to make sure that the default behavior # (LOCK_EX) is occurring. This is because I don't know how to write a good # portable test for flocking. I checked the Perl core distribution, # and found that Perl doesn't test flock either! BEGIN { eval { flock STDOUT, 0 }; if ($@ && $@ =~ /unimplemented/) { print "1..0\n"; exit; } } use Fcntl ':flock'; # This works at least back to 5.004_04 my $file = "tf$$.txt"; my ($o, $n); my @a; print "1..4\n"; my $N = 1; use Tie::File; print "ok $N\n"; $N++; # 2-4 Who the heck knows? open F, '>', $file or die $!; close F; $o = tie @a, 'Tie::File', $file, recsep => 'blah'; print $o ? "ok $N\n" : "not ok $N\n"; $N++; print $o->flock() ? "ok $N\n" : "not ok $N\n"; $N++; print $o->flock(LOCK_UN) ? "ok $N\n" : "not ok $N\n"; $N++; END { undef $o; untie @a; 1 while unlink $file; }
{ "pile_set_name": "Github" }
/* * [The "BSD licence"] * Copyright (c) 2005-2008 Terence Parr * All rights reserved. * * Conversion to C#: * Copyright (c) 2008 Sam Harwell, Pixel Mine, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ namespace Antlr.Runtime { using Exception = System.Exception; #if !PORTABLE using SerializationInfo = System.Runtime.Serialization.SerializationInfo; using StreamingContext = System.Runtime.Serialization.StreamingContext; #endif [System.Serializable] public class MismatchedNotSetException : MismatchedSetException { public MismatchedNotSetException() { } public MismatchedNotSetException(string message) : base(message) { } public MismatchedNotSetException(string message, Exception innerException) : base(message, innerException) { } public MismatchedNotSetException(BitSet expecting, IIntStream input) : base(expecting, input) { } public MismatchedNotSetException(string message, BitSet expecting, IIntStream input) : base(message, expecting, input) { } public MismatchedNotSetException(string message, BitSet expecting, IIntStream input, Exception innerException) : base(message, expecting, input, innerException) { } #if !PORTABLE protected MismatchedNotSetException(SerializationInfo info, StreamingContext context) : base(info, context) { } #endif public override string ToString() { return "MismatchedNotSetException(" + UnexpectedType + "!=" + Expecting + ")"; } } }
{ "pile_set_name": "Github" }
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (c) 2004 James Courtier-Dutton <James@superbug.demon.co.uk> * Driver CA0106 chips. e.g. Sound Blaster Audigy LS and Live 24bit * Version: 0.0.22 * * FEATURES currently supported: * See ca0106_main.c for features. * * Changelog: * Support interrupts per period. * Removed noise from Center/LFE channel when in Analog mode. * Rename and remove mixer controls. * 0.0.6 * Use separate card based DMA buffer for periods table list. * 0.0.7 * Change remove and rename ctrls into lists. * 0.0.8 * Try to fix capture sources. * 0.0.9 * Fix AC3 output. * Enable S32_LE format support. * 0.0.10 * Enable playback 48000 and 96000 rates. (Rates other that these do not work, even with "plug:front".) * 0.0.11 * Add Model name recognition. * 0.0.12 * Correct interrupt timing. interrupt at end of period, instead of in the middle of a playback period. * Remove redundent "voice" handling. * 0.0.13 * Single trigger call for multi channels. * 0.0.14 * Set limits based on what the sound card hardware can do. * playback periods_min=2, periods_max=8 * capture hw constraints require period_size = n * 64 bytes. * playback hw constraints require period_size = n * 64 bytes. * 0.0.15 * Separated ca0106.c into separate functional .c files. * 0.0.16 * Implement 192000 sample rate. * 0.0.17 * Add support for SB0410 and SB0413. * 0.0.18 * Modified Copyright message. * 0.0.19 * Added I2C and SPI registers. Filled in interrupt enable. * 0.0.20 * Added GPIO info for SB Live 24bit. * 0.0.21 * Implement support for Line-in capture on SB Live 24bit. 
* 0.0.22 * Add support for mute control on SB Live 24bit (cards w/ SPI DAC) * * This code was initially based on code from ALSA's emu10k1x.c which is: * Copyright (c) by Francisco Moraes <fmoraes@nc.rr.com> */ /************************************************************************************************/ /* PCI function 0 registers, address = <val> + PCIBASE0 */ /************************************************************************************************/ #define PTR 0x00 /* Indexed register set pointer register */ /* NOTE: The CHANNELNUM and ADDRESS words can */ /* be modified independently of each other. */ /* CNL[1:0], ADDR[27:16] */ #define DATA 0x04 /* Indexed register set data register */ /* DATA[31:0] */ #define IPR 0x08 /* Global interrupt pending register */ /* Clear pending interrupts by writing a 1 to */ /* the relevant bits and zero to the other bits */ #define IPR_MIDI_RX_B 0x00020000 /* MIDI UART-B Receive buffer non-empty */ #define IPR_MIDI_TX_B 0x00010000 /* MIDI UART-B Transmit buffer empty */ #define IPR_SPDIF_IN_USER 0x00004000 /* SPDIF input user data has 16 more bits */ #define IPR_SPDIF_OUT_USER 0x00002000 /* SPDIF output user data needs 16 more bits */ #define IPR_SPDIF_OUT_FRAME 0x00001000 /* SPDIF frame about to start */ #define IPR_SPI 0x00000800 /* SPI transaction completed */ #define IPR_I2C_EEPROM 0x00000400 /* I2C EEPROM transaction completed */ #define IPR_I2C_DAC 0x00000200 /* I2C DAC transaction completed */ #define IPR_AI 0x00000100 /* Audio pending register changed. 
See PTR reg 0x76 */ #define IPR_GPI 0x00000080 /* General Purpose input changed */ #define IPR_SRC_LOCKED 0x00000040 /* SRC lock status changed */ #define IPR_SPDIF_STATUS 0x00000020 /* SPDIF status changed */ #define IPR_TIMER2 0x00000010 /* 192000Hz Timer */ #define IPR_TIMER1 0x00000008 /* 44100Hz Timer */ #define IPR_MIDI_RX_A 0x00000004 /* MIDI UART-A Receive buffer non-empty */ #define IPR_MIDI_TX_A 0x00000002 /* MIDI UART-A Transmit buffer empty */ #define IPR_PCI 0x00000001 /* PCI Bus error */ #define INTE 0x0c /* Interrupt enable register */ #define INTE_MIDI_RX_B 0x00020000 /* MIDI UART-B Receive buffer non-empty */ #define INTE_MIDI_TX_B 0x00010000 /* MIDI UART-B Transmit buffer empty */ #define INTE_SPDIF_IN_USER 0x00004000 /* SPDIF input user data has 16 more bits */ #define INTE_SPDIF_OUT_USER 0x00002000 /* SPDIF output user data needs 16 more bits */ #define INTE_SPDIF_OUT_FRAME 0x00001000 /* SPDIF frame about to start */ #define INTE_SPI 0x00000800 /* SPI transaction completed */ #define INTE_I2C_EEPROM 0x00000400 /* I2C EEPROM transaction completed */ #define INTE_I2C_DAC 0x00000200 /* I2C DAC transaction completed */ #define INTE_AI 0x00000100 /* Audio pending register changed. See PTR reg 0x75 */ #define INTE_GPI 0x00000080 /* General Purpose input changed */ #define INTE_SRC_LOCKED 0x00000040 /* SRC lock status changed */ #define INTE_SPDIF_STATUS 0x00000020 /* SPDIF status changed */ #define INTE_TIMER2 0x00000010 /* 192000Hz Timer */ #define INTE_TIMER1 0x00000008 /* 44100Hz Timer */ #define INTE_MIDI_RX_A 0x00000004 /* MIDI UART-A Receive buffer non-empty */ #define INTE_MIDI_TX_A 0x00000002 /* MIDI UART-A Transmit buffer empty */ #define INTE_PCI 0x00000001 /* PCI Bus error */ #define UNKNOWN10 0x10 /* Unknown ??. Defaults to 0 */ #define HCFG 0x14 /* Hardware config register */ /* 0x1000 causes AC3 to fails. It adds a dither bit. */ #define HCFG_STAC 0x10000000 /* Special mode for STAC9460 Codec. 
*/ #define HCFG_CAPTURE_I2S_BYPASS 0x08000000 /* 1 = bypass I2S input async SRC. */ #define HCFG_CAPTURE_SPDIF_BYPASS 0x04000000 /* 1 = bypass SPDIF input async SRC. */ #define HCFG_PLAYBACK_I2S_BYPASS 0x02000000 /* 0 = I2S IN mixer output, 1 = I2S IN1. */ #define HCFG_FORCE_LOCK 0x01000000 /* For test only. Force input SRC tracker to lock. */ #define HCFG_PLAYBACK_ATTENUATION 0x00006000 /* Playback attenuation mask. 0 = 0dB, 1 = 6dB, 2 = 12dB, 3 = Mute. */ #define HCFG_PLAYBACK_DITHER 0x00001000 /* 1 = Add dither bit to all playback channels. */ #define HCFG_PLAYBACK_S32_LE 0x00000800 /* 1 = S32_LE, 0 = S16_LE */ #define HCFG_CAPTURE_S32_LE 0x00000400 /* 1 = S32_LE, 0 = S16_LE (S32_LE current not working) */ #define HCFG_8_CHANNEL_PLAY 0x00000200 /* 1 = 8 channels, 0 = 2 channels per substream.*/ #define HCFG_8_CHANNEL_CAPTURE 0x00000100 /* 1 = 8 channels, 0 = 2 channels per substream.*/ #define HCFG_MONO 0x00000080 /* 1 = I2S Input mono */ #define HCFG_I2S_OUTPUT 0x00000010 /* 1 = I2S Output disabled */ #define HCFG_AC97 0x00000008 /* 0 = AC97 1.0, 1 = AC97 2.0 */ #define HCFG_LOCK_PLAYBACK_CACHE 0x00000004 /* 1 = Cancel bustmaster accesses to soundcache */ /* NOTE: This should generally never be used. */ #define HCFG_LOCK_CAPTURE_CACHE 0x00000002 /* 1 = Cancel bustmaster accesses to soundcache */ /* NOTE: This should generally never be used. */ #define HCFG_AUDIOENABLE 0x00000001 /* 0 = CODECs transmit zero-valued samples */ /* Should be set to 1 when the EMU10K1 is */ /* completely initialized. */ #define GPIO 0x18 /* Defaults: 005f03a3-Analog, 005f02a2-SPDIF. */ /* Here pins 0,1,2,3,4,,6 are output. 5,7 are input */ /* For the Audigy LS, pin 0 (or bit 8) controls the SPDIF/Analog jack. */ /* SB Live 24bit: * bit 8 0 = SPDIF in and out / 1 = Analog (Mic or Line)-in. * bit 9 0 = Mute / 1 = Analog out. * bit 10 0 = Line-in / 1 = Mic-in. * bit 11 0 = ? / 1 = ? * bit 12 0 = 48 Khz / 1 = 96 Khz Analog out on SB Live 24bit. * bit 13 0 = ? / 1 = ? 
* bit 14 0 = Mute / 1 = Analog out * bit 15 0 = ? / 1 = ? * Both bit 9 and bit 14 have to be set for analog sound to work on the SB Live 24bit. */ /* 8 general purpose programmable In/Out pins. * GPI [8:0] Read only. Default 0. * GPO [15:8] Default 0x9. (Default to SPDIF jack enabled for SPDIF) * GPO Enable [23:16] Default 0x0f. Setting a bit to 1, causes the pin to be an output pin. */ #define AC97DATA 0x1c /* AC97 register set data register (16 bit) */ #define AC97ADDRESS 0x1e /* AC97 register set address register (8 bit) */ /********************************************************************************************************/ /* CA0106 pointer-offset register set, accessed through the PTR and DATA registers */ /********************************************************************************************************/ /* Initially all registers from 0x00 to 0x3f have zero contents. */ #define PLAYBACK_LIST_ADDR 0x00 /* Base DMA address of a list of pointers to each period/size */ /* One list entry: 4 bytes for DMA address, * 4 bytes for period_size << 16. * One list entry is 8 bytes long. * One list entry for each period in the buffer. */ /* ADDR[31:0], Default: 0x0 */ #define PLAYBACK_LIST_SIZE 0x01 /* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000 */ /* SIZE[21:16], Default: 0x8 */ #define PLAYBACK_LIST_PTR 0x02 /* Pointer to the current period being played */ /* PTR[5:0], Default: 0x0 */ #define PLAYBACK_UNKNOWN3 0x03 /* Not used ?? */ #define PLAYBACK_DMA_ADDR 0x04 /* Playback DMA address */ /* DMA[31:0], Default: 0x0 */ #define PLAYBACK_PERIOD_SIZE 0x05 /* Playback period size. win2000 uses 0x04000000 */ /* SIZE[31:16], Default: 0x0 */ #define PLAYBACK_POINTER 0x06 /* Playback period pointer. 
Used with PLAYBACK_LIST_PTR to determine buffer position currently in DAC */ /* POINTER[15:0], Default: 0x0 */ #define PLAYBACK_PERIOD_END_ADDR 0x07 /* Playback fifo end address */ /* END_ADDR[15:0], FLAG[16] 0 = don't stop, 1 = stop */ #define PLAYBACK_FIFO_OFFSET_ADDRESS 0x08 /* Current fifo offset address [21:16] */ /* Cache size valid [5:0] */ #define PLAYBACK_UNKNOWN9 0x09 /* 0x9 to 0xf Unused */ #define CAPTURE_DMA_ADDR 0x10 /* Capture DMA address */ /* DMA[31:0], Default: 0x0 */ #define CAPTURE_BUFFER_SIZE 0x11 /* Capture buffer size */ /* SIZE[31:16], Default: 0x0 */ #define CAPTURE_POINTER 0x12 /* Capture buffer pointer. Sample currently in ADC */ /* POINTER[15:0], Default: 0x0 */ #define CAPTURE_FIFO_OFFSET_ADDRESS 0x13 /* Current fifo offset address [21:16] */ /* Cache size valid [5:0] */ #define PLAYBACK_LAST_SAMPLE 0x20 /* The sample currently being played */ /* 0x21 - 0x3f unused */ #define BASIC_INTERRUPT 0x40 /* Used by both playback and capture interrupt handler */ /* Playback (0x1<<channel_id) */ /* Capture (0x100<<channel_id) */ /* Playback sample rate 96000 = 0x20000 */ /* Start Playback [3:0] (one bit per channel) * Start Capture [11:8] (one bit per channel) * Playback rate [23:16] (2 bits per channel) (0=48kHz, 1=44.1kHz, 2=96kHz, 3=192Khz) * Playback mixer in enable [27:24] (one bit per channel) * Playback mixer out enable [31:28] (one bit per channel) */ /* The Digital out jack is shared with the Center/LFE Analogue output. * The jack has 4 poles. I will call 1 - Tip, 2 - Next to 1, 3 - Next to 2, 4 - Next to 3 * For Analogue: 1 -> Center Speaker, 2 -> Sub Woofer, 3 -> Ground, 4 -> Ground * For Digital: 1 -> Front SPDIF, 2 -> Rear SPDIF, 3 -> Center/Subwoofer SPDIF, 4 -> Ground. * Standard 4 pole Video A/V cable with RCA outputs: 1 -> White, 2 -> Yellow, 3 -> Shield on all three, 4 -> Red. * So, from this you can see that you cannot use a Standard 4 pole Video A/V cable with the SB Audigy LS card. 
*/ /* The Front SPDIF PCM gets mixed with samples from the AC97 codec, so can only work for Stereo PCM and not AC3/DTS * The Rear SPDIF can be used for Stereo PCM and also AC3/DTS * The Center/LFE SPDIF cannot be used for AC3/DTS, but can be used for Stereo PCM. * Summary: For ALSA we use the Rear channel for SPDIF Digital AC3/DTS output */ /* A standard 2 pole mono mini-jack to RCA plug can be used for SPDIF Stereo PCM output from the Front channel. * A standard 3 pole stereo mini-jack to 2 RCA plugs can be used for SPDIF AC3/DTS and Stereo PCM output utilising the Rear channel and just one of the RCA plugs. */ #define SPCS0 0x41 /* SPDIF output Channel Status 0 register. For Rear. default=0x02108004, non-audio=0x02108006 */ #define SPCS1 0x42 /* SPDIF output Channel Status 1 register. For Front */ #define SPCS2 0x43 /* SPDIF output Channel Status 2 register. For Center/LFE */ #define SPCS3 0x44 /* SPDIF output Channel Status 3 register. Unknown */ /* When Channel set to 0: */ #define SPCS_CLKACCYMASK 0x30000000 /* Clock accuracy */ #define SPCS_CLKACCY_1000PPM 0x00000000 /* 1000 parts per million */ #define SPCS_CLKACCY_50PPM 0x10000000 /* 50 parts per million */ #define SPCS_CLKACCY_VARIABLE 0x20000000 /* Variable accuracy */ #define SPCS_SAMPLERATEMASK 0x0f000000 /* Sample rate */ #define SPCS_SAMPLERATE_44 0x00000000 /* 44.1kHz sample rate */ #define SPCS_SAMPLERATE_48 0x02000000 /* 48kHz sample rate */ #define SPCS_SAMPLERATE_32 0x03000000 /* 32kHz sample rate */ #define SPCS_CHANNELNUMMASK 0x00f00000 /* Channel number */ #define SPCS_CHANNELNUM_UNSPEC 0x00000000 /* Unspecified channel number */ #define SPCS_CHANNELNUM_LEFT 0x00100000 /* Left channel */ #define SPCS_CHANNELNUM_RIGHT 0x00200000 /* Right channel */ #define SPCS_SOURCENUMMASK 0x000f0000 /* Source number */ #define SPCS_SOURCENUM_UNSPEC 0x00000000 /* Unspecified source number */ #define SPCS_GENERATIONSTATUS 0x00008000 /* Originality flag (see IEC-958 spec) */ #define SPCS_CATEGORYCODEMASK 
0x00007f00 /* Category code (see IEC-958 spec) */ #define SPCS_MODEMASK 0x000000c0 /* Mode (see IEC-958 spec) */ #define SPCS_EMPHASISMASK 0x00000038 /* Emphasis */ #define SPCS_EMPHASIS_NONE 0x00000000 /* No emphasis */ #define SPCS_EMPHASIS_50_15 0x00000008 /* 50/15 usec 2 channel */ #define SPCS_COPYRIGHT 0x00000004 /* Copyright asserted flag -- do not modify */ #define SPCS_NOTAUDIODATA 0x00000002 /* 0 = Digital audio, 1 = not audio */ #define SPCS_PROFESSIONAL 0x00000001 /* 0 = Consumer (IEC-958), 1 = pro (AES3-1992) */ /* When Channel set to 1: */ #define SPCS_WORD_LENGTH_MASK 0x0000000f /* Word Length Mask */ #define SPCS_WORD_LENGTH_16 0x00000008 /* Word Length 16 bit */ #define SPCS_WORD_LENGTH_17 0x00000006 /* Word Length 17 bit */ #define SPCS_WORD_LENGTH_18 0x00000004 /* Word Length 18 bit */ #define SPCS_WORD_LENGTH_19 0x00000002 /* Word Length 19 bit */ #define SPCS_WORD_LENGTH_20A 0x0000000a /* Word Length 20 bit */ #define SPCS_WORD_LENGTH_20 0x00000009 /* Word Length 20 bit (both 0xa and 0x9 are 20 bit) */ #define SPCS_WORD_LENGTH_21 0x00000007 /* Word Length 21 bit */ #define SPCS_WORD_LENGTH_22 0x00000005 /* Word Length 22 bit */ #define SPCS_WORD_LENGTH_23 0x00000003 /* Word Length 23 bit */ #define SPCS_WORD_LENGTH_24 0x0000000b /* Word Length 24 bit */ #define SPCS_ORIGINAL_SAMPLE_RATE_MASK 0x000000f0 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_NONE 0x00000000 /* Original Sample rate not indicated */ #define SPCS_ORIGINAL_SAMPLE_RATE_16000 0x00000010 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_RES1 0x00000020 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_32000 0x00000030 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_12000 0x00000040 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_11025 0x00000050 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_8000 0x00000060 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_RES2 0x00000070 /* Original Sample 
rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_192000 0x00000080 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_24000 0x00000090 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_96000 0x000000a0 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_48000 0x000000b0 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_176400 0x000000c0 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_22050 0x000000d0 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_88200 0x000000e0 /* Original Sample rate */ #define SPCS_ORIGINAL_SAMPLE_RATE_44100 0x000000f0 /* Original Sample rate */ #define SPDIF_SELECT1 0x45 /* Enables SPDIF or Analogue outputs 0-SPDIF, 0xf00-Analogue */ /* 0x100 - Front, 0x800 - Rear, 0x200 - Center/LFE. * But as the jack is shared, use 0xf00. * The Windows2000 driver uses 0x0000000f for both digital and analog. * 0xf00 introduces interesting noises onto the Center/LFE. * If you turn the volume up, you hear computer noise, * e.g. mouse moving, changing between app windows etc. * So, I am going to set this to 0x0000000f all the time now, * same as the windows driver does. * Use register SPDIF_SELECT2(0x72) to switch between SPDIF and Analog. */ /* When Channel = 0: * Wide SPDIF format [3:0] (one bit for each channel) (0=20bit, 1=24bit) * Tristate SPDIF Output [11:8] (one bit for each channel) (0=Not tristate, 1=Tristate) * SPDIF Bypass enable [19:16] (one bit for each channel) (0=Not bypass, 1=Bypass) */ /* When Channel = 1: * SPDIF 0 User data [7:0] * SPDIF 1 User data [15:8] * SPDIF 0 User data [23:16] * SPDIF 0 User data [31:24] * User data can be sent by using the SPDIF output frame pending and SPDIF output user bit interrupts. */ #define WATERMARK 0x46 /* Test bit to indicate cache usage level */ #define SPDIF_INPUT_STATUS 0x49 /* SPDIF Input status register. Bits the same as SPCS. * When Channel = 0: Bits the same as SPCS channel 0. * When Channel = 1: Bits the same as SPCS channel 1. 
* When Channel = 2: * SPDIF Input User data [16:0] * SPDIF Input Frame count [21:16] */ #define CAPTURE_CACHE_DATA 0x50 /* 0x50-0x5f Recorded samples. */ #define CAPTURE_SOURCE 0x60 /* Capture Source 0 = MIC */ #define CAPTURE_SOURCE_CHANNEL0 0xf0000000 /* Mask for selecting the Capture sources */ #define CAPTURE_SOURCE_CHANNEL1 0x0f000000 /* 0 - SPDIF mixer output. */ #define CAPTURE_SOURCE_CHANNEL2 0x00f00000 /* 1 - What you hear or . 2 - ?? */ #define CAPTURE_SOURCE_CHANNEL3 0x000f0000 /* 3 - Mic in, Line in, TAD in, Aux in. */ #define CAPTURE_SOURCE_RECORD_MAP 0x0000ffff /* Default 0x00e4 */ /* Record Map [7:0] (2 bits per channel) 0=mapped to channel 0, 1=mapped to channel 1, 2=mapped to channel2, 3=mapped to channel3 * Record source select for channel 0 [18:16] * Record source select for channel 1 [22:20] * Record source select for channel 2 [26:24] * Record source select for channel 3 [30:28] * 0 - SPDIF mixer output. * 1 - i2s mixer output. * 2 - SPDIF input. * 3 - i2s input. * 4 - AC97 capture. * 5 - SRC output. */ #define CAPTURE_VOLUME1 0x61 /* Capture volume per channel 0-3 */ #define CAPTURE_VOLUME2 0x62 /* Capture volume per channel 4-7 */ #define PLAYBACK_ROUTING1 0x63 /* Playback routing of channels 0-7. Effects AC3 output. Default 0x32765410 */ #define ROUTING1_REAR 0x77000000 /* Channel_id 0 sends to 10, Channel_id 1 sends to 32 */ #define ROUTING1_NULL 0x00770000 /* Channel_id 2 sends to 54, Channel_id 3 sends to 76 */ #define ROUTING1_CENTER_LFE 0x00007700 /* 0x32765410 means, send Channel_id 0 to FRONT, Channel_id 1 to REAR */ #define ROUTING1_FRONT 0x00000077 /* Channel_id 2 to CENTER_LFE, Channel_id 3 to NULL. */ /* Channel_id's handle stereo channels. Channel X is a single mono channel */ /* Host is input from the PCI bus. */ /* Host channel 0 [2:0] -> SPDIF Mixer/Router channel 0-7. * Host channel 1 [6:4] -> SPDIF Mixer/Router channel 0-7. * Host channel 2 [10:8] -> SPDIF Mixer/Router channel 0-7. 
* Host channel 3 [14:12] -> SPDIF Mixer/Router channel 0-7. * Host channel 4 [18:16] -> SPDIF Mixer/Router channel 0-7. * Host channel 5 [22:20] -> SPDIF Mixer/Router channel 0-7. * Host channel 6 [26:24] -> SPDIF Mixer/Router channel 0-7. * Host channel 7 [30:28] -> SPDIF Mixer/Router channel 0-7. */ #define PLAYBACK_ROUTING2 0x64 /* Playback Routing . Feeding Capture channels back into Playback. Effects AC3 output. Default 0x76767676 */ /* SRC is input from the capture inputs. */ /* SRC channel 0 [2:0] -> SPDIF Mixer/Router channel 0-7. * SRC channel 1 [6:4] -> SPDIF Mixer/Router channel 0-7. * SRC channel 2 [10:8] -> SPDIF Mixer/Router channel 0-7. * SRC channel 3 [14:12] -> SPDIF Mixer/Router channel 0-7. * SRC channel 4 [18:16] -> SPDIF Mixer/Router channel 0-7. * SRC channel 5 [22:20] -> SPDIF Mixer/Router channel 0-7. * SRC channel 6 [26:24] -> SPDIF Mixer/Router channel 0-7. * SRC channel 7 [30:28] -> SPDIF Mixer/Router channel 0-7. */ #define PLAYBACK_MUTE 0x65 /* Unknown. While playing 0x0, while silent 0x00fc0000 */ /* SPDIF Mixer input control: * Invert SRC to SPDIF Mixer [7-0] (One bit per channel) * Invert Host to SPDIF Mixer [15:8] (One bit per channel) * SRC to SPDIF Mixer disable [23:16] (One bit per channel) * Host to SPDIF Mixer disable [31:24] (One bit per channel) */ #define PLAYBACK_VOLUME1 0x66 /* Playback SPDIF volume per channel. Set to the same PLAYBACK_VOLUME(0x6a) */ /* PLAYBACK_VOLUME1 must be set to 30303030 for SPDIF AC3 Playback */ /* SPDIF mixer input volume. 0=12dB, 0x30=0dB, 0xFE=-51.5dB, 0xff=Mute */ /* One register for each of the 4 stereo streams. */ /* SRC Right volume [7:0] * SRC Left volume [15:8] * Host Right volume [23:16] * Host Left volume [31:24] */ #define CAPTURE_ROUTING1 0x67 /* Capture Routing. Default 0x32765410 */ /* Similar to register 0x63, except that the destination is the I2S mixer instead of the SPDIF mixer. I.E. Outputs to the Analog outputs instead of SPDIF. 
*/ #define CAPTURE_ROUTING2 0x68 /* Unknown Routing. Default 0x76767676 */ /* Similar to register 0x64, except that the destination is the I2S mixer instead of the SPDIF mixer. I.E. Outputs to the Analog outputs instead of SPDIF. */ #define CAPTURE_MUTE 0x69 /* Unknown. While capturing 0x0, while silent 0x00fc0000 */ /* Similar to register 0x65, except that the destination is the I2S mixer instead of the SPDIF mixer. I.E. Outputs to the Analog outputs instead of SPDIF. */ #define PLAYBACK_VOLUME2 0x6a /* Playback Analog volume per channel. Does not effect AC3 output */ /* Similar to register 0x66, except that the destination is the I2S mixer instead of the SPDIF mixer. I.E. Outputs to the Analog outputs instead of SPDIF. */ #define UNKNOWN6b 0x6b /* Unknown. Readonly. Default 00400000 00400000 00400000 00400000 */ #define MIDI_UART_A_DATA 0x6c /* Midi Uart A Data */ #define MIDI_UART_A_CMD 0x6d /* Midi Uart A Command/Status */ #define MIDI_UART_B_DATA 0x6e /* Midi Uart B Data (currently unused) */ #define MIDI_UART_B_CMD 0x6f /* Midi Uart B Command/Status (currently unused) */ /* unique channel identifier for midi->channel */ #define CA0106_MIDI_CHAN_A 0x1 #define CA0106_MIDI_CHAN_B 0x2 /* from mpu401 */ #define CA0106_MIDI_INPUT_AVAIL 0x80 #define CA0106_MIDI_OUTPUT_READY 0x40 #define CA0106_MPU401_RESET 0xff #define CA0106_MPU401_ENTER_UART 0x3f #define CA0106_MPU401_ACK 0xfe #define SAMPLE_RATE_TRACKER_STATUS 0x70 /* Readonly. Default 00108000 00108000 00500000 00500000 */ /* Estimated sample rate [19:0] Relative to 48kHz. 0x8000 = 1.0 * Rate Locked [20] * SPDIF Locked [21] For SPDIF channel only. * Valid Audio [22] For SPDIF channel only. */ #define CAPTURE_CONTROL 0x71 /* Some sort of routing. default = 40c81000 30303030 30300000 00700000 */ /* Channel_id 0: 0x40c81000 must be changed to 0x40c80000 for SPDIF AC3 input or output. */ /* Channel_id 1: 0xffffffff(mute) 0x30303030(max) controls CAPTURE feedback into PLAYBACK. 
*/ /* Sample rate output control register Channel=0 * Sample output rate [1:0] (0=48kHz, 1=44.1kHz, 2=96kHz, 3=192Khz) * Sample input rate [3:2] (0=48kHz, 1=Not available, 2=96kHz, 3=192Khz) * SRC input source select [4] 0=Audio from digital mixer, 1=Audio from analog source. * Record rate [9:8] (0=48kHz, 1=Not available, 2=96kHz, 3=192Khz) * Record mixer output enable [12:10] * I2S input rate master mode [15:14] (0=48kHz, 1=44.1kHz, 2=96kHz, 3=192Khz) * I2S output rate [17:16] (0=48kHz, 1=44.1kHz, 2=96kHz, 3=192Khz) * I2S output source select [18] (0=Audio from host, 1=Audio from SRC) * Record mixer I2S enable [20:19] (enable/disable i2sin1 and i2sin0) * I2S output master clock select [21] (0=256*I2S output rate, 1=512*I2S output rate.) * I2S input master clock select [22] (0=256*I2S input rate, 1=512*I2S input rate.) * I2S input mode [23] (0=Slave, 1=Master) * SPDIF output rate [25:24] (0=48kHz, 1=44.1kHz, 2=96kHz, 3=192Khz) * SPDIF output source select [26] (0=host, 1=SRC) * Not used [27] * Record Source 0 input [29:28] (0=SPDIF in, 1=I2S in, 2=AC97 Mic, 3=AC97 PCM) * Record Source 1 input [31:30] (0=SPDIF in, 1=I2S in, 2=AC97 Mic, 3=AC97 PCM) */ /* Sample rate output control register Channel=1 * I2S Input 0 volume Right [7:0] * I2S Input 0 volume Left [15:8] * I2S Input 1 volume Right [23:16] * I2S Input 1 volume Left [31:24] */ /* Sample rate output control register Channel=2 * SPDIF Input volume Right [23:16] * SPDIF Input volume Left [31:24] */ /* Sample rate output control register Channel=3 * No used */ #define SPDIF_SELECT2 0x72 /* Some sort of routing. Channel_id 0 only. default = 0x0f0f003f. Analog 0x000b0000, Digital 0x0b000000 */ #define ROUTING2_FRONT_MASK 0x00010000 /* Enable for Front speakers. */ #define ROUTING2_CENTER_LFE_MASK 0x00020000 /* Enable for Center/LFE speakers. */ #define ROUTING2_REAR_MASK 0x00080000 /* Enable for Rear speakers. 
*/ /* Audio output control * AC97 output enable [5:0] * I2S output enable [19:16] * SPDIF output enable [27:24] */ #define UNKNOWN73 0x73 /* Unknown. Readonly. Default 0x0 */ #define CHIP_VERSION 0x74 /* P17 Chip version. Channel_id 0 only. Default 00000071 */ #define EXTENDED_INT_MASK 0x75 /* Used by both playback and capture interrupt handler */ /* Sets which Interrupts are enabled. */ /* 0x00000001 = Half period. Playback. * 0x00000010 = Full period. Playback. * 0x00000100 = Half buffer. Playback. * 0x00001000 = Full buffer. Playback. * 0x00010000 = Half buffer. Capture. * 0x00100000 = Full buffer. Capture. * Capture can only do 2 periods. * 0x01000000 = End audio. Playback. * 0x40000000 = Half buffer Playback,Caputre xrun. * 0x80000000 = Full buffer Playback,Caputre xrun. */ #define EXTENDED_INT 0x76 /* Used by both playback and capture interrupt handler */ /* Shows which interrupts are active at the moment. */ /* Same bit layout as EXTENDED_INT_MASK */ #define COUNTER77 0x77 /* Counter range 0 to 0x3fffff, 192000 counts per second. */ #define COUNTER78 0x78 /* Counter range 0 to 0x3fffff, 44100 counts per second. */ #define EXTENDED_INT_TIMER 0x79 /* Channel_id 0 only. Used by both playback and capture interrupt handler */ /* Causes interrupts based on timer intervals. */ #define SPI 0x7a /* SPI: Serial Interface Register */ #define I2C_A 0x7b /* I2C Address. 32 bit */ #define I2C_D0 0x7c /* I2C Data Port 0. 32 bit */ #define I2C_D1 0x7d /* I2C Data Port 1. 
32 bit */ //I2C values #define I2C_A_ADC_ADD_MASK 0x000000fe //The address is a 7 bit address #define I2C_A_ADC_RW_MASK 0x00000001 //bit mask for R/W #define I2C_A_ADC_TRANS_MASK 0x00000010 //Bit mask for I2c address DAC value #define I2C_A_ADC_ABORT_MASK 0x00000020 //Bit mask for I2C transaction abort flag #define I2C_A_ADC_LAST_MASK 0x00000040 //Bit mask for Last word transaction #define I2C_A_ADC_BYTE_MASK 0x00000080 //Bit mask for Byte Mode #define I2C_A_ADC_ADD 0x00000034 //This is the Device address for ADC #define I2C_A_ADC_READ 0x00000001 //To perform a read operation #define I2C_A_ADC_START 0x00000100 //Start I2C transaction #define I2C_A_ADC_ABORT 0x00000200 //I2C transaction abort #define I2C_A_ADC_LAST 0x00000400 //I2C last transaction #define I2C_A_ADC_BYTE 0x00000800 //I2C one byte mode #define I2C_D_ADC_REG_MASK 0xfe000000 //ADC address register #define I2C_D_ADC_DAT_MASK 0x01ff0000 //ADC data register #define ADC_TIMEOUT 0x00000007 //ADC Timeout Clock Disable #define ADC_IFC_CTRL 0x0000000b //ADC Interface Control #define ADC_MASTER 0x0000000c //ADC Master Mode Control #define ADC_POWER 0x0000000d //ADC PowerDown Control #define ADC_ATTEN_ADCL 0x0000000e //ADC Attenuation ADCL #define ADC_ATTEN_ADCR 0x0000000f //ADC Attenuation ADCR #define ADC_ALC_CTRL1 0x00000010 //ADC ALC Control 1 #define ADC_ALC_CTRL2 0x00000011 //ADC ALC Control 2 #define ADC_ALC_CTRL3 0x00000012 //ADC ALC Control 3 #define ADC_NOISE_CTRL 0x00000013 //ADC Noise Gate Control #define ADC_LIMIT_CTRL 0x00000014 //ADC Limiter Control #define ADC_MUX 0x00000015 //ADC Mux offset #if 0 /* FIXME: Not tested yet. 
*/ #define ADC_GAIN_MASK 0x000000ff //Mask for ADC Gain #define ADC_ZERODB 0x000000cf //Value to set ADC to 0dB #define ADC_MUTE_MASK 0x000000c0 //Mask for ADC mute #define ADC_MUTE 0x000000c0 //Value to mute ADC #define ADC_OSR 0x00000008 //Mask for ADC oversample rate select #define ADC_TIMEOUT_DISABLE 0x00000008 //Value and mask to disable Timeout clock #define ADC_HPF_DISABLE 0x00000100 //Value and mask to disable High pass filter #define ADC_TRANWIN_MASK 0x00000070 //Mask for Length of Transient Window #endif #define ADC_MUX_MASK 0x0000000f //Mask for ADC Mux #define ADC_MUX_PHONE 0x00000001 //Value to select TAD at ADC Mux (Not used) #define ADC_MUX_MIC 0x00000002 //Value to select Mic at ADC Mux #define ADC_MUX_LINEIN 0x00000004 //Value to select LineIn at ADC Mux #define ADC_MUX_AUX 0x00000008 //Value to select Aux at ADC Mux #define SET_CHANNEL 0 /* Testing channel outputs 0=Front, 1=Center/LFE, 2=Unknown, 3=Rear */ #define PCM_FRONT_CHANNEL 0 #define PCM_REAR_CHANNEL 1 #define PCM_CENTER_LFE_CHANNEL 2 #define PCM_UNKNOWN_CHANNEL 3 #define CONTROL_FRONT_CHANNEL 0 #define CONTROL_REAR_CHANNEL 3 #define CONTROL_CENTER_LFE_CHANNEL 1 #define CONTROL_UNKNOWN_CHANNEL 2 /* Based on WM8768 Datasheet Rev 4.2 page 32 */ #define SPI_REG_MASK 0x1ff /* 16-bit SPI writes have a 7-bit address */ #define SPI_REG_SHIFT 9 /* followed by 9 bits of data */ #define SPI_LDA1_REG 0 /* digital attenuation */ #define SPI_RDA1_REG 1 #define SPI_LDA2_REG 4 #define SPI_RDA2_REG 5 #define SPI_LDA3_REG 6 #define SPI_RDA3_REG 7 #define SPI_LDA4_REG 13 #define SPI_RDA4_REG 14 #define SPI_MASTDA_REG 8 #define SPI_DA_BIT_UPDATE (1<<8) /* update attenuation values */ #define SPI_DA_BIT_0dB 0xff /* 0 dB */ #define SPI_DA_BIT_infdB 0x00 /* inf dB attenuation (mute) */ #define SPI_PL_REG 2 #define SPI_PL_BIT_L_M (0<<5) /* left channel = mute */ #define SPI_PL_BIT_L_L (1<<5) /* left channel = left */ #define SPI_PL_BIT_L_R (2<<5) /* left channel = right */ #define SPI_PL_BIT_L_C (3<<5) /* left 
channel = (L+R)/2 */ #define SPI_PL_BIT_R_M (0<<7) /* right channel = mute */ #define SPI_PL_BIT_R_L (1<<7) /* right channel = left */ #define SPI_PL_BIT_R_R (2<<7) /* right channel = right */ #define SPI_PL_BIT_R_C (3<<7) /* right channel = (L+R)/2 */ #define SPI_IZD_REG 2 #define SPI_IZD_BIT (0<<4) /* infinite zero detect */ #define SPI_FMT_REG 3 #define SPI_FMT_BIT_RJ (0<<0) /* right justified mode */ #define SPI_FMT_BIT_LJ (1<<0) /* left justified mode */ #define SPI_FMT_BIT_I2S (2<<0) /* I2S mode */ #define SPI_FMT_BIT_DSP (3<<0) /* DSP Modes A or B */ #define SPI_LRP_REG 3 #define SPI_LRP_BIT (1<<2) /* invert LRCLK polarity */ #define SPI_BCP_REG 3 #define SPI_BCP_BIT (1<<3) /* invert BCLK polarity */ #define SPI_IWL_REG 3 #define SPI_IWL_BIT_16 (0<<4) /* 16-bit world length */ #define SPI_IWL_BIT_20 (1<<4) /* 20-bit world length */ #define SPI_IWL_BIT_24 (2<<4) /* 24-bit world length */ #define SPI_IWL_BIT_32 (3<<4) /* 32-bit world length */ #define SPI_MS_REG 10 #define SPI_MS_BIT (1<<5) /* master mode */ #define SPI_RATE_REG 10 /* only applies in master mode */ #define SPI_RATE_BIT_128 (0<<6) /* MCLK = LRCLK * 128 */ #define SPI_RATE_BIT_192 (1<<6) #define SPI_RATE_BIT_256 (2<<6) #define SPI_RATE_BIT_384 (3<<6) #define SPI_RATE_BIT_512 (4<<6) #define SPI_RATE_BIT_768 (5<<6) /* They really do label the bit for the 4th channel "4" and not "3" */ #define SPI_DMUTE0_REG 9 #define SPI_DMUTE1_REG 9 #define SPI_DMUTE2_REG 9 #define SPI_DMUTE4_REG 15 #define SPI_DMUTE0_BIT (1<<3) #define SPI_DMUTE1_BIT (1<<4) #define SPI_DMUTE2_BIT (1<<5) #define SPI_DMUTE4_BIT (1<<2) #define SPI_PHASE0_REG 3 #define SPI_PHASE1_REG 3 #define SPI_PHASE2_REG 3 #define SPI_PHASE4_REG 15 #define SPI_PHASE0_BIT (1<<6) #define SPI_PHASE1_BIT (1<<7) #define SPI_PHASE2_BIT (1<<8) #define SPI_PHASE4_BIT (1<<3) #define SPI_PDWN_REG 2 /* power down all DACs */ #define SPI_PDWN_BIT (1<<2) #define SPI_DACD0_REG 10 /* power down individual DACs */ #define SPI_DACD1_REG 10 #define SPI_DACD2_REG 
10
#define SPI_DACD4_REG	15
#define SPI_DACD0_BIT	(1<<1)
#define SPI_DACD1_BIT	(1<<2)
#define SPI_DACD2_BIT	(1<<3)
#define SPI_DACD4_BIT	(1<<0) /* datasheet error says it's 1 */
#define SPI_PWRDNALL_REG	10 /* power down everything */
#define SPI_PWRDNALL_BIT	(1<<4)

#include "ca_midi.h"

struct snd_ca0106;

/*
 * Bookkeeping for one hardware playback or capture channel.
 */
struct snd_ca0106_channel {
	struct snd_ca0106 *emu;		/* back-pointer to the owning chip */
	int number;			/* hardware channel index */
	int use;			/* non-zero while the channel is claimed */
	/* handler invoked from the card interrupt for this channel */
	void (*interrupt)(struct snd_ca0106 *emu, struct snd_ca0106_channel *channel);
	struct snd_ca0106_pcm *epcm;	/* PCM runtime bound to this channel */
};

/*
 * Per-substream PCM runtime data.
 */
struct snd_ca0106_pcm {
	struct snd_ca0106 *emu;			/* owning chip */
	struct snd_pcm_substream *substream;	/* ALSA substream served here */
	int channel_id;				/* hardware channel for this stream */
	unsigned short running;			/* non-zero while triggered/running */
};

/*
 * Static description of one supported card model.
 */
struct snd_ca0106_details {
	u32 serial;		/* subsystem serial identifying the model */
	char * name;		/* human-readable card name */
	int ac97;	/* ac97 = 0 -> Select MIC, Line in, TAD in, AUX in.
			   ac97 = 1 -> Default to AC97 in. */
	int gpio_type;	/* gpio_type = 1 -> shared mic-in/line-in
			   gpio_type = 2 -> shared side-out/line-in. */
	int i2c_adc;	/* with i2c_adc=1, the driver adds some capture volume
			   controls, phone, mic, line-in and aux. */
	u16 spi_dac;	/* spi_dac = 0 -> no spi interface for DACs
			   spi_dac = 0x<front><rear><center-lfe><side> ->
			   specifies DAC id for each channel pair. */
};

// definition of the chip-specific record
struct snd_ca0106 {
	struct snd_card *card;			/* ALSA card instance */
	const struct snd_ca0106_details *details;	/* matched model entry */
	struct pci_dev *pci;			/* underlying PCI device */

	unsigned long port;			/* base I/O port */
	struct resource *res_port;		/* reserved I/O region */
	int irq;				/* assigned interrupt line */

	unsigned int serial;			/* serial number */
	unsigned short model;			/* subsystem id */

	/* NOTE(review): presumably serializes PTR/DATA register access —
	 * confirm against users in ca0106_main.c */
	spinlock_t emu_lock;

	struct snd_ac97 *ac97;
	struct snd_pcm *pcm[4];

	struct snd_ca0106_channel playback_channels[4];
	struct snd_ca0106_channel capture_channels[4];
	u32 spdif_bits[4];		/* s/pdif out default setup */
	u32 spdif_str_bits[4];		/* s/pdif out per-stream setup */
	int spdif_enable;
	int capture_source;
	int i2c_capture_source;
	u8 i2c_capture_volume[4][2];
	int capture_mic_line_in;

	struct snd_dma_buffer buffer;

	struct snd_ca_midi midi;
	struct snd_ca_midi midi2;

	/* cached SPI DAC register values — TODO confirm against mixer code */
	u16 spi_dac_reg[16];

#ifdef CONFIG_PM_SLEEP
#define NUM_SAVED_VOLUMES	9
	unsigned int saved_vol[NUM_SAVED_VOLUMES];	/* mixer state kept across suspend */
#endif
};

/* mixer and proc-interface setup */
int snd_ca0106_mixer(struct snd_ca0106 *emu);
int snd_ca0106_proc_init(struct snd_ca0106 * emu);

/* indexed register access through the PTR/DATA register pair */
unsigned int snd_ca0106_ptr_read(struct snd_ca0106 * emu,
				 unsigned int reg,
				 unsigned int chn);
void snd_ca0106_ptr_write(struct snd_ca0106 *emu,
			  unsigned int reg,
			  unsigned int chn,
			  unsigned int data);

/* I2C / SPI transactions to the converter chips */
int snd_ca0106_i2c_write(struct snd_ca0106 *emu, u32 reg, u32 value);
int snd_ca0106_spi_write(struct snd_ca0106 * emu,
			 unsigned int data);

#ifdef CONFIG_PM_SLEEP
void snd_ca0106_mixer_suspend(struct snd_ca0106 *chip);
void snd_ca0106_mixer_resume(struct snd_ca0106 *chip);
#else
#define snd_ca0106_mixer_suspend(chip)	do { } while (0)
#define snd_ca0106_mixer_resume(chip)	do { } while (0)
#endif
{ "pile_set_name": "Github" }
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.client.solrj.request; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.charset.StandardCharsets; import org.apache.commons.io.IOUtils; /** A simple update request which streams content to the server */ public class StreamingUpdateRequest extends AbstractUpdateRequest { private final RequestWriter.ContentWriter contentWriter; public StreamingUpdateRequest(String path, RequestWriter.ContentWriter contentWriter) { super(METHOD.POST, path); this.contentWriter = contentWriter; } public StreamingUpdateRequest(String path, String content, String contentType) { this(path, new RequestWriter.ContentWriter() { @Override public void write(OutputStream os) throws IOException { os.write(content.getBytes(StandardCharsets.UTF_8)); } @Override public String getContentType() { return contentType; } }); } public StreamingUpdateRequest(String path, File f, String contentType) { this(path, new RequestWriter.ContentWriter() { @Override public void write(OutputStream os) throws IOException { try (InputStream is = new FileInputStream(f)) { IOUtils.copy(is, os); } } @Override public String 
getContentType() { return contentType; } }); } @Override public RequestWriter.ContentWriter getContentWriter(String expectedType) { return contentWriter; } }
{ "pile_set_name": "Github" }
@import "~styles/mixins";

// Styles for a challenge "winners" panel: a grid of winner cards, each with
// an avatar, a placement flag, a submission thumbnail and submitter info.
// Color ($tc-*) and spacing ($base-unit) variables plus the xs-to-sm
// responsive mixin come from ~styles/mixins.

// Row pairing the winner's avatar with their prize amount.
.avatar-prize {
  display: flex;
  align-items: center;
  margin-bottom: 20px;

  @include xs-to-sm {
    margin-bottom: 10px;
  }
}

// Circular avatar; !important overrides the avatar component's own sizing.
.avatar {
  border-radius: 65px !important;
  height: 65px !important;
  margin: 5px 15px 0 0 !important;
  width: 65px !important;
}

// Placement "pennant" pinned to the card's top-left corner; the ::after
// borders draw the triangular notch at the bottom of the pennant.
.flag {
  background-color: $tc-dark-blue-30;
  color: $tc-dark-blue-70;
  font-size: 42px;
  font-weight: 500;
  height: 50px;
  width: 45px;
  line-height: 50px;
  position: absolute;
  top: 0;
  left: 12px;
  text-align: center;

  &::after {
    content: ' ';
    display: block;
    position: absolute;
    top: 100%;
    border-top: solid 15px $tc-dark-blue-30;
    border-bottom: solid 15px transparent;
    border-left: solid 23px transparent;
    border-right: solid 23px transparent;
  }
}

// Centers the second card when it is the last one in its row.
.last {
  &:nth-child(2) {
    justify-content: center;
  }
}

/* TODO: Rename into container. */
// One winner card. place-1/2/3 modifiers recolor the flag (gold/silver/
// bronze); the nth-child rules manage borders and widths of the 2-up grid.
.winner {
  &.place-1 {
    justify-content: center;
    min-width: 100%;

    .flag {
      background-color: $tc-gold;
      color: $tc-gold-110;

      &::after {
        border-top: solid 20px $tc-gold;
      }
    }
  }

  &.place-2 {
    border-right: 1px solid $tc-gray-10;

    .flag {
      background-color: $tc-silver;
      color: $tc-silver-110;

      &::after {
        border-top: solid 20px $tc-silver;
      }
    }
  }

  &.place-3 {
    .flag {
      background-color: $tc-bronze;
      color: $tc-bronze-110;

      &::after {
        border-top: solid 20px $tc-bronze;
      }
    }
  }

  // Left-column cards get the divider on the right, right-column cards none.
  &:nth-child(2n+4) {
    border-left: none;
    border-right: 1px solid $tc-gray-10;
  }

  &:nth-child(2n+3) {
    border-left: none;
  }

  // Every card after the full-width first-place card is half width.
  &:nth-child(n + 3) {
    max-width: 50%;
  }

  @include xs-to-sm {
    height: auto;
    min-width: 100%;
  }

  flex: 1;
  flex-wrap: wrap;
  min-height: 300px;
  min-width: 50%;
  overflow: hidden;
  display: flex;
  justify-content: flex-start;
  align-items: center;
  border-bottom: 1px solid $tc-gray-10;
  border-top: none;
  padding: 4 * $base-unit;
}

// Submission preview box; shows either a .preview image or a .lock state.
.thumbnail {
  display: flex;
  align-items: center;
  justify-content: center;
  background-color: $tc-gray-neutral-light;
  border-radius: 4px;
  border: 1px solid $tc-gray-neutral-dark;
  min-width: 117px;
  height: 117px;
  margin: 25px;
  position: relative;

  .lock {
    .text {
      color: $tc-gray-50;
      font-size: 12px;
    }
  }

  .preview {
    width: 100%;
    height: 100%;
  }
}

// Textual details next to the thumbnail: handle, prize, submission id,
// download link and submission date.
.info {
  .handle {
    color: $tc-dark-blue-110;
    display: block;
    font-size: 36px;
    margin-bottom: 8px;
    font-weight: 300;

    @include xs-to-sm {
      margin-bottom: 0;
      font-size: 28px;
      line-height: 35px;
    }
  }

  .prize {
    color: $tc-gray-90;
    font-size: 24px;
    font-weight: 500;

    @include xs-to-sm {
      font-size: 20px;
      line-height: 30px;
    }
  }

  .id {
    color: $tc-gray-60;
    font-size: 15px;
    line-height: 22px;

    @include xs-to-sm {
      font-size: 13px;
      line-height: 25px;
    }

    span {
      font-weight: 500;
    }
  }

  .download {
    font-size: 15px;
    line-height: 22px;
    color: $tc-dark-blue;

    @include xs-to-sm {
      font-size: 13px;
    }
  }

  .date {
    font-size: 15px;
    line-height: 22px;
    color: $tc-gray-60;
    display: flex;

    @include xs-to-sm {
      flex-direction: column;
      font-size: 13px;
      line-height: 20px;
    }
  }
}

// Padlock glyph shown inside a locked .thumbnail.
.lock-icon {
  fill: $tc-gray-20;
  height: 58px;
  margin-bottom: $base-unit;
  margin-left: -1px;
  width: 46px;
}

.checkpoint {
  flex: none;
}
{ "pile_set_name": "Github" }
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) by Paul Barton-Davis 1998-1999 * * Some portions of this file are taken from work that is * copyright (C) by Hannu Savolainen 1993-1996 */ /* * An ALSA lowlevel driver for Turtle Beach ICS2115 wavetable synth * (Maui, Tropez, Tropez Plus) * * This driver supports the onboard wavetable synthesizer (an ICS2115), * including patch, sample and program loading and unloading, conversion * of GUS patches during loading, and full user-level access to all * WaveFront commands. It tries to provide semi-intelligent patch and * sample management as well. * */ #include <linux/io.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/sched/signal.h> #include <linux/firmware.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/snd_wavefront.h> #include <sound/initval.h> static int wf_raw = 0; /* we normally check for "raw state" to firmware loading. if non-zero, then during driver loading, the state of the board is ignored, and we reset the board and load the firmware anyway. */ static int fx_raw = 1; /* if this is zero, we'll leave the FX processor in whatever state it is when the driver is loaded. The default is to download the microprogram and associated coefficients to set it up for "default" operation, whatever that means. */ static int debug_default = 0; /* you can set this to control debugging during driver loading. it takes any combination of the WF_DEBUG_* flags defined in wavefront.h */ /* XXX this needs to be made firmware and hardware version dependent */ #define DEFAULT_OSPATH "wavefront.os" static char *ospath = DEFAULT_OSPATH; /* the firmware file name */ static int wait_usecs = 150; /* This magic number seems to give pretty optimal throughput based on my limited experimentation. 
If you want to play around with it and find a better value, be my guest. Remember, the idea is to get a number that causes us to just busy wait for as many WaveFront commands as possible, without coming up with a number so large that we hog the whole CPU. Specifically, with this number, out of about 134,000 status waits, only about 250 result in a sleep. */ static int sleep_interval = 100; /* HZ/sleep_interval seconds per sleep */ static int sleep_tries = 50; /* number of times we'll try to sleep */ static int reset_time = 2; /* hundreths of a second we wait after a HW reset for the expected interrupt. */ static int ramcheck_time = 20; /* time in seconds to wait while ROM code checks on-board RAM. */ static int osrun_time = 10; /* time in seconds we wait for the OS to start running. */ module_param(wf_raw, int, 0444); MODULE_PARM_DESC(wf_raw, "if non-zero, assume that we need to boot the OS"); module_param(fx_raw, int, 0444); MODULE_PARM_DESC(fx_raw, "if non-zero, assume that the FX process needs help"); module_param(debug_default, int, 0444); MODULE_PARM_DESC(debug_default, "debug parameters for card initialization"); module_param(wait_usecs, int, 0444); MODULE_PARM_DESC(wait_usecs, "how long to wait without sleeping, usecs"); module_param(sleep_interval, int, 0444); MODULE_PARM_DESC(sleep_interval, "how long to sleep when waiting for reply"); module_param(sleep_tries, int, 0444); MODULE_PARM_DESC(sleep_tries, "how many times to try sleeping during a wait"); module_param(ospath, charp, 0444); MODULE_PARM_DESC(ospath, "pathname to processed ICS2115 OS firmware"); module_param(reset_time, int, 0444); MODULE_PARM_DESC(reset_time, "how long to wait for a reset to take effect"); module_param(ramcheck_time, int, 0444); MODULE_PARM_DESC(ramcheck_time, "how many seconds to wait for the RAM test"); module_param(osrun_time, int, 0444); MODULE_PARM_DESC(osrun_time, "how many seconds to wait for the ICS2115 OS"); /* if WF_DEBUG not defined, no run-time debugging messages will 
be available via the debug flag setting. Given the current beta state of the driver, this will remain set until a future version. */ #define WF_DEBUG 1 #ifdef WF_DEBUG #define DPRINT(cond, ...) \ if ((dev->debug & (cond)) == (cond)) { \ snd_printk (__VA_ARGS__); \ } #else #define DPRINT(cond, args...) #endif /* WF_DEBUG */ #define LOGNAME "WaveFront: " /* bitmasks for WaveFront status port value */ #define STAT_RINTR_ENABLED 0x01 #define STAT_CAN_READ 0x02 #define STAT_INTR_READ 0x04 #define STAT_WINTR_ENABLED 0x10 #define STAT_CAN_WRITE 0x20 #define STAT_INTR_WRITE 0x40 static int wavefront_delete_sample (snd_wavefront_t *, int sampnum); static int wavefront_find_free_sample (snd_wavefront_t *); struct wavefront_command { int cmd; char *action; unsigned int read_cnt; unsigned int write_cnt; int need_ack; }; static struct { int errno; const char *errstr; } wavefront_errors[] = { { 0x01, "Bad sample number" }, { 0x02, "Out of sample memory" }, { 0x03, "Bad patch number" }, { 0x04, "Error in number of voices" }, { 0x06, "Sample load already in progress" }, { 0x0B, "No sample load request pending" }, { 0x0E, "Bad MIDI channel number" }, { 0x10, "Download Record Error" }, { 0x80, "Success" }, { 0x0 } }; #define NEEDS_ACK 1 static struct wavefront_command wavefront_commands[] = { { WFC_SET_SYNTHVOL, "set synthesizer volume", 0, 1, NEEDS_ACK }, { WFC_GET_SYNTHVOL, "get synthesizer volume", 1, 0, 0}, { WFC_SET_NVOICES, "set number of voices", 0, 1, NEEDS_ACK }, { WFC_GET_NVOICES, "get number of voices", 1, 0, 0 }, { WFC_SET_TUNING, "set synthesizer tuning", 0, 2, NEEDS_ACK }, { WFC_GET_TUNING, "get synthesizer tuning", 2, 0, 0 }, { WFC_DISABLE_CHANNEL, "disable synth channel", 0, 1, NEEDS_ACK }, { WFC_ENABLE_CHANNEL, "enable synth channel", 0, 1, NEEDS_ACK }, { WFC_GET_CHANNEL_STATUS, "get synth channel status", 3, 0, 0 }, { WFC_MISYNTH_OFF, "disable midi-in to synth", 0, 0, NEEDS_ACK }, { WFC_MISYNTH_ON, "enable midi-in to synth", 0, 0, NEEDS_ACK }, { WFC_VMIDI_ON, 
"enable virtual midi mode", 0, 0, NEEDS_ACK }, { WFC_VMIDI_OFF, "disable virtual midi mode", 0, 0, NEEDS_ACK }, { WFC_MIDI_STATUS, "report midi status", 1, 0, 0 }, { WFC_FIRMWARE_VERSION, "report firmware version", 2, 0, 0 }, { WFC_HARDWARE_VERSION, "report hardware version", 2, 0, 0 }, { WFC_GET_NSAMPLES, "report number of samples", 2, 0, 0 }, { WFC_INSTOUT_LEVELS, "report instantaneous output levels", 7, 0, 0 }, { WFC_PEAKOUT_LEVELS, "report peak output levels", 7, 0, 0 }, { WFC_DOWNLOAD_SAMPLE, "download sample", 0, WF_SAMPLE_BYTES, NEEDS_ACK }, { WFC_DOWNLOAD_BLOCK, "download block", 0, 0, NEEDS_ACK}, { WFC_DOWNLOAD_SAMPLE_HEADER, "download sample header", 0, WF_SAMPLE_HDR_BYTES, NEEDS_ACK }, { WFC_UPLOAD_SAMPLE_HEADER, "upload sample header", 13, 2, 0 }, /* This command requires a variable number of bytes to be written. There is a hack in snd_wavefront_cmd() to support this. The actual count is passed in as the read buffer ptr, cast appropriately. Ugh. */ { WFC_DOWNLOAD_MULTISAMPLE, "download multisample", 0, 0, NEEDS_ACK }, /* This one is a hack as well. We just read the first byte of the response, don't fetch an ACK, and leave the rest to the calling function. Ugly, ugly, ugly. 
*/ { WFC_UPLOAD_MULTISAMPLE, "upload multisample", 2, 1, 0 }, { WFC_DOWNLOAD_SAMPLE_ALIAS, "download sample alias", 0, WF_ALIAS_BYTES, NEEDS_ACK }, { WFC_UPLOAD_SAMPLE_ALIAS, "upload sample alias", WF_ALIAS_BYTES, 2, 0}, { WFC_DELETE_SAMPLE, "delete sample", 0, 2, NEEDS_ACK }, { WFC_IDENTIFY_SAMPLE_TYPE, "identify sample type", 5, 2, 0 }, { WFC_UPLOAD_SAMPLE_PARAMS, "upload sample parameters" }, { WFC_REPORT_FREE_MEMORY, "report free memory", 4, 0, 0 }, { WFC_DOWNLOAD_PATCH, "download patch", 0, 134, NEEDS_ACK }, { WFC_UPLOAD_PATCH, "upload patch", 132, 2, 0 }, { WFC_DOWNLOAD_PROGRAM, "download program", 0, 33, NEEDS_ACK }, { WFC_UPLOAD_PROGRAM, "upload program", 32, 1, 0 }, { WFC_DOWNLOAD_EDRUM_PROGRAM, "download enhanced drum program", 0, 9, NEEDS_ACK}, { WFC_UPLOAD_EDRUM_PROGRAM, "upload enhanced drum program", 8, 1, 0}, { WFC_SET_EDRUM_CHANNEL, "set enhanced drum program channel", 0, 1, NEEDS_ACK }, { WFC_DISABLE_DRUM_PROGRAM, "disable drum program", 0, 1, NEEDS_ACK }, { WFC_REPORT_CHANNEL_PROGRAMS, "report channel program numbers", 32, 0, 0 }, { WFC_NOOP, "the no-op command", 0, 0, NEEDS_ACK }, { 0x00 } }; static const char * wavefront_errorstr (int errnum) { int i; for (i = 0; wavefront_errors[i].errstr; i++) { if (wavefront_errors[i].errno == errnum) { return wavefront_errors[i].errstr; } } return "Unknown WaveFront error"; } static struct wavefront_command * wavefront_get_command (int cmd) { int i; for (i = 0; wavefront_commands[i].cmd != 0; i++) { if (cmd == wavefront_commands[i].cmd) { return &wavefront_commands[i]; } } return NULL; } static inline int wavefront_status (snd_wavefront_t *dev) { return inb (dev->status_port); } static int wavefront_sleep (int limit) { schedule_timeout_interruptible(limit); return signal_pending(current); } static int wavefront_wait (snd_wavefront_t *dev, int mask) { int i; /* Spin for a short period of time, because >99% of all requests to the WaveFront can be serviced inline like this. 
*/ for (i = 0; i < wait_usecs; i += 5) { if (wavefront_status (dev) & mask) { return 1; } udelay(5); } for (i = 0; i < sleep_tries; i++) { if (wavefront_status (dev) & mask) { return 1; } if (wavefront_sleep (HZ/sleep_interval)) { return (0); } } return (0); } static int wavefront_read (snd_wavefront_t *dev) { if (wavefront_wait (dev, STAT_CAN_READ)) return inb (dev->data_port); DPRINT (WF_DEBUG_DATA, "read timeout.\n"); return -1; } static int wavefront_write (snd_wavefront_t *dev, unsigned char data) { if (wavefront_wait (dev, STAT_CAN_WRITE)) { outb (data, dev->data_port); return 0; } DPRINT (WF_DEBUG_DATA, "write timeout.\n"); return -1; } int snd_wavefront_cmd (snd_wavefront_t *dev, int cmd, unsigned char *rbuf, unsigned char *wbuf) { int ack; unsigned int i; int c; struct wavefront_command *wfcmd; if ((wfcmd = wavefront_get_command (cmd)) == NULL) { snd_printk ("command 0x%x not supported.\n", cmd); return 1; } /* Hack to handle the one variable-size write command. See wavefront_send_multisample() for the other half of this gross and ugly strategy. 
*/ if (cmd == WFC_DOWNLOAD_MULTISAMPLE) { wfcmd->write_cnt = (unsigned long) rbuf; rbuf = NULL; } DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n", cmd, wfcmd->action, wfcmd->read_cnt, wfcmd->write_cnt, wfcmd->need_ack); if (wavefront_write (dev, cmd)) { DPRINT ((WF_DEBUG_IO|WF_DEBUG_CMD), "cannot request " "0x%x [%s].\n", cmd, wfcmd->action); return 1; } if (wfcmd->write_cnt > 0) { DPRINT (WF_DEBUG_DATA, "writing %d bytes " "for 0x%x\n", wfcmd->write_cnt, cmd); for (i = 0; i < wfcmd->write_cnt; i++) { if (wavefront_write (dev, wbuf[i])) { DPRINT (WF_DEBUG_IO, "bad write for byte " "%d of 0x%x [%s].\n", i, cmd, wfcmd->action); return 1; } DPRINT (WF_DEBUG_DATA, "write[%d] = 0x%x\n", i, wbuf[i]); } } if (wfcmd->read_cnt > 0) { DPRINT (WF_DEBUG_DATA, "reading %d ints " "for 0x%x\n", wfcmd->read_cnt, cmd); for (i = 0; i < wfcmd->read_cnt; i++) { if ((c = wavefront_read (dev)) == -1) { DPRINT (WF_DEBUG_IO, "bad read for byte " "%d of 0x%x [%s].\n", i, cmd, wfcmd->action); return 1; } /* Now handle errors. Lots of special cases here */ if (c == 0xff) { if ((c = wavefront_read (dev)) == -1) { DPRINT (WF_DEBUG_IO, "bad read for " "error byte at " "read byte %d " "of 0x%x [%s].\n", i, cmd, wfcmd->action); return 1; } /* Can you believe this madness ? */ if (c == 1 && wfcmd->cmd == WFC_IDENTIFY_SAMPLE_TYPE) { rbuf[0] = WF_ST_EMPTY; return (0); } else if (c == 3 && wfcmd->cmd == WFC_UPLOAD_PATCH) { return 3; } else if (c == 1 && wfcmd->cmd == WFC_UPLOAD_PROGRAM) { return 1; } else { DPRINT (WF_DEBUG_IO, "error %d (%s) " "during " "read for byte " "%d of 0x%x " "[%s].\n", c, wavefront_errorstr (c), i, cmd, wfcmd->action); return 1; } } else { rbuf[i] = c; } DPRINT (WF_DEBUG_DATA, "read[%d] = 0x%x\n",i, rbuf[i]); } } if ((wfcmd->read_cnt == 0 && wfcmd->write_cnt == 0) || wfcmd->need_ack) { DPRINT (WF_DEBUG_CMD, "reading ACK for 0x%x\n", cmd); /* Some commands need an ACK, but return zero instead of the standard value. 
*/ if ((ack = wavefront_read (dev)) == 0) { ack = WF_ACK; } if (ack != WF_ACK) { if (ack == -1) { DPRINT (WF_DEBUG_IO, "cannot read ack for " "0x%x [%s].\n", cmd, wfcmd->action); return 1; } else { int err = -1; /* something unknown */ if (ack == 0xff) { /* explicit error */ if ((err = wavefront_read (dev)) == -1) { DPRINT (WF_DEBUG_DATA, "cannot read err " "for 0x%x [%s].\n", cmd, wfcmd->action); } } DPRINT (WF_DEBUG_IO, "0x%x [%s] " "failed (0x%x, 0x%x, %s)\n", cmd, wfcmd->action, ack, err, wavefront_errorstr (err)); return -err; } } DPRINT (WF_DEBUG_DATA, "ack received " "for 0x%x [%s]\n", cmd, wfcmd->action); } else { DPRINT (WF_DEBUG_CMD, "0x%x [%s] does not need " "ACK (%d,%d,%d)\n", cmd, wfcmd->action, wfcmd->read_cnt, wfcmd->write_cnt, wfcmd->need_ack); } return 0; } /*********************************************************************** WaveFront data munging Things here are weird. All data written to the board cannot have its most significant bit set. Any data item with values potentially > 0x7F (127) must be split across multiple bytes. Sometimes, we need to munge numeric values that are represented on the x86 side as 8-32 bit values. Sometimes, we need to munge data that is represented on the x86 side as an array of bytes. The most efficient approach to handling both cases seems to be to use 2 different functions for munging and 2 for de-munging. This avoids weird casting and worrying about bit-level offsets. 
**********************************************************************/ static unsigned char * munge_int32 (unsigned int src, unsigned char *dst, unsigned int dst_size) { unsigned int i; for (i = 0; i < dst_size; i++) { *dst = src & 0x7F; /* Mask high bit of LSB */ src = src >> 7; /* Rotate Right 7 bits */ /* Note: we leave the upper bits in place */ dst++; } return dst; }; static int demunge_int32 (unsigned char* src, int src_size) { int i; int outval = 0; for (i = src_size - 1; i >= 0; i--) { outval=(outval<<7)+src[i]; } return outval; }; static unsigned char * munge_buf (unsigned char *src, unsigned char *dst, unsigned int dst_size) { unsigned int i; unsigned int last = dst_size / 2; for (i = 0; i < last; i++) { *dst++ = src[i] & 0x7f; *dst++ = src[i] >> 7; } return dst; } static unsigned char * demunge_buf (unsigned char *src, unsigned char *dst, unsigned int src_bytes) { int i; unsigned char *end = src + src_bytes; end = src + src_bytes; /* NOTE: src and dst *CAN* point to the same address */ for (i = 0; src != end; i++) { dst[i] = *src++; dst[i] |= (*src++)<<7; } return dst; } /*********************************************************************** WaveFront: sample, patch and program management. 
***********************************************************************/ static int wavefront_delete_sample (snd_wavefront_t *dev, int sample_num) { unsigned char wbuf[2]; int x; wbuf[0] = sample_num & 0x7f; wbuf[1] = sample_num >> 7; if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) { dev->sample_status[sample_num] = WF_ST_EMPTY; } return x; } static int wavefront_get_sample_status (snd_wavefront_t *dev, int assume_rom) { int i; unsigned char rbuf[32], wbuf[32]; unsigned int sc_real, sc_alias, sc_multi; /* check sample status */ if (snd_wavefront_cmd (dev, WFC_GET_NSAMPLES, rbuf, wbuf)) { snd_printk ("cannot request sample count.\n"); return -1; } sc_real = sc_alias = sc_multi = dev->samples_used = 0; for (i = 0; i < WF_MAX_SAMPLE; i++) { wbuf[0] = i & 0x7f; wbuf[1] = i >> 7; if (snd_wavefront_cmd (dev, WFC_IDENTIFY_SAMPLE_TYPE, rbuf, wbuf)) { snd_printk(KERN_WARNING "cannot identify sample " "type of slot %d\n", i); dev->sample_status[i] = WF_ST_EMPTY; continue; } dev->sample_status[i] = (WF_SLOT_FILLED|rbuf[0]); if (assume_rom) { dev->sample_status[i] |= WF_SLOT_ROM; } switch (rbuf[0] & WF_ST_MASK) { case WF_ST_SAMPLE: sc_real++; break; case WF_ST_MULTISAMPLE: sc_multi++; break; case WF_ST_ALIAS: sc_alias++; break; case WF_ST_EMPTY: break; default: snd_printk ("unknown sample type for " "slot %d (0x%x)\n", i, rbuf[0]); } if (rbuf[0] != WF_ST_EMPTY) { dev->samples_used++; } } snd_printk ("%d samples used (%d real, %d aliases, %d multi), " "%d empty\n", dev->samples_used, sc_real, sc_alias, sc_multi, WF_MAX_SAMPLE - dev->samples_used); return (0); } static int wavefront_get_patch_status (snd_wavefront_t *dev) { unsigned char patchbuf[WF_PATCH_BYTES]; unsigned char patchnum[2]; wavefront_patch *p; int i, x, cnt, cnt2; for (i = 0; i < WF_MAX_PATCH; i++) { patchnum[0] = i & 0x7f; patchnum[1] = i >> 7; if ((x = snd_wavefront_cmd (dev, WFC_UPLOAD_PATCH, patchbuf, patchnum)) == 0) { dev->patch_status[i] |= WF_SLOT_FILLED; p = (wavefront_patch *) 
patchbuf; dev->sample_status [p->sample_number|(p->sample_msb<<7)] |= WF_SLOT_USED; } else if (x == 3) { /* Bad patch number */ dev->patch_status[i] = 0; } else { snd_printk ("upload patch " "error 0x%x\n", x); dev->patch_status[i] = 0; return 1; } } /* program status has already filled in slot_used bits */ for (i = 0, cnt = 0, cnt2 = 0; i < WF_MAX_PATCH; i++) { if (dev->patch_status[i] & WF_SLOT_FILLED) { cnt++; } if (dev->patch_status[i] & WF_SLOT_USED) { cnt2++; } } snd_printk ("%d patch slots filled, %d in use\n", cnt, cnt2); return (0); } static int wavefront_get_program_status (snd_wavefront_t *dev) { unsigned char progbuf[WF_PROGRAM_BYTES]; wavefront_program prog; unsigned char prognum; int i, x, l, cnt; for (i = 0; i < WF_MAX_PROGRAM; i++) { prognum = i; if ((x = snd_wavefront_cmd (dev, WFC_UPLOAD_PROGRAM, progbuf, &prognum)) == 0) { dev->prog_status[i] |= WF_SLOT_USED; demunge_buf (progbuf, (unsigned char *) &prog, WF_PROGRAM_BYTES); for (l = 0; l < WF_NUM_LAYERS; l++) { if (prog.layer[l].mute) { dev->patch_status [prog.layer[l].patch_number] |= WF_SLOT_USED; } } } else if (x == 1) { /* Bad program number */ dev->prog_status[i] = 0; } else { snd_printk ("upload program " "error 0x%x\n", x); dev->prog_status[i] = 0; } } for (i = 0, cnt = 0; i < WF_MAX_PROGRAM; i++) { if (dev->prog_status[i]) { cnt++; } } snd_printk ("%d programs slots in use\n", cnt); return (0); } static int wavefront_send_patch (snd_wavefront_t *dev, wavefront_patch_info *header) { unsigned char buf[WF_PATCH_BYTES+2]; unsigned char *bptr; DPRINT (WF_DEBUG_LOAD_PATCH, "downloading patch %d\n", header->number); if (header->number >= ARRAY_SIZE(dev->patch_status)) return -EINVAL; dev->patch_status[header->number] |= WF_SLOT_FILLED; bptr = munge_int32 (header->number, buf, 2); munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES); if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, NULL, buf)) { snd_printk ("download patch failed\n"); return -EIO; } return (0); } static int 
wavefront_send_program (snd_wavefront_t *dev, wavefront_patch_info *header) { unsigned char buf[WF_PROGRAM_BYTES+1]; int i; DPRINT (WF_DEBUG_LOAD_PATCH, "downloading program %d\n", header->number); if (header->number >= ARRAY_SIZE(dev->prog_status)) return -EINVAL; dev->prog_status[header->number] = WF_SLOT_USED; /* XXX need to zero existing SLOT_USED bit for program_status[i] where `i' is the program that's being (potentially) overwritten. */ for (i = 0; i < WF_NUM_LAYERS; i++) { if (header->hdr.pr.layer[i].mute) { dev->patch_status[header->hdr.pr.layer[i].patch_number] |= WF_SLOT_USED; /* XXX need to mark SLOT_USED for sample used by patch_number, but this means we have to load it. Ick. */ } } buf[0] = header->number; munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES); if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, NULL, buf)) { snd_printk ("download patch failed\n"); return -EIO; } return (0); } static int wavefront_freemem (snd_wavefront_t *dev) { char rbuf[8]; if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, NULL)) { snd_printk ("can't get memory stats.\n"); return -1; } else { return demunge_int32 (rbuf, 4); } } static int wavefront_send_sample (snd_wavefront_t *dev, wavefront_patch_info *header, u16 __user *dataptr, int data_is_unsigned) { /* samples are downloaded via a 16-bit wide i/o port (you could think of it as 2 adjacent 8-bit wide ports but its less efficient that way). therefore, all the blocksizes and so forth listed in the documentation, and used conventionally to refer to sample sizes, which are given in 8-bit units (bytes), need to be divided by 2. 
*/ u16 sample_short = 0; u32 length; u16 __user *data_end = NULL; unsigned int i; const unsigned int max_blksize = 4096/2; unsigned int written; unsigned int blocksize; int dma_ack; int blocknum; unsigned char sample_hdr[WF_SAMPLE_HDR_BYTES]; unsigned char *shptr; int skip = 0; int initial_skip = 0; DPRINT (WF_DEBUG_LOAD_PATCH, "sample %sdownload for slot %d, " "type %d, %d bytes from 0x%lx\n", header->size ? "" : "header ", header->number, header->subkey, header->size, (unsigned long) header->dataptr); if (header->number == WAVEFRONT_FIND_FREE_SAMPLE_SLOT) { int x; if ((x = wavefront_find_free_sample (dev)) < 0) { return -ENOMEM; } snd_printk ("unspecified sample => %d\n", x); header->number = x; } if (header->number >= WF_MAX_SAMPLE) return -EINVAL; if (header->size) { /* XXX it's a debatable point whether or not RDONLY semantics on the ROM samples should cover just the sample data or the sample header. For now, it only covers the sample data, so anyone is free at all times to rewrite sample headers. My reason for this is that we have the sample headers available in the WFB file for General MIDI, and so these can always be reset if needed. The sample data, however, cannot be recovered without a complete reset and firmware reload of the ICS2115, which is a very expensive operation. So, doing things this way allows us to honor the notion of "RESETSAMPLES" reasonably cheaply. Note however, that this is done purely at user level: there is no WFB parser in this driver, and so a complete reset (back to General MIDI, or theoretically some other configuration) is the responsibility of the user level library. To try to do this in the kernel would be a little crazy: we'd need 158K of kernel space just to hold a copy of the patch/program/sample header data. 
*/ if (dev->rom_samples_rdonly) { if (dev->sample_status[header->number] & WF_SLOT_ROM) { snd_printk ("sample slot %d " "write protected\n", header->number); return -EACCES; } } wavefront_delete_sample (dev, header->number); } if (header->size) { dev->freemem = wavefront_freemem (dev); if (dev->freemem < (int)header->size) { snd_printk ("insufficient memory to " "load %d byte sample.\n", header->size); return -ENOMEM; } } skip = WF_GET_CHANNEL(&header->hdr.s); if (skip > 0 && header->hdr.s.SampleResolution != LINEAR_16BIT) { snd_printk ("channel selection only " "possible on 16-bit samples"); return -EINVAL; } switch (skip) { case 0: initial_skip = 0; skip = 1; break; case 1: initial_skip = 0; skip = 2; break; case 2: initial_skip = 1; skip = 2; break; case 3: initial_skip = 2; skip = 3; break; case 4: initial_skip = 3; skip = 4; break; case 5: initial_skip = 4; skip = 5; break; case 6: initial_skip = 5; skip = 6; break; } DPRINT (WF_DEBUG_LOAD_PATCH, "channel selection: %d => " "initial skip = %d, skip = %d\n", WF_GET_CHANNEL (&header->hdr.s), initial_skip, skip); /* Be safe, and zero the "Unused" bits ... */ WF_SET_CHANNEL(&header->hdr.s, 0); /* adjust size for 16 bit samples by dividing by two. We always send 16 bits per write, even for 8 bit samples, so the length is always half the size of the sample data in bytes. */ length = header->size / 2; /* the data we're sent has not been munged, and in fact, the header we have to send isn't just a munged copy either. so, build the sample header right here. */ shptr = &sample_hdr[0]; shptr = munge_int32 (header->number, shptr, 2); if (header->size) { shptr = munge_int32 (length, shptr, 4); } /* Yes, a 4 byte result doesn't contain all of the offset bits, but the offset only uses 24 bits. 
*/ shptr = munge_int32 (*((u32 *) &header->hdr.s.sampleStartOffset), shptr, 4); shptr = munge_int32 (*((u32 *) &header->hdr.s.loopStartOffset), shptr, 4); shptr = munge_int32 (*((u32 *) &header->hdr.s.loopEndOffset), shptr, 4); shptr = munge_int32 (*((u32 *) &header->hdr.s.sampleEndOffset), shptr, 4); /* This one is truly weird. What kind of weirdo decided that in a system dominated by 16 and 32 bit integers, they would use a just 12 bits ? */ shptr = munge_int32 (header->hdr.s.FrequencyBias, shptr, 3); /* Why is this nybblified, when the MSB is *always* zero ? Anyway, we can't take address of bitfield, so make a good-faith guess at where it starts. */ shptr = munge_int32 (*(&header->hdr.s.FrequencyBias+1), shptr, 2); if (snd_wavefront_cmd (dev, header->size ? WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER, NULL, sample_hdr)) { snd_printk ("sample %sdownload refused.\n", header->size ? "" : "header "); return -EIO; } if (header->size == 0) { goto sent; /* Sorry. Just had to have one somewhere */ } data_end = dataptr + length; /* Do any initial skip over an unused channel's data */ dataptr += initial_skip; for (written = 0, blocknum = 0; written < length; written += max_blksize, blocknum++) { if ((length - written) > max_blksize) { blocksize = max_blksize; } else { /* round to nearest 16-byte value */ blocksize = ALIGN(length - written, 8); } if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, NULL, NULL)) { snd_printk ("download block " "request refused.\n"); return -EIO; } for (i = 0; i < blocksize; i++) { if (dataptr < data_end) { __get_user (sample_short, dataptr); dataptr += skip; if (data_is_unsigned) { /* GUS ? */ if (WF_SAMPLE_IS_8BIT(&header->hdr.s)) { /* 8 bit sample resolution, sign extend both bytes. */ ((unsigned char*) &sample_short)[0] += 0x7f; ((unsigned char*) &sample_short)[1] += 0x7f; } else { /* 16 bit sample resolution, sign extend the MSB. 
*/ sample_short += 0x7fff; } } } else { /* In padding section of final block: Don't fetch unsupplied data from user space, just continue with whatever the final value was. */ } if (i < blocksize - 1) { outw (sample_short, dev->block_port); } else { outw (sample_short, dev->last_block_port); } } /* Get "DMA page acknowledge", even though its really nothing to do with DMA at all. */ if ((dma_ack = wavefront_read (dev)) != WF_DMA_ACK) { if (dma_ack == -1) { snd_printk ("upload sample " "DMA ack timeout\n"); return -EIO; } else { snd_printk ("upload sample " "DMA ack error 0x%x\n", dma_ack); return -EIO; } } } dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_SAMPLE); /* Note, label is here because sending the sample header shouldn't alter the sample_status info at all. */ sent: return (0); } static int wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header) { unsigned char alias_hdr[WF_ALIAS_BYTES]; DPRINT (WF_DEBUG_LOAD_PATCH, "download alias, %d is " "alias for %d\n", header->number, header->hdr.a.OriginalSample); if (header->number >= WF_MAX_SAMPLE) return -EINVAL; munge_int32 (header->number, &alias_hdr[0], 2); munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2); munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset), &alias_hdr[4], 4); munge_int32 (*((unsigned int *)&header->hdr.a.loopStartOffset), &alias_hdr[8], 4); munge_int32 (*((unsigned int *)&header->hdr.a.loopEndOffset), &alias_hdr[12], 4); munge_int32 (*((unsigned int *)&header->hdr.a.sampleEndOffset), &alias_hdr[16], 4); munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3); munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2); if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) { snd_printk ("download alias failed.\n"); return -EIO; } dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_ALIAS); return (0); } static int wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) { int i; int 
num_samples; unsigned char *msample_hdr; if (header->number >= WF_MAX_SAMPLE) return -EINVAL; msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL); if (! msample_hdr) return -ENOMEM; munge_int32 (header->number, &msample_hdr[0], 2); /* You'll recall at this point that the "number of samples" value in a wavefront_multisample struct is actually the log2 of the real number of samples. */ num_samples = (1<<(header->hdr.ms.NumberOfSamples&7)); msample_hdr[2] = (unsigned char) header->hdr.ms.NumberOfSamples; DPRINT (WF_DEBUG_LOAD_PATCH, "multi %d with %d=%d samples\n", header->number, header->hdr.ms.NumberOfSamples, num_samples); for (i = 0; i < num_samples; i++) { DPRINT(WF_DEBUG_LOAD_PATCH|WF_DEBUG_DATA, "sample[%d] = %d\n", i, header->hdr.ms.SampleNumber[i]); munge_int32 (header->hdr.ms.SampleNumber[i], &msample_hdr[3+(i*2)], 2); } /* Need a hack here to pass in the number of bytes to be written to the synth. This is ugly, and perhaps one day, I'll fix it. */ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_MULTISAMPLE, (unsigned char *) (long) ((num_samples*2)+3), msample_hdr)) { snd_printk ("download of multisample failed.\n"); kfree(msample_hdr); return -EIO; } dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_MULTISAMPLE); kfree(msample_hdr); return (0); } static int wavefront_fetch_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) { int i; unsigned char log_ns[1]; unsigned char number[2]; int num_samples; munge_int32 (header->number, number, 2); if (snd_wavefront_cmd (dev, WFC_UPLOAD_MULTISAMPLE, log_ns, number)) { snd_printk ("upload multisample failed.\n"); return -EIO; } DPRINT (WF_DEBUG_DATA, "msample %d has %d samples\n", header->number, log_ns[0]); header->hdr.ms.NumberOfSamples = log_ns[0]; /* get the number of samples ... 
*/ num_samples = (1 << log_ns[0]); for (i = 0; i < num_samples; i++) { char d[2]; int val; if ((val = wavefront_read (dev)) == -1) { snd_printk ("upload multisample failed " "during sample loop.\n"); return -EIO; } d[0] = val; if ((val = wavefront_read (dev)) == -1) { snd_printk ("upload multisample failed " "during sample loop.\n"); return -EIO; } d[1] = val; header->hdr.ms.SampleNumber[i] = demunge_int32 ((unsigned char *) d, 2); DPRINT (WF_DEBUG_DATA, "msample sample[%d] = %d\n", i, header->hdr.ms.SampleNumber[i]); } return (0); } static int wavefront_send_drum (snd_wavefront_t *dev, wavefront_patch_info *header) { unsigned char drumbuf[WF_DRUM_BYTES]; wavefront_drum *drum = &header->hdr.d; int i; DPRINT (WF_DEBUG_LOAD_PATCH, "downloading edrum for MIDI " "note %d, patch = %d\n", header->number, drum->PatchNumber); drumbuf[0] = header->number & 0x7f; for (i = 0; i < 4; i++) { munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2); } if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) { snd_printk ("download drum failed.\n"); return -EIO; } return (0); } static int wavefront_find_free_sample (snd_wavefront_t *dev) { int i; for (i = 0; i < WF_MAX_SAMPLE; i++) { if (!(dev->sample_status[i] & WF_SLOT_FILLED)) { return i; } } snd_printk ("no free sample slots!\n"); return -1; } #if 0 static int wavefront_find_free_patch (snd_wavefront_t *dev) { int i; for (i = 0; i < WF_MAX_PATCH; i++) { if (!(dev->patch_status[i] & WF_SLOT_FILLED)) { return i; } } snd_printk ("no free patch slots!\n"); return -1; } #endif static int wavefront_load_patch (snd_wavefront_t *dev, const char __user *addr) { wavefront_patch_info *header; int err; header = kmalloc(sizeof(*header), GFP_KERNEL); if (! 
header) return -ENOMEM; if (copy_from_user (header, addr, sizeof(wavefront_patch_info) - sizeof(wavefront_any))) { snd_printk ("bad address for load patch.\n"); err = -EFAULT; goto __error; } DPRINT (WF_DEBUG_LOAD_PATCH, "download " "Sample type: %d " "Sample number: %d " "Sample size: %d\n", header->subkey, header->number, header->size); switch (header->subkey) { case WF_ST_SAMPLE: /* sample or sample_header, based on patch->size */ if (copy_from_user (&header->hdr.s, header->hdrptr, sizeof (wavefront_sample))) { err = -EFAULT; break; } err = wavefront_send_sample (dev, header, header->dataptr, 0); break; case WF_ST_MULTISAMPLE: if (copy_from_user (&header->hdr.s, header->hdrptr, sizeof (wavefront_multisample))) { err = -EFAULT; break; } err = wavefront_send_multisample (dev, header); break; case WF_ST_ALIAS: if (copy_from_user (&header->hdr.a, header->hdrptr, sizeof (wavefront_alias))) { err = -EFAULT; break; } err = wavefront_send_alias (dev, header); break; case WF_ST_DRUM: if (copy_from_user (&header->hdr.d, header->hdrptr, sizeof (wavefront_drum))) { err = -EFAULT; break; } err = wavefront_send_drum (dev, header); break; case WF_ST_PATCH: if (copy_from_user (&header->hdr.p, header->hdrptr, sizeof (wavefront_patch))) { err = -EFAULT; break; } err = wavefront_send_patch (dev, header); break; case WF_ST_PROGRAM: if (copy_from_user (&header->hdr.pr, header->hdrptr, sizeof (wavefront_program))) { err = -EFAULT; break; } err = wavefront_send_program (dev, header); break; default: snd_printk ("unknown patch type %d.\n", header->subkey); err = -EINVAL; break; } __error: kfree(header); return err; } /*********************************************************************** WaveFront: hardware-dependent interface ***********************************************************************/ static void process_sample_hdr (u8 *buf) { wavefront_sample s; u8 *ptr; ptr = buf; /* The board doesn't send us an exact copy of a "wavefront_sample" in response to an Upload Sample Header 
command. Instead, we have to convert the data format back into our data structure, just as in the Download Sample command, where we have to do something very similar in the reverse direction. */ *((u32 *) &s.sampleStartOffset) = demunge_int32 (ptr, 4); ptr += 4; *((u32 *) &s.loopStartOffset) = demunge_int32 (ptr, 4); ptr += 4; *((u32 *) &s.loopEndOffset) = demunge_int32 (ptr, 4); ptr += 4; *((u32 *) &s.sampleEndOffset) = demunge_int32 (ptr, 4); ptr += 4; *((u32 *) &s.FrequencyBias) = demunge_int32 (ptr, 3); ptr += 3; s.SampleResolution = *ptr & 0x3; s.Loop = *ptr & 0x8; s.Bidirectional = *ptr & 0x10; s.Reverse = *ptr & 0x40; /* Now copy it back to where it came from */ memcpy (buf, (unsigned char *) &s, sizeof (wavefront_sample)); } static int wavefront_synth_control (snd_wavefront_card_t *acard, wavefront_control *wc) { snd_wavefront_t *dev = &acard->wavefront; unsigned char patchnumbuf[2]; int i; DPRINT (WF_DEBUG_CMD, "synth control with " "cmd 0x%x\n", wc->cmd); /* Pre-handling of or for various commands */ switch (wc->cmd) { case WFC_DISABLE_INTERRUPTS: snd_printk ("interrupts disabled.\n"); outb (0x80|0x20, dev->control_port); dev->interrupts_are_midi = 1; return 0; case WFC_ENABLE_INTERRUPTS: snd_printk ("interrupts enabled.\n"); outb (0x80|0x40|0x20, dev->control_port); dev->interrupts_are_midi = 1; return 0; case WFC_INTERRUPT_STATUS: wc->rbuf[0] = dev->interrupts_are_midi; return 0; case WFC_ROMSAMPLES_RDONLY: dev->rom_samples_rdonly = wc->wbuf[0]; wc->status = 0; return 0; case WFC_IDENTIFY_SLOT_TYPE: i = wc->wbuf[0] | (wc->wbuf[1] << 7); if (i <0 || i >= WF_MAX_SAMPLE) { snd_printk ("invalid slot ID %d\n", i); wc->status = EINVAL; return -EINVAL; } wc->rbuf[0] = dev->sample_status[i]; wc->status = 0; return 0; case WFC_DEBUG_DRIVER: dev->debug = wc->wbuf[0]; snd_printk ("debug = 0x%x\n", dev->debug); return 0; case WFC_UPLOAD_PATCH: munge_int32 (*((u32 *) wc->wbuf), patchnumbuf, 2); memcpy (wc->wbuf, patchnumbuf, 2); break; case WFC_UPLOAD_MULTISAMPLE: 
/* multisamples have to be handled differently, and cannot be dealt with properly by snd_wavefront_cmd() alone. */ wc->status = wavefront_fetch_multisample (dev, (wavefront_patch_info *) wc->rbuf); return 0; case WFC_UPLOAD_SAMPLE_ALIAS: snd_printk ("support for sample alias upload " "being considered.\n"); wc->status = EINVAL; return -EINVAL; } wc->status = snd_wavefront_cmd (dev, wc->cmd, wc->rbuf, wc->wbuf); /* Post-handling of certain commands. In particular, if the command was an upload, demunge the data so that the user-level doesn't have to think about it. */ if (wc->status == 0) { switch (wc->cmd) { /* intercept any freemem requests so that we know we are always current with the user-level view of things. */ case WFC_REPORT_FREE_MEMORY: dev->freemem = demunge_int32 (wc->rbuf, 4); break; case WFC_UPLOAD_PATCH: demunge_buf (wc->rbuf, wc->rbuf, WF_PATCH_BYTES); break; case WFC_UPLOAD_PROGRAM: demunge_buf (wc->rbuf, wc->rbuf, WF_PROGRAM_BYTES); break; case WFC_UPLOAD_EDRUM_PROGRAM: demunge_buf (wc->rbuf, wc->rbuf, WF_DRUM_BYTES - 1); break; case WFC_UPLOAD_SAMPLE_HEADER: process_sample_hdr (wc->rbuf); break; case WFC_UPLOAD_SAMPLE_ALIAS: snd_printk ("support for " "sample aliases still " "being considered.\n"); break; case WFC_VMIDI_OFF: snd_wavefront_midi_disable_virtual (acard); break; case WFC_VMIDI_ON: snd_wavefront_midi_enable_virtual (acard); break; } } return 0; } int snd_wavefront_synth_open (struct snd_hwdep *hw, struct file *file) { if (!try_module_get(hw->card->module)) return -EFAULT; file->private_data = hw; return 0; } int snd_wavefront_synth_release (struct snd_hwdep *hw, struct file *file) { module_put(hw->card->module); return 0; } int snd_wavefront_synth_ioctl (struct snd_hwdep *hw, struct file *file, unsigned int cmd, unsigned long arg) { struct snd_card *card; snd_wavefront_t *dev; snd_wavefront_card_t *acard; wavefront_control *wc; void __user *argp = (void __user *)arg; int err; card = (struct snd_card *) hw->card; if (snd_BUG_ON(!card)) 
return -ENODEV; if (snd_BUG_ON(!card->private_data)) return -ENODEV; acard = card->private_data; dev = &acard->wavefront; switch (cmd) { case WFCTL_LOAD_SPP: if (wavefront_load_patch (dev, argp) != 0) { return -EIO; } break; case WFCTL_WFCMD: wc = memdup_user(argp, sizeof(*wc)); if (IS_ERR(wc)) return PTR_ERR(wc); if (wavefront_synth_control (acard, wc) < 0) err = -EIO; else if (copy_to_user (argp, wc, sizeof (*wc))) err = -EFAULT; else err = 0; kfree(wc); return err; default: return -EINVAL; } return 0; } /***********************************************************************/ /* WaveFront: interface for card-level wavefront module */ /***********************************************************************/ void snd_wavefront_internal_interrupt (snd_wavefront_card_t *card) { snd_wavefront_t *dev = &card->wavefront; /* Some comments on interrupts. I attempted a version of this driver that used interrupts throughout the code instead of doing busy and/or sleep-waiting. Alas, it appears that once the Motorola firmware is downloaded, the card *never* generates an RX interrupt. These are successfully generated during firmware loading, and after that wavefront_status() reports that an interrupt is pending on the card from time to time, but it never seems to be delivered to this driver. Note also that wavefront_status() continues to report that RX interrupts are enabled, suggesting that I didn't goof up and disable them by mistake. Thus, I stepped back to a prior version of wavefront_wait(), the only place where this really matters. Its sad, but I've looked through the code to check on things, and I really feel certain that the Motorola firmware prevents RX-ready interrupts. 
*/ if ((wavefront_status(dev) & (STAT_INTR_READ|STAT_INTR_WRITE)) == 0) { return; } spin_lock(&dev->irq_lock); dev->irq_ok = 1; dev->irq_cnt++; spin_unlock(&dev->irq_lock); wake_up(&dev->interrupt_sleeper); } /* STATUS REGISTER 0 Host Rx Interrupt Enable (1=Enabled) 1 Host Rx Register Full (1=Full) 2 Host Rx Interrupt Pending (1=Interrupt) 3 Unused 4 Host Tx Interrupt (1=Enabled) 5 Host Tx Register empty (1=Empty) 6 Host Tx Interrupt Pending (1=Interrupt) 7 Unused */ static int snd_wavefront_interrupt_bits (int irq) { int bits; switch (irq) { case 9: bits = 0x00; break; case 5: bits = 0x08; break; case 12: bits = 0x10; break; case 15: bits = 0x18; break; default: snd_printk ("invalid IRQ %d\n", irq); bits = -1; } return bits; } static void wavefront_should_cause_interrupt (snd_wavefront_t *dev, int val, int port, unsigned long timeout) { wait_queue_entry_t wait; init_waitqueue_entry(&wait, current); spin_lock_irq(&dev->irq_lock); add_wait_queue(&dev->interrupt_sleeper, &wait); dev->irq_ok = 0; outb (val,port); spin_unlock_irq(&dev->irq_lock); while (!dev->irq_ok && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); barrier(); } } static int wavefront_reset_to_cleanliness (snd_wavefront_t *dev) { int bits; int hwv[2]; /* IRQ already checked */ bits = snd_wavefront_interrupt_bits (dev->irq); /* try reset of port */ outb (0x0, dev->control_port); /* At this point, the board is in reset, and the H/W initialization register is accessed at the same address as the data port. Bit 7 - Enable IRQ Driver 0 - Tri-state the Wave-Board drivers for the PC Bus IRQs 1 - Enable IRQ selected by bits 5:3 to be driven onto the PC Bus. Bit 6 - MIDI Interface Select 0 - Use the MIDI Input from the 26-pin WaveBlaster compatible header as the serial MIDI source 1 - Use the MIDI Input from the 9-pin D connector as the serial MIDI source. 
Bits 5:3 - IRQ Selection 0 0 0 - IRQ 2/9 0 0 1 - IRQ 5 0 1 0 - IRQ 12 0 1 1 - IRQ 15 1 0 0 - Reserved 1 0 1 - Reserved 1 1 0 - Reserved 1 1 1 - Reserved Bits 2:1 - Reserved Bit 0 - Disable Boot ROM 0 - memory accesses to 03FC30-03FFFFH utilize the internal Boot ROM 1 - memory accesses to 03FC30-03FFFFH are directed to external storage. */ /* configure hardware: IRQ, enable interrupts, plus external 9-pin MIDI interface selected */ outb (0x80 | 0x40 | bits, dev->data_port); /* CONTROL REGISTER 0 Host Rx Interrupt Enable (1=Enabled) 0x1 1 Unused 0x2 2 Unused 0x4 3 Unused 0x8 4 Host Tx Interrupt Enable 0x10 5 Mute (0=Mute; 1=Play) 0x20 6 Master Interrupt Enable (1=Enabled) 0x40 7 Master Reset (0=Reset; 1=Run) 0x80 Take us out of reset, mute output, master + TX + RX interrupts on. We'll get an interrupt presumably to tell us that the TX register is clear. */ wavefront_should_cause_interrupt(dev, 0x80|0x40|0x10|0x1, dev->control_port, (reset_time*HZ)/100); /* Note: data port is now the data port, not the h/w initialization port. */ if (!dev->irq_ok) { snd_printk ("intr not received after h/w un-reset.\n"); goto gone_bad; } /* Note: data port is now the data port, not the h/w initialization port. At this point, only "HW VERSION" or "DOWNLOAD OS" commands will work. So, issue one of them, and wait for TX interrupt. This can take a *long* time after a cold boot, while the ISC ROM does its RAM test. The SDK says up to 4 seconds - with 12MB of RAM on a Tropez+, it takes a lot longer than that (~16secs). Note that the card understands the difference between a warm and a cold boot, so subsequent ISC2115 reboots (say, caused by module reloading) will get through this much faster. XXX Interesting question: why is no RX interrupt received first ? 
*/ wavefront_should_cause_interrupt(dev, WFC_HARDWARE_VERSION, dev->data_port, ramcheck_time*HZ); if (!dev->irq_ok) { snd_printk ("post-RAM-check interrupt not received.\n"); goto gone_bad; } if (!wavefront_wait (dev, STAT_CAN_READ)) { snd_printk ("no response to HW version cmd.\n"); goto gone_bad; } if ((hwv[0] = wavefront_read (dev)) == -1) { snd_printk ("board not responding correctly.\n"); goto gone_bad; } if (hwv[0] == 0xFF) { /* NAK */ /* Board's RAM test failed. Try to read error code, and tell us about it either way. */ if ((hwv[0] = wavefront_read (dev)) == -1) { snd_printk ("on-board RAM test failed " "(bad error code).\n"); } else { snd_printk ("on-board RAM test failed " "(error code: 0x%x).\n", hwv[0]); } goto gone_bad; } /* We're OK, just get the next byte of the HW version response */ if ((hwv[1] = wavefront_read (dev)) == -1) { snd_printk ("incorrect h/w response.\n"); goto gone_bad; } snd_printk ("hardware version %d.%d\n", hwv[0], hwv[1]); return 0; gone_bad: return (1); } static int wavefront_download_firmware (snd_wavefront_t *dev, char *path) { const unsigned char *buf; int len, err; int section_cnt_downloaded = 0; const struct firmware *firmware; err = request_firmware(&firmware, path, dev->card->dev); if (err < 0) { snd_printk(KERN_ERR "firmware (%s) download failed!!!\n", path); return 1; } len = 0; buf = firmware->data; for (;;) { int section_length = *(signed char *)buf; if (section_length == 0) break; if (section_length < 0 || section_length > WF_SECTION_MAX) { snd_printk(KERN_ERR "invalid firmware section length %d\n", section_length); goto failure; } buf++; len++; if (firmware->size < len + section_length) { snd_printk(KERN_ERR "firmware section read error.\n"); goto failure; } /* Send command */ if (wavefront_write(dev, WFC_DOWNLOAD_OS)) goto failure; for (; section_length; section_length--) { if (wavefront_write(dev, *buf)) goto failure; buf++; len++; } /* get ACK */ if (!wavefront_wait(dev, STAT_CAN_READ)) { snd_printk(KERN_ERR "time 
out for firmware ACK.\n"); goto failure; } err = inb(dev->data_port); if (err != WF_ACK) { snd_printk(KERN_ERR "download of section #%d not " "acknowledged, ack = 0x%x\n", section_cnt_downloaded + 1, err); goto failure; } section_cnt_downloaded++; } release_firmware(firmware); return 0; failure: release_firmware(firmware); snd_printk(KERN_ERR "firmware download failed!!!\n"); return 1; } static int wavefront_do_reset (snd_wavefront_t *dev) { char voices[1]; if (wavefront_reset_to_cleanliness (dev)) { snd_printk ("hw reset failed.\n"); goto gone_bad; } if (dev->israw) { if (wavefront_download_firmware (dev, ospath)) { goto gone_bad; } dev->israw = 0; /* Wait for the OS to get running. The protocol for this is non-obvious, and was determined by using port-IO tracing in DOSemu and some experimentation here. Rather than using timed waits, use interrupts creatively. */ wavefront_should_cause_interrupt (dev, WFC_NOOP, dev->data_port, (osrun_time*HZ)); if (!dev->irq_ok) { snd_printk ("no post-OS interrupt.\n"); goto gone_bad; } /* Now, do it again ! */ wavefront_should_cause_interrupt (dev, WFC_NOOP, dev->data_port, (10*HZ)); if (!dev->irq_ok) { snd_printk ("no post-OS interrupt(2).\n"); goto gone_bad; } /* OK, no (RX/TX) interrupts any more, but leave mute in effect. */ outb (0x80|0x40, dev->control_port); } /* SETUPSND.EXE asks for sample memory config here, but since i have no idea how to interpret the result, we'll forget about it. */ if ((dev->freemem = wavefront_freemem (dev)) < 0) { goto gone_bad; } snd_printk ("available DRAM %dk\n", dev->freemem / 1024); if (wavefront_write (dev, 0xf0) || wavefront_write (dev, 1) || (wavefront_read (dev) < 0)) { dev->debug = 0; snd_printk ("MPU emulation mode not set.\n"); goto gone_bad; } voices[0] = 32; if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, NULL, voices)) { snd_printk ("cannot set number of voices to 32.\n"); goto gone_bad; } return 0; gone_bad: /* reset that sucker so that it doesn't bother us. 
*/ outb (0x0, dev->control_port); dev->interrupts_are_midi = 0; return 1; } int snd_wavefront_start (snd_wavefront_t *dev) { int samples_are_from_rom; /* IMPORTANT: assumes that snd_wavefront_detect() and/or wavefront_reset_to_cleanliness() has already been called */ if (dev->israw) { samples_are_from_rom = 1; } else { /* XXX is this always true ? */ samples_are_from_rom = 0; } if (dev->israw || fx_raw) { if (wavefront_do_reset (dev)) { return -1; } } /* Check for FX device, present only on Tropez+ */ dev->has_fx = (snd_wavefront_fx_detect (dev) == 0); if (dev->has_fx && fx_raw) { snd_wavefront_fx_start (dev); } wavefront_get_sample_status (dev, samples_are_from_rom); wavefront_get_program_status (dev); wavefront_get_patch_status (dev); /* Start normal operation: unreset, master interrupt enabled, no mute */ outb (0x80|0x40|0x20, dev->control_port); return (0); } int snd_wavefront_detect (snd_wavefront_card_t *card) { unsigned char rbuf[4], wbuf[4]; snd_wavefront_t *dev = &card->wavefront; /* returns zero if a WaveFront card is successfully detected. negative otherwise. */ dev->israw = 0; dev->has_fx = 0; dev->debug = debug_default; dev->interrupts_are_midi = 0; dev->irq_cnt = 0; dev->rom_samples_rdonly = 1; if (snd_wavefront_cmd (dev, WFC_FIRMWARE_VERSION, rbuf, wbuf) == 0) { dev->fw_version[0] = rbuf[0]; dev->fw_version[1] = rbuf[1]; snd_printk ("firmware %d.%d already loaded.\n", rbuf[0], rbuf[1]); /* check that a command actually works */ if (snd_wavefront_cmd (dev, WFC_HARDWARE_VERSION, rbuf, wbuf) == 0) { dev->hw_version[0] = rbuf[0]; dev->hw_version[1] = rbuf[1]; } else { snd_printk ("not raw, but no " "hardware version!\n"); return -1; } if (!wf_raw) { return 0; } else { snd_printk ("reloading firmware as you requested.\n"); dev->israw = 1; } } else { dev->israw = 1; snd_printk ("no response to firmware probe, assume raw.\n"); } return 0; } MODULE_FIRMWARE(DEFAULT_OSPATH);
{ "pile_set_name": "Github" }
@managing_products Feature: Editing product's slug in multiple locales In order to manage access path to product page in many languages As an Administrator I want to be able to edit product's slug in multiple locales Background: Given the store operates on a single channel in "United States" And the store is available in "English (United States)" And the store is also available in "Polish (Poland)" And I am logged in as an administrator @ui Scenario: Creating a product with custom slugs Given I want to create a new simple product When I specify its code as "PUG_PUGGINTON_PLUSHIE" And I set its price to "$100.00" for "United States" channel And I set its price to "$100.00" for "United States" channel And I name it "Pug Pugginton Plushie" in "English (United States)" And I set its slug to "sir-pugginton" in "English (United States)" And I name it "Pluszak Mops Mopsiński" in "Polish (Poland)" And I set its slug to "pan-mopsinski" in "Polish (Poland)" And I add it Then the slug of the "Pug Pugginton Plushie" product should be "sir-pugginton" in the "English (United States)" locale And the slug of the "Pug Pugginton Plushie" product should be "pan-mopsinski" in the "Polish (Poland)" locale @ui @javascript Scenario: Creating a product with autogenerated slugs Given I want to create a new simple product When I specify its code as "PUG_PUGGINTON_PLUSHIE" And I set its price to "$100.00" for "United States" channel And I name it "Pug Pugginton Plushie" in "English (United States)" And I name it "Pluszak Mops Mopsiński" in "Polish (Poland)" And I add it Then the slug of the "Pug Pugginton Plushie" product should be "pug-pugginton-plushie" in the "English (United States)" locale And the slug of the "Pug Pugginton Plushie" product should be "pluszak-mops-mopsinski" in the "Polish (Poland)" locale @ui Scenario: Seeing disabled slug fields when editing a product Given the store has a product named "Pug Pugginton Plushie" in "English (United States)" locale and "Pluszak Mops 
Mopsiński" in "Polish (Poland)" locale When I want to modify this product Then the slug field in "English (United States)" should not be editable And the slug field in "Polish (Poland)" also should not be editable @ui @javascript Scenario: Slugs don't get updated while changing product's names Given the store has a product named "Pug Pugginton Plushie" in "English (United States)" locale and "Pluszak Mops Mopsiński" in "Polish (Poland)" locale When I want to modify this product And I rename it to "Pug Pugston the Third Plushie" in "English (United States)" And I rename it to "Pluszak Mops Mopsak Trzeci" in "Polish (Poland)" And I save my changes Then this product should still have slug "pug-pugginton-plushie" in "English (United States)" And this product should still have slug "pluszak-mops-mopsinski" in "Polish (Poland)" @ui @javascript Scenario: Enabling automatic slugs update on product's names change Given the store has a product named "Pug Pugginton Plushie" in "English (United States)" locale and "Pluszak Mops Mopsiński" in "Polish (Poland)" locale When I want to modify this product And I enable slug modification in "English (United States)" And I rename it to "Pug Pugston the Third Plushie" in "English (United States)" And I enable slug modification in "Polish (Poland)" And I rename it to "Pluszak Mops Mopsak Trzeci" in "Polish (Poland)" And I save my changes Then this product should have slug "pug-pugston-the-third-plushie" in "English (United States)" And this product should have slug "pluszak-mops-mopsak-trzeci" in "Polish (Poland)" @ui @javascript Scenario: Manually modifying slugs on product's names change Given the store has a product named "Pug Pugginton Plushie" in "English (United States)" locale and "Pluszak Mops Mopsiński" in "Polish (Poland)" locale When I want to modify this product And I enable slug modification in "English (United States)" And I rename it to "Pug Pugston the Third Plushie" in "English (United States)" And I set its slug to 
"sir-pugston-the-third" in "English (United States)" And I enable slug modification in "Polish (Poland)" And I rename it to "Pluszak Mops Mopsak Trzeci" in "Polish (Poland)" And I set its slug to "pan-mopsak-trzeci" in "Polish (Poland)" And I save my changes Then this product should have slug "sir-pugston-the-third" in "English (United States)" And this product should have slug "pan-mopsak-trzeci" in "Polish (Poland)"
{ "pile_set_name": "Github" }
# These test special (mostly error) UTF features of DFA matching. They are a # selection of the more comprehensive tests that are run for non-DFA matching. # The output is different for the different widths. #subject dfa /X/utf XX\x{d800} Failed: error -27: UTF-32 error: code points 0xd800-0xdfff are not defined at offset 2 XX\x{d800}\=offset=3 No match XX\x{d800}\=no_utf_check 0: X XX\x{da00} Failed: error -27: UTF-32 error: code points 0xd800-0xdfff are not defined at offset 2 XX\x{da00}\=no_utf_check 0: X XX\x{dc00} Failed: error -27: UTF-32 error: code points 0xd800-0xdfff are not defined at offset 2 XX\x{dc00}\=no_utf_check 0: X XX\x{de00} Failed: error -27: UTF-32 error: code points 0xd800-0xdfff are not defined at offset 2 XX\x{de00}\=no_utf_check 0: X XX\x{dfff} Failed: error -27: UTF-32 error: code points 0xd800-0xdfff are not defined at offset 2 XX\x{dfff}\=no_utf_check 0: X XX\x{110000} Failed: error -28: UTF-32 error: code points greater than 0x10ffff are not defined at offset 2 XX\x{d800}\x{1234} Failed: error -27: UTF-32 error: code points 0xd800-0xdfff are not defined at offset 2 /badutf/utf X\xdf No match XX\xef No match XXX\xef\x80 No match X\xf7 No match XX\xf7\x80 No match XXX\xf7\x80\x80 No match /shortutf/utf XX\xdf\=ph No match XX\xef\=ph No match XX\xef\x80\=ph No match \xf7\=ph No match \xf7\x80\=ph No match # End of testinput14
{ "pile_set_name": "Github" }
/********************************************************************* * Portions COPYRIGHT 2013 STMicroelectronics * * Portions SEGGER Microcontroller GmbH & Co. KG * * Solutions for real time microcontroller applications * ********************************************************************** * * * (c) 1996 - 2013 SEGGER Microcontroller GmbH & Co. KG * * * * Internet: www.segger.com Support: support@segger.com * * * ********************************************************************** ** emWin V5.22 - Graphical user interface for embedded applications ** All Intellectual Property rights in the Software belongs to SEGGER. emWin is protected by international copyright laws. Knowledge of the source code may not be used to write a similar product. This file may only be used in accordance with the following terms: The software has been licensed to STMicroelectronics International N.V. a Dutch company with a Swiss branch and its headquarters in Plan- les-Ouates, Geneva, 39 Chemin du Champ des Filles, Switzerland for the purposes of creating libraries for ARM Cortex-M-based 32-bit microcon_ troller products commercialized by Licensee only, sublicensed and dis_ tributed under the terms and conditions of the End User License Agree_ ment supplied by STMicroelectronics International N.V. Full source code is available at: www.segger.com We appreciate your understanding and fairness. ---------------------------------------------------------------------- File : CHOOSEFILE.h Purpose : File dialog interface --------------------END-OF-HEADER------------------------------------- */ /** ****************************************************************************** * @attention * * Licensed under MCD-ST Liberty SW License Agreement V2, (the "License"); * You may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at:
 *
 *        http://www.st.com/software_license_agreement_liberty_v2
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 ******************************************************************************
 */

#ifndef CHOOSEFILE_H
#define CHOOSEFILE_H

#include "WM.h"

#if GUI_WINSUPPORT

#if defined(__cplusplus)
extern "C" {     /* Make sure we have C-declarations in C++ programs */
#endif

/*********************************************************************
*
*       Defines
*
**********************************************************************
*/
/* Values passed in CHOOSEFILE_INFO::Cmd to the user's pfGetData() callback */
#define CHOOSEFILE_FINDFIRST 0
#define CHOOSEFILE_FINDNEXT  1

/* Set in CHOOSEFILE_INFO::Flags when the current entry is a directory */
#define CHOOSEFILE_FLAG_DIRECTORY (1 << 0)

/* Size of the internal path buffer; may be overridden before including
 * this header (e.g. via GUIConf.h) if longer paths are required. */
#ifndef   CHOOSEFILE_MAXLEN
  #define CHOOSEFILE_MAXLEN 256
#endif

/* Button indices accepted by CHOOSEFILE_SetButtonText() /
 * CHOOSEFILE_SetDefaultButtonText() */
#define CHOOSEFILE_BI_CANCEL 0
#define CHOOSEFILE_BI_OK     1
#define CHOOSEFILE_BI_UP     2

/*********************************************************************
*
*       Types
*
**********************************************************************
*/
/*********************************************************************
*
*       CHOOSEFILE_INFO
*
*  Exchange structure between the dialog and the application-supplied
*  pfGetData() callback: the dialog fills Cmd and reads back one file
*  system entry per call. pRoot also carries the final selection back
*  to the caller after the dialog is closed with OK.
*/
typedef struct CHOOSEFILE_INFO CHOOSEFILE_INFO;

struct CHOOSEFILE_INFO {
  int                Cmd;                      // Command for GetData() function
  int                Id;                       // Id of pressed button (for internal use only)
  const char       * pMask;                    // Mask to be used for searching files
  char             * pName;                    // (for internal use only)
  char             * pExt;                     // (for internal use only)
  char             * pAttrib;                  // (for internal use only)
  WM_TOOLTIP_HANDLE  hToolTip;                 // (for internal use only)
  U32                SizeL;                    // FileSize low word
  U32                SizeH;                    // FileSize high word
  U32                Flags;                    // File flags
  char               pRoot[CHOOSEFILE_MAXLEN]; // Buffer used internally and for passing result
  int             (* pfGetData)(CHOOSEFILE_INFO * pInfo); // Pointer to GetData() function
};

/*********************************************************************
*
*       Functions
*
**********************************************************************
*/
/* Creates (but does not execute) the file-selection dialog; returns the
 * frame window handle. The result path is returned in pInfo->pRoot. */
WM_HWIN CHOOSEFILE_Create(WM_HWIN           hParent,   // Parent window
                          int               xPos,      // xPosition in window coordinates
                          int               yPos,      // yPosition in window coordinates
                          int               xSize,     // xSize in pixels
                          int               ySize,     // ySize in pixels
                          const char      * apRoot[],  // Pointers to root strings
                          int               NumRoot,   // Number of roots
                          int               SelRoot,   // Root to be selected at first
                          const char      * sCaption,  // Shown in title bar
                          int               Flags,     // Flags for FRAMEWINDOW
                          CHOOSEFILE_INFO * pInfo      // Pointer to CHOOSEFILE_INFO structure
                          );
void CHOOSEFILE_Callback            (WM_MESSAGE * pMsg);
void CHOOSEFILE_EnableToolTips      (void);
void CHOOSEFILE_SetButtonText       (WM_HWIN hWin, unsigned ButtonIndex, const char * pText);
void CHOOSEFILE_SetDefaultButtonText(unsigned ButtonIndex, const char * pText);
void CHOOSEFILE_SetDelim            (char Delim);
void CHOOSEFILE_SetToolTips         (const TOOLTIP_INFO * pInfo, int NumItems);
void CHOOSEFILE_SetTopMode          (unsigned OnOff);

#if defined(__cplusplus)
}
#endif

#endif /* GUI_WINSUPPORT */
#endif /* CHOOSEFILE_H */
{ "pile_set_name": "Github" }
<?php
/*
 *  $Id$
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This software consists of voluntary contributions made by many individuals
 * and is licensed under the MIT license. For more information, see
 * <http://www.doctrine-project.org>.
 */

namespace Doctrine\DBAL\Logging;

/**
 * SQL logger that records every executed statement, its parameters and
 * its wall-clock execution time in an in-memory stack, for debugging.
 *
 * Entries are keyed by a 1-based running query number; each entry is an
 * array with the keys 'sql', 'params', 'types' and 'executionMS'.
 *
 * @link    www.doctrine-project.org
 * @since   2.0
 * @version $Revision$
 * @author  Benjamin Eberlei <kontakt@beberlei.de>
 * @author  Guilherme Blanco <guilhermeblanco@hotmail.com>
 * @author  Jonathan Wage <jonwage@gmail.com>
 * @author  Roman Borschel <roman@code-factory.org>
 */
class DebugStack implements SQLLogger
{
    /** @var array $queries Executed SQL queries, indexed by query number (1-based). */
    public $queries = array();

    /** @var boolean $enabled If Debug Stack is enabled (log queries) or not. */
    public $enabled = true;

    /** @var float|null Timestamp (microtime) at which the current query started. */
    public $start = null;

    /** @var int Running counter; index of the most recently started query. */
    public $currentQuery = 0;

    /**
     * {@inheritdoc}
     */
    public function startQuery($sql, array $params = null, array $types = null)
    {
        if ( ! $this->enabled) {
            return;
        }

        // Remember the start time; stopQuery() computes the elapsed time from it.
        $this->start = microtime(true);

        $this->queries[++$this->currentQuery] = array(
            'sql'         => $sql,
            'params'      => $params,
            'types'       => $types,
            'executionMS' => 0,
        );
    }

    /**
     * {@inheritdoc}
     */
    public function stopQuery()
    {
        if ( ! $this->enabled) {
            return;
        }

        // Fill in the elapsed time for the query started by the last startQuery().
        $elapsed = microtime(true) - $this->start;
        $this->queries[$this->currentQuery]['executionMS'] = $elapsed;
    }
}
{ "pile_set_name": "Github" }
<h1>Address Form</h1> <div class="alert alert-info" role="alert"> Error messages per field and disabled save button until entire form is valid. </div> <form (ngSubmit)="onSubmit()" [formGroup]="form"> <div class="form-row"> <div class="formHeading">First Name</div> <input type="text" id="firstName" formControlName="firstName"> <div class="errorMessage" *ngIf="form.controls.firstName.touched && !form.controls.firstName.valid">First Name is required</div> </div> <div class="form-row"> <div class="formHeading">Street Address</div> <input type="text" id="streetAddress" formControlName="streetAddress"> <div class="errorMessage" *ngIf="form.controls.streetAddress.touched && !form.controls.streetAddress.valid">Street Address is required</div> </div> <div class="form-row"> <div class="formHeading">Zip Code</div> <input type="text" id="zip" formControlName="zip"> <div class="errorMessage" *ngIf="form.controls.zip.touched && !form.controls.zip.valid">Zip code has to be 5 digits long</div> </div> <div class="form-row"> <div class="formHeading">Address Type</div> <select id="type" formControlName="type"> <option [value]="'home'">Home Address</option> <option [value]="'billing'">Billing Address</option> </select> </div> <div class="form-row"> <button type="submit" [disabled]="!form.valid">Save</button> </div> </form> <div class="form-row"> <div *ngIf="payLoad"><strong>The form contains the following values</strong></div> <div> {{payLoad}} </div> </div> <h4><a href="http://www.syntaxsuccess.com/viewarticle/forms-and-validation-in-angular-2.0">Read more here</a></h4>
{ "pile_set_name": "Github" }
--- name: "Ross Dakin" link: "https://rossdak.in" github: "rossdakin" ---
{ "pile_set_name": "Github" }
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. // // The following only applies to changes made to this file as part of YugaByte development. // // Portions Copyright (c) YugaByte, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations // under the License. // // Copyright (c) 2012 Facebook. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
#ifndef ROCKSDB_LITE

#include "yb/rocksdb/utilities/checkpoint.h"

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <inttypes.h>
#include <algorithm>
#include <string>

#include "yb/rocksdb/db/filename.h"
#include "yb/rocksdb/db/wal_manager.h"
#include "yb/rocksdb/db.h"
#include "yb/rocksdb/env.h"
#include "yb/rocksdb/transaction_log.h"
#include "yb/rocksdb/util/file_util.h"
#include "yb/rocksdb/port/port.h"

#include "yb/util/random_util.h"
#include "yb/util/string_util.h"

namespace rocksdb {
namespace checkpoint {

// Builds an openable snapshot of RocksDB on the same disk, which
// accepts an output directory on the same disk, and under the directory
// (1) hard-linked SST files pointing to existing live SST files
// SST files will be copied if output directory is on a different filesystem
// (2) a copied manifest files and other files
// The directory should not already exist and will be created by this API.
// The directory will be an absolute path
//
// Outline of the protocol:
//   1. Refuse non-plaintext checkpoint envs (encrypted data must not leak
//      into a plaintext checkpoint directory).
//   2. Freeze the file set: DisableFileDeletions(), then snapshot the list
//      of live files and sorted WAL files.
//   3. Build everything under a temporary "<dir>.tmp.<random>" directory,
//      hard-linking where possible and copying otherwise.
//   4. Re-enable file deletions, then atomically RenameFile() the tmp dir
//      onto checkpoint_dir and fsync the directory; on any failure the tmp
//      dir is deleted so no partial checkpoint is left behind.
Status CreateCheckpoint(DB* db, const std::string& checkpoint_dir) {
  if (!db->GetCheckpointEnv()->IsPlainText()) {
    return STATUS(InvalidArgument, "db's checkpoint env is not plaintext.");
  }
  std::vector<std::string> live_files;
  uint64_t manifest_file_size = 0;
  uint64_t sequence_number = db->GetLatestSequenceNumber();
  bool same_fs = true;  // optimistically try hard links; cleared on EXDEV-style failure
  VectorLogPtr live_wal_files;

  // If checkpoint_dir already exists it is replaced (deleted just before the
  // final rename); any other stat error aborts.
  bool delete_checkpoint_dir = false;
  Status s = db->GetCheckpointEnv()->FileExists(checkpoint_dir);
  if (s.ok()) {
    delete_checkpoint_dir = true;
  } else if (!s.IsNotFound()) {
    assert(s.IsIOError());
    return s;
  }

  // Keep the set of live files stable while we link/copy them.
  s = db->DisableFileDeletions();
  if (s.ok()) {
    // this will return live_files prefixed with "/"
    s = db->GetLiveFiles(live_files, &manifest_file_size, true);
  }
  // if we have more than one column family, we need to also get WAL files
  if (s.ok()) {
    s = db->GetSortedWalFiles(&live_wal_files);
  }
  if (!s.ok()) {
    // NOTE(review): message says "disable" but this call re-enables deletions
    // to undo the DisableFileDeletions() above — the text looks like a typo.
    WARN_NOT_OK(db->EnableFileDeletions(false), "Failed to disable file deletions");
    return s;
  }

  size_t wal_size = live_wal_files.size();
  RLOG(db->GetOptions().info_log,
      "Started the snapshot process -- creating snapshot in directory %s",
      checkpoint_dir.c_str());

  // Random suffix avoids collisions with leftovers from concurrent/aborted runs.
  const std::string full_private_path =
      checkpoint_dir + ".tmp." + ToString(yb::RandomUniformInt<uint64_t>());

  // create snapshot directory
  s = db->GetCheckpointEnv()->CreateDir(full_private_path);

  // copy/hard link live_files
  for (size_t i = 0; s.ok() && i < live_files.size(); ++i) {
    uint64_t number;
    FileType type;
    bool ok = ParseFileName(live_files[i], &number, &type);
    if (!ok) {
      s = STATUS(Corruption, "Can't parse file name. This is very bad");
      break;
    }
    // we should only get sst, manifest and current files here
    assert(type == kTableFile || type == kTableSBlockFile || type == kDescriptorFile ||
           type == kCurrentFile);
    assert(live_files[i].size() > 0 && live_files[i][0] == '/');
    std::string src_fname = live_files[i];

    // rules:
    // * if it's kTableFile or kTableSBlockFile, then it's shared
    // * if it's kDescriptorFile, limit the size to manifest_file_size
    // * always copy if cross-device link
    bool is_table_file = type == kTableFile || type == kTableSBlockFile;
    if (is_table_file && same_fs) {
      RLOG(db->GetOptions().info_log, "Hard Linking %s", src_fname.c_str());
      s = db->GetCheckpointEnv()->LinkFile(db->GetName() + src_fname,
                                           full_private_path + src_fname);
      if (s.IsNotSupported()) {
        // Cross-filesystem link: fall back to copying for this and all later files.
        same_fs = false;
        s = Status::OK();
      }
    }
    if (!is_table_file || !same_fs) {
      RLOG(db->GetOptions().info_log, "Copying %s", src_fname.c_str());
      std::string dest_name = full_private_path + src_fname;
      // Truncate the MANIFEST to the size captured by GetLiveFiles(), since it
      // may have grown since; 0 means "copy the whole file".
      s = CopyFile(db->GetCheckpointEnv(), db->GetName() + src_fname, dest_name,
                   type == kDescriptorFile ? manifest_file_size : 0);
    }
  }
  RLOG(db->GetOptions().info_log, "Number of log files %" ROCKSDB_PRIszt,
      live_wal_files.size());

  // Link WAL files. Copy exact size of last one because it is the only one
  // that has changes after the last flush.
  for (size_t i = 0; s.ok() && i < wal_size; ++i) {
    if ((live_wal_files[i]->Type() == kAliveLogFile) &&
        (live_wal_files[i]->StartSequence() >= sequence_number)) {
      if (i + 1 == wal_size) {
        // Last (still-active) WAL: copy only up to its current size so writes
        // racing with the checkpoint are not included.
        RLOG(db->GetOptions().info_log, "Copying %s",
            live_wal_files[i]->PathName().c_str());
        s = CopyFile(db->GetCheckpointEnv(),
                     db->GetOptions().wal_dir + live_wal_files[i]->PathName(),
                     full_private_path + live_wal_files[i]->PathName(),
                     live_wal_files[i]->SizeFileBytes());
        break;
      }
      if (same_fs) {
        // we only care about live log files
        RLOG(db->GetOptions().info_log, "Hard Linking %s",
            live_wal_files[i]->PathName().c_str());
        s = db->GetCheckpointEnv()->LinkFile(
            db->GetOptions().wal_dir + live_wal_files[i]->PathName(),
            full_private_path + live_wal_files[i]->PathName());
        if (s.IsNotSupported()) {
          same_fs = false;
          s = Status::OK();
        }
      }
      if (!same_fs) {
        RLOG(db->GetOptions().info_log, "Copying %s",
            live_wal_files[i]->PathName().c_str());
        s = CopyFile(db->GetCheckpointEnv(),
                     db->GetOptions().wal_dir + live_wal_files[i]->PathName(),
                     full_private_path + live_wal_files[i]->PathName(), 0);
      }
    }
  }

  // we copied all the files, enable file deletions
  RETURN_NOT_OK(db->EnableFileDeletions(false));

  if (s.ok()) {
    if (delete_checkpoint_dir) {
      // Remove the pre-existing checkpoint dir so the rename below can succeed.
      const Status s_del = DeleteRecursively(db->GetCheckpointEnv(), checkpoint_dir);
      RLOG(
          db->GetOptions().info_log, "Deleted dir %s -- %s", checkpoint_dir.c_str(),
          s_del.ToString().c_str());
    }
    // move tmp private backup to real snapshot directory
    s = db->GetCheckpointEnv()->RenameFile(full_private_path, checkpoint_dir);
  }

  if (s.ok()) {
    // Fsync the directory itself so the rename is durable.
    unique_ptr<Directory> checkpoint_directory;
    RETURN_NOT_OK(db->GetCheckpointEnv()->NewDirectory(checkpoint_dir, &checkpoint_directory));
    if (checkpoint_directory != nullptr) {
      s = checkpoint_directory->Fsync();
    }
  }

  if (!s.ok()) {
    // clean all the files we might have created
    RLOG(db->GetOptions().info_log, "Snapshot failed -- %s", s.ToString().c_str());
    // we have to delete the dir and all its children
    const Status s_del = DeleteRecursively(db->GetCheckpointEnv(), full_private_path);
    RLOG(
        db->GetOptions().info_log, "Deleted dir %s -- %s", full_private_path.c_str(),
        s_del.ToString().c_str());
    return s;
  }

  // here we know that we succeeded and installed the new snapshot
  RLOG(db->GetOptions().info_log, "Checkpoint DONE. All is good");
  RLOG(db->GetOptions().info_log, "Checkpoint sequence number: %" PRIu64,
      sequence_number);

  return s;
}

}  // namespace checkpoint
}  // namespace rocksdb

#endif  // ROCKSDB_LITE
{ "pile_set_name": "Github" }
# Clusters

A kaws cluster is a Kubernetes cluster managed by kaws. Clusters are isolated from each other because they each exist in their own [AWS VPC](https://aws.amazon.com/vpc/). The AWS resources that comprise a cluster are defined in kaws's Terraform module, which is imported in the file `terraform/kaws.tf` of a [kaws repository](repository.md).

Each Kubernetes cluster created by kaws:

* Uses CoreOS as the operating system for each server
* Has one bastion server that allows external SSH access
* Has three servers dedicated to running [etcd](https://coreos.com/etcd/)
* Bootstraps etcd statically so no discovery token is required
* Has an Auto Scaling group of Kubernetes master servers with an [AWS ELB](https://aws.amazon.com/elasticloadbalancing/) in front of them
* Uses leader election among the Kubernetes master servers for high availability
* Has an Auto Scaling group of Kubernetes node servers
* Uses SSL client certificates for authentication to the Kubernetes API, etcd's client API, and etcd's peer API
* Uses Kubernetes's RBAC (role-based access control) for authorizing API requests
* Accepts external traffic to the Kubernetes API only via SSL on port 443
* Accepts external traffic to Kubernetes nodes only on port 80 and 443 (though you should use HSTS to redirect requests from 80 to 443)
* Has a DNS record for the Kubernetes API at kubernetes.example.com, where example.com is a value set at cluster creation time
* Has a DNS record for the bastion SSH server at bastion.example.com, where example.com is a value set at cluster creation time
* Enables the `batch/v2alpha1` API for the CronJob resource.
{ "pile_set_name": "Github" }
<?php /* * This file is part of the Behat Gherkin. * (c) Konstantin Kudryashov <ever.zet@gmail.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Behat\Gherkin\Loader; use Behat\Gherkin\Gherkin; use Behat\Gherkin\Node\FeatureNode; use RecursiveDirectoryIterator; use RecursiveIteratorIterator; /** * Directory contents loader. * * @author Konstantin Kudryashov <ever.zet@gmail.com> */ class DirectoryLoader extends AbstractFileLoader { protected $gherkin; /** * Initializes loader. * * @param Gherkin $gherkin Gherkin manager */ public function __construct(Gherkin $gherkin) { $this->gherkin = $gherkin; } /** * Checks if current loader supports provided resource. * * @param mixed $path Resource to load * * @return Boolean */ public function supports($path) { return is_string($path) && is_dir($this->findAbsolutePath($path)); } /** * Loads features from provided resource. * * @param string $path Resource to load * * @return FeatureNode[] */ public function load($path) { $path = $this->findAbsolutePath($path); $iterator = new RecursiveIteratorIterator( new RecursiveDirectoryIterator($path, RecursiveDirectoryIterator::SKIP_DOTS) ); $paths = array_map('strval', iterator_to_array($iterator)); uasort($paths, 'strnatcasecmp'); $features = array(); foreach ($paths as $path) { $path = (string) $path; $loader = $this->gherkin->resolveLoader($path); if (null !== $loader) { $features = array_merge($features, $loader->load($path)); } } return $features; } }
{ "pile_set_name": "Github" }
/* +------------------------------------------------------------------------+
   |                     Mobile Robot Programming Toolkit (MRPT)            |
   |                          https://www.mrpt.org/                         |
   |                                                                        |
   | Copyright (c) 2005-2020, Individual contributors, see AUTHORS file     |
   | See: https://www.mrpt.org/Authors - All rights reserved.               |
   | Released under BSD License. See: https://www.mrpt.org/License          |
   +------------------------------------------------------------------------+ */

#include "vision-precomp.h"  // Precompiled headers

#include <mrpt/vision/CFeatureExtraction.h>

// Universal include for all versions of OpenCV
#include <mrpt/3rdparty/do_opencv_includes.h>

using namespace mrpt;
using namespace mrpt::vision;
using namespace mrpt::img;
using namespace mrpt::system;
using namespace mrpt::img;
using namespace std;

// Detects ORB keypoints in inImg and computes their binary descriptors.
// If `feats` already contains keypoints, only descriptors are computed for
// those positions (detection is skipped); otherwise up to nDesiredFeatures
// new features are detected (0 = use an internal default budget), sorted by
// response, optionally spaced by ORBOptions.min_distance, and appended to
// `feats` with IDs starting at init_ID. The ROI parameter is accepted but
// currently unused.
void CFeatureExtraction::extractFeaturesORB(
	const mrpt::img::CImage& inImg, CFeatureList& feats,
	const unsigned int init_ID, const unsigned int nDesiredFeatures,
	[[maybe_unused]] const TImageROI& ROI)
{
	MRPT_START
	mrpt::system::CTimeLoggerEntry tle(profiler, "extractFeaturesORB");
#if MRPT_HAS_OPENCV
	using namespace cv;
	vector<KeyPoint> cv_feats;  // OpenCV keypoint output vector
	Mat cv_descs;  // OpenCV descriptor output

	// "precomputed" mode: caller supplied keypoints, we only fill descriptors.
	const bool use_precomputed_feats = feats.size() > 0;

	if (use_precomputed_feats)
	{
		cv_feats.resize(feats.size());
		for (size_t k = 0; k < cv_feats.size(); ++k)
		{
			cv_feats[k].pt.x = feats[k].keypoint.pt.x;
			cv_feats[k].pt.y = feats[k].keypoint.pt.y;
		}
	}

	// Make sure we operate on a gray-scale version of the image:
	const CImage inImg_gray(inImg, FAST_REF_OR_CONVERT_TO_GRAY);
	const Mat& cvImg = inImg_gray.asCvMatRef();

	// The detector and descriptor
	profiler.enter("extractFeaturesORB.openCV_detectAndCompute");

#if MRPT_OPENCV_VERSION_NUM < 0x300
	Ptr<Feature2D> orb = Algorithm::create<Feature2D>("Feature2D.ORB");
	orb->operator()(cvImg, Mat(), cv_feats, cv_descs, use_precomputed_feats);
#else
	// Over-detect (3x budget, or 1000 when unbounded) so that enough
	// candidates survive the min-distance filtering below.
	const size_t n_feats_2_extract =
		nDesiredFeatures == 0 ? 1000 : 3 * nDesiredFeatures;
	Ptr<cv::ORB> orb = cv::ORB::create(
		n_feats_2_extract, options.ORBOptions.scale_factor,
		options.ORBOptions.n_levels);
	orb->detectAndCompute(
		cvImg, Mat(), cv_feats, cv_descs, use_precomputed_feats);
#endif
	profiler.leave("extractFeaturesORB.openCV_detectAndCompute");

	CTimeLoggerEntry tle2(profiler, "extractFeaturesORB.fillFeatsStruct");

	const size_t n_feats = cv_feats.size();

	// if we had input features, just convert cv_feats to CFeatures and return
	const unsigned int patch_size_2 = options.patchSize / 2;
	unsigned int f_id = init_ID;
	if (use_precomputed_feats)
	{
		for (size_t k = 0; k < n_feats; ++k)
		{
			// NOTE(review): assumes feats[k].descriptors.ORB is already
			// engaged for precomputed features — confirm against callers.
			feats[k].descriptors.ORB->resize(cv_descs.cols);
			for (int m = 0; m < cv_descs.cols; ++m)
				(*feats[k].descriptors.ORB)[m] = cv_descs.at<uchar>(k, m);

			/*
			feats[k].response = cv_feats[k].response;
			feats[k].scale = cv_feats[k].size;
			feats[k].angle = cv_feats[k].orientation;
			feats[k].ID = f_id++;
			*/
			feats[k].type = featORB;

			if (options.ORBOptions.extract_patch && options.patchSize > 0)
			{
				inImg.extract_patch(
					*feats[k].patch,
					round(feats[k].keypoint.pt.x) - patch_size_2,
					round(feats[k].keypoint.pt.y) - patch_size_2,
					options.patchSize, options.patchSize);
			}
		}
		return;
	}

	// 1) Sort the features by "response": It's ~100 times faster to sort a list
	// of
	//     indices "sorted_indices" than sorting directly the actual list of
	//     features "cv_feats"
	std::vector<size_t> sorted_indices(n_feats);
	for (size_t i = 0; i < n_feats; i++) sorted_indices[i] = i;
	std::sort(
		sorted_indices.begin(), sorted_indices.end(),
		KeypointResponseSorter<vector<KeyPoint>>(cv_feats));

	// 2) Filter by "min-distance" (in options.ORBOptions.min_distance)
	// 3) Convert to MRPT CFeatureList format.
	// Steps 2 & 3 are done together in the while() below.
	// The "min-distance" filter is done by means of a 2D binary matrix where
	// each cell is marked when one
	// feature falls within it. This is not exactly the same than a pure
	// "min-distance" but is pretty close
	// and for large numbers of features is much faster than brute force search
	// of kd-trees.
	// (An intermediate approach would be the creation of a mask image updated
	// for each accepted feature, etc.)
	const bool do_filter_min_dist = options.ORBOptions.min_distance > 1;

	const unsigned int occupied_grid_cell_size =
		options.ORBOptions.min_distance / 2;
	const float occupied_grid_cell_size_inv = 1.0f / occupied_grid_cell_size;

	unsigned int grid_lx =
		!do_filter_min_dist
			? 1
			: (unsigned int)(1 + inImg.getWidth() * occupied_grid_cell_size_inv);
	unsigned int grid_ly =
		!do_filter_min_dist
			? 1
			: (unsigned int)(1 + inImg.getHeight() * occupied_grid_cell_size_inv);

	mrpt::math::CMatrixBool occupied_sections(
		grid_lx, grid_ly);  // See the comments above for an explanation.
	occupied_sections.fill(false);

	const size_t n_max_feats = nDesiredFeatures > 0
								   ? std::min(size_t(nDesiredFeatures), n_feats)
								   : n_feats;

	if (!options.addNewFeatures) feats.clear();
	// feats.reserve( feats.size() + n_max_feats );

	const size_t imgH = inImg.getHeight();
	const size_t imgW = inImg.getWidth();
	size_t k = 0;
	size_t c_feats = 0;
	// Walk candidates in decreasing response order, keeping at most
	// n_max_feats that pass the border and min-distance checks.
	while (c_feats < n_max_feats && k < n_feats)
	{
		const size_t idx = sorted_indices[k++];
		const KeyPoint& kp = cv_feats[idx];
		if (options.ORBOptions.extract_patch && options.patchSize > 0)
		{
			// check image boundaries for extracting the patch
			const int xBorderInf = (int)floor(kp.pt.x - patch_size_2);
			const int xBorderSup = (int)floor(kp.pt.x + patch_size_2);
			const int yBorderInf = (int)floor(kp.pt.y - patch_size_2);
			const int yBorderSup = (int)floor(kp.pt.y + patch_size_2);

			if (!(xBorderSup < (int)imgW && xBorderInf > 0 &&
				  yBorderSup < (int)imgH && yBorderInf > 0))
				continue;  // nope, skip.
		}

		if (do_filter_min_dist)
		{
			// Check the min-distance:
			const auto sect_ix = size_t(kp.pt.x * occupied_grid_cell_size_inv);
			const auto sect_iy = size_t(kp.pt.y * occupied_grid_cell_size_inv);

			if (occupied_sections(sect_ix, sect_iy))
				continue;  // Already occupied! skip.

			// Mark section as occupied (the cell and its 4-neighbors, so
			// accepted features are at least ~min_distance apart)
			occupied_sections(sect_ix, sect_iy) = true;
			if (sect_ix > 0) occupied_sections(sect_ix - 1, sect_iy) = true;
			if (sect_iy > 0) occupied_sections(sect_ix, sect_iy - 1) = true;
			if (sect_ix < grid_lx - 1)
				occupied_sections(sect_ix + 1, sect_iy) = true;
			if (sect_iy < grid_ly - 1)
				occupied_sections(sect_ix, sect_iy + 1) = true;
		}

		// All tests passed: add new feature:
		CFeature ft;
		ft.type = featORB;
		ft.keypoint.ID = f_id++;
		ft.keypoint.pt.x = kp.pt.x;
		ft.keypoint.pt.y = kp.pt.y;
		ft.response = kp.response;
		ft.orientation = kp.angle;
		ft.keypoint.octave = kp.octave;
		ft.patchSize = 0;

		// descriptor
		ft.descriptors.ORB.emplace();
		ft.descriptors.ORB->resize(cv_descs.cols);
		for (int m = 0; m < cv_descs.cols; ++m)
			(*ft.descriptors.ORB)[m] = cv_descs.at<uchar>(idx, m);

		if (options.ORBOptions.extract_patch && options.patchSize > 0)
		{
			ft.patchSize = options.patchSize;  // The size of the feature patch
			ft.patch.emplace();
			inImg.extract_patch(
				*ft.patch, round(kp.pt.x) - patch_size_2,
				round(kp.pt.y) - patch_size_2, options.patchSize,
				options.patchSize);  // Image patch surronding the feature
		}
		feats.emplace_back(std::move(ft));
		c_feats++;
	}
#endif
	MRPT_END
}

// Computes ORB descriptors at the already-detected keypoint positions in
// in_features (no new detection), writing each descriptor back into the
// corresponding feature's descriptors.ORB field.
void CFeatureExtraction::internal_computeORBDescriptors(
	const CImage& in_img, CFeatureList& in_features)
{
#if MRPT_HAS_OPENCV
	using namespace cv;
	mrpt::system::CTimeLoggerEntry tle(
		profiler, "internal_computeORBDescriptors");

	const size_t n_feats = in_features.size();
	const CImage inImg_gray(in_img, FAST_REF_OR_CONVERT_TO_GRAY);

	// convert from CFeatureList to vector<KeyPoint>
	vector<KeyPoint> cv_feats(n_feats);
	for (size_t k = 0; k < n_feats; ++k)
	{
		KeyPoint& kp = cv_feats[k];
		kp.pt.x = in_features[k].keypoint.pt.x;
		kp.pt.y = in_features[k].keypoint.pt.y;
		kp.angle = in_features[k].orientation;
		// NOTE(review): octave is stored into KeyPoint::size here — looks
		// intentional (size is reused as the scale hint) but worth confirming.
		kp.size = in_features[k].keypoint.octave;
	}  // end-for

	const Mat& cvImg = inImg_gray.asCvMatRef();
	Mat cv_descs;

#if MRPT_OPENCV_VERSION_NUM < 0x300
	Ptr<Feature2D> orb = Algorithm::create<Feature2D>("Feature2D.ORB");
	orb->operator()(
		cvImg, cv::noArray(), cv_feats, cv_descs,
		true /* use_precomputed_feats */);
#else
	Ptr<cv::ORB> orb = cv::ORB::create(
		n_feats, options.ORBOptions.scale_factor, options.ORBOptions.n_levels);
	orb->detectAndCompute(
		cvImg, cv::noArray(), cv_feats, cv_descs,
		true /* use_precomputed_feats */);
#endif

	// add descriptor to CFeatureList
	for (size_t k = 0; k < n_feats; ++k)
	{
		in_features[k].descriptors.ORB.emplace();
		auto& orb_desc = *in_features[k].descriptors.ORB;
		orb_desc.resize(cv_descs.cols);
		for (int i = 0; i < cv_descs.cols; ++i)
			orb_desc[i] = cv_descs.at<uchar>(k, i);
	}  // end-for
#endif
}  // end-internal_computeORBImageDescriptors
{ "pile_set_name": "Github" }
/* * This file is part of acados. * * acados is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * acados is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with acados; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * */ #if defined(SWIGPYTHON) %include "numpy.i" %fragment("NumPy_Fragments"); %init %{ import_array(); %} #endif #if defined(SWIGPYTHON) // %pythoncode %{ // from numpy import copy, copyto // class sequence_of_arrays(list): // def __init__(self): // super().__init__() // def __init__(self, iterable): // super().__init__(iterable) // def __getitem__(self, k): // return copy(super().__getitem__(k)) // def __setitem__(self, k, v): // try: // indices_to_set = range(*k.indices(len(self))) // except AttributeError: // # k is probably an integer // try: // indices_to_set = [int(k)] // except TypeError: // # k is probably tuple // indices_to_set = k // for index in indices_to_set: // copyto(super().__getitem__(index), v) // %} %{ // Global variable for Python module PyTypeObject *tuple_type = NULL; PyStructSequence_Desc *tuple_descriptor = NULL; PyObject *sequence_of_arrays_module = NULL; PyObject *copy_module = NULL; %} #endif %{ #include "swig/conversions.h" #include <algorithm> #include <stdexcept> #include <utility> #include <typeinfo> #if defined(SWIGMATLAB) template<typename T> mxClassID get_numeric_type() { if (typeid(T) == typeid(real_t)) return mxDOUBLE_CLASS; else if (typeid(T) == typeid(int_t)) return mxDOUBLE_CLASS; throw 
std::invalid_argument("Matrix can only have integer or floating point entries"); return mxUNKNOWN_CLASS; } #elif defined(SWIGPYTHON) template<typename T> int get_numeric_type() { if (typeid(T) == typeid(int_t)) return NPY_INT32; else if (typeid(T) == typeid(long)) return NPY_INT64; else if (typeid(T) == typeid(real_t)) return NPY_DOUBLE; throw std::invalid_argument("Matrix can only have integer or floating point entries"); return 0; } #endif bool is_integer(const LangObject *input) { #if defined(SWIGMATLAB) if (!mxIsScalar(input) || !mxIsNumeric(input)) return false; double input_as_double = mxGetScalar(input); int input_as_int = mxGetScalar(input); if ((double) input_as_int != input_as_double) return false; return true; #elif defined(SWIGPYTHON) if (!PyLong_Check((PyObject *) input)) return false; return true; #endif } bool is_real(const LangObject *input) { #if defined(SWIGMATLAB) if (!mxIsScalar(input) || !mxIsNumeric(input)) return false; return true; #elif defined(SWIGPYTHON) if (!PyFloat_Check((PyObject *) input)) return false; return true; #endif } int_t int_from(const LangObject *scalar) { #if defined(SWIGMATLAB) return (int_t) mxGetScalar(scalar); #elif defined(SWIGPYTHON) return (int_t) PyLong_AsLong((PyObject *) scalar); #endif } real_t real_from(const LangObject *scalar) { #if defined(SWIGMATLAB) return (real_t) mxGetScalar(scalar); #elif defined(SWIGPYTHON) return (real_t) PyFloat_AsDouble((PyObject *) scalar); #endif } LangObject *to_scalar(int_t scalar) { #if defined(SWIGMATLAB) return mxCreateDoubleScalar((double) scalar); #elif defined(SWIGPYTHON) return PyLong_FromLong((long) scalar); #endif } bool is_matrix(const LangObject *input) { #if defined(SWIGMATLAB) if (!mxIsNumeric(input)) return false; mwSize nb_dims = mxGetNumberOfDimensions(input); if (nb_dims != 2) return false; return true; #elif defined(SWIGPYTHON) if (!PyArray_Check(input)) return false; int nb_dims = PyArray_NDIM((PyArrayObject *) input); if (nb_dims < 1 || nb_dims > 2) return 
false; return true; #endif } int numRows(const LangObject *input) { if (!is_matrix(input)) throw std::invalid_argument("Input is not a valid matrix."); #if defined(SWIGMATLAB) const mwSize *dims = mxGetDimensions(input); return dims[0]; #elif defined(SWIGPYTHON) npy_intp *dims = PyArray_DIMS((PyArrayObject *) input); return dims[0]; #endif } int numColumns(const LangObject *input) { if (!is_matrix(input)) throw std::invalid_argument("Input is not a valid matrix."); #if defined(SWIGMATLAB) const mwSize *dims = mxGetDimensions(input); return dims[1]; #elif defined(SWIGPYTHON) int nb_dims = PyArray_NDIM((PyArrayObject *) input); npy_intp *dims = PyArray_DIMS((PyArrayObject *) input); if (nb_dims == 1) { // column vector return 1; } else { return dims[1]; } #endif } bool is_matrix(const LangObject *input, const int_t nb_rows, const int_t nb_columns) { if (!is_matrix(input)) return false; if (nb_rows != numRows(input) || nb_columns != numColumns(input)) return false; return true; } double *asDoublePointer(LangObject *input) { if (!is_matrix(input)) throw std::invalid_argument("Input is not of a valid matrix type."); #if defined(SWIGMATLAB) return (double *) mxGetData(input); #elif defined(SWIGPYTHON) PyObject *matrix = PyArray_FROM_OTF(input, NPY_FLOAT64, NPY_ARRAY_FARRAY_RO); if (matrix == NULL) { PyErr_Print(); throw std::runtime_error("Something went wrong while converting matrix"); } return (double *) PyArray_DATA((PyArrayObject *) matrix); #endif } template<typename T> LangObject *new_matrix(std::pair<int, int> dimensions, const T *data) { int_t nb_rows = dimensions.first; int_t nb_cols = dimensions.second; #if defined(SWIGMATLAB) mxArray *matrix = mxCreateNumericMatrix(nb_rows, nb_cols, get_numeric_type<T>(), mxREAL); double *new_array = (double *) mxCalloc(nb_rows*nb_cols, sizeof(double)); for (int_t i = 0; i < nb_rows*nb_cols; i++) new_array[i] = (double) data[i]; mxSetData(matrix, new_array); return matrix; #elif defined(SWIGPYTHON) PyObject *matrix = NULL; if 
(nb_cols == 1) { double *data_copy = (double *) calloc(nb_rows, sizeof(double)); std::copy_n(data, nb_rows, data_copy); npy_intp npy_dims[1] = {nb_rows}; matrix = PyArray_NewFromDataF(1, npy_dims, data_copy); } else { double *data_copy = (double *) calloc(nb_rows * nb_cols, sizeof(double)); std::copy_n(data, nb_rows * nb_cols, data_copy); npy_intp npy_dims[2] = {nb_rows, nb_cols}; matrix = PyArray_NewFromDataF(2, npy_dims, data_copy); } if (matrix == NULL) { PyErr_Print(); throw std::runtime_error("Something went wrong while copying array"); } return matrix; #endif } bool is_sequence(const LangObject *object) { #if defined(SWIGMATLAB) if (!mxIsCell(object)) return false; #elif defined(SWIGPYTHON) if (!PyList_Check((PyObject *) object)) return false; #endif return true; } bool is_sequence(const LangObject *input, int_t expected_length) { if (!is_sequence(input)) return false; #if defined(SWIGMATLAB) int_t length_of_sequence = mxGetNumberOfElements(input); #elif defined(SWIGPYTHON) int_t length_of_sequence = PyList_Size((PyObject *) input); #endif if (length_of_sequence != expected_length) return false; return true; } LangObject *from(const LangObject *sequence, int_t index) { #if defined(SWIGMATLAB) return mxGetCell(sequence, index); #elif defined(SWIGPYTHON) PyObject *item = PyList_GetItem((PyObject *) sequence, index); Py_INCREF(item); return item; #endif } LangObject *new_sequence(const int_t length) { #if defined(SWIGMATLAB) const mwSize dims[1] = {(const mwSize) length}; return mxCreateCellArray(1, dims); #elif defined(SWIGPYTHON) return PyList_New(length); #endif } template <typename T> LangObject *new_sequence_from(T *array, const int_t length) { LangObject *sequence = new_sequence(length); for (int_t index = 0; index < length; index++) { if (typeid(T) == typeid(int_t)) write_int_to(sequence, index, array[index]); else if (typeid(T) == typeid(real_t)) write_real_to(sequence, index, array[index]); } return sequence; } void fill_int_array_from(const LangObject 
*sequence, int_t *array, const int_t length) { for (int_t index = 0; index < length; index++) { LangObject *item = from(sequence, index); if (!is_integer(item)) { char err_msg[MAX_STR_LEN]; snprintf(err_msg, sizeof(err_msg), "Input %s elements must be scalars", LANG_SEQUENCE_NAME); throw std::invalid_argument(err_msg); } array[index] = int_from(item); } } void fill_real_array_from(const LangObject *sequence, real_t *array, const int_t length) { for (int_t index = 0; index < length; index++) { LangObject *item = from(sequence, index); if (!is_real(item)) { char err_msg[MAX_STR_LEN]; snprintf(err_msg, sizeof(err_msg), "Input %s elements must be scalars", LANG_SEQUENCE_NAME); throw std::invalid_argument(err_msg); } array[index] = real_from(item); } } bool is_map(const LangObject *object) { #if defined(SWIGMATLAB) if (!mxIsStruct(object)) return false; if (mxGetNumberOfElements(object) != 1) return false; #elif defined(SWIGPYTHON) if (!PyDict_Check(object)) return false; #endif return true; } bool has(const LangObject *map, const char *key) { #if defined(SWIGMATLAB) if (mxGetField(map, 0, key) == NULL) return false; #elif defined(SWIGPYTHON) if (PyDict_GetItemString((PyObject *) map, key) == NULL) return false; #endif return true; } int num_elems(const LangObject *map) { #if defined(SWIGMATLAB) return mxGetNumberOfFields(map); #elif defined(SWIGPYTHON) return PyDict_Size((PyObject *) map); #endif } LangObject *from(const LangObject *map, const char *key) { if (!has(map, key)) { char err_msg[MAX_STR_LEN]; snprintf(err_msg, sizeof(err_msg), "Input %s has no key %s", LANG_MAP_NAME, key); throw std::invalid_argument(err_msg); } #if defined(SWIGMATLAB) return mxGetField(map, 0, key); #elif defined(SWIGPYTHON) PyObject *item = PyDict_GetItemString((PyObject *) map, key); if (item) Py_INCREF(item); return item; #endif } const char *char_from(const LangObject *map, const char *key) { LangObject *value = from(map, key); #if defined(SWIGMATLAB) return (const char *) 
mxArrayToString(value); #elif defined(SWIGPYTHON) return (const char *) PyUnicode_AsUTF8AndSize(value, NULL); #endif } int_t int_from(const LangObject *map, const char *key) { LangObject *value = from(map, key); #if defined(SWIGMATLAB) return (int_t) mxGetScalar(value); #elif defined(SWIGPYTHON) return (int_t) PyLong_AsLong(value); #endif } real_t real_from(const LangObject *map, const char *key) { LangObject *value = from(map, key); #if defined(SWIGMATLAB) return (real_t) mxGetScalar(value); #elif defined(SWIGPYTHON) return (real_t) PyFloat_AsDouble(value); #endif } bool is_string(LangObject *input) { #if defined(SWIGMATLAB) return mxIsChar(input); #elif defined(SWIGPYTHON) return PyUnicode_Check(input); #endif } std::string string_from(LangObject *input) { const char *string; #if defined(SWIGMATLAB) string = mxArrayToUTF8String(input); if (string == NULL) throw std::runtime_error("Error during string conversion."); #elif defined(SWIGPYTHON) string = PyUnicode_AsUTF8(input); #endif return std::string(string); } bool is_boolean(LangObject *input) { #if defined(SWIGMATLAB) return mxIsLogicalScalar(input); #elif defined(SWIGPYTHON) return PyBool_Check(input); #endif } bool boolean_from(LangObject *input) { #if defined(SWIGMATLAB) return mxIsLogicalScalarTrue(input); #elif defined(SWIGPYTHON) return PyObject_IsTrue(input); #endif } bool is_valid_option_type(LangObject *input) { return is_integer(input) || is_real(input) || is_matrix(input) || is_map(input) || is_string(input) || is_boolean(input); } void to(LangObject *sequence, const int_t index, LangObject *item) { #if defined(SWIGMATLAB) mxSetCell(sequence, index, item); #elif defined(SWIGPYTHON) Py_INCREF(item); PyList_SetItem(sequence, index, item); #endif } void write_int_to(LangObject *sequence, const int_t index, const int_t number) { #if defined(SWIGMATLAB) mxArray *scalar = mxCreateDoubleScalar(number); to(sequence, index, scalar); #elif defined(SWIGPYTHON) to(sequence, index, PyLong_FromLong((long) 
number)); #endif } void write_real_to(LangObject *sequence, const int_t index, const real_t number) { #if defined(SWIGMATLAB) mxArray *scalar = mxCreateDoubleScalar(number); to(sequence, index, scalar); #elif defined(SWIGPYTHON) to(sequence, index, PyFloat_FromDouble((double) number)); #endif } LangObject *new_sequence_of_arrays(const int_t length) { #if defined(SWIGMATLAB) mxArray *sequence = new_sequence(length); #elif defined(SWIGPYTHON) // Try loading Python module into global variable if (sequence_of_arrays_module == NULL) sequence_of_arrays_module = PyImport_Import(PyString_FromString("acados")); // Check if loading was succesful if (sequence_of_arrays_module == NULL) SWIG_Error(SWIG_RuntimeError, "Something went wrong when importing Python module"); PyObject *pDict = PyModule_GetDict(sequence_of_arrays_module); PyObject *pClass = PyDict_GetItemString(pDict, "sequence_of_arrays"); if (pClass) Py_INCREF(pClass); PyObject *sequence = NULL; if (PyCallable_Check(pClass)) { PyObject *args = PyTuple_New(1); PyObject *list = PyList_New(length); for (int_t index = 0; index < length; index++) PyList_SetItem(list, index, PyLong_FromLong((long) index)); // fill list with dummies Py_INCREF(list); PyTuple_SetItem(args, 0, list); sequence = PyObject_CallObject(pClass, args); Py_DECREF(pClass); } #endif if (sequence == NULL) { char err_msg[MAX_STR_LEN]; snprintf(err_msg, sizeof(err_msg), "Something went wrong during construction of %s " "with length %d", LANG_SEQUENCE_NAME, length); SWIG_Error(SWIG_RuntimeError, err_msg); } return sequence; } template<typename T> LangObject *new_sequence_from(T **data, const int_t length, const int_t *nb_rows, const int_t *nb_columns) { LangObject *sequence = new_sequence_of_arrays(length); for (int_t index = 0; index < length; index++) { auto dims = std::make_pair(nb_rows[index], nb_columns[index]); LangObject *item = new_matrix<T>(dims, data[index]); to(sequence, index, item); } return sequence; } template<typename T> LangObject 
*new_sequence_from(T **data, const int_t length, const int_t *nb_elems) { int_t *nb_columns = (int_t *) calloc(length, sizeof(int_t)); for (int_t i = 0; i < length; i++) nb_columns[i] = 1; LangObject *result = new_sequence_from(data, length, nb_elems, nb_columns); free(nb_columns); return result; } bool dimensions_match(const LangObject *matrix, const int_t *nb_rows, const int_t *nb_cols, const int_t length) { int_t rows = nb_rows[0]; int_t cols = nb_cols[0]; for (int_t i = 1; i < length; i++) { if (nb_rows[i] != rows || nb_cols[i] != cols) { throw std::invalid_argument("If just given one matrix, dimensions for all stages " "must be equal"); return false; } } if (!is_matrix(matrix, rows, cols)) { throw std::invalid_argument("Input matrix has wrong dimensions"); return false; } return true; } template<typename T> void copy_from(const LangObject *matrix, T *data, const int_t nb_elems) { #if defined(SWIGMATLAB) if (!mxIsDouble(matrix)) throw std::invalid_argument("Only matrices with double precision numbers allowed"); double *matrix_data = (double *) mxGetData(matrix); std::copy(matrix_data, matrix_data + nb_elems, data); #elif defined(SWIGPYTHON) if (PyArray_TYPE((PyArrayObject *) matrix) == get_numeric_type<int_t>()) { int_t *matrix_data = (int_t *) PyArray_DATA((PyArrayObject *) matrix); std::copy(matrix_data, matrix_data + nb_elems, data); } else if (PyArray_TYPE((PyArrayObject *) matrix) == get_numeric_type<long>()) { long *matrix_data = (long *) PyArray_DATA((PyArrayObject *) matrix); std::copy(matrix_data, matrix_data + nb_elems, data); } else if (PyArray_TYPE((PyArrayObject *) matrix) == get_numeric_type<real_t>()) { real_t *matrix_data = (real_t *) PyArray_DATA((PyArrayObject *) matrix); std::copy(matrix_data, matrix_data + nb_elems, data); } else { throw std::invalid_argument("Only matrices with integer numbers or double " "precision numbers allowed"); } #endif } template<typename T> void fill_array_from(const LangObject *input, T **array, const int_t 
length, const int_t *nb_rows, const int_t *nb_columns) { if (is_matrix(input) && dimensions_match(input, nb_rows, nb_columns, length)) { int_t nb_elems = nb_rows[0]*nb_columns[0]; for (int_t index = 0; index < length; index++) { copy_from(input, array[index], nb_elems); } } else if (is_sequence(input, length)) { for (int_t index = 0; index < length; index++) { LangObject *item = from(input, index); if (is_matrix(item, nb_rows[index], nb_columns[index])) copy_from(item, array[index], nb_rows[index]*nb_columns[index]); } } else { char err_msg[MAX_STR_LEN]; snprintf(err_msg, sizeof(err_msg), "Expected %s or %s as input", LANG_SEQUENCE_NAME, LANG_MATRIX_NAME); throw std::invalid_argument(err_msg); } } template<typename T> void fill_array_from(const LangObject *input, T **array, const int_t length, const int_t *nb_elems) { int_t nb_columns[length]; for (int_t i = 0; i < length; i++) nb_columns[i] = 1; fill_array_from(input, array, length, nb_elems, nb_columns); } template<typename T> void fill_array_from(const LangObject *input, T **array, const int_t length) { int_t nb[length]; for (int_t i = 0; i < length; i++) nb[i] = 1; fill_array_from(input, array, length, nb, nb); } // TODO(roversch): This can probably be merged with the new_sequence_from functions. 
LangObject *new_output_list_from(const LangObject **input, const int_t length) { LangObject *output_list = new_sequence(length); for (int_t index = 0; index < length; index++) { to(output_list, index, (LangObject *) input[index]); } return output_list; } LangObject *sequence_concatenate(const LangObject *seq1, const LangObject *seq2) { #if defined(SWIGMATLAB) if (mxGetNumberOfDimensions(seq1) != 1 || mxGetNumberOfDimensions(seq2) != 1) throw std::invalid_argument("Can only concatenate 1-D cell arrays"); int_t length_seq1 = mxGetNumberOfElements(seq1); int_t length_seq2 = mxGetNumberOfElements(seq2); int_t total_length = length_seq1 + length_seq2; const mwSize dims[1] = {(const mwSize) total_length}; mxArray *output_array = mxCreateCellArray(1, dims); for (int_t index = 0; index < length_seq1; index++) mxSetCell(output_array, index, mxGetCell(seq1, index)); for (int_t index = 0; index < length_seq2; index++) mxSetCell(output_array, length_seq1 + index, mxGetCell(seq2, index)); return output_array; #elif defined(SWIGPYTHON) return PySequence_Concat((PyObject *) seq1, (PyObject *) seq2); #endif } bool is_named_tuple(const LangObject *object) { #if defined(SWIGMATLAB) return mxIsStruct(object); #elif defined(SWIGPYTHON) return PyTuple_Check((PyObject *) object); #endif } LangObject *new_output_tuple(int_t num_fields, const char **field_names, LangObject **content) { #if defined(SWIGMATLAB) const mwSize dims[1] = {(const mwSize) 1}; mxArray *named_tuple = mxCreateStructArray(1, dims, num_fields, field_names); for (int_t index = 0; index < num_fields; index++) mxSetField(named_tuple, 0, field_names[index], content[index]); return named_tuple; #elif defined(SWIGPYTHON) PyObject *content_copy[num_fields]; // Try loading Python module into global variable if (copy_module == NULL) copy_module = PyImport_Import(PyString_FromString("copy")); // Check if loading was succesful if (copy_module == NULL) SWIG_Error(SWIG_RuntimeError, "Something went wrong when importing Python 
module 'copy'"); PyObject *pDict = PyModule_GetDict(copy_module); PyObject *pFunction = PyDict_GetItemString(pDict, "deepcopy"); if (!PyCallable_Check(pFunction)) { SWIG_Error(SWIG_RuntimeError, "Function is not callable"); } for (int_t index = 0; index < num_fields; index++) { PyObject *args = PyTuple_New(1); PyTuple_SetItem(args, 0, content[index]); content_copy[index] = PyObject_CallObject(pFunction, args); } PyStructSequence_Field *fields; fields = (PyStructSequence_Field *) calloc(num_fields+1, sizeof(PyStructSequence_Field)); for (int_t index = 0; index < num_fields; index++) { fields[index].name = (char *) field_names[index]; fields[index].doc = NULL; } // The list of field names in named tuples must be NULL-terminated in Python fields[num_fields].name = NULL; fields[num_fields].doc = NULL; tuple_descriptor = (PyStructSequence_Desc *) malloc(sizeof(PyStructSequence_Desc)); tuple_descriptor->name = (char *) "output"; tuple_descriptor->doc = NULL; tuple_descriptor->fields = fields; tuple_descriptor->n_in_sequence = num_fields; tuple_type = (PyTypeObject *) malloc(sizeof(PyTypeObject)); PyStructSequence_InitType(tuple_type, tuple_descriptor); PyObject *named_tuple = PyStructSequence_New(tuple_type); for (int_t index = 0; index < num_fields; index++) PyStructSequence_SetItem(named_tuple, index, (PyObject *) content_copy[index]); return named_tuple; #endif } LangObject *new_ocp_output_tuple(LangObject *states, LangObject *controls) { const char *field_names[2] = {"states", "controls"}; LangObject *fields[2] = {states, controls}; return new_output_tuple(2, field_names, fields); } LangObject *new_sim_output_tuple(LangObject *final_state, LangObject *forward_sensitivities) { const char *field_names[2] = {"final_state", "forward_sensitivities"}; LangObject *fields[2] = {final_state, forward_sensitivities}; return new_output_tuple(2, field_names, fields); } LangObject *new_ocp_nlp_function_output_tuple( LangObject * y, LangObject * jac_y, LangObject * hess_y) { const 
char *field_names[3] = {"y", "jac_y", "hess_y"}; LangObject *fields[3] = {y, jac_y, hess_y}; return new_output_tuple(3, field_names, fields); } void fill_array_from(const LangObject *input, int_t *array, const int_t length) { if (is_integer(input)) { int_t number = int_from(input); for (int_t i = 0; i < length; i++) array[i] = number; } else if (is_sequence(input, length)) { fill_int_array_from(input, array, length); } else { char err_msg[MAX_STR_LEN]; snprintf(err_msg, sizeof(err_msg), \ "Expected scalar or %s of length %d", LANG_SEQUENCE_NAME, length); throw std::invalid_argument(err_msg); } } void fill_array_from(const LangObject *input, real_t *array, const int_t length) { if (is_real(input)) { real_t number = real_from(input); for (int_t i = 0; i < length; i++) array[i] = number; } else if (is_sequence(input, length)) { fill_real_array_from(input, array, length); } else if (is_matrix(input, length, 1)) { copy_from(input, array, length); } else { char err_msg[MAX_STR_LEN]; snprintf(err_msg, sizeof(err_msg), \ "Expected scalar or %s of length %d", LANG_SEQUENCE_NAME, length); throw std::invalid_argument(err_msg); } } void fill_array_from(const LangObject *map, const char *key, int_t *array, int_t array_length) { if (!has(map, key)) { memset(array, 0, array_length*sizeof(*array)); } else { LangObject *item = from(map, key); fill_array_from(item, array, array_length); } } #include "acados_cpp/options.hpp" namespace acados { template<typename T> option_t *as_option_ptr(T val) { return new option<T>(val); } option_t *make_option_map(LangObject *val); template<> option_t *as_option_ptr(LangObject *val) { if (is_integer(val)) return new option<int>(int_from(val)); else if (is_real(val)) return new option<double>(real_from(val)); else if (is_boolean(val)) return new option<bool>(boolean_from(val)); else if (is_string(val)) return new option<std::string>(string_from(val)); else if (is_map(val)) return make_option_map(val); else throw std::invalid_argument("Option does 
not have a valid type"); } option_t *make_option_map(LangObject *val) { std::map<std::string, option_t *> option_map; #if defined(SWIGMATLAB) int num_fields = mxGetNumberOfFields(val); for (int i = 0; i < num_fields; ++i) { std::string field_name {mxGetFieldNameByNumber(val, i)}; option_map[field_name] = as_option_ptr(mxGetField(val, 0, field_name.c_str())); } #elif defined(SWIGPYTHON) PyObject *key, *value; Py_ssize_t pos = 0; while (PyDict_Next(val, &pos, &key, &value)) { std::string field_name {PyUnicode_AsUTF8AndSize(key, NULL)}; option_map[field_name] = as_option_ptr(value); } #endif return new option<std::map<std::string, option_t *>>(option_map); } } // namespace acados %}
{ "pile_set_name": "Github" }
// Generated from definition io.k8s.api.authorization.v1.NonResourceAttributes

/// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
///
/// Both fields are optional; a default-constructed value has `path` and
/// `verb` set to `None`, and `None` fields are omitted entirely when
/// serialized (see the `Serialize` impl below).
#[derive(Clone, Debug, Default, PartialEq)]
pub struct NonResourceAttributes {
    /// Path is the URL path of the request
    pub path: Option<String>,

    /// Verb is the standard HTTP verb
    pub verb: Option<String>,
}

// Hand-rolled (generated) serde impl using the visitor pattern so that
// unknown map keys are tolerated and skipped rather than rejected.
impl<'de> serde::Deserialize<'de> for NonResourceAttributes {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        // Internal discriminant for the struct's field names; `Other`
        // absorbs any key this type does not recognize.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_path,
            Key_verb,
            Other,
        }

        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;

                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        f.write_str("field identifier")
                    }

                    // Map a raw key string onto the Field discriminant;
                    // unrecognized keys become Field::Other.
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "path" => Field::Key_path,
                            "verb" => Field::Key_verb,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        struct Visitor;

        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = NonResourceAttributes;

            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.write_str("NonResourceAttributes")
            }

            // Drain the input map one entry at a time; values for unknown
            // keys are consumed into IgnoredAny so the stream stays aligned.
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                let mut value_path: Option<String> = None;
                let mut value_verb: Option<String> = None;

                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_path => value_path = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_verb => value_verb = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                // Missing keys simply stay None — both fields are optional.
                Ok(NonResourceAttributes {
                    path: value_path,
                    verb: value_verb,
                })
            }
        }

        deserializer.deserialize_struct(
            "NonResourceAttributes",
            &[
                "path",
                "verb",
            ],
            Visitor,
        )
    }
}

impl serde::Serialize for NonResourceAttributes {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        // Field count is computed up front: each Some(_) contributes 1, so
        // None fields are not emitted at all.
        let mut state = serializer.serialize_struct(
            "NonResourceAttributes",
            self.path.as_ref().map_or(0, |_| 1) +
            self.verb.as_ref().map_or(0, |_| 1),
        )?;
        if let Some(value) = &self.path {
            serde::ser::SerializeStruct::serialize_field(&mut state, "path", value)?;
        }
        if let Some(value) = &self.verb {
            serde::ser::SerializeStruct::serialize_field(&mut state, "verb", value)?;
        }
        serde::ser::SerializeStruct::end(state)
    }
}
{ "pile_set_name": "Github" }
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import absolute_import

import os
from contextlib import contextmanager

import psutil
from six import PY3

from .conditions import WaitForPortListening
from .env import environment_run
from .structures import LazyFunction, TempDir
from .utils import ON_WINDOWS, find_free_port, get_ip

if PY3:
    import subprocess
else:
    import subprocess32 as subprocess

# Name of the file (written inside each tunnel's temp dir) holding the PID of
# the background ssh process, so KillProcess can find it on teardown.
PID_FILE = 'ssh.pid'


def run_background_command(command, pid_filename, env=None):
    """Run `command` in the background, writing its PID in `pid_filename`."""
    if ON_WINDOWS:
        # CREATE_NEW_PROCESS_GROUP detaches the child from this console's
        # process group so it keeps running independently.
        process = subprocess.Popen(command, env=env, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
    else:
        # start_new_session puts the child in its own session (POSIX), the
        # non-Windows equivalent of the detachment above.
        process = subprocess.Popen(command, env=env, start_new_session=True)
    with open(pid_filename, 'w') as pid_file:
        pid_file.write(str(process.pid))


@contextmanager
def socks_proxy(host, user, private_key):
    """Open a SSH connection with a SOCKS proxy.

    Yields the (ip, port) pair produced by SocksProxyUp; the ssh process is
    killed when the context exits (via KillProcess).
    """
    set_up = SocksProxyUp(host, user, private_key)
    tear_down = KillProcess('socks_proxy', PID_FILE)

    with environment_run(up=set_up, down=tear_down) as result:
        yield result


class SocksProxyUp(LazyFunction):
    """Create a SOCKS proxy using `ssh`.

    It returns the (`ip`, `port`) on which the proxy is listening.
    """

    def __init__(self, host, user, private_key):
        # private_key is the key *material* (a string), not a path; it is
        # written to a temp file when the proxy is started.
        self.host = host
        self.user = user
        self.private_key = private_key

    def __call__(self):
        with TempDir('socks_proxy') as temp_dir:
            ip = get_ip()
            local_port = find_free_port(ip)
            key_file = os.path.join(temp_dir, 'ssh_key')
            with open(key_file, 'w') as f:
                f.write(self.private_key)
            # ssh refuses keys that are readable by others; 0600 is required.
            os.chmod(key_file, 0o600)
            command = [
                'ssh',
                '-N',  # no remote command, tunnel only
                '-D',  # dynamic (SOCKS) forwarding on ip:local_port
                '{}:{}'.format(ip, local_port),
                '-i',
                key_file,
                # BatchMode + disabled host-key checks keep ssh from ever
                # prompting interactively in CI.
                '-o',
                'BatchMode=yes',
                '-o',
                'UserKnownHostsFile={}'.format(os.devnull),
                '-o',
                'StrictHostKeyChecking=no',
                '{}@{}'.format(self.user, self.host),
            ]
            run_background_command(command, os.path.join(temp_dir, PID_FILE))

            # Block until ssh is actually accepting connections locally.
            WaitForPortListening(ip, local_port)()

            return ip, local_port


@contextmanager
def tcp_tunnel(host, user, private_key, remote_port):
    """Open a SSH connection with a TCP tunnel proxy.

    Yields the local (ip, port) that forwards to `remote_port` on the remote
    host; the ssh process is killed when the context exits.
    """
    set_up = TCPTunnelUp(host, user, private_key, remote_port)
    tear_down = KillProcess('tcp_tunnel', PID_FILE)

    with environment_run(up=set_up, down=tear_down) as result:
        yield result


class TCPTunnelUp(LazyFunction):
    """Create a TCP tunnel using `ssh`.

    It returns the (`ip`, `port`) on which the tunnel is listening,
    connecting to `remote_port`.
    """

    def __init__(self, host, user, private_key, remote_port):
        # private_key is key material (a string), written to disk on start.
        self.host = host
        self.user = user
        self.private_key = private_key
        self.remote_port = remote_port

    def __call__(self):
        with TempDir('tcp_tunnel') as temp_dir:
            ip = get_ip()
            local_port = find_free_port(ip)
            key_file = os.path.join(temp_dir, 'ssh_key')
            with open(key_file, 'w') as f:
                f.write(self.private_key)
            # ssh requires the key file to be private (0600).
            os.chmod(key_file, 0o600)
            command = [
                'ssh',
                '-N',  # no remote command, tunnel only
                '-L',  # local forward: ip:local_port -> localhost:remote_port
                '{}:{}:localhost:{}'.format(ip, local_port, self.remote_port),
                '-i',
                key_file,
                # Never prompt interactively (CI-safe).
                '-o',
                'BatchMode=yes',
                '-o',
                'UserKnownHostsFile={}'.format(os.devnull),
                '-o',
                'StrictHostKeyChecking=no',
                '{}@{}'.format(self.user, self.host),
            ]
            run_background_command(command, os.path.join(temp_dir, PID_FILE))

            # Block until the forwarded port is accepting connections.
            WaitForPortListening(ip, local_port)()

            return ip, local_port


class KillProcess(LazyFunction):
    """Kill a process with the `pid_file` residing in the temporary directory `temp_name`."""

    def __init__(self, temp_name, pid_file):
        self.temp_name = temp_name
        self.pid_file = pid_file

    def __call__(self):
        # Re-enter the same named TempDir used at startup to locate the PID
        # file written by run_background_command.
        with TempDir(self.temp_name) as temp_dir:
            with open(os.path.join(temp_dir, self.pid_file)) as pid_file:
                pid = int(pid_file.read())

            # TODO: Remove psutil as a dependency when we drop Python 2, on Python 3 os.kill supports Windows
            process = psutil.Process(pid)
            process.kill()

            return 0
--- title: "'System.STAThreadAttribute' y 'System.MTAThreadAttribute' no se pueden aplicar a la vez a '|1'" ms.date: 07/20/2015 f1_keywords: - bc31513 - vbc31513 helpviewer_keywords: - BC31513 ms.assetid: 7efb4c8e-d31c-4273-9d85-8cd2bef4d120 ms.openlocfilehash: 6502cc3517047158864731aad44c7ed0f7db3576 ms.sourcegitcommit: bf5c5850654187705bc94cc40ebfb62fe346ab02 ms.translationtype: MT ms.contentlocale: es-ES ms.lasthandoff: 09/23/2020 ms.locfileid: "91095322" --- # <a name="systemstathreadattribute-and-systemmtathreadattribute-cannot-both-be-applied-to-1"></a>'System.STAThreadAttribute' y 'System.MTAThreadAttribute' no se pueden aplicar a la vez a '|1' Los atributos `System.STAThreadAttribute` y `System.MTAThreadAttribute` son mutuamente excluyentes. **Identificador de error:** BC31513 ## <a name="to-correct-this-error"></a>Para corregir este error 1. Aplique `System.MTAThreadAttribute` o `System.STAThreadAttribute`, pero no ambos. ## <a name="see-also"></a>Vea también - <xref:System.STAThreadAttribute> - <xref:System.MTAThreadAttribute> - [Información general de atributos](../programming-guide/concepts/attributes/index.md)
{ "pile_set_name": "Github" }
f(UpgradeNode). UpgradeNode = fun () -> case logplex_app:config(git_branch) of "v61" -> io:format(whereis(user), "at=upgrade_start cur_vsn=61~n", []); "v62" -> io:format(whereis(user), "at=upgrade type=retry cur_vsn=61 old_vsn=62~n", []); Else -> io:format(whereis(user), "at=upgrade_start old_vsn=~p abort=wrong_version", [tl(Else)]), erlang:error({wrong_version, Else}) end, %% stateless -- didn't change in this release, but might not have been %% reloaded in older ones l(logplex_msg_buffer), %% Stateful changes to HTTP drains -- gotta suspend, reload, and then %% resume all drains before going for the stateless drain buffer update Drains = [Pid || {Pid, http} <- gproc:lookup_local_properties(drain_type)], _ = [sys:suspend(Pid) || Pid <- Drains], l(logplex_http_drain), _ = [sys:change_code(Pid, logplex_http_drain, v61, undefined) || Pid <- Drains, erlang:is_process_alive(Pid)], _ = [sys:resume(Pid) || Pid <- Drains], %% stateless l(logplex_drain_buffer), io:format(whereis(user), "at=upgrade_end cur_vsn=62~n", []), application:set_env(logplex, git_branch, "v62"), ok end. f(NodeVersions). NodeVersions = fun () -> lists:keysort(3, [ {N, element(2, rpc:call(N, application, get_env, [logplex, git_branch])), rpc:call(N, os, getenv, ["INSTANCE_NAME"])} || N <- [node() | nodes()] ]) end. f(NodesAt). NodesAt = fun (Vsn) -> [ N || {N, V, _} <- NodeVersions(), V =:= Vsn ] end. f(RollingUpgrade). RollingUpgrade = fun (Nodes) -> lists:foldl(fun (N, {good, Upgraded}) -> case rpc:call(N, erlang, apply, [ UpgradeNode, [] ]) of ok -> {good, [N | Upgraded]}; Else -> {{bad, N, Else}, Upgraded} end; (N, {_, _} = Acc) -> Acc end, {good, []}, Nodes) end.
{ "pile_set_name": "Github" }
; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck -check-prefix=CHECK-NO-FP %s ; RUN: llc < %s -mtriple=x86_64-apple-darwin -disable-fp-elim | FileCheck -check-prefix=CHECK-FP %s ; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck -check-prefix=LINUX-NO-FP %s ; RUN: llc < %s -mtriple=x86_64-linux-gnu -disable-fp-elim | FileCheck -check-prefix=LINUX-FP %s define void @func() { entry: unreachable } ; MachO cannot handle an empty function. ; CHECK-NO-FP: _func: ; CHECK-NO-FP-NEXT: .cfi_startproc ; CHECK-NO-FP: nop ; CHECK-NO-FP-NEXT: .cfi_endproc ; CHECK-FP: _func: ; CHECK-FP-NEXT: .cfi_startproc ; CHECK-FP-NEXT: : ; CHECK-FP-NEXT: pushq %rbp ; CHECK-FP-NEXT: : ; CHECK-FP-NEXT: .cfi_def_cfa_offset 16 ; CHECK-FP-NEXT: : ; CHECK-FP-NEXT: .cfi_offset %rbp, -16 ; CHECK-FP-NEXT: movq %rsp, %rbp ; CHECK-FP-NEXT: : ; CHECK-FP-NEXT: .cfi_def_cfa_register %rbp ; CHECK-FP-NEXT: .cfi_endproc ; An empty function is perfectly fine on ELF. ; LINUX-NO-FP: func: ; LINUX-NO-FP-NEXT: .cfi_startproc ; LINUX-NO-FP-NEXT: {{^}}# ; LINUX-NO-FP-NEXT: {{^}}.L{{.*}}:{{$}} ; LINUX-NO-FP-NEXT: .size func, .L{{.*}}-func ; LINUX-NO-FP-NEXT: .cfi_endproc ; A cfi directive can point to the end of a function. It (and in fact the ; entire body) could be optimized out because of the unreachable, but we ; don't do it right now. ; LINUX-FP: func: ; LINUX-FP-NEXT: .cfi_startproc ; LINUX-FP-NEXT: {{^}}# ; LINUX-FP-NEXT: pushq %rbp ; LINUX-FP-NEXT: {{^}}.L{{.*}}:{{$}} ; LINUX-FP-NEXT: .cfi_def_cfa_offset 16 ; LINUX-FP-NEXT: {{^}}.L{{.*}}:{{$}} ; LINUX-FP-NEXT: .cfi_offset %rbp, -16 ; LINUX-FP-NEXT: movq %rsp, %rbp ; LINUX-FP-NEXT:{{^}}.L{{.*}}:{{$}} ; LINUX-FP-NEXT: .cfi_def_cfa_register %rbp ; LINUX-FP-NEXT:{{^}}.L{{.*}}:{{$}} ; LINUX-FP-NEXT: .size func, .Ltmp3-func ; LINUX-FP-NEXT: .cfi_endproc
{ "pile_set_name": "Github" }
from onl.platform.base import * from onl.platform.mellanox import * class OnlPlatform_x86_64_mlnx_msn2700_r0(OnlPlatformMellanox, OnlPlatformPortConfig_32x100): PLATFORM='x86-64-mlnx-msn2700-r0' MODEL="SN2700" SYS_OBJECT_ID=".2700.1" def baseconfig(self): # load modules import os # necessary if there are issues with the install # os.system("/usr/bin/apt-get install") self.syseeprom_export(); return True
{ "pile_set_name": "Github" }
<view class="container"> <image class="logo" src="/images/search-logo.png"></image> <text class="name">咩咩辞典</text> <view class="input-container"> <input class="input-search" bindconfirm="search" placeholder="请输入您要查询的单词" type="text"/> </view> <view class="button-search" bindtap="help"> <text class="text-search">说明</text> </view> </view>
{ "pile_set_name": "Github" }
----------------------------------------------------------------------------- -- Name: minimal.wx.lua -- Purpose: 'Minimal' wxLua sample -- Author: J Winwood -- Modified by: -- Created: 16/11/2001 -- RCS-ID: $Id: veryminimal.wx.lua,v 1.7 2008/01/22 04:45:39 jrl1 Exp $ -- Copyright: (c) 2001 J Winwood. All rights reserved. -- Licence: wxWidgets licence ----------------------------------------------------------------------------- -- Load the wxLua module, does nothing if running from wxLua, wxLuaFreeze, or wxLuaEdit package.cpath = package.cpath..";./?.dll;./?.so;../lib/?.so;../lib/vc_dll/?.dll;../lib/bcc_dll/?.dll;../lib/mingw_dll/?.dll;" require("wx") frame = nil function main() -- create the frame window frame = wx.wxFrame( wx.NULL, wx.wxID_ANY, "wxLua Very Minimal Demo", wx.wxDefaultPosition, wx.wxSize(450, 450), wx.wxDEFAULT_FRAME_STYLE ) -- show the frame window frame:Show(true) end main() -- Call wx.wxGetApp():MainLoop() last to start the wxWidgets event loop, -- otherwise the wxLua program will exit immediately. -- Does nothing if running from wxLua, wxLuaFreeze, or wxLuaEdit since the -- MainLoop is already running or will be started by the C++ program. wx.wxGetApp():MainLoop()
{ "pile_set_name": "Github" }
table border-collapse: collapse margin: $small-spacing 0 table-layout: fixed width: 100% th border-bottom: 1px solid shade($base-border-color, 25%) font-weight: 600 padding: $small-spacing 0 text-align: left td border-bottom: $base-border padding: $small-spacing 0 tr, td, th vertical-align: middle
{ "pile_set_name": "Github" }
<?php namespace Forone\Providers; /** * This file is part of Entrust, * a role & permission management solution for Laravel. * * @license MIT * @package Zizaco\Entrust */ use Illuminate\Support\ServiceProvider; use Zizaco\Entrust\ClassCreatorCommand; use Zizaco\Entrust\Entrust; use Zizaco\Entrust\MigrationCommand; class EntrustServiceProvider extends ServiceProvider { /** * Indicates if loading of the provider is deferred. * * @var bool */ protected $defer = false; /** * Bootstrap the application events. * * @return void */ public function boot() { // Publish config files $this->publishes([ __DIR__.'/../../config/entrust.php' => config_path('entrust.php'), ]); // Register commands $this->commands('command.entrust.migration'); $this->commands('command.entrust.classes'); } /** * Register the service provider. * * @return void */ public function register() { $this->registerEntrust(); $this->registerCommands(); $this->mergeConfig(); } /** * Register the application bindings. * * @return void */ private function registerEntrust() { $this->app->bind('entrust', function ($app) { return new Entrust($app); }); } /** * Register the artisan commands. * * @return void */ private function registerCommands() { $this->app->singleton('command.entrust.migration', function ($app) { return new MigrationCommand(); }); $this->app->singleton('command.entrust.classes', function ($app) { return new ClassCreatorCommand(); }); } /** * Merges user's and entrust's configs. * * @return void */ private function mergeConfig() { $this->mergeConfigFrom( __DIR__.'/../../config/entrust.php', 'entrust' ); } /** * Get the services provided. * * @return array */ public function provides() { return [ 'command.entrust.migration', 'command.entrust.classes' ]; } }
{ "pile_set_name": "Github" }
package com.uber.okbuck.example import dagger.Component import dagger.android.AndroidInjector import dagger.android.support.AndroidSupportInjectionModule import javax.inject.Singleton @Singleton @Component( modules = [ AndroidSupportInjectionModule::class, TestAnalyticsModule::class, BindingModule::class ] ) interface TestAppComponent: AndroidInjector<MainApp> { @Component.Factory abstract class Builder : AndroidInjector.Factory<MainApp> }
{ "pile_set_name": "Github" }
%YAML 1.1 %TAG !u! tag:unity3d.com,2011: --- !u!11 &1 AudioManager: m_ObjectHideFlags: 0 m_Volume: 1 Rolloff Scale: 1 Doppler Factor: 1 Default Speaker Mode: 2 m_SampleRate: 0 m_DSPBufferSize: 0 m_VirtualVoiceCount: 512 m_RealVoiceCount: 32 m_DisableAudio: 0
{ "pile_set_name": "Github" }
<li> No bios yet. </li>
{ "pile_set_name": "Github" }
/** * Module dependencies. */ var lingo = require('lingo') , assert = require('assert') , en = lingo.en; var fr = new lingo.Language('fr'); fr.translations = { 'Hello World': 'Bonjour tout le monde' , 'Hello {name}': 'Bonjour {name}' , 'Hello {first} {last}': 'Bonjour {first} {last}' }; module.exports = { 'test .translate()': function(){ assert.equal('Hello World', en.translate('Hello World')); assert.equal('Hello TJ', en.translate('Hello {name}', { name: 'TJ' })); assert.equal('Hello foo bar', en.translate('Hello {first} {last}', { first: 'foo', last: 'bar' })); }, 'test .translate() with translations': function(){ assert.equal('Bonjour tout le monde', fr.translate('Hello World')); assert.equal('Bonjour TJ', fr.translate('Hello {name}', { name: 'TJ' })); assert.equal('Bonjour foo bar', fr.translate('Hello {first} {last}', { first: 'foo', last: 'bar' })); } }
{ "pile_set_name": "Github" }
# Lines starting with '#' and sections without content # are not displayed by a call to 'details' # [Website] http://www.time.com/time/ [filters] http://img.timeinc.net/tii/omniture/h/config/time_s_code.js http://img.timeinc.net/time/assets/js/responsive-ads.min.js [other] # Any other details [comments] fanboy
{ "pile_set_name": "Github" }
# 音乐和音效 - [音频播放](audio.md) - [AudioSource 组件参考](../components/audiosource.md) - [兼容性说明](compatibility.md)
{ "pile_set_name": "Github" }
// -*- C++ -*- //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // <set> // class multiset // bool empty() const noexcept; // UNSUPPORTED: c++03, c++11, c++14, c++17 #include <set> #include "test_macros.h" int main(int, char**) { std::multiset<int> c; c.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} return 0; }
{ "pile_set_name": "Github" }
public class LSD { private static final int BITS_PER_BYTE = 8; private LSD() { } public static void sort(String[] a, int w) { int n = a.length; int R = 256; String[] aux = new String[n]; for (int d = w-1; d >= 0; d--) { int[] count = new int[R+1]; for (int i = 0; i < n; i++) count[a[i].charAt(d) + 1]++; for (int r = 0; r < R; r++) count[r+1] += count[r]; for (int i = 0; i < n; i++) aux[count[a[i].charAt(d)]++] = a[i]; for (int i = 0; i < n; i++) a[i] = aux[i]; } } public static void sort(int[] a) { final int BITS = 32; final int R = 1 << BITS_PER_BYTE; final int MASK = R - 1; final int w = BITS / BITS_PER_BYTE; int n = a.length; int[] aux = new int[n]; for (int d = 0; d < w; d++) { int[] count = new int[R+1]; for (int i = 0; i < n; i++) { int c = (a[i] >> BITS_PER_BYTE*d) & MASK; count[c + 1]++; } for (int r = 0; r < R; r++) count[r+1] += count[r]; if (d == w-1) { int shift1 = count[R] - count[R/2]; int shift2 = count[R/2]; for (int r = 0; r < R/2; r++) count[r] += shift1; for (int r = R/2; r < R; r++) count[r] -= shift2; } for (int i = 0; i < n; i++) { int c = (a[i] >> BITS_PER_BYTE*d) & MASK; aux[count[c]++] = a[i]; } for (int i = 0; i < n; i++) a[i] = aux[i]; } } public static void main(String[] args) { String[] a = StdIn.readAllStrings(); int n = a.length; int w = a[0].length(); for (int i = 0; i < n; i++) assert a[i].length() == w : "Strings must have fixed length"; sort(a, w); for (int i = 0; i < n; i++) StdOut.println(a[i]); } }
{ "pile_set_name": "Github" }
/*----------------------------------------------------------------------------*/ /* Copyright (c) 2019 FIRST. All Rights Reserved. */ /* Open Source Software - may be modified and shared by FRC teams. The code */ /* must be accompanied by the FIRST BSD license file in the root directory of */ /* the project. */ /*----------------------------------------------------------------------------*/ #pragma once #include "frc/PWMSpeedController.h" namespace frc { /** * Playing with Fusion Venom Smart Motor with PWM control. * * Note that the Venom uses the following bounds for PWM values. These * values should work reasonably well for most controllers, but if users * experience issues such as asymmetric behavior around the deadband or * inability to saturate the controller in either direction, calibration is * recommended. * * \li 2.004ms = full "forward" * \li 1.520ms = the "high end" of the deadband range * \li 1.500ms = center of the deadband range (off) * \li 1.480ms = the "low end" of the deadband range * \li 0.997ms = full "reverse" */ class PWMVenom : public PWMSpeedController { public: /** * Construct a Venom connected via PWM. * * @param channel The PWM channel that the Venom is attached to. 0-9 are * on-board, 10-19 are on the MXP port */ explicit PWMVenom(int channel); PWMVenom(PWMVenom&&) = default; PWMVenom& operator=(PWMVenom&&) = default; }; } // namespace frc
{ "pile_set_name": "Github" }
/* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/io.h> #include <linux/time.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/info.h> #include <sound/initval.h> static int preallocate_dma = 1; module_param(preallocate_dma, int, 0444); MODULE_PARM_DESC(preallocate_dma, "Preallocate DMA memory when the PCM devices are initialized."); static int maximum_substreams = 4; module_param(maximum_substreams, int, 0444); MODULE_PARM_DESC(maximum_substreams, "Maximum substreams with preallocated DMA memory."); static const size_t snd_minimum_buffer = 16384; /* * try to allocate as the large pages as possible. * stores the resultant memory size in *res_size. * * the minimum size is snd_minimum_buffer. it should be power of 2. 
*/ static int preallocate_pcm_pages(struct snd_pcm_substream *substream, size_t size) { struct snd_dma_buffer *dmab = &substream->dma_buffer; size_t orig_size = size; int err; do { if ((err = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab)) < 0) { if (err != -ENOMEM) return err; /* fatal error */ } else return 0; size >>= 1; } while (size >= snd_minimum_buffer); dmab->bytes = 0; /* tell error */ pr_warn("ALSA pcmC%dD%d%c,%d:%s: cannot preallocate for size %zu\n", substream->pcm->card->number, substream->pcm->device, substream->stream ? 'c' : 'p', substream->number, substream->pcm->name, orig_size); return 0; } /* * release the preallocated buffer if not yet done. */ static void snd_pcm_lib_preallocate_dma_free(struct snd_pcm_substream *substream) { if (substream->dma_buffer.area == NULL) return; snd_dma_free_pages(&substream->dma_buffer); substream->dma_buffer.area = NULL; } /** * snd_pcm_lib_preallocate_free - release the preallocated buffer of the specified substream. * @substream: the pcm substream instance * * Releases the pre-allocated buffer of the given substream. * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_lib_preallocate_free(struct snd_pcm_substream *substream) { snd_pcm_lib_preallocate_dma_free(substream); #ifdef CONFIG_SND_VERBOSE_PROCFS snd_info_free_entry(substream->proc_prealloc_max_entry); substream->proc_prealloc_max_entry = NULL; snd_info_free_entry(substream->proc_prealloc_entry); substream->proc_prealloc_entry = NULL; #endif return 0; } /** * snd_pcm_lib_preallocate_free_for_all - release all pre-allocated buffers on the pcm * @pcm: the pcm instance * * Releases all the pre-allocated buffers on the given pcm. * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; int stream; for (stream = 0; stream < 2; stream++) for (substream = pcm->streams[stream].substream; substream; substream = substream->next) snd_pcm_lib_preallocate_free(substream); return 0; } EXPORT_SYMBOL(snd_pcm_lib_preallocate_free_for_all); #ifdef CONFIG_SND_VERBOSE_PROCFS /* * read callback for prealloc proc file * * prints the current allocated size in kB. */ static void snd_pcm_lib_preallocate_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_substream *substream = entry->private_data; snd_iprintf(buffer, "%lu\n", (unsigned long) substream->dma_buffer.bytes / 1024); } /* * read callback for prealloc_max proc file * * prints the maximum allowed size in kB. */ static void snd_pcm_lib_preallocate_max_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_substream *substream = entry->private_data; snd_iprintf(buffer, "%lu\n", (unsigned long) substream->dma_max / 1024); } /* * write callback for prealloc proc file * * accepts the preallocation size in kB. 
*/ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_substream *substream = entry->private_data; char line[64], str[64]; size_t size; struct snd_dma_buffer new_dmab; if (substream->runtime) { buffer->error = -EBUSY; return; } if (!snd_info_get_line(buffer, line, sizeof(line))) { snd_info_get_str(str, line, sizeof(str)); size = simple_strtoul(str, NULL, 10) * 1024; if ((size != 0 && size < 8192) || size > substream->dma_max) { buffer->error = -EINVAL; return; } if (substream->dma_buffer.bytes == size) return; memset(&new_dmab, 0, sizeof(new_dmab)); new_dmab.dev = substream->dma_buffer.dev; if (size > 0) { if (snd_dma_alloc_pages(substream->dma_buffer.dev.type, substream->dma_buffer.dev.dev, size, &new_dmab) < 0) { buffer->error = -ENOMEM; return; } substream->buffer_bytes_max = size; } else { substream->buffer_bytes_max = UINT_MAX; } if (substream->dma_buffer.area) snd_dma_free_pages(&substream->dma_buffer); substream->dma_buffer = new_dmab; } else { buffer->error = -EINVAL; } } static inline void preallocate_info_init(struct snd_pcm_substream *substream) { struct snd_info_entry *entry; if ((entry = snd_info_create_card_entry(substream->pcm->card, "prealloc", substream->proc_root)) != NULL) { entry->c.text.read = snd_pcm_lib_preallocate_proc_read; entry->c.text.write = snd_pcm_lib_preallocate_proc_write; entry->mode |= S_IWUSR; entry->private_data = substream; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } substream->proc_prealloc_entry = entry; if ((entry = snd_info_create_card_entry(substream->pcm->card, "prealloc_max", substream->proc_root)) != NULL) { entry->c.text.read = snd_pcm_lib_preallocate_max_proc_read; entry->private_data = substream; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } substream->proc_prealloc_max_entry = entry; } #else /* !CONFIG_SND_VERBOSE_PROCFS */ #define preallocate_info_init(s) #endif /* 
CONFIG_SND_VERBOSE_PROCFS */ /* * pre-allocate the buffer and create a proc file for the substream */ static int snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream, size_t size, size_t max) { if (size > 0 && preallocate_dma && substream->number < maximum_substreams) preallocate_pcm_pages(substream, size); if (substream->dma_buffer.bytes > 0) substream->buffer_bytes_max = substream->dma_buffer.bytes; substream->dma_max = max; preallocate_info_init(substream); return 0; } /** * snd_pcm_lib_preallocate_pages - pre-allocation for the given DMA type * @substream: the pcm substream instance * @type: DMA type (SNDRV_DMA_TYPE_*) * @data: DMA type dependent data * @size: the requested pre-allocation size in bytes * @max: the max. allowed pre-allocation size * * Do pre-allocation for the given DMA buffer type. * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream, int type, struct device *data, size_t size, size_t max) { substream->dma_buffer.dev.type = type; substream->dma_buffer.dev.dev = data; return snd_pcm_lib_preallocate_pages1(substream, size, max); } EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages); /** * snd_pcm_lib_preallocate_pages_for_all - pre-allocation for continuous memory type (all substreams) * @pcm: the pcm instance * @type: DMA type (SNDRV_DMA_TYPE_*) * @data: DMA type dependent data * @size: the requested pre-allocation size in bytes * @max: the max. allowed pre-allocation size * * Do pre-allocation to all substreams of the given pcm for the * specified DMA type. * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm, int type, void *data, size_t size, size_t max) { struct snd_pcm_substream *substream; int stream, err; for (stream = 0; stream < 2; stream++) for (substream = pcm->streams[stream].substream; substream; substream = substream->next) if ((err = snd_pcm_lib_preallocate_pages(substream, type, data, size, max)) < 0) return err; return 0; } EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all); #ifdef CONFIG_SND_DMA_SGBUF /** * snd_pcm_sgbuf_ops_page - get the page struct at the given offset * @substream: the pcm substream instance * @offset: the buffer offset * * Used as the page callback of PCM ops. * * Return: The page struct at the given buffer offset. %NULL on failure. */ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigned long offset) { struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream); unsigned int idx = offset >> PAGE_SHIFT; if (idx >= (unsigned int)sgbuf->pages) return NULL; return sgbuf->page_table[idx]; } EXPORT_SYMBOL(snd_pcm_sgbuf_ops_page); #endif /* CONFIG_SND_DMA_SGBUF */ /** * snd_pcm_lib_malloc_pages - allocate the DMA buffer * @substream: the substream to allocate the DMA buffer to * @size: the requested buffer size in bytes * * Allocates the DMA buffer on the BUS type given earlier to * snd_pcm_lib_preallocate_xxx_pages(). * * Return: 1 if the buffer is changed, 0 if not changed, or a negative * code on failure. 
*/ int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size) { struct snd_pcm_runtime *runtime; struct snd_dma_buffer *dmab = NULL; if (PCM_RUNTIME_CHECK(substream)) return -EINVAL; if (snd_BUG_ON(substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_UNKNOWN)) return -EINVAL; runtime = substream->runtime; if (runtime->dma_buffer_p) { /* perphaps, we might free the large DMA memory region to save some space here, but the actual solution costs us less time */ if (runtime->dma_buffer_p->bytes >= size) { runtime->dma_bytes = size; return 0; /* ok, do not change */ } snd_pcm_lib_free_pages(substream); } if (substream->dma_buffer.area != NULL && substream->dma_buffer.bytes >= size) { dmab = &substream->dma_buffer; /* use the pre-allocated buffer */ } else { dmab = kzalloc(sizeof(*dmab), GFP_KERNEL); if (! dmab) return -ENOMEM; dmab->dev = substream->dma_buffer.dev; if (snd_dma_alloc_pages(substream->dma_buffer.dev.type, substream->dma_buffer.dev.dev, size, dmab) < 0) { kfree(dmab); return -ENOMEM; } } snd_pcm_set_runtime_buffer(substream, dmab); runtime->dma_bytes = size; return 1; /* area was changed */ } EXPORT_SYMBOL(snd_pcm_lib_malloc_pages); /** * snd_pcm_lib_free_pages - release the allocated DMA buffer. * @substream: the substream to release the DMA buffer * * Releases the DMA buffer allocated via snd_pcm_lib_malloc_pages(). * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) return -EINVAL; runtime = substream->runtime; if (runtime->dma_area == NULL) return 0; if (runtime->dma_buffer_p != &substream->dma_buffer) { /* it's a newly allocated buffer. release it now. 
*/ snd_dma_free_pages(runtime->dma_buffer_p); kfree(runtime->dma_buffer_p); } snd_pcm_set_runtime_buffer(substream, NULL); return 0; } EXPORT_SYMBOL(snd_pcm_lib_free_pages); int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream, size_t size, gfp_t gfp_flags) { struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) return -EINVAL; runtime = substream->runtime; if (runtime->dma_area) { if (runtime->dma_bytes >= size) return 0; /* already large enough */ vfree(runtime->dma_area); } runtime->dma_area = __vmalloc(size, gfp_flags, PAGE_KERNEL); if (!runtime->dma_area) return -ENOMEM; runtime->dma_bytes = size; return 1; } EXPORT_SYMBOL(_snd_pcm_lib_alloc_vmalloc_buffer); /** * snd_pcm_lib_free_vmalloc_buffer - free vmalloc buffer * @substream: the substream with a buffer allocated by * snd_pcm_lib_alloc_vmalloc_buffer() * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) return -EINVAL; runtime = substream->runtime; vfree(runtime->dma_area); runtime->dma_area = NULL; return 0; } EXPORT_SYMBOL(snd_pcm_lib_free_vmalloc_buffer); /** * snd_pcm_lib_get_vmalloc_page - map vmalloc buffer offset to page struct * @substream: the substream with a buffer allocated by * snd_pcm_lib_alloc_vmalloc_buffer() * @offset: offset in the buffer * * This function is to be used as the page callback in the PCM ops. * * Return: The page struct, or %NULL on failure. */ struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream, unsigned long offset) { return vmalloc_to_page(substream->runtime->dma_area + offset); } EXPORT_SYMBOL(snd_pcm_lib_get_vmalloc_page);
{ "pile_set_name": "Github" }
/*++ Copyright (c) 1989-1993 Microsoft Corporation Module Name: receive.c Abstract: This module contains the code to handle receive indication and posted receives for the Netbios module of the ISN transport. Author: Adam Barr (adamba) 22-November-1993 Environment: Kernel mode Revision History: --*/ #include "precomp.h" #pragma hdrstop // // This routine is a no-op to put in the NbiCallbacks table so // we can avoid checking for runt session frames (this is because // of how the if is structure below). // VOID NbiProcessSessionRunt( IN PIPX_LOCAL_TARGET RemoteAddress, IN ULONG MacOptions, IN PUCHAR PacketBuffer, IN UINT PacketSize ) { return; } NB_CALLBACK_NO_TRANSFER NbiCallbacksNoTransfer[] = { NbiProcessFindName, NbiProcessNameRecognized, NbiProcessAddName, NbiProcessAddName, // processes name in use frames also NbiProcessDeleteName, NbiProcessSessionRunt, // in case get a short session packet NbiProcessSessionEnd, NbiProcessSessionEndAck, NbiProcessStatusQuery }; #ifdef RSRC_TIMEOUT_DBG VOID NbiProcessDeathPacket( IN NDIS_HANDLE MacBindingHandle, IN NDIS_HANDLE MacReceiveContext, IN PIPX_LOCAL_TARGET RemoteAddress, IN ULONG MacOptions, IN PUCHAR LookaheadBuffer, IN UINT LookaheadBufferSize, IN UINT LookaheadBufferOffset, IN UINT PacketSize ) /*++ Routine Description: This routine handles NB_CMD_SESSION_DATA frames. Arguments: MacBindingHandle - A handle to use when calling NdisTransferData. MacReceiveContext - A context to use when calling NdisTransferData. RemoteAddress - The local target this packet was received from. MacOptions - The MAC options for the underlying NDIS binding. LookaheadBuffer - The lookahead buffer, starting at the IPX header. LookaheadBufferSize - The length of the lookahead data. LookaheadBufferOffset - The offset to add when calling NdisTransferData. PacketSize - The total length of the packet, starting at the IPX header. Return Value: None. 
--*/ { NB_CONNECTION UNALIGNED * Conn = (NB_CONNECTION UNALIGNED *)LookaheadBuffer; NB_SESSION UNALIGNED * Sess = (NB_SESSION UNALIGNED *)(&Conn->Session); PCONNECTION Connection; PDEVICE Device = NbiDevice; ULONG Hash; NB_DEFINE_LOCK_HANDLE (LockHandle) DbgPrint("******Received death packet - connid %x\n",Sess->DestConnectionId); if ( !NbiGlobalDebugResTimeout ) { return; } if (Sess->DestConnectionId != 0xffff) { // // This is an active connection, find it using // our session id. // Hash = (Sess->DestConnectionId & CONNECTION_HASH_MASK) >> CONNECTION_HASH_SHIFT; NB_SYNC_GET_LOCK (&Device->Lock, &LockHandle); Connection = Device->ConnectionHash[Hash].Connections; while (Connection != NULL) { if (Connection->LocalConnectionId == Sess->DestConnectionId) { break; } Connection = Connection->NextConnection; } if (Connection == NULL) { DbgPrint("********No Connection found with %x id\n",Sess->DestConnectionId); NB_SYNC_FREE_LOCK (&Device->Lock, LockHandle); return; } DbgPrint("******Received death packet on conn %lx from <%.16s>\n",Connection,Connection->RemoteName); DbgBreakPoint(); NB_SYNC_FREE_LOCK (&Device->Lock, LockHandle); } } #endif //RSRC_TIMEOUT_DBG VOID NbiReceive( IN NDIS_HANDLE MacBindingHandle, IN NDIS_HANDLE MacReceiveContext, IN PIPX_LOCAL_TARGET RemoteAddress, IN ULONG MacOptions, IN PUCHAR LookaheadBuffer, IN UINT LookaheadBufferSize, IN UINT LookaheadBufferOffset, IN UINT PacketSize ) /*++ Routine Description: This routine handles receive indications from IPX. Arguments: MacBindingHandle - A handle to use when calling NdisTransferData. MacReceiveContext - A context to use when calling NdisTransferData. RemoteAddress - The local target this packet was received from. MacOptions - The MAC options for the underlying NDIS binding. LookaheadBuffer - The lookahead buffer, starting at the IPX header. LookaheadBufferSize - The length of the lookahead data. LookaheadBufferOffset - The offset to add when calling NdisTransferData. 
PacketSize - The total length of the packet, starting at the IPX header. Return Value: None. --*/ { PNB_FRAME NbFrame = (PNB_FRAME)LookaheadBuffer; UCHAR DataStreamType; // // We know that this is a frame with a valid IPX header // because IPX would not give it to use otherwise. However, // it does not check the source socket. // if (NbFrame->Connectionless.IpxHeader.SourceSocket != NB_SOCKET) { return; } ++NbiDevice->Statistics.PacketsReceived; // First assume that the DataStreamType is at the normal place i.e 2nd byte // // Now see if this is a name frame. // if ( PacketSize == sizeof(IPX_HEADER) + sizeof(NB_NAME_FRAME) ) { // In the internet mode, the DataStreamType2 becomes DataStreamType if (NbFrame->Connectionless.IpxHeader.PacketType == 0x14 ) { DataStreamType = NbFrame->Connectionless.NameFrame.DataStreamType2; } else { DataStreamType = NbFrame->Connectionless.NameFrame.DataStreamType; } // Is this a name frame? // NB_CMD_FIND_NAME = 1 .... NB_CMD_DELETE_NAME = 5 // if ((DataStreamType >= NB_CMD_FIND_NAME) && (DataStreamType <= NB_CMD_DELETE_NAME)) { if (LookaheadBufferSize == PacketSize) { (*NbiCallbacksNoTransfer[DataStreamType-1])( RemoteAddress, MacOptions, LookaheadBuffer, LookaheadBufferSize); } return; } } #ifdef RSRC_TIMEOUT_DBG if ((PacketSize >= sizeof(NB_CONNECTION)) && (NbFrame->Connection.Session.DataStreamType == NB_CMD_DEATH_PACKET)) { NbiProcessDeathPacket( MacBindingHandle, MacReceiveContext, RemoteAddress, MacOptions, LookaheadBuffer, LookaheadBufferSize, LookaheadBufferOffset, PacketSize); } #endif //RSRC_TIMEOUT_DBG if ((PacketSize >= sizeof(NB_CONNECTION)) && (NbFrame->Connection.Session.DataStreamType == NB_CMD_SESSION_DATA)) { NbiProcessSessionData( MacBindingHandle, MacReceiveContext, RemoteAddress, MacOptions, LookaheadBuffer, LookaheadBufferSize, LookaheadBufferOffset, PacketSize); } else { DataStreamType = NbFrame->Connectionless.NameFrame.DataStreamType; // Handle NB_CMD_SESSION_END = 7 ... 
NB_CMD_STATUS_QUERY = 9 // if ((DataStreamType >= NB_CMD_SESSION_END ) && (DataStreamType <= NB_CMD_STATUS_QUERY)) { if (LookaheadBufferSize == PacketSize) { (*NbiCallbacksNoTransfer[DataStreamType-1])( RemoteAddress, MacOptions, LookaheadBuffer, LookaheadBufferSize); } } else if (DataStreamType == NB_CMD_STATUS_RESPONSE) { NbiProcessStatusResponse( MacBindingHandle, MacReceiveContext, RemoteAddress, MacOptions, LookaheadBuffer, LookaheadBufferSize, LookaheadBufferOffset, PacketSize); } else if ((DataStreamType == NB_CMD_DATAGRAM) || (DataStreamType == NB_CMD_BROADCAST_DATAGRAM)) { NbiProcessDatagram( MacBindingHandle, MacReceiveContext, RemoteAddress, MacOptions, LookaheadBuffer, LookaheadBufferSize, LookaheadBufferOffset, PacketSize, (BOOLEAN)(DataStreamType == NB_CMD_BROADCAST_DATAGRAM)); } } } /* NbiReceive */ VOID NbiReceiveComplete( IN USHORT NicId ) /*++ Routine Description: This routine handles receive complete indications from IPX. Arguments: NicId - The NIC ID on which a receive was previously indicated. Return Value: None. --*/ { PLIST_ENTRY p; PADDRESS Address; PREQUEST Request; PNB_RECEIVE_BUFFER ReceiveBuffer; PDEVICE Device = NbiDevice; LIST_ENTRY LocalList; PCONNECTION Connection; NB_DEFINE_LOCK_HANDLE (LockHandle); // // Complete any pending receive requests. // if (!IsListEmpty (&Device->ReceiveCompletionQueue)) { p = NB_REMOVE_HEAD_LIST( &Device->ReceiveCompletionQueue, &Device->Lock); while (!NB_LIST_WAS_EMPTY(&Device->ReceiveCompletionQueue, p)) { Request = LIST_ENTRY_TO_REQUEST (p); // // BUGBUG: Cache the connection somewhere easier // to retrieve? 
// Connection = (PCONNECTION)REQUEST_OPEN_CONTEXT(Request); NB_DEBUG2 (RECEIVE, ("Completing receive %lx (%d), status %lx\n", Request, REQUEST_INFORMATION(Request), REQUEST_STATUS(Request))); NbiCompleteRequest (Request); NbiFreeRequest (NbiDevice, Request); Connection->ReceiveState = CONNECTION_RECEIVE_IDLE; NbiDereferenceConnection (Connection, CREF_RECEIVE); p = NB_REMOVE_HEAD_LIST( &Device->ReceiveCompletionQueue, &Device->Lock); } } // // Indicate any datagrams to clients. // if (!IsListEmpty (&Device->ReceiveDatagrams)) { p = NB_REMOVE_HEAD_LIST( &Device->ReceiveDatagrams, &Device->Lock); while (!NB_LIST_WAS_EMPTY(&Device->ReceiveDatagrams, p)) { ReceiveBuffer = CONTAINING_RECORD (p, NB_RECEIVE_BUFFER, WaitLinkage); Address = ReceiveBuffer->Address; NbiIndicateDatagram( Address, ReceiveBuffer->RemoteName, ReceiveBuffer->Data, ReceiveBuffer->DataLength); #if defined(_PNP_POWER) NbiPushReceiveBuffer ( ReceiveBuffer ); #else NB_PUSH_ENTRY_LIST( &Device->ReceiveBufferList, &ReceiveBuffer->PoolLinkage, &Device->Lock); #endif _PNP_POWER NbiDereferenceAddress (Address, AREF_FIND); p = NB_REMOVE_HEAD_LIST( &Device->ReceiveDatagrams, &Device->Lock); } } // // Start packetizing connections. // if (!IsListEmpty (&Device->PacketizeConnections)) { NB_SYNC_GET_LOCK (&Device->Lock, &LockHandle); // // Check again because it may just have become // empty, and the code below depends on it being // non-empty. // if (!IsListEmpty (&Device->PacketizeConnections)) { // // We copy the list locally, in case someone gets // put back on it. We have to hack the end so // it points to LocalList instead of PacketizeConnections. // LocalList = Device->PacketizeConnections; LocalList.Flink->Blink = &LocalList; LocalList.Blink->Flink = &LocalList; InitializeListHead (&Device->PacketizeConnections); // // Set all these connections to not be on the list, so // NbiStopConnection won't try to take them off. 
// for (p = LocalList.Flink; p != &LocalList; p = p->Flink) { Connection = CONTAINING_RECORD (p, CONNECTION, PacketizeLinkage); CTEAssert (Connection->OnPacketizeQueue); Connection->OnPacketizeQueue = FALSE; } NB_SYNC_FREE_LOCK (&Device->Lock, LockHandle); while (TRUE) { p = RemoveHeadList (&LocalList); if (p == &LocalList) { break; } Connection = CONTAINING_RECORD (p, CONNECTION, PacketizeLinkage); NB_SYNC_GET_LOCK (&Connection->Lock, &LockHandle); if ((Connection->State == CONNECTION_STATE_ACTIVE) && (Connection->SubState == CONNECTION_SUBSTATE_A_PACKETIZE)) { NbiPacketizeSend( Connection NB_LOCK_HANDLE_ARG (LockHandle) ); } else { NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); } NbiDereferenceConnection (Connection, CREF_PACKETIZE); } } else { NB_SYNC_FREE_LOCK (&Device->Lock, LockHandle); } } } /* NbiReceiveComplete */ VOID NbiTransferDataComplete( IN PNDIS_PACKET Packet, IN NDIS_STATUS Status, IN UINT BytesTransferred ) /*++ Routine Description: This routine handles a transfer data complete indication from IPX, indicating that a previously issued NdisTransferData call has completed. Arguments: Packet - The packet associated with the transfer. Status - The status of the transfer. BytesTransferred - The number of bytes transferred. Return Value: None. 
--*/ { PNB_RECEIVE_RESERVED ReceiveReserved; PNB_RECEIVE_BUFFER ReceiveBuffer; PADDRESS Address; PCONNECTION Connection; PNDIS_BUFFER CurBuffer, TmpBuffer; PREQUEST AdapterStatusRequest; PDEVICE Device = NbiDevice; CTELockHandle CancelLH; NB_DEFINE_LOCK_HANDLE (LockHandle); ReceiveReserved = (PNB_RECEIVE_RESERVED)(Packet->ProtocolReserved); switch (ReceiveReserved->Type) { case RECEIVE_TYPE_DATA: CTEAssert (ReceiveReserved->TransferInProgress); ReceiveReserved->TransferInProgress = FALSE; Connection = ReceiveReserved->u.RR_CO.Connection; NB_GET_CANCEL_LOCK( &CancelLH ); NB_SYNC_GET_LOCK (&Connection->Lock, &LockHandle); if (Status != NDIS_STATUS_SUCCESS) { if (Connection->State == CONNECTION_STATE_ACTIVE) { Connection->CurrentReceive = Connection->PreviousReceive; Connection->ReceiveState = CONNECTION_RECEIVE_ACTIVE; NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); NB_FREE_CANCEL_LOCK( CancelLH ); // // BUGBUG: Send a resend ack? // } else { // // This aborts the current receive and // releases the connection lock. // NbiCompleteReceive( Connection, ReceiveReserved->u.RR_CO.EndOfMessage, CancelLH NB_LOCK_HANDLE_ARG(LockHandle)); } } else { Connection->CurrentReceive.Offset += BytesTransferred; Connection->CurrentReceive.MessageOffset += BytesTransferred; if (ReceiveReserved->u.RR_CO.CompleteReceive || (Connection->State != CONNECTION_STATE_ACTIVE)) { if (ReceiveReserved->u.RR_CO.EndOfMessage) { CTEAssert (!ReceiveReserved->u.RR_CO.PartialReceive); ++Connection->ReceiveSequence; ++Connection->LocalRcvSequenceMax; // harmless if NewNetbios is FALSE Connection->CurrentReceive.MessageOffset = 0; Connection->CurrentIndicateOffset = 0; } else if (Connection->NewNetbios) { if (ReceiveReserved->u.RR_CO.PartialReceive) { Connection->CurrentIndicateOffset += BytesTransferred; } else { ++Connection->ReceiveSequence; ++Connection->LocalRcvSequenceMax; Connection->CurrentIndicateOffset = 0; } } // // This sends an ack and releases the connection lock. 
// NbiCompleteReceive( Connection, ReceiveReserved->u.RR_CO.EndOfMessage, CancelLH NB_LOCK_HANDLE_ARG(LockHandle)); } else { NB_SYNC_SWAP_IRQL( CancelLH, LockHandle ); NB_FREE_CANCEL_LOCK( CancelLH ); Connection->ReceiveState = CONNECTION_RECEIVE_ACTIVE; if (Connection->NewNetbios) { // // A partial receive should only happen if we are // completing the receive. // CTEAssert (!ReceiveReserved->u.RR_CO.PartialReceive); ++Connection->ReceiveSequence; ++Connection->LocalRcvSequenceMax; Connection->CurrentIndicateOffset = 0; if ((Connection->CurrentReceiveNoPiggyback) || ((Device->AckWindow != 0) && (++Connection->ReceivesWithoutAck >= Device->AckWindow))) { NbiSendDataAck( Connection, NbiAckResponse NB_LOCK_HANDLE_ARG(LockHandle)); } else { NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); } } else { NbiSendDataAck( Connection, NbiAckResponse NB_LOCK_HANDLE_ARG(LockHandle)); } } } // // Free the NDIS buffer chain if we allocated one. // if (!ReceiveReserved->u.RR_CO.NoNdisBuffer) { NdisQueryPacket (Packet, NULL, NULL, &CurBuffer, NULL); while (CurBuffer) { TmpBuffer = NDIS_BUFFER_LINKAGE (CurBuffer); NdisFreeBuffer (CurBuffer); CurBuffer = TmpBuffer; } } NdisReinitializePacket (Packet); ExInterlockedPushEntrySList( &Device->ReceivePacketList, &ReceiveReserved->PoolLinkage, &NbiGlobalPoolInterlock); NbiDereferenceConnection (Connection, CREF_INDICATE); break; case RECEIVE_TYPE_DATAGRAM: CTEAssert (ReceiveReserved->TransferInProgress); ReceiveReserved->TransferInProgress = FALSE; ReceiveBuffer = ReceiveReserved->u.RR_DG.ReceiveBuffer; // // Free the packet used for the transfer. // ReceiveReserved->u.RR_DG.ReceiveBuffer = NULL; NdisReinitializePacket (Packet); ExInterlockedPushEntrySList( &Device->ReceivePacketList, &ReceiveReserved->PoolLinkage, &NbiGlobalPoolInterlock); // // If it succeeded then queue it for indication, // otherwise free the receive buffer also. 
// if (Status == STATUS_SUCCESS) { ReceiveBuffer->DataLength = BytesTransferred; NB_INSERT_HEAD_LIST( &Device->ReceiveDatagrams, &ReceiveBuffer->WaitLinkage, &Device->Lock); } else { Address = ReceiveBuffer->Address; #if defined(_PNP_POWER) NbiPushReceiveBuffer ( ReceiveBuffer ); #else NB_PUSH_ENTRY_LIST( &Device->ReceiveBufferList, &ReceiveBuffer->PoolLinkage, &Device->Lock); #endif _PNP_POWER NbiDereferenceAddress (Address, AREF_FIND); } break; case RECEIVE_TYPE_ADAPTER_STATUS: CTEAssert (ReceiveReserved->TransferInProgress); ReceiveReserved->TransferInProgress = FALSE; AdapterStatusRequest = ReceiveReserved->u.RR_AS.Request; // // Free the packet used for the transfer. // NdisReinitializePacket (Packet); ExInterlockedPushEntrySList( &Device->ReceivePacketList, &ReceiveReserved->PoolLinkage, &NbiGlobalPoolInterlock); // // Complete the request. // if (Status == STATUS_SUCCESS) { // // REQUEST_STATUS() is already to set to SUCCESS or // BUFFER_OVERFLOW based on whether the buffer was // big enough. // REQUEST_INFORMATION(AdapterStatusRequest) = BytesTransferred; } else { REQUEST_INFORMATION(AdapterStatusRequest) = 0; REQUEST_STATUS(AdapterStatusRequest) = STATUS_UNEXPECTED_NETWORK_ERROR; } NbiCompleteRequest (AdapterStatusRequest); NbiFreeRequest (Device, AdapterStatusRequest); NbiDereferenceDevice (Device, DREF_STATUS_QUERY); break; } } /* NbiTransferDataComplete */ VOID NbiAcknowledgeReceive( IN PCONNECTION Connection IN NB_LOCK_HANDLE_PARAM(LockHandle) ) /*++ Routine Description: This routine is called when a receive needs to be acked to the remote. It either sends a data ack or queues up a piggyback ack request. NOTE: THIS FUNCTION IS CALLED WITH THE CONNECTION LOCK HELD AND RETURNS WITH IT RELEASED. Arguments: Connection - Pointer to the connection. LockHandle - The handle with which Connection->Lock was acquired. Return Value: None. 
--*/ { PDEVICE Device = NbiDevice; if (Connection->NewNetbios) { // // CurrentReceiveNoPiggyback is based on the bits he // set in his frame, NoPiggybackHeuristic is based on // guesses about the traffic pattern, it is set to // TRUE if we think we should not piggyback. // if ((!Device->EnablePiggyBackAck) || (Connection->CurrentReceiveNoPiggyback) || (Connection->PiggybackAckTimeout) || (Connection->NoPiggybackHeuristic)) { // // This releases the lock. // NbiSendDataAck( Connection, NbiAckResponse NB_LOCK_HANDLE_ARG(LockHandle)); } else { if (!Connection->DataAckPending) { NB_DEFINE_LOCK_HANDLE (LockHandle1) // // Some stacks can have multiple messages // outstanding, so we may already have an // ack queued. // Connection->DataAckTimeouts = 0; Connection->DataAckPending = TRUE; ++Device->Statistics.PiggybackAckQueued; if (!Connection->OnDataAckQueue) { NB_SYNC_GET_LOCK (&Device->TimerLock, &LockHandle1); if (!Connection->OnDataAckQueue) { Connection->OnDataAckQueue = TRUE; InsertTailList (&Device->DataAckConnections, &Connection->DataAckLinkage); } if (!Device->DataAckActive) { NbiStartShortTimer (Device); Device->DataAckActive = TRUE; } NB_SYNC_FREE_LOCK (&Device->TimerLock, LockHandle1); } // // Clear this, since a message ack resets the count. // Connection->ReceivesWithoutAck = 0; } NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); } } else { // // This releases the lock. // NbiSendDataAck( Connection, NbiAckResponse NB_LOCK_HANDLE_ARG(LockHandle)); } } VOID NbiCompleteReceive( IN PCONNECTION Connection, IN BOOLEAN EndOfMessage, IN CTELockHandle CancelLH IN NB_LOCK_HANDLE_PARAM(LockHandle) ) /*++ Routine Description: This routine is called when we have filled up a receive request and need to complete it. NOTE: THIS FUNCTION IS CALLED WITH THE CONNECTION LOCK HELD AND RETURNS WITH IT RELEASED. THIS ROUTINE ALSO HOLDS CANCEL SPIN LOCK WHEN IT IS CALLED AND RELEASES IT WHEN IT RETURNS. Arguments: Connection - Pointer to the connection. 
EndOfMessage - BOOLEAN set to true if the message end was received. LockHandle - The handle with which Connection->Lock was acquired. Return Value: None. --*/ { PREQUEST Request; PDEVICE Device = NbiDevice; // // Complete the current receive request. If the connection // has shut down then we complete it right here, otherwise // we queue it for completion in the receive complete // handler. // Request = Connection->ReceiveRequest; IoSetCancelRoutine (Request, (PDRIVER_CANCEL)NULL); NB_SYNC_SWAP_IRQL( CancelLH, LockHandle ); NB_FREE_CANCEL_LOCK( CancelLH ); if (Connection->State != CONNECTION_STATE_ACTIVE) { Connection->ReceiveRequest = NULL; // StopConnection won't do this REQUEST_STATUS(Request) = Connection->Status; NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); NB_DEBUG2 (RECEIVE, ("Completing receive %lx (%d), status %lx\n", Request, REQUEST_INFORMATION(Request), REQUEST_STATUS(Request))); NbiCompleteRequest (Request); NbiFreeRequest (NbiDevice, Request); ++Connection->ConnectionInfo.ReceiveErrors; NbiDereferenceConnection (Connection, CREF_RECEIVE); } else { REQUEST_INFORMATION (Request) = Connection->CurrentReceive.Offset; if (EndOfMessage) { REQUEST_STATUS(Request) = STATUS_SUCCESS; } else { REQUEST_STATUS(Request) = STATUS_BUFFER_OVERFLOW; } // // If we indicated to the client, adjust this down by the // amount of data taken, when it hits zero we can reindicate. // if (Connection->ReceiveUnaccepted) { NB_DEBUG2 (RECEIVE, ("Moving Unaccepted %d down by %d\n", Connection->ReceiveUnaccepted, Connection->CurrentReceive.Offset)); if (Connection->CurrentReceive.Offset >= Connection->ReceiveUnaccepted) { Connection->ReceiveUnaccepted = 0; } else { Connection->ReceiveUnaccepted -= Connection->CurrentReceive.Offset; } } // // BUGBUG: Check whether to activate another receive? // Connection->ReceiveState = CONNECTION_RECEIVE_PENDING; Connection->ReceiveRequest = NULL; // // This releases the lock. 
// if (Connection->NewNetbios) { if (EndOfMessage) { NbiAcknowledgeReceive( Connection NB_LOCK_HANDLE_ARG(LockHandle)); } else { if (Connection->CurrentIndicateOffset != 0) { NbiSendDataAck( Connection, NbiAckResend NB_LOCK_HANDLE_ARG(LockHandle)); } else if ((Connection->CurrentReceiveNoPiggyback) || ((Device->AckWindow != 0) && (++Connection->ReceivesWithoutAck >= Device->AckWindow))) { NbiSendDataAck( Connection, NbiAckResponse NB_LOCK_HANDLE_ARG(LockHandle)); } else { NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); } } } else { NbiSendDataAck( Connection, EndOfMessage ? NbiAckResponse : NbiAckResend NB_LOCK_HANDLE_ARG(LockHandle)); } ++Connection->ConnectionInfo.ReceivedTsdus; // // This will complete the request inside ReceiveComplete, // dereference the connection, and set the state to IDLE. // NB_INSERT_TAIL_LIST( &Device->ReceiveCompletionQueue, REQUEST_LINKAGE (Request), &Device->Lock); } } /* NbiCompleteReceive */ NTSTATUS NbiTdiReceive( IN PDEVICE Device, IN PREQUEST Request ) /*++ Routine Description: This routine does a receive on an active connection. Arguments: Device - The netbios device. Request - The request describing the receive. Return Value: NTSTATUS - status of operation. --*/ { PCONNECTION Connection; NB_DEFINE_SYNC_CONTEXT (SyncContext) NB_DEFINE_LOCK_HANDLE (LockHandle) CTELockHandle CancelLH; // // First make sure the connection is valid. // Connection = (PCONNECTION)REQUEST_OPEN_CONTEXT(Request); if (Connection->Type == NB_CONNECTION_SIGNATURE) { NB_GET_CANCEL_LOCK( &CancelLH ); NB_BEGIN_SYNC (&SyncContext); NB_SYNC_GET_LOCK (&Connection->Lock, &LockHandle); // // Make sure the connection is in a good state. // if (Connection->State == CONNECTION_STATE_ACTIVE) { // // If the connection is idle then send it now, otherwise // queue it. 
// if (!Request->Cancel) { IoSetCancelRoutine (Request, NbiCancelReceive); NB_SYNC_SWAP_IRQL( CancelLH, LockHandle ); NB_FREE_CANCEL_LOCK( CancelLH ); NbiReferenceConnectionSync (Connection, CREF_RECEIVE); // // Insert this in our queue, then see if we need // to wake up the remote. // REQUEST_SINGLE_LINKAGE(Request) = NULL; REQUEST_LIST_INSERT_TAIL(&Connection->ReceiveQueue, Request); if (Connection->ReceiveState != CONNECTION_RECEIVE_W_RCV) { NB_DEBUG2 (RECEIVE, ("Receive %lx, connection %lx idle\n", Request, Connection)); NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); } else { NB_DEBUG2 (RECEIVE, ("Receive %lx, connection %lx awakened\n", Request, Connection)); Connection->ReceiveState = CONNECTION_RECEIVE_IDLE; // // This releases the lock. // if (Connection->NewNetbios) { Connection->LocalRcvSequenceMax = (USHORT) (Connection->ReceiveSequence + Connection->ReceiveWindowSize - 1); } NbiSendDataAck( Connection, NbiAckResend NB_LOCK_HANDLE_ARG(LockHandle)); } NB_END_SYNC (&SyncContext); return STATUS_PENDING; } else { NB_DEBUG2 (RECEIVE, ("Receive %lx, connection %lx cancelled\n", Request, Connection)); NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); NB_END_SYNC (&SyncContext); NB_FREE_CANCEL_LOCK( CancelLH ); return STATUS_CANCELLED; } } else { NB_DEBUG2 (RECEIVE, ("Receive connection %lx state is %d\n", Connection, Connection->State)); NB_SYNC_FREE_LOCK (&Connection->Lock, LockHandle); NB_END_SYNC (&SyncContext); NB_FREE_CANCEL_LOCK( CancelLH ); return STATUS_INVALID_CONNECTION; } } else { NB_DEBUG (RECEIVE, ("Receive connection %lx has bad signature\n", Connection)); return STATUS_INVALID_CONNECTION; } } /* NbiTdiReceive */ VOID NbiCancelReceive( IN PDEVICE_OBJECT DeviceObject, IN PIRP Irp ) /*++ Routine Description: This routine is called by the I/O system to cancel a receive. The request is found on the connection's receive queue. NOTE: This routine is called with the CancelSpinLock held and is responsible for releasing it. 
Arguments: DeviceObject - Pointer to the device object for this driver. Irp - Pointer to the request packet representing the I/O request. Return Value: none. --*/ { PCONNECTION Connection; PREQUEST Request = (PREQUEST)Irp; NB_DEFINE_LOCK_HANDLE (LockHandle) NB_DEFINE_SYNC_CONTEXT (SyncContext) CTEAssert ((REQUEST_MAJOR_FUNCTION(Request) == IRP_MJ_INTERNAL_DEVICE_CONTROL) && (REQUEST_MINOR_FUNCTION(Request) == TDI_RECEIVE)); CTEAssert (REQUEST_OPEN_TYPE(Request) == (PVOID)TDI_CONNECTION_FILE); Connection = (PCONNECTION)REQUEST_OPEN_CONTEXT(Request); // // Just stop the connection, that will tear down any // receives. // // BUGBUG: Do we care about cancelling non-active // receives without stopping the connection?? // // BUGBUG: This routine is the same as NbiCancelSend, // so if we don't make it more specific, merge the two. // NbiReferenceConnectionSync (Connection, CREF_CANCEL); IoReleaseCancelSpinLock (Irp->CancelIrql); NB_BEGIN_SYNC (&SyncContext); NB_SYNC_GET_LOCK (&Connection->Lock, &LockHandle); // // This frees the lock, cancels any sends, etc. // NbiStopConnection( Connection, STATUS_CANCELLED NB_LOCK_HANDLE_ARG (LockHandle)); NbiDereferenceConnection (Connection, CREF_CANCEL); NB_END_SYNC (&SyncContext); } /* NbiCancelReceive */
{ "pile_set_name": "Github" }
var castPath = require('./_castPath'), isFunction = require('./isFunction'), isKey = require('./_isKey'), toKey = require('./_toKey'); /** * This method is like `_.get` except that if the resolved value is a * function it's invoked with the `this` binding of its parent object and * its result is returned. * * @static * @since 0.1.0 * @memberOf _ * @category Object * @param {Object} object The object to query. * @param {Array|string} path The path of the property to resolve. * @param {*} [defaultValue] The value returned for `undefined` resolved values. * @returns {*} Returns the resolved value. * @example * * var object = { 'a': [{ 'b': { 'c1': 3, 'c2': _.constant(4) } }] }; * * _.result(object, 'a[0].b.c1'); * // => 3 * * _.result(object, 'a[0].b.c2'); * // => 4 * * _.result(object, 'a[0].b.c3', 'default'); * // => 'default' * * _.result(object, 'a[0].b.c3', _.constant('default')); * // => 'default' */ function result(object, path, defaultValue) { path = isKey(path, object) ? [path] : castPath(path); var index = -1, length = path.length; // Ensure the loop is entered when path is empty. if (!length) { object = undefined; length = 1; } while (++index < length) { var value = object == null ? undefined : object[toKey(path[index])]; if (value === undefined) { index = length; value = defaultValue; } object = isFunction(value) ? value.call(object) : value; } return object; } module.exports = result;
{ "pile_set_name": "Github" }
# libiconv.la - a libtool library file # Generated by libtool (GNU libtool) 2.4 # # Please DO NOT delete this file! # It is necessary for linking the library. # The name that we can dlopen(3). dlname='../bin/libiconv-2.dll' # Names of this library. library_names='libiconv.dll.a' # The name of the static archive. old_library='' # Linker flags that can not go in dependency_libs. inherited_linker_flags='' # Libraries that this one depends upon. dependency_libs='' # Names of additional weak libraries provided by this library weak_library_names='' # Version information for libiconv. current=7 age=5 revision=1 # Is this an already installed library? installed=yes # Should we warn about portability when linking against -modules? shouldnotlink=no # Files to dlopen/dlpreopen dlopen='' dlpreopen='' # Directory that this library needs to be installed in: libdir='/mingw/lib'
{ "pile_set_name": "Github" }
-- Copyright (c) 2013 Snowplow Analytics Ltd. All rights reserved. -- -- This program is licensed to you under the Apache License Version 2.0, -- and you may not use this file except in compliance with the Apache License Version 2.0. -- You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. -- -- Unless required by applicable law or agreed to in writing, -- software distributed under the Apache License Version 2.0 is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. -- -- Version: Ports version 0.1.0 to version 0.2.0 -- URL: - -- -- Authors: Alex Dean -- Copyright: Copyright (c) 2013 Snowplow Analytics Ltd -- License: Apache License Version 2.0 -- First rename the existing table (don't delete it) ALTER TABLE events DROP CONSTRAINT event_id_pk; ALTER TABLE events RENAME TO events_010; -- Now create the new table (copy-and-pasted from table-def.sql) CREATE TABLE events ( -- App app_id varchar(255) encode text255, platform varchar(255) encode text255, -- Date/time collector_tstamp timestamp not null, dvce_tstamp timestamp, -- Event event varchar(128) encode text255, event_vendor varchar(128) encode text32k not null, event_id varchar(38) not null unique, txn_id int, -- Versioning v_tracker varchar(100) encode text255, v_collector varchar(100) encode text255 not null, v_etl varchar(100) encode text255 not null, -- User and visit user_id varchar(255) encode runlength, user_ipaddress varchar(19) encode runlength, user_fingerprint varchar(50) encode runlength, domain_userid varchar(16), domain_sessionidx smallint, network_userid varchar(38), -- Location geo_country char(2), -- New in 0.2.0 geo_region char(2), -- New in 0.2.0 geo_city varchar(75), -- New in 0.2.0 geo_zipcode varchar(15), -- New in 0.2.0 geo_latitude double precision, -- New in 0.2.0 
geo_longitude double precision, -- New in 0.2.0 -- Page page_title varchar(2000), -- Page URL components page_urlscheme varchar(16) encode text255, page_urlhost varchar(255) encode text255, page_urlport smallint, page_urlpath varchar(1000) encode text32k, page_urlquery varchar(3000), page_urlfragment varchar(255), -- Referrer URL components refr_urlscheme varchar(16) encode text255, refr_urlhost varchar(255) encode text255, refr_urlport smallint, refr_urlpath varchar(1000) encode text32k, refr_urlquery varchar(3000), refr_urlfragment varchar(255), -- Referrer details refr_medium varchar(25) encode text255, refr_source varchar(50) encode text255, refr_term varchar(255) encode raw, -- Marketing mkt_medium varchar(255) encode text255, mkt_source varchar(255) encode text255, mkt_term varchar(255) encode raw, mkt_content varchar(500) encode raw, mkt_campaign varchar(255) encode text32k, -- Custom structured event se_category varchar(255) encode text255, -- Renamed from ev_ se_action varchar(255) encode text255, -- Renamed from ev_ se_label varchar(255) encode text32k, -- Renamed from ev_ se_property varchar(255) encode text32k, -- Renamed from ev_ se_value float, -- Renamed from ev_ -- Ecommerce tr_orderid varchar(255) encode raw, tr_affiliation varchar(255) encode text255, tr_total dec(18,2), tr_tax dec(18,2), tr_shipping dec(18,2), tr_city varchar(255) encode text32k, tr_state varchar(255) encode text32k, tr_country varchar(255) encode text32k, ti_orderid varchar(255) encode raw, ti_sku varchar(255) encode text32k, ti_name varchar(255) encode text32k, ti_category varchar(255) encode text255, ti_price dec(18,2), ti_quantity int, -- Page ping pp_xoffset_min integer, pp_xoffset_max integer, pp_yoffset_min integer, pp_yoffset_max integer, -- User Agent useragent varchar(1000) encode text32k, -- Browser br_name varchar(50) encode text255, br_family varchar(50) encode text255, br_version varchar(50) encode text255, br_type varchar(50) encode text255, br_renderengine 
varchar(50) encode text255, br_lang varchar(255) encode text255, br_features_pdf boolean, br_features_flash boolean, br_features_java boolean, br_features_director boolean, br_features_quicktime boolean, br_features_realplayer boolean, br_features_windowsmedia boolean, br_features_gears boolean , br_features_silverlight boolean, br_cookies boolean, br_colordepth varchar(12) encode text255, br_viewwidth integer, br_viewheight integer, -- Operating System os_name varchar(50) encode text255, os_family varchar(50) encode text255, os_manufacturer varchar(50) encode text255, os_timezone varchar(255) encode text255, -- Device/Hardware dvce_type varchar(50) encode text255, dvce_ismobile boolean, dvce_screenwidth integer, dvce_screenheight integer, -- Document doc_charset varchar(128) encode text255, doc_width integer, doc_height integer, CONSTRAINT event_id_pk PRIMARY KEY(event_id) ) DISTSTYLE KEY DISTKEY (domain_userid) SORTKEY (collector_tstamp); -- Finally copy all the old data into the new format INSERT INTO events SELECT -- App app_id, platform, -- Date/time collector_tstamp, dvce_tstamp, -- Event event, event_vendor, event_id, txn_id, -- Versioning v_tracker, v_collector, v_etl, -- User and visit user_id, user_ipaddress, user_fingerprint, domain_userid, domain_sessionidx, network_userid, -- Location null AS geo_country, -- Placeholder null AS geo_region, -- Placeholder null AS geo_city, -- Placeholder null AS geo_zipcode, -- Placeholder null AS geo_latitude, -- Placeholder null AS geo_longitude, -- Placeholder -- Page page_title, -- Page URL components page_urlscheme, page_urlhost, page_urlport, page_urlpath, page_urlquery, page_urlfragment, -- Referrer URL components refr_urlscheme, refr_urlhost, refr_urlport, refr_urlpath, refr_urlquery, refr_urlfragment, -- Referrer details refr_medium, refr_source, refr_term, -- Marketing mkt_medium, mkt_source, mkt_term, mkt_content, mkt_campaign, -- Custom Event ev_category AS se_category, -- Renamed ev_action AS se_action, -- 
Renamed ev_label AS se_label, -- Renamed ev_property AS se_property, -- Renamed ev_value AS se_value, -- Renamed -- Ecommerce tr_orderid, tr_affiliation, tr_total, tr_tax, tr_shipping, tr_city, tr_state, tr_country, ti_orderid, ti_sku, ti_name, ti_category, ti_price, ti_quantity, -- Page ping pp_xoffset_min, pp_xoffset_max, pp_yoffset_min, pp_yoffset_max, -- User Agent useragent, -- Browser br_name, br_family, br_version, br_type, br_renderengine, br_lang, br_features_pdf, br_features_flash, br_features_java, br_features_director, br_features_quicktime, br_features_realplayer, br_features_windowsmedia, br_features_gears, br_features_silverlight, br_cookies, br_colordepth, br_viewwidth, br_viewheight, -- Operating System os_name, os_family, os_manufacturer, os_timezone, -- Device/Hardware dvce_type, dvce_ismobile, dvce_screenwidth, dvce_screenheight, -- Document doc_charset, doc_width, doc_height FROM events_010;
{ "pile_set_name": "Github" }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/os.manage.R \name{os.sleep} \alias{os.sleep} \title{Sleeps the operating system (Windows) through a shell command} \usage{ os.sleep(s = 0, m = 0, h = 0, first_turn_hibernate_off = TRUE) } \arguments{ \item{s}{time to wait before shutting down (in seconds), added to m and h; passed to \code{\link[base]{Sys.sleep}}} \item{m}{time to wait before shutting down (in minutes), added to s and h; passed to \code{\link[base]{Sys.sleep}}} \item{h}{time to wait before shutting down (in hours), added to s and m; passed to \code{\link[base]{Sys.sleep}}} \item{first_turn_hibernate_off}{The command rundll32.exe powrprof.dll,SetSuspendState 0,1,0 for sleep is correct - however, it will hibernate instead of sleep if you don't turn the hibernation off. I'm not sure this is true, but that's what is explained in the linke (see bellow)} } \value{ The status code of \code{\link[base]{shell}}. } \description{ This sleeps Windows after set amount of time. } \examples{ \dontrun{ ## when your code is extremely time-consuming, # you may need this function to run at the end of # the simulation. os.sleep() } } \references{ \url{http://superuser.com/questions/42124/how-can-i-put-the-computer-to-sleep-from-command-prompt-run-menu} , \url{http://www.howtogeek.com/howto/windows-vista/quick-tip-create-shutdown-restart-lock-icons-in-windows-vista/}, \url{http://superuser.com/a/135450/28536} } \seealso{ \code{\link[base]{system}},\code{\link[base]{shell}}, \code{\link[base]{Sys.sleep}}, \code{\link{is.windows}}, \code{\link{os.shutdown}}, \code{\link{os.sleep}}, \code{\link{os.hibernate}}, \code{\link{os.lock}}, \code{\link{os.restart}} } \author{ Tal Galili }
{ "pile_set_name": "Github" }
### ### DO NOT MODIFY THIS FILE. THIS FILE HAS BEEN AUTOGENERATED ### FROM circleci/php:7.2.9-fpm-stretch # install java 8 # RUN if grep -q Debian /etc/os-release && grep -q jessie /etc/os-release; then \ echo "deb http://http.us.debian.org/debian/ jessie-backports main" | sudo tee -a /etc/apt/sources.list \ && echo "deb-src http://http.us.debian.org/debian/ jessie-backports main" | sudo tee -a /etc/apt/sources.list \ && sudo apt-get update; sudo apt-get install -y -t jessie-backports openjdk-8-jre openjdk-8-jre-headless openjdk-8-jdk openjdk-8-jdk-headless \ ; elif grep -q Ubuntu /etc/os-release && grep -q Trusty /etc/os-release; then \ echo "deb http://ppa.launchpad.net/openjdk-r/ppa/ubuntu trusty main" | sudo tee -a /etc/apt/sources.list \ && echo "deb-src http://ppa.launchpad.net/openjdk-r/ppa/ubuntu trusty main" | sudo tee -a /etc/apt/sources.list \ && sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-key DA1A4A13543B466853BAF164EB9B1D8886F44E2A \ && sudo apt-get update; sudo apt-get install -y openjdk-8-jre openjdk-8-jre-headless openjdk-8-jdk openjdk-8-jdk-headless \ ; else \ sudo apt-get update; sudo apt-get install -y openjdk-8-jre openjdk-8-jre-headless openjdk-8-jdk openjdk-8-jdk-headless \ ; fi \ && sudo apt-get install -y bzip2 libgconf-2-4 # for extracting firefox and running chrome, respectively # install firefox # RUN FIREFOX_URL="https://download.mozilla.org/?product=firefox-latest-ssl&os=linux64&lang=en-US" \ && ACTUAL_URL=$(curl -Ls -o /dev/null -w %{url_effective} $FIREFOX_URL) \ && curl --silent --show-error --location --fail --retry 3 --output /tmp/firefox.tar.bz2 $ACTUAL_URL \ && sudo tar -xvjf /tmp/firefox.tar.bz2 -C /opt \ && sudo ln -s /opt/firefox/firefox /usr/local/bin/firefox \ && sudo apt-get install -y libgtk3.0-cil-dev libasound2 libasound2 libdbus-glib-1-2 libdbus-1-3 \ && rm -rf /tmp/firefox.* \ && firefox --version # install geckodriver RUN export GECKODRIVER_LATEST_RELEASE_URL=$(curl 
https://api.github.com/repos/mozilla/geckodriver/releases/latest | jq -r ".assets[] | select(.name | test(\"linux64\")) | .browser_download_url") \ && curl --silent --show-error --location --fail --retry 3 --output /tmp/geckodriver_linux64.tar.gz "$GECKODRIVER_LATEST_RELEASE_URL" \ && cd /tmp \ && tar xf geckodriver_linux64.tar.gz \ && rm -rf geckodriver_linux64.tar.gz \ && sudo mv geckodriver /usr/local/bin/geckodriver \ && sudo chmod +x /usr/local/bin/geckodriver \ && geckodriver --version # install chrome RUN curl --silent --show-error --location --fail --retry 3 --output /tmp/google-chrome-stable_current_amd64.deb https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb \ && (sudo dpkg -i /tmp/google-chrome-stable_current_amd64.deb || sudo apt-get -fy install) \ && rm -rf /tmp/google-chrome-stable_current_amd64.deb \ && sudo sed -i 's|HERE/chrome"|HERE/chrome" --disable-setuid-sandbox --no-sandbox|g' \ "/opt/google/chrome/google-chrome" \ && google-chrome --version RUN export CHROMEDRIVER_RELEASE=$(curl --location --fail --retry 3 http://chromedriver.storage.googleapis.com/LATEST_RELEASE) \ && curl --silent --show-error --location --fail --retry 3 --output /tmp/chromedriver_linux64.zip "http://chromedriver.storage.googleapis.com/$CHROMEDRIVER_RELEASE/chromedriver_linux64.zip" \ && cd /tmp \ && unzip chromedriver_linux64.zip \ && rm -rf chromedriver_linux64.zip \ && sudo mv chromedriver /usr/local/bin/chromedriver \ && sudo chmod +x /usr/local/bin/chromedriver \ && chromedriver --version # start xvfb automatically to avoid needing to express in circle.yml ENV DISPLAY :99 RUN printf '#!/bin/sh\nXvfb :99 -screen 0 1280x1024x24 &\nexec "$@"\n' > /tmp/entrypoint \ && chmod +x /tmp/entrypoint \ && sudo mv /tmp/entrypoint /docker-entrypoint.sh # ensure that the build agent doesn't override the entrypoint LABEL com.circleci.preserve-entrypoint=true ENTRYPOINT ["/docker-entrypoint.sh"] CMD ["/bin/sh"]
{ "pile_set_name": "Github" }
<?php // autoload_static.php @generated by Composer namespace Composer\Autoload; class ComposerStaticInit2db7e2fa11a5b7e8309e5b34a51b960e { public static $files = array ( '7b11c4dc42b3b3023073cb14e519683c' => __DIR__ . '/..' . '/ralouphie/getallheaders/src/getallheaders.php', '0e6d7bf4a5811bfa5cf40c5ccd6fae6a' => __DIR__ . '/..' . '/symfony/polyfill-mbstring/bootstrap.php', 'a0edc8309cc5e1d60e3047b5df6b7052' => __DIR__ . '/..' . '/guzzlehttp/psr7/src/functions_include.php', 'c964ee0ededf28c96ebd9db5099ef910' => __DIR__ . '/..' . '/guzzlehttp/promises/src/functions_include.php', 'd767e4fc2dc52fe66584ab8c6684783e' => __DIR__ . '/..' . '/adbario/php-dot-notation/src/helpers.php', '65fec9ebcfbb3cbb4fd0d519687aea01' => __DIR__ . '/..' . '/danielstjules/stringy/src/Create.php', '37a3dc5111fe8f707ab4c132ef1dbc62' => __DIR__ . '/..' . '/guzzlehttp/guzzle/src/functions_include.php', 'b067bc7112e384b61c701452d53a14a8' => __DIR__ . '/..' . '/mtdowling/jmespath.php/src/JmesPath.php', '66453932bc1be9fb2f910a27947d11b6' => __DIR__ . '/..' . '/alibabacloud/client/src/Functions.php', ); public static $prefixLengthsPsr4 = array ( 'c' => array ( 'clagiordano\\weblibs\\configmanager\\' => 34, ), 'S' => array ( 'Symfony\\Polyfill\\Mbstring\\' => 26, 'Stringy\\' => 8, ), 'Q' => array ( 'Qcloud\\Sms\\' => 11, ), 'P' => array ( 'Psr\\Http\\Message\\' => 17, 'PHPMailer\\PHPMailer\\' => 20, ), 'O' => array ( 'OSS\\' => 4, ), 'J' => array ( 'JmesPath\\' => 9, ), 'I' => array ( 'Intervention\\Image\\' => 19, ), 'G' => array ( 'GuzzleHttp\\Psr7\\' => 16, 'GuzzleHttp\\Promise\\' => 19, 'GuzzleHttp\\' => 11, ), 'A' => array ( 'AlibabaCloud\\Client\\' => 20, 'Adbar\\' => 6, ), ); public static $prefixDirsPsr4 = array ( 'clagiordano\\weblibs\\configmanager\\' => array ( 0 => __DIR__ . '/..' . '/clagiordano/weblibs-configmanager/src', ), 'Symfony\\Polyfill\\Mbstring\\' => array ( 0 => __DIR__ . '/..' . '/symfony/polyfill-mbstring', ), 'Stringy\\' => array ( 0 => __DIR__ . '/..' . 
'/danielstjules/stringy/src', ), 'Qcloud\\Sms\\' => array ( 0 => __DIR__ . '/..' . '/qcloudsms/qcloudsms_php/src', ), 'Psr\\Http\\Message\\' => array ( 0 => __DIR__ . '/..' . '/psr/http-message/src', ), 'PHPMailer\\PHPMailer\\' => array ( 0 => __DIR__ . '/..' . '/phpmailer/phpmailer/src', ), 'OSS\\' => array ( 0 => __DIR__ . '/..' . '/aliyuncs/oss-sdk-php/src/OSS', ), 'JmesPath\\' => array ( 0 => __DIR__ . '/..' . '/mtdowling/jmespath.php/src', ), 'Intervention\\Image\\' => array ( 0 => __DIR__ . '/..' . '/intervention/image/src/Intervention/Image', ), 'GuzzleHttp\\Psr7\\' => array ( 0 => __DIR__ . '/..' . '/guzzlehttp/psr7/src', ), 'GuzzleHttp\\Promise\\' => array ( 0 => __DIR__ . '/..' . '/guzzlehttp/promises/src', ), 'GuzzleHttp\\' => array ( 0 => __DIR__ . '/..' . '/guzzlehttp/guzzle/src', ), 'AlibabaCloud\\Client\\' => array ( 0 => __DIR__ . '/..' . '/alibabacloud/client/src', ), 'Adbar\\' => array ( 0 => __DIR__ . '/..' . '/adbario/php-dot-notation/src', ), ); public static function getInitializer(ClassLoader $loader) { return \Closure::bind(function () use ($loader) { $loader->prefixLengthsPsr4 = ComposerStaticInit2db7e2fa11a5b7e8309e5b34a51b960e::$prefixLengthsPsr4; $loader->prefixDirsPsr4 = ComposerStaticInit2db7e2fa11a5b7e8309e5b34a51b960e::$prefixDirsPsr4; }, null, ClassLoader::class); } }
{ "pile_set_name": "Github" }
{ "type": "minecraft:block", "pools": [ { "rolls": 1, "entries": [ { "type": "minecraft:item", "name": "immersiveengineering:alu_scaffolding_grate_top" } ], "conditions": [ { "condition": "minecraft:survives_explosion" } ] } ] }
{ "pile_set_name": "Github" }
import * as M from '@meetalva/message'; import { MessageHandlerContext, MessageHandler } from '../create-handlers'; import * as Types from '@meetalva/types'; export function deleteSelected({ app, store }: MessageHandlerContext): MessageHandler<M.DeleteSelected> { return m => { if (app.getHasFocusedInput()) { return; } const project = store.getProject(); if (!project) { return; } switch (project.getFocusedItemType()) { case Types.ItemType.Element: store.removeSelectedElement(); break; case Types.ItemType.Page: store.removeSelectedPage(); } }; }
{ "pile_set_name": "Github" }
:tocdepth: 3 ==================================== Cyrus IMAP 3.0.0 rc2 Release Notes ==================================== .. WARNING:: Cyrus IMAP 3.0 is under active development, and subject to change. Do **NOT** use this version unless you're a developer of sorts. Download from GitHub: * https://github.com/cyrusimap/cyrus-imapd/releases/download/cyrus-imapd-3.0.0-rc2/cyrus-imapd-3.0.0-rc2.tar.gz * https://github.com/cyrusimap/cyrus-imapd/releases/download/cyrus-imapd-3.0.0-rc2/cyrus-imapd-3.0.0-rc2.tar.gz.sig .. _relnotes-3.0.0-rc2-changes: Major changes since the 2.5.x series ==================================== * The source repository has moved to :ref:`GitHub <github-guide>`, Bugzilla and Phabricator have been deprecated. Their issue lists are being imported into GitHub. * Support added for FastMail-style conversations (threaded messages). (See the ``conversations`` options in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>`) * Optional Xapian_ integration for faster and shinier search. Note that this requires custom patches to Xapian, in this release. (See the ``search_engine`` option in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>`, and ``doc/README.xapian`` in the source distribution.)) Compiling Xapian support requires gcc 4.9 or later. * Archive support has arrived! Requires addition of an archive partition. (See ``archive_*`` options in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>`) * Basic JMAP_ support. (See ``httpmodules`` in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>`) * ClamAV_ integration for AntiVirus support is now working again. * Dropped support for Berkeley DB. * The handling of configure options has changed, dropping support for ``--with-cyrus-prefix`` and ``--with-service-path`` in favour of the more conventional ``--prefix``, ``--bindir``, ``--sbindir``, and ``--libexecdir``. 
* Binaries executed by the :ref:`master(8) <imap-reference-manpages-systemcommands-master>` service process are now installed to ``--libexecdir``. Other binaries are installed to ``--bindir`` and/or ``--sbindir``. The ``make installbinsymlinks`` target can be used to set up symlinks to everything in ``--bindir``, if you need that in your environment. * Added support for replicating mailboxes/users to a particular partition. ``sync_client`` now accepts a ``-p`` option. * The ``defaultdomain`` setting in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>` now defaults to "internal" (was: NULL) * Added experimental backup service. See :ref:`Cyrus Backups <cyrus-backups>` * Support for Apple's Push service (XAPPLEPUSH). See :ref:`Cyrus Eventsource <cyrus-eventsource>` * Sieve now supports special use folders. See :ref:`Cyrus Sieve <cyrus-sieve-specialuse>` * Sieve now supports the following :ref:`extensions <cyrus-sieve-extensions>`: * :rfc:`5490` Checking Mailbox Status and Accessing Mailbox Metadata * :rfc:`5229` Variables * Sieve bytecode is now automatically recompiled when it is detected to be missing or out of date * New numeric header available for sorting: X-Spam-Score * Added IMIP notification support to :ref:`notifyd(8) <imap-reference-manpages-systemcommands-notifyd>`. See ``imipnotifier`` in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>` * Optimised CRC32 implementation * Added support for reverse ACLs, enabling faster LIST response. See ``reverseacls`` in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>` * Added cross-domain sharing support. See ``crossdomains`` and ``crossdomains_onlyother`` in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>` * Added experimental object storage for mail data, with preliminary support for OpenIO and Caringo backends. 
See configure.ac ``--enable-objectstore``, and the ``object_storage_*``, ``openio_*`` and ``caringo_*`` options in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>`. * Configurable POP3 UIDL format for compatibility with other mail servers. Supports Courier Mail Server and Dovecot formats, in addition to Cyrus formats. See ``uidl_format`` in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>` * Under ``delete_mode: delayed``, only the 20 most recently deleted mailboxes are kept for any given name. * Documentation cleanup: we are moving toward having a single restructured text source for man pages and html/text documentation, which are pre-generated for inclusion in release tarballs. If you wish to rebuild these, or are building from a git clone in which they are not pregenerated, you will need to install ``sphinx-build``, and the perl module ``Pod::POM::View::Restructured``. The top level Makefile now has ``man`` and ``doc`` targets for building these. * Added support for OpenSSL 1.1.0. * Replication now supports incremental partial updates, so massive updates (such as mail imports) won't stall replication for other users. See ``sync_batchsize`` in :ref:`imapd.conf <imap-reference-manpages-configs-imapd.conf>` * The mailbox cache minor version has been incremented to store a GUID for each body part. Updates to default configuration ================================ * ``unixhierarchysep`` (``/``) is now ``on`` by default (instead of netnews style ``.``) * ``altnamespace`` is now ``on`` by default (was ``off``) * ``virtdomains`` is actually **still** ``off`` by default. (Previously we stated it was now defaulting to ``userid``: this was an error. ) Significant bugfixes ==================== * Lots of fixes to caldav and carddav. Includes the addition of a new daemon (calalarmd) which periodically processes a global database containing the "next" alarm for each item, and sends the relevant mboxevents. 
(See configure.ac ``--with-calalarmd``) * Replication reliability fixes. * Improved ``LIST-EXTENDED``: more imap tests now succeed. * Extensive cleanup of mailbox name handling * ``master`` now requests a sane maximum number of open file descriptors, and only complains if this isn't allowed. * Fixes to compiling on Solaris (thanks Jens Erat, Marty Lee) * Improved handling of mailbox renames during replication .. _Xapian: https://xapian.org .. _ClamAV: https://www.clamav.net .. _JMAP: http://jmap.io
{ "pile_set_name": "Github" }
Feature: Display As a developer of a system using alpaca I want my file fields to display correctly So that I can develop applications faster Scenario: Basic Field Given I am on a page with a file field Then I should see 1 "input[type='file']" tag
{ "pile_set_name": "Github" }
module namespace ns = "http://www.example.com/example"; declare collection ns:collection as node()*; declare collection ns:collection2 as node()*; declare function ns:test2() { () };
{ "pile_set_name": "Github" }
// // GitHubApiSession.swift // FluxExample // // Created by marty-suzuki on 2018/07/31. // Copyright © 2018年 marty-suzuki. All rights reserved. // import GitHub protocol GitHubApiRequestable: class { func searchRepositories(query: String, page: Int, completion: @escaping (GitHub.Result<([GitHub.Repository], GitHub.Pagination)>) -> ()) } final class GitHubApiSession: GitHubApiRequestable { static let shared = GitHubApiSession() private let session = GitHub.Session() func searchRepositories(query: String, page: Int, completion: @escaping (GitHub.Result<([GitHub.Repository], GitHub.Pagination)>) -> ()) { let request = SearchRepositoriesRequest(query: query, sort: .stars, order: .desc, page: page, perPage: nil) session.send(request) { result in switch result { case let .success(response, pagination): completion(.success((response.items, pagination))) case let .failure(error): completion(.failure(error)) } } } }
{ "pile_set_name": "Github" }
<?php /* * This file is part of the Symfony package. * * (c) Fabien Potencier <fabien@symfony.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Symfony\Component\CssSelector\XPath\Extension; use Symfony\Component\CssSelector\Exception\ExpressionErrorException; use Symfony\Component\CssSelector\Node\FunctionNode; use Symfony\Component\CssSelector\XPath\Translator; use Symfony\Component\CssSelector\XPath\XPathExpr; /** * XPath expression translator HTML extension. * * This component is a port of the Python cssselect library, * which is copyright Ian Bicking, @see https://github.com/SimonSapin/cssselect. * * @author Jean-François Simon <jeanfrancois.simon@sensiolabs.com> */ class HtmlExtension extends AbstractExtension { /** * Constructor. * * @param Translator $translator */ public function __construct(Translator $translator) { $translator ->getExtension('node') ->setFlag(NodeExtension::ELEMENT_NAME_IN_LOWER_CASE, true) ->setFlag(NodeExtension::ATTRIBUTE_NAME_IN_LOWER_CASE, true); } /** * {@inheritdoc} */ public function getPseudoClassTranslators() { return array( 'checked' => array($this, 'translateChecked'), 'link' => array($this, 'translateLink'), 'disabled' => array($this, 'translateDisabled'), 'enabled' => array($this, 'translateEnabled'), 'selected' => array($this, 'translateSelected'), 'invalid' => array($this, 'translateInvalid'), 'hover' => array($this, 'translateHover'), 'visited' => array($this, 'translateVisited'), ); } /** * {@inheritdoc} */ public function getFunctionTranslators() { return array( 'lang' => array($this, 'translateLang'), ); } /** * @param XPathExpr $xpath * * @return XPathExpr */ public function translateChecked(XPathExpr $xpath) { return $xpath->addCondition( '(@checked ' ."and (name(.) = 'input' or name(.) 
= 'command')" ."and (@type = 'checkbox' or @type = 'radio'))" ); } /** * @param XPathExpr $xpath * * @return XPathExpr */ public function translateLink(XPathExpr $xpath) { return $xpath->addCondition("@href and (name(.) = 'a' or name(.) = 'link' or name(.) = 'area')"); } /** * @param XPathExpr $xpath * * @return XPathExpr */ public function translateDisabled(XPathExpr $xpath) { return $xpath->addCondition( '(' .'@disabled and' .'(' ."(name(.) = 'input' and @type != 'hidden')" ." or name(.) = 'button'" ." or name(.) = 'select'" ." or name(.) = 'textarea'" ." or name(.) = 'command'" ." or name(.) = 'fieldset'" ." or name(.) = 'optgroup'" ." or name(.) = 'option'" .')' .') or (' ."(name(.) = 'input' and @type != 'hidden')" ." or name(.) = 'button'" ." or name(.) = 'select'" ." or name(.) = 'textarea'" .')' .' and ancestor::fieldset[@disabled]' ); // todo: in the second half, add "and is not a descendant of that fieldset element's first legend element child, if any." } /** * @param XPathExpr $xpath * * @return XPathExpr */ public function translateEnabled(XPathExpr $xpath) { return $xpath->addCondition( '(' .'@href and (' ."name(.) = 'a'" ." or name(.) = 'link'" ." or name(.) = 'area'" .')' .') or (' .'(' ."name(.) = 'command'" ." or name(.) = 'fieldset'" ." or name(.) = 'optgroup'" .')' .' and not(@disabled)' .') or (' .'(' ."(name(.) = 'input' and @type != 'hidden')" ." or name(.) = 'button'" ." or name(.) = 'select'" ." or name(.) = 'textarea'" ." or name(.) = 'keygen'" .')' .' and not (@disabled or ancestor::fieldset[@disabled])' .') or (' ."name(.) 
= 'option' and not(" .'@disabled or ancestor::optgroup[@disabled]' .')' .')' ); } /** * @param XPathExpr $xpath * @param FunctionNode $function * * @return XPathExpr * * @throws ExpressionErrorException */ public function translateLang(XPathExpr $xpath, FunctionNode $function) { $arguments = $function->getArguments(); foreach ($arguments as $token) { if (!($token->isString() || $token->isIdentifier())) { throw new ExpressionErrorException( 'Expected a single string or identifier for :lang(), got ' .implode(', ', $arguments) ); } } return $xpath->addCondition(sprintf( 'ancestor-or-self::*[@lang][1][starts-with(concat(' ."translate(@%s, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), '-')" .', %s)]', 'lang', Translator::getXpathLiteral(strtolower($arguments[0]->getValue()).'-') )); } /** * @param XPathExpr $xpath * * @return XPathExpr */ public function translateSelected(XPathExpr $xpath) { return $xpath->addCondition("(@selected and name(.) = 'option')"); } /** * @param XPathExpr $xpath * * @return XPathExpr */ public function translateInvalid(XPathExpr $xpath) { return $xpath->addCondition('0'); } /** * @param XPathExpr $xpath * * @return XPathExpr */ public function translateHover(XPathExpr $xpath) { return $xpath->addCondition('0'); } /** * @param XPathExpr $xpath * * @return XPathExpr */ public function translateVisited(XPathExpr $xpath) { return $xpath->addCondition('0'); } /** * {@inheritdoc} */ public function getName() { return 'html'; } }
{ "pile_set_name": "Github" }
testing handlerClass ^ WATextFileHandlerListing
{ "pile_set_name": "Github" }
#ifndef BOOST_ARCHIVE_BASIC_SERIALIZER_HPP #define BOOST_ARCHIVE_BASIC_SERIALIZER_HPP // MS compatible compilers support #pragma once #if defined(_MSC_VER) # pragma once #endif /////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8 // basic_serializer.hpp: extenstion of type_info required for serialization. // (C) Copyright 2002 Robert Ramey - http://www.rrsd.com . // Use, modification and distribution is subject to the Boost Software // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // See http://www.boost.org for updates, documentation, and revision history. #include <boost/assert.hpp> #include <cstddef> // NULL #include <boost/noncopyable.hpp> #include <boost/config.hpp> #include <boost/serialization/extended_type_info.hpp> #ifdef BOOST_MSVC # pragma warning(push) # pragma warning(disable : 4511 4512) #endif namespace boost { namespace archive { namespace detail { class basic_serializer : private boost::noncopyable { const boost::serialization::extended_type_info * m_eti; protected: explicit basic_serializer( const boost::serialization::extended_type_info & eti ) : m_eti(& eti) {} public: inline bool operator<(const basic_serializer & rhs) const { // can't compare address since there can be multiple eti records // for the same type in different execution modules (that is, DLLS) // leave this here as a reminder not to do this! 
// return & lhs.get_eti() < & rhs.get_eti(); return get_eti() < rhs.get_eti(); } const char * get_debug_info() const { return m_eti->get_debug_info(); } const boost::serialization::extended_type_info & get_eti() const { return * m_eti; } }; class basic_serializer_arg : public basic_serializer { public: basic_serializer_arg(const serialization::extended_type_info & eti) : basic_serializer(eti) {} }; } // namespace detail } // namespace archive } // namespace boost #ifdef BOOST_MSVC #pragma warning(pop) #endif #endif // BOOST_ARCHIVE_BASIC_SERIALIZER_HPP
{ "pile_set_name": "Github" }
header.title { position: fixed; top: 0; padding: 10px; margin-top: px; box-shadow: 0 1px 3px rgba(10, 10, 10, 0.1); width: 100%; } svg.Zi.Zi--LabelSpecial { height: 20px; right: 8px; position: relative; } div.voteup { box-shadow: 0px 0px 4px 4px antiquewhite; padding: 12px; } img.content_image.lazy { display: none; } svg.Icon.ZhihuLogo.ZhihuLogo--blue.Icon--logo { fill: #0084ff; margin-left: 5px; margin-bottom: -4px; } img.origin_image.zh-lightbox-thumb { box-shadow: 0px 0px 10px powderblue; } body.vscode-light, body.vscode-dark { padding: 0; } body.vscode-dark > .header { box-shadow: 3px 0px 6px 3px rgba(255, 252, 252, 0.1); /* width: 700px; */ } body.vscode-dark > .container { /* border-style: groove; */ box-shadow: 0px 0px 6px rgba(253, 250, 250, 0.1); } button#favorite, button#share, button#open { float: right; margin-right: 20px; min-width: 0; border: none; border-style: none; font-size: 13px; padding: 5px; width: 30px; height: 30px; box-shadow: 0 3px 1px -2px rgba(0,0,0,.2), 0 2px 2px 0 rgba(0,0,0,.14), 0 1px 5px 0 rgba(0,0,0,.12); cursor: pointer; border-radius: 50%; } img.qrcode { margin: 0 auto; width: 200px; /* margin-top: 180px; */ box-shadow: 0px 0px 9px rgba(76, 49, 40, 1); } .qr-container { margin: 0 auto; text-align: center; } li { margin-top: 16px; } svg.Zi.Zi--Star.Button-zi { vertical-align: bottom; } a.internal, a.external { text-decoration: none; background: hsla(177, 100%, 91%, 0.88); padding: 2px; border-radius: 10px; box-shadow: 0 5px 5px wheat; } p { font-weight: 500; font-size: 16px; } .voteup { box-shadow: 0px 0px 4px 4px antiquewhite; padding: 12px; } .profile { display: inline-block; margin-left: 14px; } .author-name { font-size: 1.3em; font-weight: bolder; } .author { border-bottom: #99ded8; border-bottom-style: double; border-bottom-width: 2px; padding-bottom: 10px; /* border-style: dashed; */ } img.avatar-img { border-radius: 10px; width: 60px; box-shadow: 0px 0px 5px #95cab6; } svg.Zi.Zi--Share.Button-zi { fill: #0084ff; margin-top: 
2px; } svg.Zi.Zi--Star.Button-zi { vertical-align: bottom; fill: #0084ff; } svg.Zi.Zi--LabelSpecial { fill: #0084ff; } div.voteup > button { cursor: pointer; border-style: none; border-radius: 5px; background: #0084ff; color: white; padding: 5px; padding-left: 10px; padding-right: 10px; } .container { /* border-style: groove; */ padding: 25px; box-shadow: 0 1px 3px rgba(26,26,26,.1); width: 700px; margin: 0 auto; margin-top: 15px; } .header { padding: 5px; box-shadow: 0 1px 3px rgba(26,26,26,.1); /* width: 700px; */ margin: 0 auto; margin-top: 65px; } .description { width: 900px; margin: 0 auto; } img.origin_image.zh-lightbox-thumb.lazy { display: none; }
{ "pile_set_name": "Github" }
package deprecated; public class Foo { // ref in Bar.test and Bar.foo() @Deprecated public Foo(String s) { } // no ref should be deleted @Deprecated public void test1() { test2(); } // ref in Foo.test1() @Deprecated public boolean test2() { return true; } // should be deleted because only ref in Foo.test3() @Deprecated public void test3() { test3(); } // ref in Bar.foo() @Deprecated public void test4() { } // ref in Bar.test @Deprecated public void test5() { } }
{ "pile_set_name": "Github" }
/***************************************************************************/ /* */ /* t1gload.h */ /* */ /* Type 1 Glyph Loader (specification). */ /* */ /* Copyright 1996-2015 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #ifndef __T1GLOAD_H__ #define __T1GLOAD_H__ #include <ft2build.h> #include "t1objs.h" FT_BEGIN_HEADER FT_LOCAL( FT_Error ) T1_Compute_Max_Advance( T1_Face face, FT_Pos* max_advance ); FT_LOCAL( FT_Error ) T1_Get_Advances( FT_Face face, FT_UInt first, FT_UInt count, FT_Int32 load_flags, FT_Fixed* advances ); FT_LOCAL( FT_Error ) T1_Load_Glyph( FT_GlyphSlot glyph, FT_Size size, FT_UInt glyph_index, FT_Int32 load_flags ); FT_END_HEADER #endif /* __T1GLOAD_H__ */ /* END */
{ "pile_set_name": "Github" }
VoicForm u => blackhole; 1::samp => now; u =< blackhole; null @=> u; <<< "success" >>>;
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <?import java.lang.*?> <?import javafx.collections.*?> <?import javafx.scene.*?> <?import javafx.scene.control.*?> <?import javafx.scene.effect.*?> <?import javafx.scene.layout.*?> <?import javafx.scene.layout.GridPane?> <?import javafx.scene.control.ToolBar?> <?import javafx.scene.control.Button?> <?import javafx.scene.layout.HBox?> <?import javafx.scene.control.MenuBar?> <?import javafx.scene.control.MenuItem?> <?import javafx.scene.control.Menu?> <?import javafx.scene.layout.VBox?> <?import javafx.scene.shape.Circle?> <?import javafx.scene.shape.Rectangle?> <?import javafx.scene.text.Text?> <?import javafx.scene.control.ProgressBar?> <?import javafx.scene.control.Label?> <?import javafx.scene.control.Hyperlink?> <Group xmlns:fx="http://javafx.com/fxml" fx:controller="game.BallGameController" fx:id="area"> <Circle fx:id="ball" radius="10.0" fill="BLACK" /> <Rectangle fx:id="borderTop" x="0" y="30" width="500" height="2" /> <Rectangle fx:id="borderBottom" x="0" y="500" width="500" height="2"/> <Rectangle fx:id="borderLeft" x="0" y="0" width="2" height="500"/> <Rectangle fx:id="borderRight" x="498" y="0" width="2" height="500"/> <Rectangle fx:id="paddle" x="200" y="460" width="100" height="15" layoutX="20" fill="BLACK"/> <Text fx:id="gameOverText" text="Game Over" fill="RED" layoutX="150" layoutY="330"/> <Text fx:id="winnerText" text="You've won!" fill="GREEN" layoutX="150" layoutY="330"/> <ToolBar minWidth="500"> <Button fx:id="startButton" text="Start"/> <Button fx:id="quitButton" text="Quit"/> <ProgressBar fx:id="progressBar" progress="100"/> <Label fx:id="remainingBlocksLabel"/> </ToolBar> <ToolBar minWidth="500" layoutY="500"> <Hyperlink text="www.hascode.com" layoutX="360" layoutY="505" /> </ToolBar> </Group>
{ "pile_set_name": "Github" }
{ "version": "1.0.0", "name": "NewTabPageOverride", "manifest_version": 2, "description": "NTP override", "chrome_url_overrides" : { "newtab": "new_tab_override.html" }, "permissions": [ "contentSettings", "tabs", "storage", "webNavigation" ] }
{ "pile_set_name": "Github" }
# AWS Unity Chess Game Sample This is a chess game built with [Unity](http://unity3d.com/), using the [AWS Mobile SDK for Unity](https://github.com/aws/aws-sdk-unity). This sample's purpose is to demonstate how to add a wealth of backend features to a game by leveraging the power of AWS. ## Why Are You Here? I'm assuming you are here to learn how to make use of the AWS Mobile SDK for Unity in a game. If you want to learn by following a step-by-step tutorial that will take you from start to finish, you should ignore the [Quick Start](#quick-start) section below and skip to the [tutorial](#tutorial) section. If you would rather learn by taking the minimal amount of steps to get the game running, so that you can tinker around with the working sample, continue into the [Quick Start](#quick-start) section. ## Quick Start This *quick start* assumes a familiarity with some AWS tools. If at any point you feel you are missing some prerequisite knowledge, head to the included [tutorial](#tutorial) section below for more context. ### Things to Download * You need [Unity 5.2.2](http://unity3d.com/get-unity/download?) or greater to open and run the sample. * For the Amazon Simple Notification portion of this sample to work on Android, you will need `google-play-services.jar`, which you can find in the location `<android-sdk>/extras/google/google_play_services/libproject/google-play-services_lib/libs/`. Copy this jar file to the `ChessGame/Assets/Plugin/Android` folder in the project. ### Things to Create #### AWS Account If you do not already have an AWS account, you can create one [here](https://aws.amazon.com/getting-started/), and take advantage of the [AWS Free Usage Tier](http://aws.amazon.com/free/). #### Amazon DynamoDB and AWS Lambda Create a new **CloudFormation Stack** in the [AWS Cloud Formation Console](https://console.aws.amazon.com/cloudformation/home#/stacks/new) using the included `ChessGameCloudFormationTemplate.json` template. 
This will set up the three DynamoDB tables and two Lambda Functions that you need. You will be prompted for one parameter when creating the stack, `SNSRegion`, which you should set to the AWS Region Endpoint in which you have created your SNS Applications (or in which you intend to create them, see below). The rest of the resources that are needed for the game cannot be created by CloudFormation (at least at the time this is being written). #### Amazon DynamoDB Stream With AWS Lambda The only remaining step with Lambda and DynamoDB is to allow one of our Lambda Functions to react to a DynamoDB Stream. From the `NewMoveNotifier` Lambda function (you can find this function's full name in the output of the CloudFormation stack), add an event source with `DynamoDB` as the *event source type* and `ChessMatches`as the *DynamoDB* table. #### Amazon Cognito You need an [Amazon Cognito](https://console.aws.amazon.com/cognito/home) Identity pool named `Chess`, with [Facebook as an authenticated provider](https://docs.aws.amazon.com/cognito/devguide/identity/external-providers/facebook/), and the following policy for authenticated and unauthenticated identities, with each instance of `<RESOURCE REGION>` replaced with the region you are using for the corresponding resource: ``` { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "dynamodb:GetItem", "dynamodb:PutItem", "dynamodb:Query", "dynamodb:UpdateItem", "dynamodb:DescribeTable" ], "Resource": [ "arn:aws:dynamodb:<RESOURCE REGION>:*:table/ChessMatches", "arn:aws:dynamodb:<RESOURCE REGION>:*:table/ChessPlayers", "arn:aws:dynamodb:<RESOURCE REGION>:*:table/SNSEndpointLookup", "arn:aws:dynamodb:<RESOURCE REGION>:*:table/ChessMatches/index/*" ] }, { "Effect": "Allow", "Action": [ "lambda:InvokeFunction" ], "Resource": [ "arn:aws:lambda:<RESOURCE REGION>:*:function:*NewChessMatch*" ] }, { "Effect": "Allow", "Action": [ "sns:CreatePlatformEndpoint" ], "Resource": [ "arn:aws:sns:<RESOURCE 
REGION>:*:app/APNS_SANDBOX/ChessGame", "arn:aws:sns:<RESOURCE REGION>:*:app/GCM/ChessGame" ] }, { "Effect": "Allow", "Action": [ "mobileanalytics:PutEvents", "cognito-sync:*" ], "Resource": [ "*" ] } ] } ``` ##### Amazon Mobile Analytics You will need an [Amazon Mobile Analytics](https://console.aws.amazon.com/mobileanalytics/home/) App, with a name of your choosing. ##### Amazon Simple Notification Service You will need two [Amazon Simple Notification Service](https://console.aws.amazon.com/sns/v2/home) Applications: one for each [Android](http://docs.aws.amazon.com/sns/latest/dg/mobile-push-gcm.html) and [iOS](http://docs.aws.amazon.com/sns/latest/dg/mobile-push-apns.html). If you only intend to build for one platform, feel free to skip creating a SNS application for the other platform. Also keep track of the **Google Project Number** for the project used to create your Android SNS Application. #### Configuring the Game to use Your Resources Now that you have all of the resources required, open up `ChessGame/Assets/ChessGameScripts/ChessNetworkManager.cs` and fill in the following lines to refer to your resources: ``` private const string CognitoIdentityPoolId = null; private const string MobileAnaylticsAppId = null; // Needed only when building for Android private const string AndroidPlatformApplicationArn = null; private const string GoogleConsoleProjectId = null; // Needed only when building for iOS private const string IOSPlatformApplicationArn = null; private const string NewMatchLambdaFunctionName = "NewChessMatch"; ``` You can find the `NewMatchLambdaFunctionName` value in the output of the CloudFormation stack. If all your resources are in the same Region, change the region value in `ChessGame/Assets/AWSSDK/src/Core/Resources/awsconfig.xml`, which has the default `region ="us-east-1"`. 
If your resources have different endpoints, change the lines in `ChessNetworkManager.cs` in the following way: ``` private static readonly RegionEndpoint _cognitoRegion = null; // If null, the ChessNetworkManager uses the value from awsconfig.xml for the corresponding service. private static readonly RegionEndpoint _mobileAnalyticsRegion = RegionEndpoint.APNortheast1; // ap-northeast-1 private static readonly RegionEndpoint _dynamoDBRegion = RegionEndpoint.USEast1; // us-east-1 private static readonly RegionEndpoint _lambdaRegion = null; private static readonly RegionEndpoint _snsRegion = null; ``` ### Build and Run Open up the project in Unity, go to *Build Settings*, and drag in all the scenes in `ChessGame/Assets/ChessGameScenes` into *Scenes In Build*. Make sure the `PersistentObjectInit` is first in the *Scenes In Build* order. Now you can build and run for iOS or Android, or run the game in the Unity editor. If running in the Unity editor, make sure you have the `PersistentObjectInit` scene open when you press play. # Tutorial - Building a Cross-Platform Mobile Game Using the AWS Mobile SDK for Unity There are many reasons for the unprecedented success of mobile games and apps. The convenience and power of phones and tablets have improved at breakneck speed, and advances in cellular networking have dramatically increased the networking capabilities of these ubiquitous devices. With its powerful tools for graphics, physics, multiplatform support, and more, Unity does a lot of the heavy lifting and allows developers to bring their ideas to life. However, when developers decide they want custom back-end services in their game, it is easy to lose creative momentum when tasked with designing, developing, and hosting a back end capable of managing player identity, cross-platform saves, achievements, leaderboards, and push notifications. 
A great idea developed by a small team can quickly turn into a herculean effort that splits your team’s focus into developing and managing your game and the infrastructure that supports it. Accessing the power, flexibility, and ease of use of AWS is the perfect solution to this problem. The AWS Mobile SDK for Unity allows developers to easily connect their Unity game to Amazon Web Services. In this tutorial, we will consider a chess game I have created with Unity, and show how I have integrated the AWS Mobile SDK for Unity to give it awesome features. ## What We Will Add For this example, we will add the following features to the chess game: * Identity management using Amazon Cognito Identity. * Cross-device syncing of a user's data using Amazon Cognito Sync. * Creation of new multiplayer matches using AWS Lambda and Amazon DynamoDB. * Saving and loading of public information and multiplayer matches using Amazon DynamoDB. * Reacting to game saves to notify players it's their turn using AWS Lambda, Amazon DynamoDB Streams, and Amazon Simple Notification Service. * Measuring game usage using Amazon Mobile Analytics. ## First Things First Before we get to the good stuff, you will need some resources. * [Here](https://github.com/awslabs/aws-sdk-unity-samples/tree/master/Chess%20Game%20Example%20Project) is the project in its final state. This tutorial is best followed with the **ChessGame** Unity project opened in the Unity Editor and the code open in your favorite editor or IDE. Take a look at the **Assets** folder: * The **AWSSDK** folder includes the parts of the AWS Mobile SDK for Unity we will be using for this game. When you make your own game, all you have to do is [download the AWS Mobile SDK for Unity](https://s3.amazonaws.com/aws-unity-sdk/latest/aws-unity-sdk.zip) and import the Unity packages for the services you are using. * **ChessGameScenes** ccontains the Unity scenes that make up our game. Start the **PersistentObjectInit** scene first. 
It creates objects that exist throughout the execution of the application. This includes `AWSPrefab`, which any Unity game must have present to use the AWS Mobile SDK and `ChessNetworkManager`, which we will talk about throughout this tutorial. The rest of the scenes are fairly self-explanatory. For example, the **MainMenu** scene is the UI screen the user sees first. The **Board** scene displays a chess board for the match the user is playing. * **ChessGameScripts** contains C# files that drive the functionality of this sample game. The **BasicUI** and **ChessLogic** subfolders contain logic that describes the way our UI behaves and the logic of an actual chess match, respectively. We will mostly ignore these folders for this tutorial, because they are not directly related to using the AWS Mobile SDK. The remaining files are as follows: * `ChessNetworkManager` describes a Unity component that exists during the entire execution of the game. As the name implies, this game object handles all of the networking for the game. As you might have assumed, we do all of this networking using the AWS Mobile SDK, and most of this tutorial focuses on the `ChessNetworkManager` file. * `GameManager` is a singleton class that manages the state of the game. This includes deciding when and how to use the `ChessNetworkManager` so that individual scenes can be ignorant of how the network is used. * `GameState` encapsulates all of the application state information we want to save. Before integrating a back end, this class defines which information to save to disk. When the back end is added, all we have to do is save it over the network instead. * `GCM` is the same C# file in the [SNS Example](https://github.com/awslabs/aws-sdk-unity-samples/blob/master/SNS/GCM.cs),It is used to communicate with the Google Cloud Messaging Java libraries. (We'll talk about these in Prerequisites.) * The **LambdaFunctions** folder contains Node.js code for our AWS Lambda functions. 
## Prerequisites This game sample is built using Unity 5.2.2 and the AWS Mobile SDK for Unity 2.1.0.0. Do not use versions of Unity earlier than 5.2.2. To get the game to work, you first need to set up your AWS services. We will go through these step by step, but this tutorial will not cover creating your AWS account. If you do not already have an AWS account, you can create one [here](https://aws.amazon.com/getting-started/), and take advantage of the [AWS Free Usage Tier](http://aws.amazon.com/free/). For the Amazon Simple Notification portion of this sample to work on Android, you will need google-play-services.jar, which you can find in the location `<android-sdk>/extras/google/google_play_services/libproject/google-play-services_lib/libs/`. Copy the jar file to the `ChessGame/Assets/Plugin/Android` folder in the project. You can remove all SNS-related code from the game. Other functionalities do not depend on it. ## Regions Throughout this tutorial, we will be using the [AWS Console](https://console.aws.amazon.com/) to create our AWS resources. You'll see regions (e.g., N. Virginia, Ireland, Singapore) are displayed in the console's menu bar. The resources you create will be hosted in the region you choose, so you may want to choose one close to you (or, in the long term, to your customers). Because some services are available in certain regions only, you may have to create different resources in different regions. All of the services we use in this tutorial are available in the `us-east-1` region. That is the way the sample is configured, but you can change the region. To change the default region for all of your services, change the region value (`currently region="us-east-1"`) in `ChessGame/Assets/AWSSDK/src/Core/Resources/awsconfig.xml`. It is suggested that you use the same region for all services except for services that are unavailable in your preferred region. 
To have individual services use different regions, change the lines in `ChessNetworkManager.cs` in the following way: ``` private static readonly RegionEndpoint _cognitoRegion = null; // If null, the ChessNetworkManager uses the value from awsconfig.xml for the corresponding service. private static readonly RegionEndpoint _mobileAnalyticsRegion = RegionEndpoint.APNortheast1; // ap-northeast-1 private static readonly RegionEndpoint _dynamoDBRegion = RegionEndpoint.USEast1; // us-east-1 private static readonly RegionEndpoint _lambdaRegion = null; private static readonly RegionEndpoint _snsRegion = null; ``` Make sure to keep these values up-to-date as you create your services. ## User Identity The first thing an online game needs is user identities. User identities are useful for keeping track of a user's information, allowing or restricting access to online assets, and much more. It's very simple to create user identities with Amazon Cognito Identity. Look in `ChessNetworkManager.cs` to find the following: ``` private CognitoAWSCredentials _credentials; private CognitoAWSCredentials Credentials { get { if (_credentials == null) _credentials = new CognitoAWSCredentials( CognitoIdentityPoolId, CognitoRegion); return _credentials; } } ``` This CognitoAWSCredentials object is useful in many ways. For one, we can retrieve the unique user ID from it by calling `Credentials.GetIdentityId()`. We will also use it to access other AWS resources based on the permissions set for your Cognito identity pool. You may be asking, "What Cognito identity pool and what permissions?" We are about to set that up in the [Cognito Console](https://console.aws.amazon.com/cognito/home). In the console, choose **Create new identity pool**, name your pool "Chess," select **Enable access to unauthenticated identities**, and create the pool. We will add authenticated identities later. You will be prompted to choose roles for identities in your pool. 
Create a role for each authenticated and unauthenticated identity. Edit each policy to look like the following, except with each instance of `<RESOURCE REGION>` replaced with the region you are using for the corresponding resource (for example, `us-east-1`): ``` { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "dynamodb:GetItem", "dynamodb:PutItem", "dynamodb:Query", "dynamodb:UpdateItem", "dynamodb:DescribeTable" ], "Resource": [ "arn:aws:dynamodb:<RESOURCE REGION>:*:table/ChessMatches", "arn:aws:dynamodb:<RESOURCE REGION>:*:table/ChessPlayers", "arn:aws:dynamodb:<RESOURCE REGION>:*:table/SNSEndpointLookup", "arn:aws:dynamodb:<RESOURCE REGION>:*:table/ChessMatches/index/*" ] }, { "Effect": "Allow", "Action": [ "lambda:InvokeFunction" ], "Resource": [ "arn:aws:lambda:<RESOURCE REGION>:*:function:NewChessMatch" ] }, { "Effect": "Allow", "Action": [ "sns:CreatePlatformEndpoint" ], "Resource": [ "arn:aws:sns:<RESOURCE REGION>:*:app/APNS_SANDBOX/ChessGame", "arn:aws:sns:<RESOURCE REGION>:*:app/GCM/ChessGame" ] }, { "Effect": "Allow", "Action": [ "mobileanalytics:PutEvents", "cognito-sync:*" ], "Resource": [ "*" ] } ] } ``` This policy allows any identity in your Cognito identity pool access to the resources we will create in the tutorial. From the [Cognito Console](https://console.aws.amazon.com/cognito/home), choose the link for your "Chess" identity pool, choose the Edit identity pool button, and there you will find your identity pool ID. It should look something like this: ``` us-east-1a2b3c4c5-6789-abcd-0123-012345abcdef ``` Back in `ChessNetworkManager.cs`, find the declaration of the `CognitoIdentityPoolId` string, and provide your identity pool ID as the value. For example: ``` private const string CognitoIdentityPoolId = "us-east-1a2b3c4d5-6789-abcd-0123-012345abcdef"; ``` We've set up the user identity, so let's do something with it. 
## Syncing User Data A common use case for mobile games is saving user data in the cloud and synchronizing it across devices. For our chess game, we want to allow users to play a local game on their phone, and then switch to their tablet and pick up right where they left off. Amazon Cognito Sync makes this easy. Because our Cognito identity pool is ready to go with a policy that allows access to Cognito Sync, we can dive right into the code in `ChessNetworkManager.cs`. Creation of the `CognitoSyncManager` is as simple as providing our credentials and region endpoint: ``` new CognitoSyncManager(Credentials, new AmazonCognitoSyncConfig { RegionEndpoint = CognitoRegion }); ``` All of our AWS-related managers, clients, and contexts are created in a similar fashion. When you see references to `AnalyticsManager`, `DynamoDBClient`, `DynamoDBContext`, `LambdaClient`, and `SNSClient` in this tutorial, take a look at their declarations and initializations first. ### Using the `CognitoSyncManager` To understand how the chess game uses the `CognitoSyncManager`, take a look at the `SaveGameStateLocal` method in `ChessNetworkManager.cs`. In this method, we locally cache the current state of our game in a `Dataset` object. A `Dataset` allows us to store key-value pairs called `Records`, which we can synchronize with Cognito Sync later. For our use case, we create two `Dataset` objects: * One that stores all of our friends in which a friend's ID is the key and the friend's name is the value. (We'll talk about adding friends later.) * One that stores all of our local matches with the match ID as the key and a JSON representation of the match as the value. When a player makes a move in a local game, we call `SaveGameStateLocal`, but do not synchronize the `Datasets` with Cognito Sync until the user exits the **Board** scene. To synchronize the dataset with Cognito Sync, we call the `Synchronize()` method on the dataset. 
We also want to define the behavior of our game when the synchronization succeeds or fails. To see the syntax for defining this behavior, check out the `SynchronizeLocalDataAsync()` method in `ChessNetworkManager.cs`. In the case of our game, if the synchronization fails, all we do is log a warning because it is most likely due to a lack of network connection. Even if this is the case, the locally cached data remains available to the user. Finally, we want to be able to load data from Cognito Sync. Take a look at the `LoadGameStateAsync` method in `ChessNetworkManager.cs`, where we use the `Synchronize()` method again. When the synchronization is successful, we open the `Dataset` we are synchronizing and use all of the `Records` in it to re-create our `GameState` object. On failure, we re-create the `GameState` using the locally cached `Datasets` instead. We are now able to call these functions from the `GameManager` singleton when it determines it is a good time to save, sync, or load. ### User Sign-In Our credentials are valid only for the device they are created from, because we haven't given Cognito Identity a way to know which devices should use the same identity. The solution is to use authenticated identities. That is, we will have the user sign in using either a public identity provider like Facebook or a custom one. For this example, we use Facebook. To learn how to get your `Chess` Cognito identity pool set up to use Facebook as an authenticated provider, head over to [this page](https://docs.aws.amazon.com/cognito/devguide/identity/external-providers/facebook/). Check out the `LogInToFacebookAsync` and `FacebookLoginCallback` methods in `ChessNetworkManager.cs` to see how we use the Facebook SDK to get an access token and register that token with our Cognito credentials. The `LoginToFacebook` method is called when the **Sign In** button on the main menu is pressed. 
Now, when a user is signed in on multiple devices, we have full-featured, cross-device synchronization! ## Online Matches Online multiplayer adds a whole new dimension to any game. Because chess is a turn-based game, a player can make a move whenever it is the player's turn, whether that be seconds or days after the player's opponent. This sort of multiplayer experience is popular with mobile games because users are often on the go and unable to play a full game in one session. ### Creating a Match Using AWS Lambda AWS Lambda is a powerful and versatile tool for creating back-end services, and it is immeasurably useful for building an online game. We use Lambda a couple ways in this sample, but the possible uses are endless. At the end of this tutorial, take a moment to consider how you might use Lambda to make the game more robust, secure, and full-featured. The first step is to create an AWS Lambda function. For this sample, we are going to write our Lambda functions in [Node.js](http://docs.aws.amazon.com/lambda/latest/dg/authoring-function-in-nodejs.html), but you can choose [Node.js](http://docs.aws.amazon.com/lambda/latest/dg/authoring-function-in-nodejs.html), [Java](http://docs.aws.amazon.com/lambda/latest/dg/java-lambda.html), [Python](http://docs.aws.amazon.com/lambda/latest/dg/python-lambda.html), or even another JVM language like [Clojure](https://aws.amazon.com/blogs/compute/clojure/) for your game! In the [Lambda console](https://console.aws.amazon.com/lambda/home), choose **Create a Lambda function**. When prompted to select a blueprint, choose **Skip**. On the **Configure function** page, name the function "NewChessMatch" and use the Node.js runtime. The easiest way to upload our code is to choose **Edit code** inline. Open the `NewChessMatch.js` file in the **LambdaFunctions** folder and just copy and paste it. Before we finish our Lambda function configuration, we need to choose a role. 
This role will give the function access to the AWS resources it needs. This is separate from the Cognito Identity role. Our "NewChessMatch" function will need to access a DynamoDB table to create an entry for a new game (we'll talk about defining our DynamoDB table later), so let's add a "Basic with Dynamo" role. Under **Create new role**, from the **Role** drop-down list, choose "Basic with Dynamo". On the page that appears, view the new policy, and then choose **Create**. Choose **Next**, and then choose **Create function**. You now have an AWS Lambda function ready to invoke. But wait! If you look in the Lambda function code, you will see it reads from and writes to a DynamoDB table called "ChessMatches." Let's create this table. #### Creating a DynamoDB Table to Store Chess Matches We want a table that holds the following information: * A unique match ID. * The ID of the player using white chess pieces. * The ID of the player using black chess pieces. * The Forsyth-Edwards Notation (FEN) that describes the state of the board. * The long algebraic notation that describes the previous move. We want to access the table in the following ways: * Get all information about a match if we have the match ID. * Insert or update matches. * Get all match IDs in which a given player is either the white or black player. To satisfy these requirements, we will design our table as follows: * A string named `MatchId` as the **Primary Hash Key**. This means we can make a `GetItem` request on the table with a `MatchId` value, and get the item with that match ID. * Two **Global Secondary Indexes** with **Index Hash Keys** `WhitePlayerId` and `BlackPlayerId` and **Index Names** `WhitePlayerId-index` and `BlackPlayerId-index`, respectively. This allows us to query on the table to find all matches for `WhitePlayerId` and `BlackPlayerId` values. * We don't have to explicitly define `AlgebraicNotation` and `FEN` keys. 
We just assume any item put into this table will contain values for these keys. Create this table in the [DynamoDB console](https://console.aws.amazon.com/dynamodb/home). Choose **Create Table** and name the table "ChessMatches." For **Primary Key Type**, choose **Hash**. For **Hash Attribute Name**, use `MatchId`. Make sure **string** is selected because our `MatchId` values are strings. Continue to **Add Indexes**. For **Index Type**, choose **Global Secondary Indexes**. For **Index Hash Key**, type `WhitePlayerId`. The console should automatically determine your **Index Name** is `WhitePlayerId-index`. Choose **Add Index To Table**, repeat for `BlackPlayerId`, then choose **Continue**. On **Provisioned Throughput Capacity**, accept the defaults and press **Continue**. On the next page, select or clear **Use Basic Alarms**. We will not cover them in this tutorial. Choose **Continue**, confirm your table looks as expected, and then choose **Create**. #### Invoking the NewChessMatch function Now that our Lambda function and DynamoDB table are set up, we can invoke the `NewChessMatch` Lambda function from our game to create a new match. In the `ChessNetworkManager.cs` file, take a look at the `NewMatchAsync` method. In this method, we create a JSON string that specifies the `requesterId` (the identity of the player who is creating the match) and `opponentId` (the identity of the opponent), and then use the `LambdaClient` to call `InvokeAsync` with a request that specifies our function name, parameters in JSON format, and invocation type so that Lambda knows we are waiting for a response. When we get a response, we pull out the `MatchId` we have defined our Lambda function to return. ### Adding Friends We are able to invoke an AWS Lambda function to create a new match with a friend, and we are able to keep track of our friends with Cognito Sync, but we have yet to define a way to add friends. 
We are going to take a simple approach in which we have a publicly accessible DynamoDB table that maps from player identities (specifically, Cognito identities) to player names. For this sample, we will allow users to directly access the DynamoDB table to update their info or find friends' names if they know their friends' IDs. Creating this table is simple. Like before, head to the [DynamoDB console](https://console.aws.amazon.com/dynamodb/home), and then create a table named `ChessPlayers` with **Primary Hash Key** `Id`. In `ChessNetworkManager.cs`, take a look at the `PubliclyRegisterIdentityAsync` method, in which we use our `DynamoDBContext` object to perform `SaveAsync`. When calling `SaveAsync`, we specify the type `GameState.PlayerInfo`. If you take a look at the `PlayerInfo` class in `GameState.cs`, you will see some attributes that provide `DynamoDBContext` with information about how to save the object in DynamoDB: * `[DynamoDBTable("ChessPlayers")]` means the object should be saved to the `ChessPlayers` table. * `[DynamoDBHashKey]` applied to the `Id` property means the table's **Primary Hash Key** is named `Id`. * `[DynamoDBProperty]` applied to the `Name` property means the name should be a property of the item put to the table. * These attributes work the same way when loading from the table, making it just as easy to create a `PlayerInfo` object from a DynamoDB item as it is to create a DynamoDB item from a `PlayerInfo` object. If the `SaveAsync` call is successful, we make a similar call to a different table. We will talk about this when we set up Amazon Simple Notification Service. Also check out the `FindPlayerByIdAsync` in `ChessNetworkManager.cs`, in which we load a `PlayerInfo` object, if supplied, with an ID. These methods are called by our `GameManager` singleton. `PubliclyRegisterIdentityAsync` is called when the user changes his/her name or identity. 
`FindPlayerByIdAsync` is called when the user clicks the **Add Friend** button in the **SettingsMenu** scene. ### Playing Online Matches We have friends and a match ready to be played, so we need to be able to find, save, and load the matches we are involved in. Let's start by taking a look at the `SimpleMatchInfo` class in `ChessNetworkManager.cs`. You should recognize some of the attributes from our `PlayerInfo` class. In addition, there are `[DynamoDBGlobalSecondaryIndexHashKey]` attributes attached to the `BlackPlayerId` and `WhitePlayerId` properties, which makes sense based on the way we created our `ChessMatches` table. There is also a method in this class to create a `GameState.MatchState` object from the data in the `SimpleMatchInfo`. Let's say we just created a new match with our `NewMatchAsync` method. This method provides us with the `MatchId` value, so we will load the match with that ID. As you might have expected, this is what the `LoadMatchAsync` method in `ChessNetworkManager.cs` is for. Just like we did in `FindPlayerByIdAsync`, we use `DynamoDBContext.LoadAsync` to get the item from our table and make a `SimpleMatchInfo` out of it. `SaveOnlineMatchAsync` should also look very similar to `PubliclyRegisterIdentityAsync` because `DynamoDBContext.SaveAsync` is used in the same way. All that's left is to use our `GetOnlineMatchesAsync` method to find all matches in which either `BlackPlayerId` or `WhitePlayerId` match the user's ID. We make two `DynamoDBContext.QueryAsync` calls: one for black player ID and one for white player ID. For each, we specify which **Global Secondary Index** we are querying by providing a `DynamoDBOperationConfig` like this: ``` new DynamoDBOperationConfig() { IndexName = WhitePlayerDynamoDBIndexKey } ``` For each of the two `AsyncSearch` objects, we get all of the matches. 
We can now provide the user with the current state of all the matches he is playing, and allow the game to individually reload any match or update any match with a new move! From the user's perspective, he/she selects a friend to create a new game in the **NewGameMenu** scene, and goes to the **Board** scene, which tells the user it is loading. While it waits for the Lambda function to create the match, the match with that ID is loaded from DynamoDB. The user then has a fresh game board. If it is his/her turn, the user makes a move, which saves the match to the DynamoDB table. The user's opponent can refresh the board to load the updated match state. It might be nice if we were able to notify a user when it is his/her turn... ## Notify Users When It Is Their Turn You guessed it! This section is about telling a user that his/her friend has made a move and it's now his/her turn to prove he/she is a real chess master. We are going to use these tools to do this: AWS Lambda, a new Amazon DynamoDB table, Amazon DynamoDB streams, and Amazon Simple Notification Service. The plan is as follows: 1. Create a Google project with **Google Cloud Messaging** (GCM) access and an iOS app with **Apple Push Notification Service** (APNS) access. 2. Create an **Amazon Simple Notification Service** (SNS) application that targets our GCM and APNS applications. 3. Use the AWS Mobile SDK for Unity to create an SNS endpoint ARN for the user's device with those applications. 4. Create a DynamoDB table that maps from user identity to the SNS endpoint ARNs for that user's devices. 5. Create a Lambda function that responds to changes in our ChessMatches tables, determines which player to notify, and sends a notification through SNS, based on the endpoint ARNs in the DynamoDB table. #### Creating SNS Applications We will refer you to some existing documentation for steps 1 and 2. For GCM, follow the first two steps [here](http://docs.aws.amazon.com/sns/latest/dg/mobile-push-gcm.html). 
For APNS, follow the first three steps [here](http://docs.aws.amazon.com/sns/latest/dg/mobile-push-apns.html). (You don't need to create a GCM or APNS application if you do not intend to develop for Android or iOS.) Register each with SNS by following the steps [here](http://docs.aws.amazon.com/sns/latest/dg/mobile-push-send-register.html). Finally, you should have the Android platform application ARN and iOS platform application ARN from SNS, as well as the Google console project ID from the Google Developers console (assuming you are developing for both platforms). Provide these values in `ChessNetworkManager.cs`: ``` // Needed only when building for Android private const string AndroidPlatformApplicationArn = "arn:aws:sns:us-east-1:654321123456:app/GCM/ChessGame"; private const string GoogleConsoleProjectId = "1234567891011"; // Needed only when building for iOS private const string IOSPlatformApplicationArn = "arn:aws:sns:us-east-1:654321123456:app/APNS_SANDBOX/ChessGame"; ``` #### Registering a Device with SNS Check out the `RegisterDeviceAsync` code in `ChessNetworkManager.cs` to see how we use the `SNSClient`, the iOS notification services, and GCM utilities to register a device. We receive the `SNSEndpointARN` and keep a reference to it in `ChessNetworkManager` so we can write its value to a DynamoDB table next time `PubliclyRegisterIdentityAsync` is called. #### Keeping Track of a User's SNS Endpoints We need a DynamoDB table that can hold zero-to-many SNS endpoint ARNs for a given user identity. Create a table named `SNSEndpointLookup` with a **Primary Key Type** of **Hash** and **Range Key** where the **Hash Attribute Name** is `PlayerId` and the **Range Attribute Name** is `SNSEndpointARN`. This allows us to put multiple items with the same player ID to the table as long as they have different SNS endpoint ARNs. 
We use the AWS Mobile SDK for Unity to save these items to DynamoDB the same way we did with `GameState.PlayerInfo` and `SimpleMatchInfo` objects, but now with `SNSEndpointLookupEntry` objects, which are defined in `ChessNetworkManager.cs`. The code for this update is in the `PubliclyRegisterIdentityAsync` method. Note that it is only executed if the update to the user's `PlayerInfo` succeeds. #### Detecting When to Notify the User We want to notify the user when it's his/her turn in a match. DynamoDB and Lambda work together when this is the case. DynamoDB offers a functionality called DynamoDB Streams and Lambda has the ability to react to DynamoDB stream events, so we are able to set up a Lambda function that performs an action whenever an `INSERT` or `MODIFY` operation occurs in the `ChessMatches` table. Go back to the [Lambda console](https://console.aws.amazon.com/lambda/home) to create a new function. It will be different from our other Lambda function in the following ways: * Name the function `NewMoveNotifier`. * Use the code in `LambdaFunctions/NewMoveNotifier.js`. You may have to change the values for `SNS_REGION` and `DDB_REGION` if you are not using us-east-1 for SNS or DynamoDB. * Create a new role. * This time, we are going to make a custom role to allow us to use the SNS and DynamoDB resources we need access to. Start by creating "DynamoDB event stream." 
Next, in the [IAM Management Console](https://console.aws.amazon.com/iam/home#roles), select the role you created, choose **Edit Policy**, and then modify the policy to look like this: ``` { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "lambda:InvokeFunction" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "dynamodb:GetRecords", "dynamodb:GetShardIterator", "dynamodb:DescribeStream", "dynamodb:ListStreams", "dynamodb:Query", "sns:Publish", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], "Resource": "*" } ] } ``` * After the Lambda function is created, go to its **Event Sources** tab and choose **Add event source**. For the event source type, use DynamoDB. For the `DynamoDB` table, use `ChessMatches`. For **Enable event source**, make sure enable now is selected. Skim through the Node.js code, or at least read its comments, to get an idea of how the DynamoDB event stream is processed and how an SNS message is published from it. ## Measuring Usage There is one last thing to add, and it won't take much work at all: Amazon Mobile Analytics. With Amazon Mobile Analytics, we are able to record and view how our users are using the application. In `ChessNetworkManager.cs`, you can see `OnApplicationFocus`, which is a method called by Unity on game components when the application gains or loses focus. All we have to do is call `AnalyticsManager.ResumeSession()` or `AnalyticsManager.PauseSession()` for these events to be recorded in Mobile Analytics. When we create the `AnalyticsManager` (when `ChessNetworkManager` awakes) the start of the session is automatically created. For this reason, we do not call `ResumeSession` the first time the application gains focus, because it doesn't make sense to resume a session that has started and not yet been paused. With the `AnalyticsManager`, you also have the option to send custom events, not just pause and resume. 
Now that we have the code in place, all that's left to do is add an app in the [Mobile Analytics console](https://console.aws.amazon.com/mobileanalytics/home). Give the app any name you want. After it's created, add the app ID (which you can find [here](https://console.aws.amazon.com/mobileanalytics/home/?#/overview?consoleState=management)) to `ChessNetworkManager.cs`, like this: ``` private const string MobileAnaylticsAppId = "1234567890abcdefgh1234567890abcd"; ``` ## Build and run Open the project in Unity, go to **Build Settings**, and drag in all of the scenes in `ChessGame/Assets/ChessGameScenes` into **Scenes In Build**. Make sure the `PersistentObjectInit` is first in the **Scenes In Build** order. Now you can build and run for iOS or Android, or run the game in the Unity editor. If you run it in the Unity editor, make sure you have the `PersistentObjectInit` scene open when you press play. ## That's it? That's it (for the scope of this sample)! We now have a game with a ton of networking functionality that takes advantage of Cognito Identity and Cognito Sync, Lambda, DynamoDB, SNS, and Mobile Analytics. ## What's next? There is still some work to do to get your game ready for release and infinite possibilities for adding more cool features. Our game is set up to do all of the checking for legal chess moves client-side. This opens up a lot of opportunities for users to cheat. Consider how we could use some of the tools we've talked about to keep users from cheating, and take a look [here](http://mobile.awsblog.com/post/TxH3SWPR48HGAO/Using-Amazon-Cognito-and-AWS-Lambda-to-Detect-Cheating). Also, in this sample, we make users manually copy and paste their friends' IDs to add them. When making your own game, consider integrating the Facebook SDK or other social network tools to make finding and adding friends easy on the user. Or, you could use AWS to design a way for users to create unique user names. Leaderboards? Tournaments? 
Watching other players' matches? Trophies and badges? Get creative and use Cognito, Lambda, DynamoDB, SNS, Mobile Analytics, and other AWS services to solve interesting problems and create awesome games!
{ "pile_set_name": "Github" }
/**
 * Lo-Dash 2.4.1 (Custom Build) <http://lodash.com/>
 * Build: `lodash modularize exports="node" -o ./compat/`
 * Copyright 2012-2013 The Dojo Foundation <http://dojofoundation.org/>
 * Based on Underscore.js 1.5.2 <http://underscorejs.org/LICENSE>
 * Copyright 2009-2013 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
 * Available under MIT license <http://lodash.com/license>
 */

/**
 * Cached native `Object.prototype.hasOwnProperty`, so the check still works
 * even when `object` shadows `hasOwnProperty` with its own property.
 */
var nativeHasOwn = Object.prototype.hasOwnProperty;

/**
 * Checks if `key` exists as a direct (own) property of `object`, ignoring
 * properties inherited through the prototype chain.
 *
 * @static
 * @memberOf _
 * @category Objects
 * @param {Object} object The object to inspect.
 * @param {string} key The name of the property to check.
 * @returns {boolean} Returns `true` if `key` is a direct property, else `false`.
 * @example
 *
 * _.has({ 'a': 1, 'b': 2, 'c': 3 }, 'b');
 * // => true
 */
function has(object, key) {
  // Falsy inputs (null, undefined, 0, '', false) own no properties.
  if (!object) {
    return false;
  }
  return nativeHasOwn.call(object, key);
}

module.exports = has;
{ "pile_set_name": "Github" }
import { AfterViewInit, ChangeDetectionStrategy, Component, ElementRef, EventEmitter, Input, OnChanges, OnDestroy, Output, SimpleChanges, ViewChild, ViewEncapsulation, Renderer, OnInit } from '@angular/core'; import { Select2OptionData } from './ng2-select2.interface'; @Component({ selector: 'select2', template: ` <select #selector> <ng-content select="option, optgroup"> </ng-content> </select>`, encapsulation: ViewEncapsulation.None, changeDetection: ChangeDetectionStrategy.OnPush }) export class Select2Component implements AfterViewInit, OnChanges, OnDestroy, OnInit { @ViewChild('selector') selector: ElementRef; // data for select2 drop down @Input() data: Array<Select2OptionData>; // value for select2 @Input() value: string | string[]; // enable / disable default style for select2 @Input() cssImport: boolean = false; // width of select2 input @Input() width: string; // enable / disable select2 @Input() disabled: boolean = false; // all additional options @Input() options: Select2Options; // emitter when value is changed @Output() valueChanged = new EventEmitter(); private element: JQuery = undefined; private check: boolean = false; constructor(private renderer: Renderer) { } ngOnInit() { if(this.cssImport) { const head = document.getElementsByTagName('head')[0]; const link: any = head.children[head.children.length-1]; if(!link.version) { const newLink = this.renderer.createElement(head, 'style'); this.renderer.setElementProperty(newLink, 'type', 'text/css'); this.renderer.setElementProperty(newLink, 'version', 'select2'); this.renderer.setElementProperty(newLink, 'innerHTML', this.style); } } } async ngOnChanges(changes: SimpleChanges) { if(!this.element) { return; } if(changes['data'] && JSON.stringify(changes['data'].previousValue) !== JSON.stringify(changes['data'].currentValue)) { await this.initPlugin(); const newValue: string = this.element.val() as string; this.valueChanged.emit({ value: newValue, data: this.element.select2('data') }); } 
if(changes['value'] && changes['value'].previousValue !== changes['value'].currentValue) { const newValue: string = changes['value'].currentValue; this.setElementValue(newValue); this.valueChanged.emit({ value: newValue, data: this.element.select2('data') }); } if(changes['disabled'] && changes['disabled'].previousValue !== changes['disabled'].currentValue) { this.renderer.setElementProperty(this.selector.nativeElement, 'disabled', this.disabled); } } async ngAfterViewInit() { this.element = jQuery(this.selector.nativeElement); await this.initPlugin(); if (typeof this.value !== 'undefined') { this.setElementValue(this.value); } this.element.on('select2:select select2:unselect', () => { this.valueChanged.emit({ value: this.element.val(), data: this.element.select2('data') }); }); } ngOnDestroy() { if (this.element && this.element.off) { this.element.off("select2:select"); } } private async initPlugin() { if(!this.element.select2) { if(!this.check) { this.check = true; console.log("Please add Select2 library (js file) to the project. You can download it from https://github.com/select2/select2/tree/master/dist/js."); } return; } // If select2 already initialized remove him and remove all tags inside if (this.element.hasClass('select2-hidden-accessible') == true) { this.element.select2('destroy'); this.renderer.setElementProperty(this.selector.nativeElement, 'innerHTML', ''); } let options: Select2Options = { data: this.data, width: (this.width) ? 
this.width : 'resolve' }; Object.assign(options, this.options); if(options.matcher) { let oldMatcher: any = await this.requireOldMatcher(); options.matcher = oldMatcher(options.matcher); this.element.select2(options); if (typeof this.value !== 'undefined') { this.setElementValue(this.value); } } else { this.element.select2(options); } if(this.disabled) { this.renderer.setElementProperty(this.selector.nativeElement, 'disabled', this.disabled); } } private async requireOldMatcher() : Promise<any> { return new Promise<any[]>(resolve => { jQuery.fn.select2.amd.require(['select2/compat/matcher'], (oldMatcher: any) => { resolve(oldMatcher); }); }); } private setElementValue (newValue: string | string[]) { if(Array.isArray(newValue)) { for (let option of this.selector.nativeElement.options) { if (newValue.indexOf(option.value) > -1) { this.renderer.setElementProperty(option, 'selected', 'true'); } } } else { this.renderer.setElementProperty(this.selector.nativeElement, 'value', newValue); } this.element.trigger('change.select2'); } private style: string = `CSS`; }
{ "pile_set_name": "Github" }
# desc "Explaining what the task does" # task :payola do # # Task goes here # end
{ "pile_set_name": "Github" }
/** @file Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR> This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License which accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. **/ #ifndef _BIOS_GRAPHICS_OUTPUT_H_ #define _BIOS_GRAPHICS_OUTPUT_H_ #include <FrameworkDxe.h> #include <Protocol/PciIo.h> #include <Protocol/EdidActive.h> #include <Protocol/DevicePath.h> #include <Protocol/DevicePathToText.h> #include <Protocol/EdidDiscovered.h> #include <Protocol/LegacyBios.h> #include <Protocol/VgaMiniPort.h> #include <Protocol/GraphicsOutput.h> #include <Protocol/EdidOverride.h> #include <Guid/StatusCodeDataTypeId.h> #include <Guid/LegacyBios.h> #include <Guid/EventGroup.h> #include <Guid/GlobalVariable.h> #include <Library/PcdLib.h> #include <Library/DebugLib.h> #include <Library/ReportStatusCodeLib.h> #include <Library/BaseMemoryLib.h> #include <Library/UefiDriverEntryPoint.h> #include <Library/UefiBootServicesTableLib.h> #include <Library/UefiRuntimeServicesTableLib.h> #include <Library/UefiLib.h> #include <Library/DevicePathLib.h> #include <Library/MemoryAllocationLib.h> //#include <Library/GenericBdsLib.h> #include <IndustryStandard/Pci.h> #include <IndustryStandard/VesaBiosExtensions.h> // // Packed format support: The number of bits reserved for each of the colors and the actual // position of RGB in the frame buffer is specified in the VBE Mode information // typedef struct { UINT8 Position; // Position of the color UINT8 Mask; // The number of bits expressed as a mask } BIOS_VIDEO_COLOR_PLACEMENT; // // BIOS Graphics Output Graphical Mode Data // typedef struct { UINT16 VbeModeNumber; UINT16 BytesPerScanLine; VOID *LinearFrameBuffer; UINTN FrameBufferSize; 
UINT32 HorizontalResolution; UINT32 VerticalResolution; UINT32 ColorDepth; UINT32 RefreshRate; UINT32 BitsPerPixel; BIOS_VIDEO_COLOR_PLACEMENT Red; BIOS_VIDEO_COLOR_PLACEMENT Green; BIOS_VIDEO_COLOR_PLACEMENT Blue; BIOS_VIDEO_COLOR_PLACEMENT Reserved; EFI_GRAPHICS_PIXEL_FORMAT PixelFormat; EFI_PIXEL_BITMASK PixelBitMask; } BIOS_VIDEO_MODE_DATA; // // BIOS video child handle private data Structure // #define BIOS_VIDEO_DEV_SIGNATURE SIGNATURE_32 ('B', 'V', 'M', 'p') typedef struct { UINTN Signature; EFI_HANDLE Handle; // // Consumed Protocols // EFI_PCI_IO_PROTOCOL *PciIo; EFI_LEGACY_BIOS_PROTOCOL *LegacyBios; // // Original PCI attributes // UINT64 OriginalPciAttributes; // // Produced Protocols // EFI_GRAPHICS_OUTPUT_PROTOCOL GraphicsOutput; EFI_EDID_DISCOVERED_PROTOCOL EdidDiscovered; EFI_EDID_ACTIVE_PROTOCOL EdidActive; EFI_VGA_MINI_PORT_PROTOCOL VgaMiniPort; // // General fields // BOOLEAN VgaCompatible; BOOLEAN ProduceGraphicsOutput; // // Graphics Output Protocol related fields // BOOLEAN HardwareNeedsStarting; UINTN CurrentMode; UINTN MaxMode; BIOS_VIDEO_MODE_DATA *ModeData; UINT8 *LineBuffer; EFI_GRAPHICS_OUTPUT_BLT_PIXEL *VbeFrameBuffer; UINT8 *VgaFrameBuffer; // // VESA Bios Extensions related fields // UINTN NumberOfPagesBelow1MB; // Number of 4KB pages in PagesBelow1MB EFI_PHYSICAL_ADDRESS PagesBelow1MB; // Buffer for all VBE Information Blocks VESA_BIOS_EXTENSIONS_INFORMATION_BLOCK *VbeInformationBlock; // 0x200 bytes. Must be allocated below 1MB VESA_BIOS_EXTENSIONS_MODE_INFORMATION_BLOCK *VbeModeInformationBlock; // 0x100 bytes. Must be allocated below 1MB VESA_BIOS_EXTENSIONS_EDID_DATA_BLOCK *VbeEdidDataBlock; // 0x80 bytes. Must be allocated below 1MB VESA_BIOS_EXTENSIONS_CRTC_INFORMATION_BLOCK *VbeCrtcInformationBlock; // 59 bytes. 
Must be allocated below 1MB UINTN VbeSaveRestorePages; // Number of 4KB pages in VbeSaveRestoreBuffer EFI_PHYSICAL_ADDRESS VbeSaveRestoreBuffer; // Must be allocated below 1MB // // Status code // EFI_DEVICE_PATH_PROTOCOL *GopDevicePath; EFI_EVENT ExitBootServicesEvent; } BIOS_VIDEO_DEV; #define BIOS_VIDEO_DEV_FROM_PCI_IO_THIS(a) CR (a, BIOS_VIDEO_DEV, PciIo, BIOS_VIDEO_DEV_SIGNATURE) #define BIOS_VIDEO_DEV_FROM_GRAPHICS_OUTPUT_THIS(a) CR (a, BIOS_VIDEO_DEV, GraphicsOutput, BIOS_VIDEO_DEV_SIGNATURE) #define BIOS_VIDEO_DEV_FROM_VGA_MINI_PORT_THIS(a) CR (a, BIOS_VIDEO_DEV, VgaMiniPort, BIOS_VIDEO_DEV_SIGNATURE) #define GRAPHICS_OUTPUT_INVALIDE_MODE_NUMBER 0xffff // // Global Variables // extern EFI_DRIVER_BINDING_PROTOCOL gBiosVideoDriverBinding; extern EFI_COMPONENT_NAME_PROTOCOL gBiosVideoComponentName; extern EFI_COMPONENT_NAME2_PROTOCOL gBiosVideoComponentName2; // // Driver Binding Protocol functions // /** Supported. @param This Pointer to driver binding protocol @param Controller Controller handle to connect @param RemainingDevicePath A pointer to the remaining portion of a device path @retval EFI_STATUS EFI_SUCCESS:This controller can be managed by this driver, Otherwise, this controller cannot be managed by this driver **/ EFI_STATUS EFIAPI BiosVideoDriverBindingSupported ( IN EFI_DRIVER_BINDING_PROTOCOL *This, IN EFI_HANDLE Controller, IN EFI_DEVICE_PATH_PROTOCOL *RemainingDevicePath ); /** Install Graphics Output Protocol onto VGA device handles. @param This Pointer to driver binding protocol @param Controller Controller handle to connect @param RemainingDevicePath A pointer to the remaining portion of a device path @return EFI_STATUS **/ EFI_STATUS EFIAPI BiosVideoDriverBindingStart ( IN EFI_DRIVER_BINDING_PROTOCOL *This, IN EFI_HANDLE Controller, IN EFI_DEVICE_PATH_PROTOCOL *RemainingDevicePath ); /** Stop. 
@param This Pointer to driver binding protocol @param Controller Controller handle to connect @param NumberOfChildren Number of children handle created by this driver @param ChildHandleBuffer Buffer containing child handle created @retval EFI_SUCCESS Driver disconnected successfully from controller @retval EFI_UNSUPPORTED Cannot find BIOS_VIDEO_DEV structure **/ EFI_STATUS EFIAPI BiosVideoDriverBindingStop ( IN EFI_DRIVER_BINDING_PROTOCOL *This, IN EFI_HANDLE Controller, IN UINTN NumberOfChildren, IN EFI_HANDLE *ChildHandleBuffer ); // // Private worker functions // /** Check for VBE device. @param BiosVideoPrivate Pointer to BIOS_VIDEO_DEV structure @retval EFI_SUCCESS VBE device found **/ EFI_STATUS BiosVideoCheckForVbe ( IN OUT BIOS_VIDEO_DEV *BiosVideoPrivate ); /** Check for VGA device. @param BiosVideoPrivate Pointer to BIOS_VIDEO_DEV structure @retval EFI_SUCCESS Standard VGA device found **/ EFI_STATUS BiosVideoCheckForVga ( IN OUT BIOS_VIDEO_DEV *BiosVideoPrivate ); /** Release resource for biso video instance. @param BiosVideoPrivate Video child device private data structure **/ VOID BiosVideoDeviceReleaseResource ( BIOS_VIDEO_DEV *BiosVideoPrivate ); // // BIOS Graphics Output Protocol functions // /** Graphics Output protocol interface to get video mode. @param This Protocol instance pointer. @param ModeNumber The mode number to return information on. @param SizeOfInfo A pointer to the size, in bytes, of the Info buffer. @param Info Caller allocated buffer that returns information about ModeNumber. @retval EFI_SUCCESS Mode information returned. @retval EFI_BUFFER_TOO_SMALL The Info buffer was too small. @retval EFI_DEVICE_ERROR A hardware error occurred trying to retrieve the video mode. @retval EFI_NOT_STARTED Video display is not initialized. Call SetMode () @retval EFI_INVALID_PARAMETER One of the input args was NULL. 
**/ EFI_STATUS EFIAPI BiosVideoGraphicsOutputQueryMode ( IN EFI_GRAPHICS_OUTPUT_PROTOCOL *This, IN UINT32 ModeNumber, OUT UINTN *SizeOfInfo, OUT EFI_GRAPHICS_OUTPUT_MODE_INFORMATION **Info ); /** Graphics Output protocol interface to set video mode. @param This Protocol instance pointer. @param ModeNumber The mode number to be set. @retval EFI_SUCCESS Graphics mode was changed. @retval EFI_DEVICE_ERROR The device had an error and could not complete the request. @retval EFI_UNSUPPORTED ModeNumber is not supported by this device. **/ EFI_STATUS EFIAPI BiosVideoGraphicsOutputSetMode ( IN EFI_GRAPHICS_OUTPUT_PROTOCOL * This, IN UINT32 ModeNumber ); /** Graphics Output protocol instance to block transfer for VBE device. @param This Pointer to Graphics Output protocol instance @param BltBuffer The data to transfer to screen @param BltOperation The operation to perform @param SourceX The X coordinate of the source for BltOperation @param SourceY The Y coordinate of the source for BltOperation @param DestinationX The X coordinate of the destination for BltOperation @param DestinationY The Y coordinate of the destination for BltOperation @param Width The width of a rectangle in the blt rectangle in pixels @param Height The height of a rectangle in the blt rectangle in pixels @param Delta Not used for EfiBltVideoFill and EfiBltVideoToVideo operation. If a Delta of 0 is used, the entire BltBuffer will be operated on. If a subrectangle of the BltBuffer is used, then Delta represents the number of bytes in a row of the BltBuffer. 
@retval EFI_INVALID_PARAMETER Invalid parameter passed in @retval EFI_SUCCESS Blt operation success **/ EFI_STATUS EFIAPI BiosVideoGraphicsOutputVbeBlt ( IN EFI_GRAPHICS_OUTPUT_PROTOCOL *This, IN EFI_GRAPHICS_OUTPUT_BLT_PIXEL *BltBuffer, OPTIONAL IN EFI_GRAPHICS_OUTPUT_BLT_OPERATION BltOperation, IN UINTN SourceX, IN UINTN SourceY, IN UINTN DestinationX, IN UINTN DestinationY, IN UINTN Width, IN UINTN Height, IN UINTN Delta ); /** Grahpics Output protocol instance to block transfer for VGA device. @param This Pointer to Grahpics Output protocol instance @param BltBuffer The data to transfer to screen @param BltOperation The operation to perform @param SourceX The X coordinate of the source for BltOperation @param SourceY The Y coordinate of the source for BltOperation @param DestinationX The X coordinate of the destination for BltOperation @param DestinationY The Y coordinate of the destination for BltOperation @param Width The width of a rectangle in the blt rectangle in pixels @param Height The height of a rectangle in the blt rectangle in pixels @param Delta Not used for EfiBltVideoFill and EfiBltVideoToVideo operation. If a Delta of 0 is used, the entire BltBuffer will be operated on. If a subrectangle of the BltBuffer is used, then Delta represents the number of bytes in a row of the BltBuffer. @retval EFI_INVALID_PARAMETER Invalid parameter passed in @retval EFI_SUCCESS Blt operation success **/ EFI_STATUS EFIAPI BiosVideoGraphicsOutputVgaBlt ( IN EFI_GRAPHICS_OUTPUT_PROTOCOL *This, IN EFI_GRAPHICS_OUTPUT_BLT_PIXEL *BltBuffer, OPTIONAL IN EFI_GRAPHICS_OUTPUT_BLT_OPERATION BltOperation, IN UINTN SourceX, IN UINTN SourceY, IN UINTN DestinationX, IN UINTN DestinationY, IN UINTN Width, IN UINTN Height, IN UINTN Delta ); // // BIOS VGA Mini Port Protocol functions // /** VgaMiniPort protocol interface to set mode. 
@param This Pointer to VgaMiniPort protocol instance @param ModeNumber The index of the mode @retval EFI_UNSUPPORTED The requested mode is not supported @retval EFI_SUCCESS The requested mode is set successfully **/ EFI_STATUS EFIAPI BiosVideoVgaMiniPortSetMode ( IN EFI_VGA_MINI_PORT_PROTOCOL *This, IN UINTN ModeNumber ); /** Event handler for Exit Boot Service. @param Event The event that be siganlled when exiting boot service. @param Context Pointer to instance of BIOS_VIDEO_DEV. **/ VOID EFIAPI BiosVideoNotifyExitBootServices ( IN EFI_EVENT Event, IN VOID *Context ); // // Standard VGA Definitions // #define VGA_HORIZONTAL_RESOLUTION 640 #define VGA_VERTICAL_RESOLUTION 480 #define VGA_NUMBER_OF_BIT_PLANES 4 #define VGA_PIXELS_PER_BYTE 8 #define VGA_BYTES_PER_SCAN_LINE (VGA_HORIZONTAL_RESOLUTION / VGA_PIXELS_PER_BYTE) #define VGA_BYTES_PER_BIT_PLANE (VGA_VERTICAL_RESOLUTION * VGA_BYTES_PER_SCAN_LINE) #define VGA_GRAPHICS_CONTROLLER_ADDRESS_REGISTER 0x3ce #define VGA_GRAPHICS_CONTROLLER_DATA_REGISTER 0x3cf #define VGA_GRAPHICS_CONTROLLER_SET_RESET_REGISTER 0x00 #define VGA_GRAPHICS_CONTROLLER_ENABLE_SET_RESET_REGISTER 0x01 #define VGA_GRAPHICS_CONTROLLER_COLOR_COMPARE_REGISTER 0x02 #define VGA_GRAPHICS_CONTROLLER_DATA_ROTATE_REGISTER 0x03 #define VGA_GRAPHICS_CONTROLLER_FUNCTION_REPLACE 0x00 #define VGA_GRAPHICS_CONTROLLER_FUNCTION_AND 0x08 #define VGA_GRAPHICS_CONTROLLER_FUNCTION_OR 0x10 #define VGA_GRAPHICS_CONTROLLER_FUNCTION_XOR 0x18 #define VGA_GRAPHICS_CONTROLLER_READ_MAP_SELECT_REGISTER 0x04 #define VGA_GRAPHICS_CONTROLLER_MODE_REGISTER 0x05 #define VGA_GRAPHICS_CONTROLLER_READ_MODE_0 0x00 #define VGA_GRAPHICS_CONTROLLER_READ_MODE_1 0x08 #define VGA_GRAPHICS_CONTROLLER_WRITE_MODE_0 0x00 #define VGA_GRAPHICS_CONTROLLER_WRITE_MODE_1 0x01 #define VGA_GRAPHICS_CONTROLLER_WRITE_MODE_2 0x02 #define VGA_GRAPHICS_CONTROLLER_WRITE_MODE_3 0x03 #define VGA_GRAPHICS_CONTROLLER_MISCELLANEOUS_REGISTER 0x06 #define VGA_GRAPHICS_CONTROLLER_COLOR_DONT_CARE_REGISTER 0x07 
#define VGA_GRAPHICS_CONTROLLER_BIT_MASK_REGISTER 0x08 /** Install child handles if the Handle supports MBR format. @param This Calling context. @param ParentHandle Parent Handle @param ParentPciIo Parent PciIo interface @param ParentLegacyBios Parent LegacyBios interface @param ParentDevicePath Parent Device Path @param RemainingDevicePath Remaining Device Path @param OriginalPciAttributes Original PCI Attributes @retval EFI_SUCCESS If a child handle was added @retval other A child handle was not added **/ EFI_STATUS BiosVideoChildHandleInstall ( IN EFI_DRIVER_BINDING_PROTOCOL *This, IN EFI_HANDLE ParentHandle, IN EFI_PCI_IO_PROTOCOL *ParentPciIo, IN EFI_LEGACY_BIOS_PROTOCOL *ParentLegacyBios, IN EFI_DEVICE_PATH_PROTOCOL *ParentDevicePath, IN EFI_DEVICE_PATH_PROTOCOL *RemainingDevicePath ); /** Deregister an video child handle and free resources. @param This Protocol instance pointer. @param Controller Video controller handle @param Handle Video child handle @return EFI_STATUS **/ EFI_STATUS BiosVideoChildHandleUninstall ( EFI_DRIVER_BINDING_PROTOCOL *This, EFI_HANDLE Controller, EFI_HANDLE Handle ); /** Release resource for biso video instance. @param BiosVideoPrivate Video child device private data structure **/ VOID BiosVideoDeviceReleaseResource ( BIOS_VIDEO_DEV *BiosVideoPrivate ); /** Check if all video child handles have been uninstalled. @param Controller Video controller handle @return TRUE Child handles exist. @return FALSE All video child handles have been uninstalled. **/ BOOLEAN HasChildHandle ( IN EFI_HANDLE Controller ); #endif
{ "pile_set_name": "Github" }
package com.habitrpg.android.habitica.models

import com.habitrpg.android.habitica.models.notifications.*

/**
 * A user notification delivered by the Habitica server or synthesized
 * locally by this app. The raw [type] string selects both the payload
 * class ([getDataType]) and the display ordering ([priority]).
 */
class Notification {
    enum class Type(val type: String) {
        // Notification types coming from the server
        LOGIN_INCENTIVE("LOGIN_INCENTIVE"),
        NEW_STUFF("NEW_STUFF"),
        NEW_CHAT_MESSAGE("NEW_CHAT_MESSAGE"),
        NEW_MYSTERY_ITEMS("NEW_MYSTERY_ITEMS"),
        GROUP_TASK_NEEDS_WORK("GROUP_TASK_NEEDS_WORK"),
        GROUP_TASK_APPROVED("GROUP_TASK_APPROVED"),
        GROUP_TASK_REQUIRES_APPROVAL("GROUP_TASK_REQUIRES_APPROVAL"),
        UNALLOCATED_STATS_POINTS("UNALLOCATED_STATS_POINTS"),

        // Achievements
        ACHIEVEMENT_PARTY_UP("ACHIEVEMENT_PARTY_UP"),
        ACHIEVEMENT_PARTY_ON("ACHIEVEMENT_PARTY_ON"),
        ACHIEVEMENT_BEAST_MASTER("ACHIEVEMENT_BEAST_MASTER"),
        ACHIEVEMENT_MOUNT_MASTER("ACHIEVEMENT_MOUNT_MASTER"),
        ACHIEVEMENT_TRIAD_BINGO("ACHIEVEMENT_TRIAD_BINGO"),
        ACHIEVEMENT_GUILD_JOINED("GUILD_JOINED_ACHIEVEMENT"),
        ACHIEVEMENT_CHALLENGE_JOINED("CHALLENGE_JOINED_ACHIEVEMENT"),
        ACHIEVEMENT_INVITED_FRIEND("INVITED_FRIEND_ACHIEVEMENT"),
        ACHIEVEMENT_GENERIC("ACHIEVEMENT"),
        ACHIEVEMENT_ONBOARDING_COMPLETE("ONBOARDING_COMPLETE"),
        FIRST_DROP("FIRST_DROPS"),

        // Custom notification types (created by this app)
        GUILD_INVITATION("GUILD_INVITATION"),
        PARTY_INVITATION("PARTY_INVITATION"),
        QUEST_INVITATION("QUEST_INVITATION"),
    }

    var id: String = ""
    var type: String? = null
    var seen: Boolean? = null
    var data: NotificationData? = null

    /** Resolves the raw [type] string to its [Type] constant, or null if unknown. */
    private fun resolvedType(): Type? = Type.values().firstOrNull { it.type == type }

    /**
     * The concrete [NotificationData] subclass the payload of this
     * notification deserializes into, or null for types without a
     * dedicated payload class.
     */
    fun getDataType(): java.lang.reflect.Type? = when (resolvedType()) {
        Type.LOGIN_INCENTIVE -> LoginIncentiveData::class.java
        Type.NEW_STUFF -> NewStuffData::class.java
        Type.NEW_CHAT_MESSAGE -> NewChatMessageData::class.java
        Type.GROUP_TASK_NEEDS_WORK -> GroupTaskNeedsWorkData::class.java
        Type.GROUP_TASK_APPROVED -> GroupTaskApprovedData::class.java
        Type.GROUP_TASK_REQUIRES_APPROVAL -> GroupTaskRequiresApprovalData::class.java
        Type.UNALLOCATED_STATS_POINTS -> UnallocatedPointsData::class.java
        Type.GUILD_INVITATION -> GuildInvitationData::class.java
        Type.PARTY_INVITATION -> PartyInvitationData::class.java
        Type.QUEST_INVITATION -> QuestInvitationData::class.java
        Type.FIRST_DROP -> FirstDropData::class.java
        Type.ACHIEVEMENT_GENERIC -> AchievementData::class.java
        else -> null
    }

    /** Display ordering: lower values sort first; unrecognized types go last. */
    val priority: Int
        get() = when (resolvedType()) {
            Type.NEW_STUFF -> 1
            Type.GUILD_INVITATION -> 2
            Type.PARTY_INVITATION -> 3
            Type.UNALLOCATED_STATS_POINTS -> 4
            Type.NEW_MYSTERY_ITEMS -> 5
            Type.NEW_CHAT_MESSAGE -> 6
            else -> 100
        }
}
{ "pile_set_name": "Github" }
Copyright (c) 2002-2006 Marcus Geelnard Copyright (c) 2006-2016 Camilla Berglund <elmindreda@glfw.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution.
{ "pile_set_name": "Github" }
// (C) Copyright Gennadiy Rozental 2001. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // See http://www.boost.org/libs/test for the library home page. // //!@file //!@brief few helpers for working with variadic macros // *************************************************************************** #ifndef BOOST_TEST_PP_VARIADIC_HPP_021515GER #define BOOST_TEST_PP_VARIADIC_HPP_021515GER // Boost #include <boost/preprocessor/control/iif.hpp> #include <boost/preprocessor/comparison/equal.hpp> #include <boost/preprocessor/variadic/size.hpp> //____________________________________________________________________________// #if BOOST_PP_VARIADICS #if BOOST_PP_VARIADICS_MSVC # define BOOST_TEST_INVOKE_VARIADIC( tool, ... ) BOOST_PP_CAT( tool (__VA_ARGS__), ) #else # define BOOST_TEST_INVOKE_VARIADIC( tool, ... ) tool (__VA_ARGS__) #endif //____________________________________________________________________________// /// if sizeof(__VA_ARGS__) == N: F1(__VA_ARGS__) /// else: F2(__VA_ARGS__) #define BOOST_TEST_INVOKE_IF_N_ARGS( N, F1, F2, ... ) \ BOOST_TEST_INVOKE_VARIADIC( \ BOOST_PP_IIF( \ BOOST_PP_EQUAL(BOOST_PP_VARIADIC_SIZE(__VA_ARGS__), N), \ F1, \ F2), \ __VA_ARGS__ ) \ /**/ //____________________________________________________________________________// #endif /* BOOST_PP_VARIADICS */ #endif // BOOST_TEST_PP_VARIADIC_HPP_021515GER // EOF
{ "pile_set_name": "Github" }
.so man2/setpgid.2
{ "pile_set_name": "Github" }
// snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.] // snippet-sourcedescription:[CreateOpsItem.java demonstrates how to create a new OpsItem.] // snippet-service:[ssm] // snippet-keyword:[Java] // snippet-keyword:[AWS Systems Manager] // snippet-keyword:[Code Sample] // snippet-sourcetype:[full-example] // snippet-sourcedate:[2020-09-10] // snippet-sourceauthor:[AWS - scmacdon] /** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * * This file is licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. A copy of * the License is located at * * http://aws.amazon.com/apache2.0/ * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * */ package com.example.ssm; //snippet-start:[ssm.java2.create_ops.import] import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.ssm.SsmClient; import software.amazon.awssdk.services.ssm.model.CreateOpsItemRequest; import software.amazon.awssdk.services.ssm.model.CreateOpsItemResponse; import software.amazon.awssdk.services.ssm.model.SsmException; //snippet-end:[ssm.java2.create_ops.import] public class CreateOpsItem { public static void main(String[] args) { final String USAGE = "\n" + "Usage:\n" + " CreateOpsItem <title><source><category><severity>\n\n" + "Where:\n" + " title - The OpsItem title.\n" + " source - The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager.\n" + " category - A category to assign to an OpsItem.\n" + " severity - A severity to assign to an OpsItem.\n"; if (args.length < 4) { System.out.println(USAGE); System.exit(1); } /* Read the name from command args */ String title = args[0]; String source = args[1]; String category = args[2]; String severity = args[3]; Region 
region = Region.US_EAST_1; SsmClient ssmClient = SsmClient.builder() .region(region) .build(); System.out.println("The ID of the OpsItem is " +createNewOpsItem(ssmClient, title, source, category, severity)); } //snippet-start:[ssm.java2.create_ops.main] public static String createNewOpsItem( SsmClient ssmClient, String title, String source, String category, String severity) { try { CreateOpsItemRequest opsItemRequest = CreateOpsItemRequest.builder() .description("Created by the AWS Systems Manager Java API") .title(title) .source(source) .category(category) .severity(severity) .build(); CreateOpsItemResponse itemResponse = ssmClient.createOpsItem(opsItemRequest); return itemResponse.opsItemId(); } catch (SsmException e) { System.err.println(e.getMessage()); System.exit(1); } return ""; } //snippet-end:[ssm.java2.create_ops.main] }
{ "pile_set_name": "Github" }
## Sum

```
Sum(expr, {i, imin, imax})
```

> evaluates the discrete sum of `expr` with `i` ranging from `imin` to `imax`.

```
Sum(expr, {i, imin, imax, di})
```

> `i` ranges from `imin` to `imax` in steps of `di`.

```
Sum(expr, {i, imin, imax}, {j, jmin, jmax}, ...)
```

> evaluates `expr` as a multiple sum, with `{i, ...}, {j, ...}, ...` being in outermost-to-innermost order.

See
* [Wikipedia - Summation](https://en.wikipedia.org/wiki/Summation)

### Examples

```
>> Sum(k, {k, 1, 10})
55
```

Double sum:

```
>> Sum(i * j, {i, 1, 10}, {j, 1, 10})
3025
```

Symbolic sums are evaluated:

```
>> Sum(k, {k, 1, n})
1/2*n*(1+n)

>> Sum(k, {k, n, 2*n})
3/2*n*(1+n)

>> Sum(k, {k, I, I + 1})
1+I*2

>> Sum(1 / k ^ 2, {k, 1, n})
HarmonicNumber(n, 2)
```

Verify algebraic identities:

```
>> Simplify(Sum(x ^ 2, {x, 1, y}) - y * (y + 1) * (2 * y + 1) / 6)
0
```

Infinite sums:

```
>> Sum(1 / 2 ^ i, {i, 1, Infinity})
1

>> Sum(1 / k ^ 2, {k, 1, Infinity})
Pi^2/6

>> Sum(x^k*Sum(y^l,{l,0,4}),{k,0,4})
1+y+y^2+y^3+y^4+x*(1+y+y^2+y^3+y^4)+(1+y+y^2+y^3+y^4)*x^2+(1+y+y^2+y^3+y^4)*x^3+(1+y+y^2+y^3+y^4)*x^4

>> Sum(2^(-i), {i, 1, Infinity})
1

>> Sum(i / Log(i), {i, 1, Infinity})
Sum(i/Log(i),{i,1,Infinity})

>> Sum(Cos(Pi i), {i, 1, Infinity})
Sum(Cos(i*Pi),{i,1,Infinity})
```
{ "pile_set_name": "Github" }
module SocialStream
  module Places
    # Model mixins provided by the Places extension.
    module Models
      # Adds geolocation support to an ActivityObject.
      #
      # Mixing in this concern gives the record an optional geotag
      # (coordinates and camera orientation) and, through it, a postal
      # address. Both associations are autosaved with the parent record.
      module ActivityObject
        extend ActiveSupport::Concern

        included do
          has_one :geotag, :autosave => true
          has_one :address, :through => :geotag, :autosave => true

          # Expose the geotag's coordinate and orientation attributes directly
          # on the activity object. Delegating to +geotag!+ (not +geotag+)
          # builds the record on first access, so writers always have a target.
          delegate :latitude, :latitude=,
                   :longitude, :longitude=,
                   :altitude, :altitude=,
                   :heading, :heading=,
                   :tilt, :tilt=,
                   :to => :geotag!

          # Same pattern for the postal address fields, reached through the geotag.
          delegate :formatted, :formatted=,
                   :streetAddress, :streetAddress=,
                   :locality, :locality=,
                   :region, :region=,
                   :postalCode, :postalCode=,
                   :country, :country=,
                   :to => :address!
        end

        # Returns the associated geotag, building (not saving) one if absent.
        def geotag!
          geotag || build_geotag
        end

        # Returns the geotag's address, building intermediate records as needed.
        def address!
          geotag!.address!
        end
      end
    end
  end
end
{ "pile_set_name": "Github" }
// Automatically generated by the Fast Binary Encoding compiler, do not modify!
// https://github.com/chronoxor/FastBinaryEncoding
// Source: test.fbe
// Version: 1.4.0.0

@file:Suppress("UnusedImport", "unused")

package com.chronoxor.test.fbe

// Fast Binary Encoding optional Char final model
//
// Wire layout: one "has value" flag byte (0 or 1) followed, only when the
// flag is non-zero, by the serialized Char payload.
class FinalModelOptionalChar(buffer: com.chronoxor.fbe.Buffer, offset: Long) : com.chronoxor.fbe.FinalModel(buffer, offset)
{
    // Get the allocation size: 1 flag byte plus the payload size when a value is present
    fun fbeAllocationSize(optional: Char?): Long = 1 + (if (optional != null) value.fbeAllocationSize(optional) else 0)

    // Checks if the object contains a value
    // (false when the flag byte lies outside the buffer, or the flag is 0)
    fun hasValue(): Boolean
    {
        if ((_buffer.offset + fbeOffset + 1) > _buffer.size)
            return false

        val fbeHasValue = readInt8(fbeOffset).toInt()
        return fbeHasValue != 0
    }

    // Base final model value
    // (created at offset 0: it is always accessed behind a buffer shift)
    val value = com.chronoxor.fbe.FinalModelChar(buffer, 0)

    // Check if the optional value is valid
    // Returns the total verified size in bytes, or Long.MAX_VALUE on failure
    override fun verify(): Long
    {
        if ((_buffer.offset + fbeOffset + 1) > _buffer.size)
            return Long.MAX_VALUE

        val fbeHasValue = readInt8(fbeOffset).toInt()
        if (fbeHasValue == 0)
            return 1

        // Shift past the flag byte so the payload model verifies at offset 0
        _buffer.shift(fbeOffset + 1)
        val fbeResult = value.verify()
        _buffer.unshift(fbeOffset + 1)
        return 1 + fbeResult
    }

    // Get the optional value
    // On return, `size.value` holds the number of bytes consumed
    fun get(size: com.chronoxor.fbe.Size): Char?
    {
        assert((_buffer.offset + fbeOffset + 1) <= _buffer.size) { "Model is broken!" }
        if ((_buffer.offset + fbeOffset + 1) > _buffer.size)
        {
            size.value = 0
            return null
        }

        if (!hasValue())
        {
            size.value = 1
            return null
        }

        // Read the payload just after the flag byte, then account for the flag
        _buffer.shift(fbeOffset + 1)
        val optional = value.get(size)
        _buffer.unshift(fbeOffset + 1)
        size.value += 1
        return optional
    }

    // Set the optional value
    // Returns the number of bytes written (0 if the flag byte does not fit)
    fun set(optional: Char?): Long
    {
        assert((_buffer.offset + fbeOffset + 1) <= _buffer.size) { "Model is broken!" }
        if ((_buffer.offset + fbeOffset + 1) > _buffer.size)
            return 0

        val fbeHasValue = if (optional != null) 1 else 0
        write(fbeOffset, fbeHasValue.toByte())
        if (fbeHasValue == 0)
            return 1

        // Write the payload just after the flag byte
        _buffer.shift(fbeOffset + 1)
        val size = value.set(optional!!)
        _buffer.unshift(fbeOffset + 1)
        return 1 + size
    }
}
{ "pile_set_name": "Github" }