code_text
stringlengths
604
999k
repo_name
stringlengths
4
100
file_path
stringlengths
4
873
language
stringclasses
23 values
license
stringclasses
15 values
size
int32
1.02k
999k
#include <linux/fault-inject.h> #include <linux/slab.h> static struct { struct fault_attr attr; u32 ignore_gfp_wait; int cache_filter; } failslab = { .attr = FAULT_ATTR_INITIALIZER, .ignore_gfp_wait = 1, .cache_filter = 0, }; bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags) { if (gfpflags & __GFP_NOFAIL) return false; if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT)) return false; if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB)) return false; return should_fail(&failslab.attr, size); } static int __init setup_failslab(char *str) { return setup_fault_attr(&failslab.attr, str); } __setup("failslab=", setup_failslab); #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS static int __init failslab_debugfs_init(void) { struct dentry *dir; umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr); if (IS_ERR(dir)) return PTR_ERR(dir); if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, &failslab.ignore_gfp_wait)) goto fail; if (!debugfs_create_bool("cache-filter", mode, dir, &failslab.cache_filter)) goto fail; return 0; fail: debugfs_remove_recursive(dir); return -ENOMEM; } late_initcall(failslab_debugfs_init); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
languitar/android_kernel_lge_hammerhead
mm/failslab.c
C
gpl-2.0
1,316
#include <linux/init.h> #include <linux/module.h> #include <linux/cpufreq.h> #include <hwregs/reg_map.h> #include <arch/hwregs/reg_rdwr.h> #include <arch/hwregs/config_defs.h> #include <arch/hwregs/bif_core_defs.h> static int cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, void *data); static struct notifier_block cris_sdram_freq_notifier_block = { .notifier_call = cris_sdram_freq_notifier }; static struct cpufreq_frequency_table cris_freq_table[] = { {0x01, 6000}, {0x02, 200000}, {0, CPUFREQ_TABLE_END}, }; static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu) { reg_config_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); return clk_ctrl.pll ? 200000 : 6000; } static void cris_freq_set_cpu_state(unsigned int state) { int i; struct cpufreq_freqs freqs; reg_config_rw_clk_ctrl clk_ctrl; clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl); for_each_possible_cpu(i) { freqs.old = cris_freq_get_cpu_frequency(i); freqs.new = cris_freq_table[state].frequency; freqs.cpu = i; } cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); local_irq_disable(); /* Even though we may be SMP they will share the same clock * so all settings are made on CPU0. 
*/ if (cris_freq_table[state].frequency == 200000) clk_ctrl.pll = 1; else clk_ctrl.pll = 0; REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl); local_irq_enable(); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); }; static int cris_freq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]); } static int cris_freq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; if (cpufreq_frequency_table_target (policy, cris_freq_table, target_freq, relation, &newstate)) return -EINVAL; cris_freq_set_cpu_state(newstate); return 0; } static int cris_freq_cpu_init(struct cpufreq_policy *policy) { int result; /* cpuinfo and default policy values */ policy->cpuinfo.transition_latency = 1000000; /* 1ms */ policy->cur = cris_freq_get_cpu_frequency(0); result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table); if (result) return (result); cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu); return 0; } static int cris_freq_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static struct freq_attr *cris_freq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver cris_freq_driver = { .get = cris_freq_get_cpu_frequency, .verify = cris_freq_verify, .target = cris_freq_target, .init = cris_freq_cpu_init, .exit = cris_freq_cpu_exit, .name = "cris_freq", .owner = THIS_MODULE, .attr = cris_freq_attr, }; static int __init cris_freq_init(void) { int ret; ret = cpufreq_register_driver(&cris_freq_driver); cpufreq_register_notifier(&cris_sdram_freq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); return ret; } static int cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val, void *data) { int i; struct cpufreq_freqs *freqs = data; if (val == CPUFREQ_PRECHANGE) { reg_bif_core_rw_sdram_timing timing = REG_RD(bif_core, regi_bif_core, rw_sdram_timing); timing.cpd 
= (freqs->new == 200000 ? 0 : 1); if (freqs->new == 200000) for (i = 0; i < 50000; i++) ; REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing); } return 0; } module_init(cris_freq_init);
jongwonk/s5pv210_linux_kernel
arch/cris/arch-v32/mach-fs/cpufreq.c
C
gpl-2.0
3,546
var baseFlatten = require('../internal/baseFlatten'), createWrapper = require('../internal/createWrapper'), functions = require('../object/functions'), restParam = require('./restParam'); /** Used to compose bitmasks for wrapper metadata. */ var BIND_FLAG = 1; /** * Binds methods of an object to the object itself, overwriting the existing * method. Method names may be specified as individual arguments or as arrays * of method names. If no method names are provided all enumerable function * properties, own and inherited, of `object` are bound. * * **Note:** This method does not set the "length" property of bound functions. * * @static * @memberOf _ * @category Function * @param {Object} object The object to bind and assign the bound methods to. * @param {...(string|string[])} [methodNames] The object method names to bind, * specified as individual method names or arrays of method names. * @returns {Object} Returns `object`. * @example * * var view = { * 'label': 'docs', * 'onClick': function() { * console.log('clicked ' + this.label); * } * }; * * _.bindAll(view); * jQuery('#docs').on('click', view.onClick); * // => logs 'clicked docs' when the element is clicked */ var bindAll = restParam(function(object, methodNames) { methodNames = methodNames.length ? baseFlatten(methodNames) : functions(object); var index = -1, length = methodNames.length; while (++index < length) { var key = methodNames[index]; object[key] = createWrapper(object[key], BIND_FLAG, object); } return object; }); module.exports = bindAll;
nqtuan164/queenmodel
node_modules/grunt-contrib-less/node_modules/lodash/function/bindAll.js
JavaScript
gpl-2.0
1,610
/* * linux/arch/alpha/kernel/core_polaris.c * * POLARIS chip-specific code */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_polaris.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/ptrace.h> #include "proto.h" #include "pci_impl.h" /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Given a bus, device, and function number, compute resulting * configuration space address. This is fairly straightforward * on POLARIS, since the chip itself generates Type 0 or Type 1 * cycles automatically depending on the bus number (Bus 0 is * hardwired to Type 0, all others are Type 1. Peer bridges * are not supported). * * All types: * * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., scsi and ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, u8 *type1) { u8 bus = pbus->number; *type1 = (bus == 0) ? 
0 : 1; *pci_addr = (bus << 16) | (device_fn << 8) | (where) | POLARIS_DENSE_CONFIG_BASE; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," " returning address 0x%p\n" bus, device_fn, where, *pci_addr)); return 0; } static int polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops polaris_pci_ops = { .read = polaris_read_config, .write = polaris_write_config, }; void __init polaris_init_arch(void) { struct pci_controller *hose; /* May need to initialize error reporting (see PCICTL0/1), but * for now assume that the firmware has done the right thing * already. */ #if 0 printk("polaris_init_arch(): trusting firmware for setup\n"); #endif /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = 0; hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR; hose->sparse_io_base = 0; hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR; hose->sg_isa = hose->sg_pci = NULL; /* The I/O window is fixed at 2G @ 2G. 
*/ __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; } static inline void polaris_pci_clr_err(void) { *(vusp)POLARIS_W_STATUS; /* Write 1's to settable bits to clear errors */ *(vusp)POLARIS_W_STATUS = 0x7800; mb(); *(vusp)POLARIS_W_STATUS; } void polaris_machine_check(unsigned long vector, unsigned long la_ptr) { /* Clear the error before any reporting. */ mb(); mb(); draina(); polaris_pci_clr_err(); wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "POLARIS", mcheck_expected(0)); }
RepoB/android_kernel_sony_msm8974-GreatDevs
arch/alpha/kernel/core_polaris.c
C
gpl-2.0
4,523
#include <windows.h> #include <stdio.h> #include <ddraw.h> #include <ddrawi.h> #include <d3dhal.h> #include <ddrawgdi.h> #if defined(_WIN32) && !defined(_NO_COM ) #define COM_NO_WINDOWS_H #include <objbase.h> #else #define IUnknown void #if !defined(NT_BUILD_ENVIRONMENT) && !defined(WINNT) #define CO_E_NOTINITIALIZED 0x800401F0 #endif #endif ULONG WINAPI Main_DDrawSurface_AddRef(LPDIRECTDRAWSURFACE2); ULONG WINAPI Main_DDrawSurface_Release(LPDIRECTDRAWSURFACE2); HRESULT WINAPI Main_DDrawSurface_QueryInterface(LPDIRECTDRAWSURFACE2, REFIID, LPVOID*); HRESULT WINAPI Main_DDrawSurface_ReleaseDC(LPDIRECTDRAWSURFACE2, HDC); HRESULT WINAPI Main_DDrawSurface_Blt(LPDIRECTDRAWSURFACE2, LPRECT, LPDIRECTDRAWSURFACE2, LPRECT, DWORD, LPDDBLTFX); HRESULT WINAPI Main_DDrawSurface_BltBatch(LPDIRECTDRAWSURFACE2, LPDDBLTBATCH, DWORD, DWORD); HRESULT WINAPI Main_DDrawSurface_BltFast(LPDIRECTDRAWSURFACE2, DWORD, DWORD, LPDIRECTDRAWSURFACE2, LPRECT, DWORD); HRESULT WINAPI Main_DDrawSurface_DeleteAttachedSurface(LPDIRECTDRAWSURFACE2, DWORD, LPDIRECTDRAWSURFACE2); HRESULT WINAPI Main_DDrawSurface_EnumAttachedSurfaces(LPDIRECTDRAWSURFACE2, LPVOID, LPDDENUMSURFACESCALLBACK); HRESULT WINAPI Main_DDrawSurface_EnumOverlayZOrders(LPDIRECTDRAWSURFACE2, DWORD, LPVOID,LPDDENUMSURFACESCALLBACK); HRESULT WINAPI Main_DDrawSurface_Flip(LPDIRECTDRAWSURFACE2 , LPDIRECTDRAWSURFACE2, DWORD); HRESULT WINAPI Main_DDrawSurface_GetAttachedSurface(LPDIRECTDRAWSURFACE2, LPDDSCAPS, LPDIRECTDRAWSURFACE2*); HRESULT WINAPI Main_DDrawSurface_GetBltStatus(LPDIRECTDRAWSURFACE2, DWORD dwFlags); HRESULT WINAPI Main_DDrawSurface_GetCaps(LPDIRECTDRAWSURFACE2, LPDDSCAPS pCaps); HRESULT WINAPI Main_DDrawSurface_GetClipper(LPDIRECTDRAWSURFACE2, LPDIRECTDRAWCLIPPER*); HRESULT WINAPI Main_DDrawSurface_GetColorKey(LPDIRECTDRAWSURFACE2, DWORD, LPDDCOLORKEY); HRESULT WINAPI Main_DDrawSurface_GetDC(LPDIRECTDRAWSURFACE2, HDC *); HRESULT WINAPI Main_DDrawSurface_GetDDInterface(LPDIRECTDRAWSURFACE2, LPVOID*); HRESULT WINAPI 
Main_DDrawSurface_GetFlipStatus(LPDIRECTDRAWSURFACE2, DWORD); HRESULT WINAPI Main_DDrawSurface_GetOverlayPosition(LPDIRECTDRAWSURFACE2, LPLONG, LPLONG); HRESULT WINAPI Main_DDrawSurface_GetPalette(LPDIRECTDRAWSURFACE2, LPDIRECTDRAWPALETTE*); HRESULT WINAPI Main_DDrawSurface_GetPixelFormat(LPDIRECTDRAWSURFACE2, LPDDPIXELFORMAT); HRESULT WINAPI Main_DDrawSurface_GetSurfaceDesc(LPDIRECTDRAWSURFACE2, LPDDSURFACEDESC); HRESULT WINAPI Main_DDrawSurface_IsLost(LPDIRECTDRAWSURFACE2); HRESULT WINAPI Main_DDrawSurface_PageLock(LPDIRECTDRAWSURFACE2, DWORD); HRESULT WINAPI Main_DDrawSurface_PageUnlock(LPDIRECTDRAWSURFACE2, DWORD); HRESULT WINAPI Main_DDrawSurface_ReleaseDC(LPDIRECTDRAWSURFACE2, HDC); HRESULT WINAPI Main_DDrawSurface_SetClipper (LPDIRECTDRAWSURFACE2, LPDIRECTDRAWCLIPPER); HRESULT WINAPI Main_DDrawSurface_SetColorKey (LPDIRECTDRAWSURFACE2, DWORD, LPDDCOLORKEY); HRESULT WINAPI Main_DDrawSurface_SetOverlayPosition (LPDIRECTDRAWSURFACE2, LONG, LONG); HRESULT WINAPI Main_DDrawSurface_SetPalette (LPDIRECTDRAWSURFACE2, LPDIRECTDRAWPALETTE); HRESULT WINAPI Main_DDrawSurface_UpdateOverlayDisplay (LPDIRECTDRAWSURFACE2, DWORD); HRESULT WINAPI Main_DDrawSurface_UpdateOverlayZOrder (LPDIRECTDRAWSURFACE2, DWORD, LPDIRECTDRAWSURFACE2); HRESULT WINAPI Main_DDrawSurface_Unlock (LPDIRECTDRAWSURFACE2, LPVOID); HRESULT WINAPI Main_DDrawSurface_Initialize (LPDIRECTDRAWSURFACE2, LPDIRECTDRAW, LPDDSURFACEDESC); HRESULT WINAPI Main_DDrawSurface_Lock (LPDIRECTDRAWSURFACE2, LPRECT, LPDDSURFACEDESC, DWORD, HANDLE); HRESULT WINAPI Main_DDrawSurface_Restore(LPDIRECTDRAWSURFACE2); HRESULT WINAPI Main_DDrawSurface_UpdateOverlay (LPDIRECTDRAWSURFACE2, LPRECT, LPDIRECTDRAWSURFACE2, LPRECT, DWORD, LPDDOVERLAYFX); HRESULT WINAPI Main_DDrawSurface_AddAttachedSurface(LPDIRECTDRAWSURFACE2, LPDIRECTDRAWSURFACE2); HRESULT WINAPI Main_DDrawSurface_AddOverlayDirtyRect(LPDIRECTDRAWSURFACE2, LPRECT); HRESULT WINAPI Main_DDrawSurface_SetSurfaceDesc(LPDIRECTDRAWSURFACE2, DDSURFACEDESC2, DWORD); 
IDirectDrawSurface2Vtbl DirectDrawSurface2_Vtable = { Main_DDrawSurface_QueryInterface, Main_DDrawSurface_AddRef, /* (Compact done) */ Main_DDrawSurface_Release, Main_DDrawSurface_AddAttachedSurface, Main_DDrawSurface_AddOverlayDirtyRect, Main_DDrawSurface_Blt, Main_DDrawSurface_BltBatch, Main_DDrawSurface_BltFast, Main_DDrawSurface_DeleteAttachedSurface, Main_DDrawSurface_EnumAttachedSurfaces, Main_DDrawSurface_EnumOverlayZOrders, Main_DDrawSurface_Flip, Main_DDrawSurface_GetAttachedSurface, Main_DDrawSurface_GetBltStatus, Main_DDrawSurface_GetCaps, Main_DDrawSurface_GetClipper, Main_DDrawSurface_GetColorKey, Main_DDrawSurface_GetDC, Main_DDrawSurface_GetFlipStatus, Main_DDrawSurface_GetOverlayPosition, Main_DDrawSurface_GetPalette, Main_DDrawSurface_GetPixelFormat, Main_DDrawSurface_GetSurfaceDesc, Main_DDrawSurface_Initialize, Main_DDrawSurface_IsLost, Main_DDrawSurface_Lock, Main_DDrawSurface_ReleaseDC, Main_DDrawSurface_Restore, Main_DDrawSurface_SetClipper, Main_DDrawSurface_SetColorKey, Main_DDrawSurface_SetOverlayPosition, Main_DDrawSurface_SetPalette, Main_DDrawSurface_Unlock, Main_DDrawSurface_UpdateOverlay, Main_DDrawSurface_UpdateOverlayDisplay, Main_DDrawSurface_UpdateOverlayZOrder, Main_DDrawSurface_GetDDInterface, Main_DDrawSurface_PageLock, Main_DDrawSurface_PageUnlock, };
rickerliang/reactos-mirror2
dll/directx/ddraw/Vtable/DirectDrawSurface2_Vtable.c
C
gpl-2.0
5,531
/* linux/arch/arm/mach-msm/m7wl-wifi.c */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <asm/mach-types.h> #include <asm/gpio.h> #include <asm/io.h> #include <linux/skbuff.h> #include <linux/wifi_tiwlan.h> #include "board-m7wl.h" #include "board-m7wl-wifi.h" int m7wl_wifi_power(int on); int m7wl_wifi_reset(int on); int m7wl_wifi_set_carddetect(int on); int m7wl_wifi_get_mac_addr(unsigned char *buf); #define PREALLOC_WLAN_NUMBER_OF_SECTIONS 4 #define PREALLOC_WLAN_NUMBER_OF_BUFFERS 160 #define PREALLOC_WLAN_SECTION_HEADER 24 #define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) #define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) #define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 512) #define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 1024) #define WLAN_SKB_BUF_NUM 16 #define HW_OOB 1 static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; typedef struct wifi_mem_prealloc_struct { void *mem_ptr; unsigned long size; } wifi_mem_prealloc_t; static wifi_mem_prealloc_t wifi_mem_array[PREALLOC_WLAN_NUMBER_OF_SECTIONS] = { { NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER) }, { NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER) }, { NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER) }, { NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER) } }; static void *m7wl_wifi_mem_prealloc(int section, unsigned long size) { if (section == PREALLOC_WLAN_NUMBER_OF_SECTIONS) return wlan_static_skb; if ((section < 0) || (section > PREALLOC_WLAN_NUMBER_OF_SECTIONS)) return NULL; if (wifi_mem_array[section].size < size) return NULL; return wifi_mem_array[section].mem_ptr; } int __init m7wl_init_wifi_mem(void) { int i; for (i = 0; (i < WLAN_SKB_BUF_NUM); i++) { if (i < (WLAN_SKB_BUF_NUM/2)) wlan_static_skb[i] = dev_alloc_skb(PAGE_SIZE*2); else wlan_static_skb[i] = dev_alloc_skb(PAGE_SIZE*16); } for (i = 0; (i 
< PREALLOC_WLAN_NUMBER_OF_SECTIONS); i++) { wifi_mem_array[i].mem_ptr = kmalloc(wifi_mem_array[i].size, GFP_KERNEL); if (wifi_mem_array[i].mem_ptr == NULL) return -ENOMEM; } return 0; } static struct resource m7wl_wifi_resources[] = { [0] = { .name = "bcmdhd_wlan_irq", .start = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, WL_HOST_WAKE), .end = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, WL_HOST_WAKE), #ifdef HW_OOB .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE, #else .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, #endif }, }; static struct wifi_platform_data m7wl_wifi_control = { .set_power = m7wl_wifi_power, .set_reset = m7wl_wifi_reset, .set_carddetect = m7wl_wifi_set_carddetect, .mem_prealloc = m7wl_wifi_mem_prealloc, .get_mac_addr = m7wl_wifi_get_mac_addr, }; static struct platform_device m7wl_wifi_device = { .name = "bcmdhd_wlan", .id = 1, .num_resources = ARRAY_SIZE(m7wl_wifi_resources), .resource = m7wl_wifi_resources, .dev = { .platform_data = &m7wl_wifi_control, }, }; static unsigned m7wl_wifi_update_nvs(char *str) { #define NVS_LEN_OFFSET 0x0C #define NVS_DATA_OFFSET 0x40 unsigned char *ptr; unsigned len; if (!str) return -EINVAL; ptr = get_wifi_nvs_ram(); /* Size in format LE assumed */ memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len)); /* the last bye in NVRAM is 0, trim it */ if (ptr[NVS_DATA_OFFSET + len - 1] == 0) len -= 1; if (ptr[NVS_DATA_OFFSET + len - 1] != '\n') { len += 1; ptr[NVS_DATA_OFFSET + len - 1] = '\n'; } strcpy(ptr + NVS_DATA_OFFSET + len, str); len += strlen(str); memcpy(ptr + NVS_LEN_OFFSET, &len, sizeof(len)); return 0; } #ifdef HW_OOB static unsigned strip_nvs_param(char *param) { unsigned char *nvs_data; unsigned param_len; int start_idx, end_idx; unsigned char *ptr; unsigned len; if (!param) return -EINVAL; ptr = get_wifi_nvs_ram(); /* Size in format LE assumed */ memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len)); /* the last bye in NVRAM is 0, trim it */ if (ptr[NVS_DATA_OFFSET + len - 1] == 0) len -= 1; nvs_data = ptr 
+ NVS_DATA_OFFSET; param_len = strlen(param); /* search param */ for (start_idx = 0; start_idx < len - param_len; start_idx++) { if (memcmp(&nvs_data[start_idx], param, param_len) == 0) break; } end_idx = 0; if (start_idx < len - param_len) { /* search end-of-line */ for (end_idx = start_idx + param_len; end_idx < len; end_idx++) { if (nvs_data[end_idx] == '\n' || nvs_data[end_idx] == 0) break; } } if (start_idx < end_idx) { /* move the remain data forward */ for (; end_idx + 1 < len; start_idx++, end_idx++) nvs_data[start_idx] = nvs_data[end_idx+1]; len = len - (end_idx - start_idx + 1); memcpy(ptr + NVS_LEN_OFFSET, &len, sizeof(len)); } return 0; } #endif #define WIFI_MAC_PARAM_STR "macaddr=" #define WIFI_MAX_MAC_LEN 17 /* XX:XX:XX:XX:XX:XX */ static uint get_mac_from_wifi_nvs_ram(char *buf, unsigned int buf_len) { unsigned char *nvs_ptr; unsigned char *mac_ptr; uint len = 0; if (!buf || !buf_len) return 0; nvs_ptr = get_wifi_nvs_ram(); if (nvs_ptr) nvs_ptr += NVS_DATA_OFFSET; mac_ptr = strstr(nvs_ptr, WIFI_MAC_PARAM_STR); if (mac_ptr) { mac_ptr += strlen(WIFI_MAC_PARAM_STR); /* skip leading space */ while (mac_ptr[0] == ' ') mac_ptr++; /* locate end-of-line */ len = 0; while (mac_ptr[len] != '\r' && mac_ptr[len] != '\n' && mac_ptr[len] != '\0') { len++; } if (len > buf_len) len = buf_len; memcpy(buf, mac_ptr, len); } return len; } #define ETHER_ADDR_LEN 6 int m7wl_wifi_get_mac_addr(unsigned char *buf) { static u8 ether_mac_addr[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}; char mac[WIFI_MAX_MAC_LEN]; unsigned mac_len; unsigned int macpattern[ETHER_ADDR_LEN]; int i; mac_len = get_mac_from_wifi_nvs_ram(mac, WIFI_MAX_MAC_LEN); if (mac_len > 0) { /* Mac address to pattern */ sscanf(mac, "%02x:%02x:%02x:%02x:%02x:%02x", &macpattern[0], &macpattern[1], &macpattern[2], &macpattern[3], &macpattern[4], &macpattern[5] ); for (i = 0; i < ETHER_ADDR_LEN; i++) ether_mac_addr[i] = (u8)macpattern[i]; } memcpy(buf, ether_mac_addr, sizeof(ether_mac_addr)); 
printk(KERN_INFO"m7wl_wifi_get_mac_addr = %02x %02x %02x %02x %02x %02x \n", ether_mac_addr[0], ether_mac_addr[1], ether_mac_addr[2], ether_mac_addr[3], ether_mac_addr[4], ether_mac_addr[5]); return 0; } int __init m7wl_wifi_init(void) { int ret; printk(KERN_INFO "%s: start\n", __func__); #ifdef HW_OOB strip_nvs_param("sd_oobonly"); #else m7wl_wifi_update_nvs("sd_oobonly=1\n"); #endif m7wl_wifi_update_nvs("btc_params80=0\n"); m7wl_wifi_update_nvs("btc_params6=30\n"); m7wl_init_wifi_mem(); ret = platform_device_register(&m7wl_wifi_device); return ret; }
denseye73/mynewkernel
arch/arm/mach-msm/board-m7wl-wifi.c
C
gpl-2.0
6,896
/*- * BSD LICENSE * * Copyright(c) 2015-2016 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <string.h> #include <rte_common.h> #include <rte_malloc.h> #include <rte_cryptodev_pmd.h> #include "rte_aesni_mb_pmd_private.h" static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = { { /* MD5 HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, {.auth = { .algo = RTE_CRYPTO_AUTH_MD5_HMAC, .block_size = 64, .key_size = { .min = 64, .max = 64, .increment = 0 }, .digest_size = { .min = 12, .max = 12, .increment = 0 }, .aad_size = { 0 } }, } }, } }, { /* SHA1 HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, {.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, .block_size = 64, .key_size = { .min = 64, .max = 64, .increment = 0 }, .digest_size = { .min = 12, .max = 12, .increment = 0 }, .aad_size = { 0 } }, } }, } }, { /* SHA224 HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, {.auth = { .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, .block_size = 64, .key_size = { .min = 64, .max = 64, .increment = 0 }, .digest_size = { .min = 14, .max = 14, .increment = 0 }, .aad_size = { 0 } }, } }, } }, { /* SHA256 HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, {.auth = { .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, .block_size = 64, .key_size = { .min = 64, .max = 64, .increment = 0 }, .digest_size = { .min = 16, .max = 16, .increment = 0 }, .aad_size = { 0 } }, } }, } }, { /* SHA384 HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, {.auth = { .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, .block_size = 128, .key_size = { .min = 128, .max = 128, .increment = 0 }, .digest_size = { .min = 24, .max = 24, .increment = 0 }, .aad_size = { 0 } }, } }, } }, { /* SHA512 HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, {.auth = { .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, .block_size = 128, .key_size = { .min = 128, .max = 128, 
.increment = 0 }, .digest_size = { .min = 32, .max = 32, .increment = 0 }, .aad_size = { 0 } }, } }, } }, { /* AES XCBC HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, {.auth = { .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, .block_size = 16, .key_size = { .min = 16, .max = 16, .increment = 0 }, .digest_size = { .min = 12, .max = 12, .increment = 0 }, .aad_size = { 0 } }, } }, } }, { /* AES CBC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, {.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC, .block_size = 16, .key_size = { .min = 16, .max = 32, .increment = 8 }, .iv_size = { .min = 16, .max = 16, .increment = 0 } }, } }, } }, RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; /** Configure device */ static int aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev) { return 0; } /** Start device */ static int aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev) { return 0; } /** Stop device */ static void aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev) { } /** Close device */ static int aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev) { return 0; } /** Get device statistics */ static void aesni_mb_pmd_stats_get(struct rte_cryptodev *dev, struct rte_cryptodev_stats *stats) { int qp_id; for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id]; stats->enqueued_count += qp->stats.enqueued_count; stats->dequeued_count += qp->stats.dequeued_count; stats->enqueue_err_count += qp->stats.enqueue_err_count; stats->dequeue_err_count += qp->stats.dequeue_err_count; } } /** Reset device statistics */ static void aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev) { int qp_id; for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id]; memset(&qp->stats, 0, sizeof(qp->stats)); } } /** Get device info */ static void aesni_mb_pmd_info_get(struct rte_cryptodev 
*dev, struct rte_cryptodev_info *dev_info) { struct aesni_mb_private *internals = dev->data->dev_private; if (dev_info != NULL) { dev_info->dev_type = dev->dev_type; dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = aesni_mb_pmd_capabilities; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; dev_info->sym.max_nb_sessions = internals->max_nb_sessions; } } /** Release queue pair */ static int aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) { if (dev->data->queue_pairs[qp_id] != NULL) { rte_free(dev->data->queue_pairs[qp_id]); dev->data->queue_pairs[qp_id] = NULL; } return 0; } /** set a unique name for the queue pair based on it's name, dev_id and qp_id */ static int aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev, struct aesni_mb_qp *qp) { unsigned n = snprintf(qp->name, sizeof(qp->name), "aesni_mb_pmd_%u_qp_%u", dev->data->dev_id, qp->id); if (n > sizeof(qp->name)) return -1; return 0; } /** Create a ring to place processed operations on */ static struct rte_ring * aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp, unsigned ring_size, int socket_id) { struct rte_ring *r; r = rte_ring_lookup(qp->name); if (r) { if (r->prod.size >= ring_size) { MB_LOG_INFO("Reusing existing ring %s for processed ops", qp->name); return r; } MB_LOG_ERR("Unable to reuse existing ring %s for processed ops", qp->name); return NULL; } return rte_ring_create(qp->name, ring_size, socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); } /** Setup a queue pair */ static int aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id) { struct aesni_mb_qp *qp = NULL; struct aesni_mb_private *internals = dev->data->dev_private; /* Free memory prior to re-allocation if needed. */ if (dev->data->queue_pairs[qp_id] != NULL) aesni_mb_pmd_qp_release(dev, qp_id); /* Allocate the queue pair data structure. 
*/ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp), RTE_CACHE_LINE_SIZE, socket_id); if (qp == NULL) return -ENOMEM; qp->id = qp_id; dev->data->queue_pairs[qp_id] = qp; if (aesni_mb_pmd_qp_set_unique_name(dev, qp)) goto qp_setup_cleanup; qp->ops = &job_ops[internals->vector_mode]; qp->processed_ops = aesni_mb_pmd_qp_create_processed_ops_ring(qp, qp_conf->nb_descriptors, socket_id); if (qp->processed_ops == NULL) goto qp_setup_cleanup; qp->sess_mp = dev->data->session_pool; memset(&qp->stats, 0, sizeof(qp->stats)); /* Initialise multi-buffer manager */ (*qp->ops->job.init_mgr)(&qp->mb_mgr); return 0; qp_setup_cleanup: if (qp) rte_free(qp); return -1; } /** Start queue pair */ static int aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, __rte_unused uint16_t queue_pair_id) { return -ENOTSUP; } /** Stop queue pair */ static int aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, __rte_unused uint16_t queue_pair_id) { return -ENOTSUP; } /** Return the number of allocated queue pairs */ static uint32_t aesni_mb_pmd_qp_count(struct rte_cryptodev *dev) { return dev->data->nb_queue_pairs; } /** Returns the size of the aesni multi-buffer session structure */ static unsigned aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct aesni_mb_session); } /** Configure a aesni multi-buffer session from a crypto xform chain */ static void * aesni_mb_pmd_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *sess) { struct aesni_mb_private *internals = dev->data->dev_private; if (unlikely(sess == NULL)) { MB_LOG_ERR("invalid session struct"); return NULL; } if (aesni_mb_set_session_parameters(&job_ops[internals->vector_mode], sess, xform) != 0) { MB_LOG_ERR("failed configure session parameters"); return NULL; } return sess; } /** Clear the memory of session so it doesn't leave key material behind */ static void aesni_mb_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, 
void *sess) { /* * Current just resetting the whole data structure, need to investigate * whether a more selective reset of key would be more performant */ if (sess) memset(sess, 0, sizeof(struct aesni_mb_session)); } struct rte_cryptodev_ops aesni_mb_pmd_ops = { .dev_configure = aesni_mb_pmd_config, .dev_start = aesni_mb_pmd_start, .dev_stop = aesni_mb_pmd_stop, .dev_close = aesni_mb_pmd_close, .stats_get = aesni_mb_pmd_stats_get, .stats_reset = aesni_mb_pmd_stats_reset, .dev_infos_get = aesni_mb_pmd_info_get, .queue_pair_setup = aesni_mb_pmd_qp_setup, .queue_pair_release = aesni_mb_pmd_qp_release, .queue_pair_start = aesni_mb_pmd_qp_start, .queue_pair_stop = aesni_mb_pmd_qp_stop, .queue_pair_count = aesni_mb_pmd_qp_count, .session_get_size = aesni_mb_pmd_session_get_size, .session_configure = aesni_mb_pmd_session_configure, .session_clear = aesni_mb_pmd_session_clear }; struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
yubo/dpdk
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
C
gpl-2.0
11,404
/* * Copyright (C) 2011 Sony Ericsson Mobile Communications AB. * Copyright (C) 2012 Sony Mobile Communications AB. * Copyright (C) 2011 Silicon Image Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/mhl.h> #include <linux/mhl_defs.h> #define PRINT_DEVCAP #ifdef CONFIG_MHL_RAP #define RAP_CONTENT_ON "RAP_CONTENT_ON" /* RAPK_WAIT(TRAP_WAIT) 1000ms */ #define RAPK_WAIT_TIME (jiffies + HZ) #define RAPK_RETRY_TIME (jiffies + HZ/2) #define RAP_SEND_RETRY_MAX 2 #endif /* CONFIG_MHL_RAP */ /* * mhl.c - MHL control abustruction provides following feature * - Userspace interface * - USB interface * - Common control for MSC commands * - Common control for RCP/RAP */ static struct class *mhl_class; static DEFINE_MUTEX(msc_command_queue_mutex); struct workqueue_struct *msc_command_workqueue; #ifdef CONFIG_MHL_RAP static DEFINE_MUTEX(rap_command_queue_mutex); struct workqueue_struct *rap_command_workqueue; #endif /* CONFIG_MHL_RAP */ static DEFINE_MUTEX(usb_online_mutex); struct workqueue_struct *usb_online_workqueue; static void (*notify_usb_online)(int online); static int mhl_update_peer_devcap(struct mhl_device *mhl_dev, int offset, u8 devcap); static int __mhl_match(struct device *dev, void *data) { char *name = (char *) data; if (strncmp(dev_name(dev), name, MHL_DEVICE_NAME_MAX) == 0) return 1; return 0; } static int atoi(const char *name) { int val = 0; 
for (;; name++) { switch (*name) { case '0' ... '9': val = 10*val+(*name-'0'); break; default: return val; } } } static struct mhl_device *mhl_get_dev(const char *name) { struct device *dev; if (!name) return NULL; /* lookup mhl device by name */ dev = class_find_device(mhl_class, NULL, (void *) name, __mhl_match); if (!dev) { pr_err("%s: mhl device (%s) not registered!\n", __func__, name); return NULL; } return to_mhl_device(dev); } /******************************** * MHL event ********************************/ int mhl_notify_plugged(struct mhl_device *mhl_dev) { if (!mhl_dev) return -EFAULT; mhl_dev->mhl_online |= MHL_PLUGGED; /* callback usb driver if callback registered */ queue_work(usb_online_workqueue, &mhl_dev->usb_online_work); return 0; } EXPORT_SYMBOL(mhl_notify_plugged); int mhl_notify_unplugged(struct mhl_device *mhl_dev) { if (!mhl_dev) return -EFAULT; mhl_dev->mhl_online = 0; mhl_dev->hpd_state = 0; mhl_dev->tmds_state = FALSE; mhl_dev->devcap_state = 0; memset(&mhl_dev->state, 0, sizeof(struct mhl_state)); /* callback usb driver if callback registered */ queue_work(usb_online_workqueue, &mhl_dev->usb_online_work); return 0; } EXPORT_SYMBOL(mhl_notify_unplugged); static int mhl_check_sink_version(struct mhl_device *mhl_dev) { int ret_val = 0; /* The points to distinguish whether it is Samsung MHL 1.0 Sink Devices are as follows. 
1.MHL_VERSION = 0x10 2.INT_STAT_SIZE = 0x44 */ if (mhl_dev->state.peer_devcap[DEVCAP_OFFSET_MHL_VERSION] == 0x10 && mhl_dev->state.peer_devcap[DEVCAP_OFFSET_INT_STAT_SIZE] == 0x44) ret_val = 1; return ret_val; } int mhl_notify_online(struct mhl_device *mhl_dev) { if (!mhl_dev) return -EFAULT; /* WRITE_STAT: DCAP_RDY */ mhl_msc_send_write_stat( mhl_dev, MHL_STATUS_REG_CONNECTED_RDY, MHL_STATUS_DCAP_RDY); /* SET_INT: DCAP_CHG */ mhl_msc_send_set_int( mhl_dev, MHL_RCHANGE_INT, MHL_INT_DCAP_CHG); mhl_dev->mhl_online |= MHL_LINK_ESTABLISHED; if (mhl_dev->mhl_online == MHL_ONLINE && mhl_dev->hpd_state) kobject_uevent(&mhl_dev->dev.kobj, KOBJ_ONLINE); return 0; } EXPORT_SYMBOL(mhl_notify_online); static int mhl_set_mhl_state(struct mhl_device *mhl_dev, unsigned int state) { if (!mhl_dev) return -EFAULT; mhl_dev->mhl_online |= state; if (mhl_dev->mhl_online == MHL_ONLINE && mhl_dev->hpd_state) kobject_uevent(&mhl_dev->dev.kobj, KOBJ_ONLINE); return 0; } static int mhl_clear_mhl_state(struct mhl_device *mhl_dev, unsigned int state) { if (!mhl_dev) return -EFAULT; mhl_dev->mhl_online &= ~state; return 0; } int mhl_notify_offline(struct mhl_device *mhl_dev) { if (!mhl_dev) return -EFAULT; mhl_dev->mhl_online = 0; mhl_dev->hpd_state = 0; mhl_dev->devcap_state = 0; memset(&mhl_dev->state, 0, sizeof(struct mhl_state)); kobject_uevent(&mhl_dev->dev.kobj, KOBJ_OFFLINE); mhl_dev->mhl_online = MHL_OFFLINE; return 0; } EXPORT_SYMBOL(mhl_notify_offline); #ifdef CONFIG_MHL_RAP static int mhl_notify_rap_content_on(struct mhl_device *mhl_dev) { char *envp[2]; if (!mhl_dev) return -EFAULT; envp[0] = RAP_CONTENT_ON; envp[1] = NULL; kobject_uevent_env(&mhl_dev->dev.kobj, KOBJ_CHANGE, envp); return 0; } #endif /* CONFIG_MHL_RAP */ int mhl_notify_hpd(struct mhl_device *mhl_dev, int state) { if (!mhl_dev) return -EFAULT; if (mhl_dev->hpd_state == state) return 0; mhl_dev->hpd_state = state; if (mhl_dev->mhl_online == MHL_ONLINE && mhl_dev->hpd_state) kobject_uevent(&mhl_dev->dev.kobj, 
KOBJ_ONLINE); else kobject_uevent(&mhl_dev->dev.kobj, KOBJ_OFFLINE); return 0; } EXPORT_SYMBOL(mhl_notify_hpd); static int mhl_qualify_path_enable(struct mhl_device *mhl_dev) { int ret_val = 0; if (mhl_dev->tmds_state) { ret_val = 1; } else { if (mhl_check_sink_version(mhl_dev)) { ret_val = 1; mhl_dev->tmds_state = TRUE; } } return ret_val; } /******************************** * MSC command ********************************/ /* this function called by chip driver's send_msc_command implementation. * so ops_mutex must be already acquired. */ int mhl_msc_command_done(struct mhl_device *mhl_dev, struct msc_command_struct *req) { char *envp[2]; switch (req->command) { case MHL_WRITE_STAT: if (req->offset == MHL_STATUS_REG_LINK_MODE) { if (req->payload.data[0] & MHL_STATUS_PATH_ENABLED) { /* Enable TMDS output */ if (mhl_dev->full_operation) mhl_dev->ops->tmds_control(TRUE); mhl_dev->tmds_state = TRUE; } else { /* Disable TMDS output */ mhl_dev->ops->tmds_control(FALSE); mhl_dev->tmds_state = FALSE; } } break; case MHL_READ_DEVCAP: mhl_update_peer_devcap(mhl_dev, req->offset, req->retval); mhl_dev->devcap_state |= BIT(req->offset); if (MHL_DEVCAP_READ_DONE(mhl_dev->devcap_state)) { mhl_dev->devcap_state = 0; envp[0] = "DEVCAP_CHANGED"; envp[1] = NULL; kobject_uevent_env(&mhl_dev->dev.kobj, KOBJ_CHANGE, envp); } switch (req->offset) { case MHL_DEV_CATEGORY_OFFSET: if (req->retval & MHL_DEV_CATEGORY_POW_BIT) { if (mhl_dev->ops->charging_control) mhl_dev->ops->charging_control (TRUE, 700); } else { if (mhl_dev->ops->charging_control) mhl_dev->ops->charging_control (FALSE, 0); } mhl_set_mhl_state(mhl_dev, MHL_PEER_DCAP_READ); break; #ifdef CONFIG_MHL_RAP case DEVCAP_OFFSET_FEATURE_FLAG: if (req->retval & MHL_FEATURE_RAP_SUPPORT) mhl_notify_rap_content_on(mhl_dev); break; #endif /* CONFIG_MHL_RAP */ case DEVCAP_OFFSET_MHL_VERSION: case DEVCAP_OFFSET_INT_STAT_SIZE: if (!mhl_dev->tmds_state) { if (mhl_qualify_path_enable(mhl_dev)) mhl_dev->ops->tmds_control(TRUE); } break; } 
break; } return 0; } EXPORT_SYMBOL(mhl_msc_command_done); static void mhl_msc_command_work(struct work_struct *work) { struct mhl_device *mhl_dev = container_of(work, struct mhl_device, msc_command_work); struct mhl_event *event; int retry; int ret; mutex_lock(&msc_command_queue_mutex); while (!list_empty(&mhl_dev->msc_queue)) { event = list_first_entry(&mhl_dev->msc_queue, struct mhl_event, msc_queue); list_del(&event->msc_queue); mutex_unlock(&msc_command_queue_mutex); mutex_lock(&mhl_dev->ops_mutex); ret = mhl_dev->ops->send_msc_command (&event->msc_command_queue); mutex_unlock(&mhl_dev->ops_mutex); if (ret == -EAGAIN) { retry = 2; while (retry--) { mutex_lock(&mhl_dev->ops_mutex); ret = mhl_dev->ops->send_msc_command (&event->msc_command_queue); mutex_unlock(&mhl_dev->ops_mutex); if (ret != -EAGAIN) break; } } if (ret == -EAGAIN) pr_err("%s: send_msc_command retry out!\n", __func__); if (event->msc_command_queue.payload.data[0] == MHL_MSC_MSG_RAP) mod_timer(&mhl_dev->rap_send_timer, RAPK_WAIT_TIME); vfree(event); mutex_lock(&msc_command_queue_mutex); if (mhl_dev->msc_command_counter) mhl_dev->msc_command_counter--; else pr_err("%s: msc_command_counter fail!\n", __func__); } mutex_unlock(&msc_command_queue_mutex); } static int mhl_queue_msc_command(struct mhl_device *mhl_dev, struct msc_command_struct *comm, int queue_kind) { struct mhl_event *new_event; mutex_lock(&msc_command_queue_mutex); if (mhl_dev->msc_command_counter >= MSC_COMMAND_QUEUE_SIZE) { pr_err("%s: queue full!\n", __func__); mutex_unlock(&msc_command_queue_mutex); return -EBUSY; } new_event = vmalloc(sizeof(struct mhl_event)); if (!new_event) { pr_err("%s: out of memory!\n", __func__); mutex_unlock(&msc_command_queue_mutex); return -ENOMEM; } memcpy(&new_event->msc_command_queue, comm, sizeof(struct msc_command_struct)); mhl_dev->msc_command_counter++; if (queue_kind) list_add(&new_event->msc_queue, &mhl_dev->msc_queue); else list_add_tail(&new_event->msc_queue, &mhl_dev->msc_queue); 
mutex_unlock(&msc_command_queue_mutex); queue_work(msc_command_workqueue, &mhl_dev->msc_command_work); return 0; } #ifdef CONFIG_MHL_RAP /* rap command */ static void mhl_rap_command_work(struct work_struct *work) { struct mhl_device *mhl_dev = container_of(work, struct mhl_device, rap_command_work); struct mhl_rap_event *event; int ret; /* Check the RAP running */ if (mhl_dev->rap_sending) return; mutex_lock(&rap_command_queue_mutex); if (list_empty(&mhl_dev->rap_queue)) { mutex_unlock(&rap_command_queue_mutex); return; } event = list_first_entry(&mhl_dev->rap_queue, struct mhl_rap_event, rap_queue); list_del(&event->rap_queue); mutex_unlock(&rap_command_queue_mutex); mhl_dev->rap_sending = TRUE; mhl_dev->rap_send_retry_num = RAP_SEND_RETRY_MAX; mhl_dev->rap_action_code_retry = event->rap_command_queue.payload.data[1]; ret = mhl_queue_msc_command(mhl_dev, &event->rap_command_queue, MSC_NORMAL_SEND); vfree(event); if (ret) { pr_err("%s: queue full!\n", __func__); return; } mutex_lock(&rap_command_queue_mutex); if (mhl_dev->rap_command_counter) mhl_dev->rap_command_counter--; else pr_err("%s: rap_command_counter fail!\n", __func__); mutex_unlock(&rap_command_queue_mutex); } static int mhl_queue_rap_command(struct mhl_device *mhl_dev, struct msc_command_struct *comm, int queue_kind) { struct mhl_rap_event *new_event; mutex_lock(&rap_command_queue_mutex); if (mhl_dev->rap_command_counter >= MSC_COMMAND_QUEUE_SIZE) { pr_err("%s: queue full!\n", __func__); mutex_unlock(&rap_command_queue_mutex); return -EBUSY; } new_event = vmalloc(sizeof(struct mhl_rap_event)); if (!new_event) { pr_err("%s: out of memory!\n", __func__); mutex_unlock(&rap_command_queue_mutex); return -ENOMEM; } memcpy(&new_event->rap_command_queue, comm, sizeof(struct msc_command_struct)); mhl_dev->rap_command_counter++; if (queue_kind) list_add(&new_event->rap_queue, &mhl_dev->rap_queue); else list_add_tail(&new_event->rap_queue, &mhl_dev->rap_queue); mutex_unlock(&rap_command_queue_mutex); 
queue_work(rap_command_workqueue, &mhl_dev->rap_command_work); return 0; } static void mhl_rap_retry_work(struct work_struct *work) { struct mhl_device *mhl_dev = container_of(work, struct mhl_device, rap_retry_work); mod_timer(&mhl_dev->rap_send_timer, RAPK_WAIT_TIME); mhl_msc_send_msc_msg(mhl_dev, MHL_MSC_MSG_RAP, mhl_dev->rap_action_code_retry); } /******************************** * MHL rap timers ********************************/ static void mhl_rap_send_timer(unsigned long data) { struct mhl_device *mhl_dev = (struct mhl_device *)data; if (!mhl_dev) return; mhl_dev->rap_sending = FALSE; queue_work(rap_command_workqueue, &mhl_dev->rap_command_work); } static void mhl_rap_send_retry_timer(unsigned long data) { struct mhl_device *mhl_dev = (struct mhl_device *)data; if (!mhl_dev) return; queue_work(rap_command_workqueue, &mhl_dev->rap_retry_work); } static void mhl_init_rap_timers(struct mhl_device *mhl_dev) { init_timer(&mhl_dev->rap_send_timer); mhl_dev->rap_send_timer.function = mhl_rap_send_timer; mhl_dev->rap_send_timer.data = (unsigned long)mhl_dev; mhl_dev->rap_send_timer.expires = 0xffffffffL; add_timer(&mhl_dev->rap_send_timer); init_timer(&mhl_dev->rap_retry_timer); mhl_dev->rap_retry_timer.function = mhl_rap_send_retry_timer; mhl_dev->rap_retry_timer.data = (unsigned long)mhl_dev; mhl_dev->rap_retry_timer.expires = 0xffffffffL; add_timer(&mhl_dev->rap_retry_timer); } #endif /* CONFIG_MHL_RAP */ /* * MSC: 0x60 WRITE_STAT */ int mhl_msc_send_write_stat(struct mhl_device *mhl_dev, u8 offset, u8 value) { struct msc_command_struct req; if (!mhl_dev) return -EFAULT; req.command = MHL_WRITE_STAT; req.offset = offset; req.payload.data[0] = value; return mhl_queue_msc_command(mhl_dev, &req, MSC_NORMAL_SEND); } EXPORT_SYMBOL(mhl_msc_send_write_stat); #ifdef CONFIG_MHL_OSD_NAME int mhl_notify_scpd_recv(struct mhl_device *mhl_dev, const char *buf) { char *envp[2]; ssize_t ret = 0; int i; envp[0] = kmalloc(128, GFP_KERNEL); if (!envp[0]) return -ENOMEM; ret = 
snprintf(envp[0], 128, "SCPD="); for (i = 0; i < MHL_SCRATCHPAD_SIZE; i++) ret += snprintf(envp[0]+ret, 128-ret, "%02x", buf[i]); envp[1] = NULL; pr_info("env[0] : %s", envp[0]); kobject_uevent_env(&mhl_dev->dev.kobj, KOBJ_CHANGE, envp); kfree(envp[0]); return 0; } EXPORT_SYMBOL(mhl_notify_scpd_recv); #endif /* CONFIG_MHL_OSD_NAME */ int mhl_msc_recv_write_stat(struct mhl_device *mhl_dev, u8 offset, u8 value) { if (!mhl_dev) return -EFAULT; if (offset >= 2) return -EFAULT; switch (offset) { case 0: /* DCAP_RDY */ if (((value ^ mhl_dev->state.device_status[offset]) & MHL_STATUS_DCAP_RDY)) { if (value & MHL_STATUS_DCAP_RDY) { mhl_dev->devcap_state = 0; mhl_msc_read_devcap_all(mhl_dev); } else { /* peer dcap turned not ready */ mhl_clear_mhl_state(mhl_dev, MHL_PEER_DCAP_READ); } } break; case 1: /* PATH_EN */ if ((value ^ mhl_dev->state.device_status[offset]) & MHL_STATUS_PATH_ENABLED) { if (value & MHL_STATUS_PATH_ENABLED) { mhl_dev->state.peer_status[1] |= (MHL_STATUS_PATH_ENABLED | MHL_STATUS_CLK_MODE_NORMAL); mhl_msc_send_write_stat( mhl_dev, MHL_STATUS_REG_LINK_MODE, mhl_dev->state.peer_status[1]); #ifdef CONFIG_MHL_RAP mhl_notify_rap_content_on(mhl_dev); #endif /* CONFIG_MHL_RAP */ } else { mhl_dev->state.peer_status[1] &= ~(MHL_STATUS_PATH_ENABLED | MHL_STATUS_CLK_MODE_NORMAL); mhl_msc_send_write_stat( mhl_dev, MHL_STATUS_REG_LINK_MODE, mhl_dev->state.peer_status[1]); } } break; } mhl_dev->state.device_status[offset] = value; return 0; } EXPORT_SYMBOL(mhl_msc_recv_write_stat); /* * MSC: 0x60 SET_INT */ int mhl_msc_send_set_int(struct mhl_device *mhl_dev, u8 offset, u8 mask) { struct msc_command_struct req; if (!mhl_dev) return -EFAULT; req.command = MHL_SET_INT; req.offset = offset; req.payload.data[0] = mask; return mhl_queue_msc_command(mhl_dev, &req, MSC_NORMAL_SEND); } EXPORT_SYMBOL(mhl_msc_send_set_int); int mhl_msc_recv_set_int(struct mhl_device *mhl_dev, u8 offset, u8 mask) { if (!mhl_dev) return -EFAULT; if (offset >= 2) return -EFAULT; switch (offset) 
{ case 0: /* DCAP_CHG */ if (mask & MHL_INT_DCAP_CHG) { /* peer dcap has changed */ mhl_clear_mhl_state(mhl_dev, MHL_PEER_DCAP_READ); mhl_dev->devcap_state = 0; mhl_msc_read_devcap_all(mhl_dev); } /* DSCR_CHG */ #ifdef CONFIG_MHL_OSD_NAME if (mask & MHL_INT_DSCR_CHG) { mhl_dev->ops->scratchpad_data_get(); mhl_dev->write_burst_requested = FALSE; } #else if (mask & MHL_INT_DSCR_CHG) mhl_dev->write_burst_requested = FALSE; #endif /* CONFIG_MHL_OSD_NAME */ /* REQ_WRT */ if (mask & MHL_INT_REQ_WRT) { mhl_dev->write_burst_requested = TRUE; /* SET_INT: GRT_WRT */ mhl_msc_send_set_int( mhl_dev, MHL_RCHANGE_INT, MHL_INT_GRT_WRT); } /* GRT_WRT */ if (mask & MHL_INT_GRT_WRT) complete_all(&mhl_dev->req_write_done); break; case 1: /* EDID_CHG */ if (mask & MHL_INT_EDID_CHG) { /* peer EDID has changed. toggle HPD to read EDID again */ if (mhl_dev->ops->hpd_control) { mhl_dev->ops->hpd_control(FALSE); msleep(110); mhl_dev->ops->hpd_control(TRUE); } else { /* chip driver doesn't have HPD control send offline/online to userspace */ kobject_uevent(&mhl_dev->dev.kobj, KOBJ_ONLINE); msleep(110); kobject_uevent(&mhl_dev->dev.kobj, KOBJ_OFFLINE); } } break; } return 0; } EXPORT_SYMBOL(mhl_msc_recv_set_int); /* * MSC: 0x61 READ_DEVCAP */ #ifdef PRINT_DEVCAP const char *devcap_reg_name[] = { "DEV_STATE ", "MHL_VERSION ", "DEV_CAT ", "ADOPTER_ID_H ", "ADOPTER_ID_L ", "VID_LINK_MODE ", "AUD_LINK_MODE ", "VIDEO_TYPE ", "LOG_DEV_MAP ", "BANDWIDTH ", "FEATURE_FLAG ", "DEVICE_ID_H ", "DEVICE_ID_L ", "SCRATCHPAD_SIZE ", "INT_STAT_SIZE ", "Reserved ", }; static void mhl_print_devcap(struct mhl_device *mhl_dev, int offset) { u8 reg; switch (offset) { case DEVCAP_OFFSET_DEV_CAT: reg = mhl_dev->state.peer_devcap[offset]; pr_info("DCAP: %02X %s: %02X DEV_TYPE=%X POW=%s\n", offset, devcap_reg_name[offset], reg, reg & 0x0F, (reg & 0x10) ? 
"y" : "n"); break; case DEVCAP_OFFSET_FEATURE_FLAG: reg = mhl_dev->state.peer_devcap[offset]; pr_info("DCAP: %02X %s: %02X RCP=%s RAP=%s SP=%s\n", offset, devcap_reg_name[offset], reg, (reg & 0x01) ? "y" : "n", (reg & 0x02) ? "y" : "n", (reg & 0x04) ? "y" : "n"); break; default: reg = mhl_dev->state.peer_devcap[offset]; pr_info("DCAP: %02X %s: %02X\n", offset, devcap_reg_name[offset], reg); break; } } #else static inline void mhl_print_devcap(struct mhl_device *mhl_dev) {} #endif int mhl_msc_read_devcap(struct mhl_device *mhl_dev, u8 offset) { struct msc_command_struct req; if (!mhl_dev) return -EFAULT; if (offset > 15) return -EFAULT; req.command = MHL_READ_DEVCAP; req.offset = offset; req.payload.data[0] = 0; return mhl_queue_msc_command(mhl_dev, &req, MSC_NORMAL_SEND); } EXPORT_SYMBOL(mhl_msc_read_devcap); int mhl_msc_read_devcap_all(struct mhl_device *mhl_dev) { int offset; int ret; /* check if peer dcap already read */ if (mhl_dev->mhl_online & MHL_PEER_DCAP_READ) return 0; for (offset = 0; offset < DEVCAP_SIZE; offset++) { ret = mhl_msc_read_devcap(mhl_dev, offset); if (ret == -EBUSY) pr_err("%s: queue busy!\n", __func__); } return 0; } EXPORT_SYMBOL(mhl_msc_read_devcap_all); static int mhl_update_peer_devcap(struct mhl_device *mhl_dev, int offset, u8 devcap) { if (!mhl_dev) return -EFAULT; if (offset < 0 || offset > 15) return -EFAULT; mhl_dev->state.peer_devcap[offset] = devcap; #ifdef PRINT_DEVCAP mhl_print_devcap(mhl_dev, offset); #endif return 0; } u8 mhl_peer_devcap(struct mhl_device *mhl_dev, int offset) { return mhl_dev->state.peer_devcap[offset]; } EXPORT_SYMBOL(mhl_peer_devcap); /* * MSC: 0x68 MSC_MSG */ int mhl_msc_send_msc_msg( struct mhl_device *mhl_dev, u8 sub_cmd, u8 cmd_data) { struct msc_command_struct req; if (!mhl_dev) return -EFAULT; req.command = MHL_MSC_MSG; req.payload.data[0] = sub_cmd; req.payload.data[1] = cmd_data; return mhl_queue_msc_command(mhl_dev, &req, MSC_NORMAL_SEND); } EXPORT_SYMBOL(mhl_msc_send_msc_msg); static int 
mhl_prior_send_msc_command_msc_msg( struct mhl_device *mhl_dev, u8 sub_cmd, u8 cmd_data) { struct msc_command_struct req; if (!mhl_dev) return -EFAULT; req.command = MHL_MSC_MSG; req.payload.data[0] = sub_cmd; req.payload.data[1] = cmd_data; return mhl_queue_msc_command(mhl_dev, &req, MSC_PRIOR_SEND); } static int mhl_notify_rcp_recv(struct mhl_device *mhl_dev, u8 key_code) { char *envp[2]; if (!mhl_dev) return -EFAULT; envp[0] = kmalloc(128, GFP_KERNEL); if (!envp[0]) return -ENOMEM; snprintf(envp[0], 128, "RCP_KEYCODE=%x", key_code); envp[1] = NULL; kobject_uevent_env(&mhl_dev->dev.kobj, KOBJ_CHANGE, envp); kfree(envp[0]); return 0; } #ifdef CONFIG_MHL_RAP static int mhl_rap_send_msc_msg( struct mhl_device *mhl_dev, u8 sub_cmd, u8 cmd_data) { struct msc_command_struct req; if (!mhl_dev) return -EFAULT; req.command = MHL_MSC_MSG; req.payload.data[0] = sub_cmd; req.payload.data[1] = cmd_data; return mhl_queue_rap_command(mhl_dev, &req, MSC_NORMAL_SEND); } #endif /* CONFIG_MHL_RAP */ /* supported RCP key code */ static const u8 support_rcp_key_code_tbl[] = { 1, 1, 1, 1, 1, 0, 0, 0, /* 0x00~0x07 */ 0, 1, 1, 0, 0, 1, 0, 0, /* 0x08~0x0f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x10~0x17 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x18~0x1f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20~0x27 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x28~0x2f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x30~0x37 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x38~0x3f */ 0, 0, 0, 0, 1, 1, 1, 0, /* 0x40~0x47 */ 1, 1, 0, 1, 1, 0, 0, 0, /* 0x48~0x4f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x50~0x57 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x58~0x5f */ 1, 1, 0, 0, 1, 0, 0, 0, /* 0x60~0x67 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x68~0x6f */ 0, 1, 1, 1, 1, 0, 0, 0, /* 0x70~0x77 */ 0, 0, 0, 0, 0, 0, 0, 0 /* 0x78~0x7f */ }; static int mhl_rcp_recv(struct mhl_device *mhl_dev, u8 key_code) { int rc; if (support_rcp_key_code_tbl[(key_code & 0x7f)]) { /* notify userspace */ mhl_notify_rcp_recv(mhl_dev, key_code); /* prior send rcpk */ rc = mhl_prior_send_msc_command_msc_msg( mhl_dev, MHL_MSC_MSG_RCPK, 
key_code); } else { /* prior send rcpe */ rc = mhl_prior_send_msc_command_msc_msg( mhl_dev, MHL_MSC_MSG_RCPE, MHL_RCPE_UNSUPPORTED_KEY_CODE); if (rc) return rc; /* send rcpk after rcpe send */ rc = mhl_msc_send_msc_msg( mhl_dev, MHL_MSC_MSG_RCPK, key_code); } return rc; } #ifdef CONFIG_MHL_RAP static int mhl_notify_rap_recv(struct mhl_device *mhl_dev, u8 action_code) { char *envp[2]; envp[0] = kmalloc(128, GFP_KERNEL); if (!envp[0]) return -ENOMEM; snprintf(envp[0], 128, "RAP_ACTIONCODE=%x", action_code); envp[1] = NULL; kobject_uevent_env(&mhl_dev->dev.kobj, KOBJ_CHANGE, envp); kfree(envp[0]); return 0; } #endif /* CONFIG_MHL_RAP */ static int mhl_rap_action(struct mhl_device *mhl_dev, u8 action_code) { #ifdef CONFIG_MHL_RAP switch (action_code) { case MHL_RAP_CONTENT_ON: if (!mhl_dev->tmds_state) { mhl_dev->ops->tmds_control(TRUE); /* notify userspace */ mhl_notify_rap_recv(mhl_dev, action_code); } break; case MHL_RAP_CONTENT_OFF: if (mhl_dev->tmds_state) { mhl_dev->ops->tmds_control(FALSE); /* notify userspace */ mhl_notify_rap_recv(mhl_dev, action_code); } break; default: break; } return 0; #else switch (action_code) { case MHL_RAP_CONTENT_ON: mutex_lock(&mhl_dev->ops_mutex); mhl_dev->ops->tmds_control(TRUE); mutex_unlock(&mhl_dev->ops_mutex); break; case MHL_RAP_CONTENT_OFF: mutex_lock(&mhl_dev->ops_mutex); mhl_dev->ops->tmds_control(FALSE); mutex_unlock(&mhl_dev->ops_mutex); break; default: break; } return 0; #endif /* CONFIG_MHL_RAP */ } #ifdef CONFIG_MHL_RAP #else static int mhl_notify_rap_recv(struct mhl_device *mhl_dev, u8 action_code) { char *envp[2]; envp[0] = kmalloc(128, GFP_KERNEL); if (!envp[0]) return -ENOMEM; snprintf(envp[0], 128, "RAP_ACTIONCODE=%x", action_code); envp[1] = NULL; kobject_uevent_env(&mhl_dev->dev.kobj, KOBJ_CHANGE, envp); kfree(envp[0]); return 0; } #endif /* CONFIG_MHL_RAP */ static int mhl_rap_recv(struct mhl_device *mhl_dev, u8 action_code) { u8 error_code; switch (action_code) { case MHL_RAP_POLL: case MHL_RAP_CONTENT_ON: 
case MHL_RAP_CONTENT_OFF: if (mhl_dev->full_operation) { mhl_rap_action(mhl_dev, action_code); error_code = MHL_RAPK_NO_ERROR; #ifdef CONFIG_MHL_RAP #else /* notify userspace */ mhl_notify_rap_recv(mhl_dev, action_code); #endif /* CONFIG_MHL_RAP */ } else error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE; break; default: error_code = MHL_RAPK_UNRECOGNIZED_ACTION_CODE; break; } /* prior send rapk */ return mhl_prior_send_msc_command_msc_msg( mhl_dev, MHL_MSC_MSG_RAPK, error_code); } int mhl_msc_recv_msc_msg( struct mhl_device *mhl_dev, u8 sub_cmd, u8 cmd_data) { int rc = 0; if (!mhl_dev) return -EFAULT; switch (sub_cmd) { case MHL_MSC_MSG_RCP: pr_info("MHL: receive RCP(0x%02x)\n", cmd_data); rc = mhl_rcp_recv(mhl_dev, cmd_data); break; case MHL_MSC_MSG_RCPK: pr_info("MHL: receive RCPK(0x%02x)\n", cmd_data); break; case MHL_MSC_MSG_RCPE: pr_info("MHL: receive RCPE(0x%02x)\n", cmd_data); break; case MHL_MSC_MSG_RAP: pr_info("MHL: receive RAP(0x%02x)\n", cmd_data); rc = mhl_rap_recv(mhl_dev, cmd_data); break; case MHL_MSC_MSG_RAPK: pr_info("MHL: receive RAPK(0x%02x)\n", cmd_data); #ifdef CONFIG_MHL_RAP del_timer(&mhl_dev->rap_send_timer); if (cmd_data == MHL_RAPK_BUSY) { if (!mhl_dev->rap_send_retry_num) { mhl_dev->rap_sending = FALSE; del_timer(&mhl_dev->rap_retry_timer); queue_work(rap_command_workqueue, &mhl_dev->rap_command_work); } else { /* retry */ mhl_dev->rap_send_retry_num--; mod_timer(&mhl_dev->rap_retry_timer, RAPK_RETRY_TIME); } } else { mhl_dev->rap_sending = FALSE; del_timer(&mhl_dev->rap_retry_timer); queue_work(rap_command_workqueue, &mhl_dev->rap_command_work); } #endif /* CONFIG_MHL_RAP */ break; default: break; } return rc; } EXPORT_SYMBOL(mhl_msc_recv_msc_msg); static ssize_t mhl_store_rcp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mhl_device *mhl_dev = to_mhl_device(dev); u8 peer_features = mhl_dev->state.peer_devcap[MHL_DEV_FEATURE_FLAG_OFFSET]; u8 key_code; ssize_t ret = strnlen(buf, PAGE_SIZE); 
key_code = (u8) atoi(buf); key_code &= 0x7f; if (peer_features & MHL_FEATURE_RCP_SUPPORT) mhl_msc_send_msc_msg( mhl_dev, MHL_MSC_MSG_RCP, key_code); else return -EFAULT; return ret; } static ssize_t mhl_store_rap(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mhl_device *mhl_dev = to_mhl_device(dev); u8 peer_features = mhl_dev->state.peer_devcap[MHL_DEV_FEATURE_FLAG_OFFSET]; u8 action_code; ssize_t ret = strnlen(buf, PAGE_SIZE); action_code = (u8) atoi(buf); if (peer_features & MHL_FEATURE_RAP_SUPPORT) { switch (action_code) { case MHL_RAP_CONTENT_ON: case MHL_RAP_CONTENT_OFF: #ifdef CONFIG_MHL_RAP ret = mhl_rap_send_msc_msg( mhl_dev, MHL_MSC_MSG_RAP, action_code); #else mhl_msc_send_msc_msg( mhl_dev, MHL_MSC_MSG_RAP, action_code); #endif /* CONFIG_MHL_RAP */ break; default: return -EFAULT; } } else return -EFAULT; return ret; } /* * MSC: 0x6C WRITE_BURST */ static int mhl_msc_write_burst(struct mhl_device *mhl_dev, u8 offset, u8 *data, u8 length) { struct msc_command_struct req; if (!mhl_dev) return -EFAULT; req.command = MHL_WRITE_BURST; req.offset = offset; req.length = length; req.payload.burst_data = data; return mhl_queue_msc_command(mhl_dev, &req, MSC_NORMAL_SEND); } int mhl_msc_request_write_burst(struct mhl_device *mhl_dev, u8 offset, u8 *data, u8 length) { int retry = 20; int timeout; while (mhl_dev->write_burst_requested && retry--) msleep(50); if (!retry) return -EAGAIN; if (mhl_dev->state.peer_devcap[MHL_DEV_FEATURE_FLAG_OFFSET] & MHL_FEATURE_SP_SUPPORT) { retry = 5; do { init_completion(&mhl_dev->req_write_done); /* SET_INT: REQ_WRT */ mhl_msc_send_set_int( mhl_dev, MHL_RCHANGE_INT, MHL_INT_REQ_WRT); timeout = wait_for_completion_interruptible_timeout (&mhl_dev->req_write_done, HZ); } while (retry-- > 0 && timeout == 0); if (!timeout) { pr_err("%s: write_burst_send timed out!\n", __func__); return -EAGAIN; } else msleep(20); mhl_msc_write_burst( mhl_dev, offset, data, length); /* SET_INT: DSCR_CHG */ 
mhl_msc_send_set_int( mhl_dev, MHL_RCHANGE_INT, MHL_INT_DSCR_CHG); } return 0; } EXPORT_SYMBOL(mhl_msc_request_write_burst); /******************************** * USB driver interface ********************************/ int mhl_device_discovery(const char *name, int *result) { struct device *dev; struct mhl_device *mhl_dev; int rc; if (!name) return -EFAULT; /* lookup mhl device by name */ dev = class_find_device(mhl_class, NULL, (void *) name, __mhl_match); if (!dev) { pr_err("%s: mhl device (%s) not registered!\n", __func__, name); return -EFAULT; } mhl_dev = to_mhl_device(dev); /* ok, call chip driver */ mutex_lock(&mhl_dev->ops_mutex); rc = mhl_dev->ops->discovery_result_get(result); mutex_unlock(&mhl_dev->ops_mutex); if (rc) return rc; return 0; } EXPORT_SYMBOL(mhl_device_discovery); static ssize_t mhl_show_discovery(struct device *dev, struct device_attribute *attr, char *buf) { int result; mhl_device_discovery(dev_name(dev), &result); return snprintf(buf, 4, "%s\n", result ? "mhl" : "usb"); } static void mhl_usb_online_work(struct work_struct *work) { struct mhl_device *mhl_dev = container_of(work, struct mhl_device, usb_online_work); if (notify_usb_online) { pr_info("%s: mhl usb online(%d)\n", __func__, !!(mhl_dev->mhl_online & MHL_PLUGGED)); notify_usb_online (!!(mhl_dev->mhl_online & MHL_PLUGGED)); } } int mhl_register_callback(const char *name, void (*callback)(int on)) { int ret = 0; if (!notify_usb_online) notify_usb_online = callback; else { pr_err("%s: callback is already registered!\n", __func__); ret = -EFAULT; } return ret; } EXPORT_SYMBOL(mhl_register_callback); int mhl_unregister_callback(const char *name) { int ret = 0; if (notify_usb_online) notify_usb_online = NULL; else { pr_err("%s: callback is already unregistered!\n", __func__); ret = -EFAULT; } return ret; } EXPORT_SYMBOL(mhl_unregister_callback); /******************************** * HDMI driver interface ********************************/ int mhl_full_operation(const char *name, int enable) { 
struct mhl_device *mhl_dev; mhl_dev = mhl_get_dev(name); if (!mhl_dev) return -EFAULT; pr_info("%s: %s\n", __func__, enable ? "enable" : "disable"); if (enable) { if (mhl_qualify_path_enable(mhl_dev)) { pr_info("%s: enabling TMDS output..\n", __func__); /* enable TMDS output */ mhl_dev->ops->tmds_control(TRUE); /* enable HPD routing */ mhl_dev->ops->hpd_control(TRUE); } } else { if (mhl_dev->tmds_state) { pr_info("%s: disabling TMDS output..\n", __func__); /* disable HPD routing (force HPD=LOW) */ mhl_dev->ops->hpd_control(FALSE); /* disable TMDS output */ mhl_dev->ops->tmds_control(FALSE); } } mhl_dev->full_operation = enable; return 0; } EXPORT_SYMBOL(mhl_full_operation); /******************************** * interfaces for user space ********************************/ static ssize_t mhl_show_adopter_id(struct device *dev, struct device_attribute *attr, char *buf) { struct mhl_device *mhl_dev = to_mhl_device(dev); return snprintf(buf, PAGE_SIZE, "%02x%02x\n", mhl_dev->state.peer_devcap [MHL_DEV_ADOPTER_ID_H_OFFSET], mhl_dev->state.peer_devcap [MHL_DEV_ADOPTER_ID_L_OFFSET]); } static ssize_t mhl_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) { struct mhl_device *mhl_dev = to_mhl_device(dev); return snprintf(buf, PAGE_SIZE, "%02x%02x\n", mhl_dev->state.peer_devcap [MHL_DEV_DEVICE_ID_H_OFFSET], mhl_dev->state.peer_devcap [MHL_DEV_DEVICE_ID_L_OFFSET]); } #ifdef CONFIG_MHL_OSD_NAME static ssize_t mhl_store_scpd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mhl_device *mhl_dev = to_mhl_device(dev); u8 peer_features = mhl_dev->state.peer_devcap[MHL_DEV_FEATURE_FLAG_OFFSET]; ssize_t ret = strnlen(buf, PAGE_SIZE); static char data[MHL_SCRATCHPAD_SIZE]; strlcpy(data, buf, MHL_SCRATCHPAD_SIZE); if (peer_features & MHL_FEATURE_SP_SUPPORT) mhl_msc_request_write_burst( mhl_dev, 0x40, data, MHL_SCRATCHPAD_SIZE); else return -EFAULT; return ret; } #endif /* CONFIG_MHL_OSD_NAME */ 
/******************************** * MHL class driver ********************************/ static void mhl_device_release(struct device *dev) { struct mhl_device *mhl_dev = to_mhl_device(dev); kfree(mhl_dev); } struct mhl_device *mhl_device_register(const char *name, struct device *parent, void *drvdata, const struct mhl_ops *ops) { struct mhl_device *mhl_dev; int rc; if (!name) return ERR_PTR(-EFAULT); if (!ops || !ops->discovery_result_get || !ops->send_msc_command || !ops->tmds_control) return ERR_PTR(-EFAULT); mhl_dev = kzalloc(sizeof(struct mhl_device), GFP_KERNEL); if (!mhl_dev) { pr_err("%s: out of memory!\n", __func__); return ERR_PTR(-ENOMEM); } mhl_dev->dev.class = mhl_class; mhl_dev->dev.parent = parent; mhl_dev->dev.release = mhl_device_release; dev_set_name(&mhl_dev->dev, name); dev_set_drvdata(&mhl_dev->dev, drvdata); rc = device_register(&mhl_dev->dev); if (rc) { kfree(mhl_dev); return ERR_PTR(rc); } mhl_dev->ops = ops; mutex_init(&mhl_dev->ops_mutex); INIT_LIST_HEAD(&mhl_dev->msc_queue); INIT_WORK(&mhl_dev->msc_command_work, mhl_msc_command_work); #ifdef CONFIG_MHL_RAP mhl_init_rap_timers(mhl_dev); INIT_LIST_HEAD(&mhl_dev->rap_queue); INIT_WORK(&mhl_dev->rap_command_work, mhl_rap_command_work); INIT_WORK(&mhl_dev->rap_retry_work, mhl_rap_retry_work); #endif /* CONFIG_MHL_RAP */ INIT_WORK(&mhl_dev->usb_online_work, mhl_usb_online_work); /* device added */ kobject_uevent(&mhl_dev->dev.kobj, KOBJ_ADD); pr_info("MHL: mhl device (%s) registered\n", name); return mhl_dev; } EXPORT_SYMBOL(mhl_device_register); void mhl_device_unregister(struct mhl_device *mhl_dev) { if (!mhl_dev) return; #ifdef CONFIG_MHL_RAP del_timer(&mhl_dev->rap_send_timer); del_timer(&mhl_dev->rap_retry_timer); #endif /* CONFIG_MHL_RAP */ mutex_lock(&mhl_dev->ops_mutex); mhl_dev->ops = NULL; mutex_unlock(&mhl_dev->ops_mutex); device_unregister(&mhl_dev->dev); } EXPORT_SYMBOL(mhl_device_unregister); static struct device_attribute mhl_class_attributes[] = { __ATTR(discovery, 0440, 
mhl_show_discovery, NULL), __ATTR(rcp, 0660, NULL, mhl_store_rcp), __ATTR(rap, 0660, NULL, mhl_store_rap), __ATTR(device_id, 0440, mhl_show_device_id, NULL), __ATTR(adopter_id, 0440, mhl_show_adopter_id, NULL), #ifdef CONFIG_MHL_OSD_NAME __ATTR(scpd, 0660, NULL, mhl_store_scpd), #endif /* CONFIG_MHL_OSD_NAME */ __ATTR_NULL, }; static int __init mhl_class_init(void) { mhl_class = class_create(THIS_MODULE, "mhl"); if (IS_ERR(mhl_class)) { pr_err("%s: unable to create mhl class!\n", __func__); return PTR_ERR(mhl_class); } mhl_class->dev_attrs = mhl_class_attributes; msc_command_workqueue = create_singlethread_workqueue ("mhl_msc_command"); #ifdef CONFIG_MHL_RAP rap_command_workqueue = create_singlethread_workqueue ("mhl_rap_command"); #endif /* CONFIG_MHL_RAP */ usb_online_workqueue = create_workqueue("mhl_usb_online"); return 0; } static void __exit mhl_class_exit(void) { class_destroy(mhl_class); } subsys_initcall(mhl_class_init); module_exit(mhl_class_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("1.0"); MODULE_AUTHOR("Sony Ericsson Mobile Communications AB"); MODULE_DESCRIPTION("MHL Control Abstruction");
samno1607/XyZ
drivers/video/mhl.c
C
gpl-2.0
36,315
/* Pthreads test program.
   Copyright 1996, 2002, 2003, 2004, 2007, 2008
   Free Software Foundation, Inc.

   Written by Keith Seitz of Red Hat.
   Copied from gdb.threads/pthreads.c.
   Contributed by Red Hat.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>   /* for sleep(); previously used via implicit declaration */
#include <pthread.h>

/* Under OSF 2.0 & 3.0 and HPUX 10, the second arg of pthread_create
   is prototyped to be just a "pthread_attr_t", while under Solaris it
   is a "pthread_attr_t *".  Arg!  */

#if defined (__osf__) || defined (__hpux__)
#define PTHREAD_CREATE_ARG2(arg) arg
#define PTHREAD_CREATE_NULL_ARG2 null_attr
static pthread_attr_t null_attr;
#else
#define PTHREAD_CREATE_ARG2(arg) &arg
#define PTHREAD_CREATE_NULL_ARG2 NULL
#endif

/* Thread body: sleep a full 9 seconds (resuming after any signal
   interruption), print a marker line, and exit the thread.  */
void *
routine (void *arg)
{
  /* When gdb is running, it sets hidden breakpoints in the thread
     library.  The signals caused by these hidden breakpoints can
     cause system calls such as 'sleep' to return early.  Pay attention
     to the return value from 'sleep' to get the full sleep.  */
  int unslept = 9;
  while (unslept > 0)
    unslept = sleep (unslept);

  printf ("hello thread\n");

  /* The function is declared to return void *; falling off the end
     without a return was undefined behavior.  */
  return NULL;
}

/* Marker function for the testsuite */
void
done_making_threads (void)
{
  /* Nothing */
}

/* Spawn one detached-by-default thread running routine(); exits the
   process on pthread_create failure.  */
void
create_thread (void)
{
  pthread_t tid;

  if (pthread_create (&tid, PTHREAD_CREATE_NULL_ARG2, routine,
		      (void *) 0xfeedface))
    {
      perror ("pthread_create 1");
      exit (1);
    }
}

int
main (int argc, char *argv[])
{
  int i;

  /* Create a few threads */
  for (i = 0; i < 5; i++)
    create_thread ();
  done_making_threads ();

  printf ("hello\n");
  printf ("hello\n");
  return 0;
}
Bytewerk/uClinux-ipcam
user/gdb/gdb/testsuite/gdb.mi/pthreads.c
C
gpl-2.0
2,277
# encoding: utf-8 # *************************************************************************** # # Copyright (c) 2002 - 2012 Novell, Inc. # All Rights Reserved. # # This program is free software; you can redistribute it and/or # modify it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, contact Novell, Inc. # # To contact Novell about this file by physical or electronic mail, # you may find current contact information at www.novell.com # # *************************************************************************** # File: command4.ycp # Package: yast2 # Summary: test command-line parsing # Author: Stanislav Visnovsky <visnov@suse.cz> # # $Id$ # # testedfiles: CommandLine.ycp Testsuite.ycp module Yast class Command3Client < Client def main Yast.include self, "testsuite.rb" Yast.include self, "testsuitedata.rb" Yast.import "TypeRepository" Yast.import "CommandLine" # option requires value TEST(->() { CommandLine.Init(@cmdline, ["add", "device"]) }, [], nil) TEST(->() { CommandLine.Command }, [], nil) # EOF nil end end end Yast::Command3Client.new.main
kobliha/yast-yast2
library/commandline/testsuite/tests/command3.rb
Ruby
gpl-2.0
1,550
@font-face{font-family:'TablePress';src:url('tablepress.eot');src:url('tablepress.eot?#ie') format('embedded-opentype'),url(data:application/x-font-woff;base64,d09GRgABAAAAAAXYAAwAAAAACXwAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAABGRlRNAAABHAAAABwAAAAcZInHOk9TLzIAAAE4AAAAPgAAAGB7NXPDY21hcAAAAXgAAABMAAABUvD45QVnYXNwAAABxAAAAAgAAAAIAAAAEGdseWYAAAHMAAABKQAAAgjYVHFyaGVhZAAAAvgAAAAvAAAANv2jaBNoaGVhAAADKAAAAB0AAAAkCk0F6GhtdHgAAANIAAAAIAAAACgoAADqbG9jYQAAA2gAAAAWAAAAFgJWAbxtYXhwAAADgAAAAB0AAAAgAE4AH25hbWUAAAOgAAAB3AAAA43ZugaUcG9zdAAABXwAAABZAAAAgeNVfAkAAAABAAAAAMwXcmMAAAAAy1XFoAAAAADNHI82eJxjYGbexjiBgZWBhaWHxZiBgaENQjMVM0SB+ThBQWVRMYPDh+APd9gY/gP5bAyMykCKEUmJAgMjAC56Ct8AAHicY2BgYGaAYBkGRgYQ8AHyGMF8FgYDIM0BhEwMDB9CPtz6cOf/fyAr+MN1EIt/A78evy5UFxgwsjHAuYxAPSB9KICRYdgDAL67D8kAAQAB//8AD3icXU/NTsJAEJ5ZFggxxmCbkiwHAzLTowlu4ejeyxPwCOBR7r3rE/geeubuG3i3TyGts12MtT18ndnd7w8QsvpdX/Q/IAboI9s1rpY3mMTDkcAVDuaqPD0RqRfaU05UGSxJhj2p5wDNUplqyjsZQT6EQ/2mc9GcyBIPReQOObPrkccHMZhgonMS3R1tiLCsDNFm8bgQF3W06ekgqqnN5Aa//D9nlrOgDeg09Aq4lK3RTiX16n621BBVLrIe2PQK09s2c4THyPIvF4TrAndwO5uHvhPUwOa7MNw8FsCtZfZDJBKB66BQhW58cRwHW09OVMEtI+3aOhEHX+E63crM2dh6X+WazMFLvf6F57OvAqiPGvo+87Tb+LrT4X9/7lT6bKXs9vsByDlkyAAAAHicY2BkYGAA4mO9Sxji+W2+MnCzM4DAWZl+bwT9fyuLN+tpIJeDgQkkCgAbqgoOAHicY2BkYGBj+HeXgYGdAQRYvBkYGVABFwA+hgJEAAAAeJxjZ4AAdiBmaWAwBeJSFhAbiJkaGByAGMwGADa+AxEAAAAAAAAAAAAqAFgAdgCUALIA0AEEAAB4nGNgZGBg4GKQYWBiAAEQycgAEnMA8xkABjEAbgAAAHicjZJBaxNBFMf/u0mbJoig5KC3QcRblt2tFCm9lEp701iKIHiZJLPJ0mSnzE5S8h38BB79Nn4ab94E/zN9JVB7MCEzv/3Pm/f+720ADPEbCe4/qXCCAV4Kp+jhrXAHGb4Ld/Ecv4T38Cx5I7yPQfJBuIdPyR/hAzxJa+E+Buk3Zku6fal8xwn9dIRTPMVr4Q6+4kS4S/Wn8B5eJanwPobJO+EefiRfhA/wIv0o3Mcw3YRyV9CYYAmDMRzXll9c6cnSjJ1pyZcU51gzRDMAl2a+XmrCOSwa+Lg7RhgolJxKzv2Yv8cSqwfZ7iILHGHEdcT7OfdDJreNP7dublSZ5epY7QwpMUCxOBoVozIvDh/v4mEx/JsE+MwQx+A6dhO8hA7y2EvwUjLEuLa2jSqyPFcsV/7H0MbxUFMOYwkhVSykODAb10U8OePTDbY8qel0QV3FO1PSvaUqDnh3p5LBB8WxxozqKnZ4TU1T9THfhJ3vsjTcw9M02s1ocWl0a5QzlXHKW+UXRp3Zm62r5wuvWjP1oenKunhS8YUo7/TMrLS7Vtp7V0/WMaSxvp6aNoP8FYKzU9zGOpbOTHidzqvTW9PaFZ/e
0+QmtnJBatjAlqLeGHWhm9kWfwGPR6C4eJxjYGIAg/9VDFEM2AAXEDMyMDEyMTIzsjCyMrIxsjNy8CRnpJYV5efF56SmlfDCOEWZ6RklXMmJRakl8Sn55XkcEGZpAVQMpJgbwgQrZSnOLyoBACnGHqgAAAA=) format('woff'),url('tablepress.ttf') format('truetype'),url('tablepress.svg#TablePress') format('svg');font-weight:normal;font-style:normal}.tablepress-table-description{clear:both;display:block}.tablepress{border-collapse:collapse;border-spacing:0;width:100%;margin-bottom:1em;border:0}.tablepress th,.tablepress td{padding:8px;border:0;background:0;text-align:left}.tablepress tbody td{vertical-align:top}.tablepress tbody td,.tablepress tfoot th{border-top:1px solid #ddd}.tablepress tbody tr:first-child td{border-top:0}.tablepress thead th{border-bottom:1px solid #ddd}.tablepress thead th,.tablepress tfoot th{background-color:#d9edf7;font-weight:bold;vertical-align:middle}.tablepress .odd td{background-color:#f9f9f9}.tablepress .even td{background-color:#fff}.tablepress .row-hover tr:hover td{background-color:#f3f3f3}.dataTables_wrapper{clear:both;*zoom:1;margin-bottom:1em}.dataTables_wrapper .tablepress{clear:both;margin:0!important}.dataTables_length{float:left;white-space:nowrap}.dataTables_filter{float:right;white-space:nowrap}.dataTables_info{clear:both;float:left;margin:4px 0 0}.dataTables_paginate{float:right;margin:4px 0 0;padding:0 15px}.dataTables_paginate a{color:#111!important;display:inline-block;outline:0;position:relative;text-decoration:underline;text-decoration:none\0/IE}.dataTables_paginate .paginate_disabled_previous,.dataTables_paginate 
.paginate_disabled_next{color:#999!important;text-decoration:none}.paginate_disabled_next,.paginate_enabled_next{margin-left:10px}.paginate_enabled_previous:hover,.paginate_enabled_next:hover{cursor:pointer;text-decoration:none}.paginate_disabled_previous:before,.paginate_enabled_previous:before,.paginate_disabled_next:after,.paginate_enabled_next:after{float:left;text-align:left;font-family:TablePress;font-size:14px;font-weight:bold;-webkit-font-smoothing:antialiased;content:"\f053";color:#d9edf7;text-shadow:.1em .1em #666;position:absolute;top:0;bottom:0;left:-14px;margin:auto;height:14px;width:14px;line-height:1}.paginate_disabled_next:after,.paginate_enabled_next:after{float:right;text-align:right;content:"\f054";left:auto}.paginate_disabled_previous:before,.paginate_disabled_next:after{color:#f9f9f9}.paginate_enabled_previous:hover:before,.paginate_enabled_next:hover:after{color:#049cdb}.paging_full_numbers{height:22px;line-height:22px}.paging_full_numbers a{border:1px solid #aaa;border-radius:5px;padding:2px 5px;margin:0 3px;color:#333!important;background-color:#ddd}.paging_full_numbers .paginate_button:hover{background-color:#ccc}.paging_full_numbers .paginate_active{background-color:#99b3ff}.dataTables_processing{display:none}.dataTables_scroll{clear:both}.dataTables_scrollBody{-webkit-overflow-scrolling:touch}.dataTables_wrapper .sorting,.dataTables_wrapper .sorting_asc,.dataTables_wrapper .sorting_desc{padding-right:20px;cursor:pointer;outline:0}.dataTables_wrapper .sorting div,.dataTables_wrapper .sorting_asc div,.dataTables_wrapper .sorting_desc div{position:relative;margin:0!important;padding:0!important;float:none!important}.dataTables_wrapper .sorting div:before,.dataTables_wrapper .sorting div:after,.dataTables_wrapper .sorting_asc div:before,.dataTables_wrapper .sorting_asc div:after,.dataTables_wrapper .sorting_desc div:before,.dataTables_wrapper .sorting_desc 
div:after{font-family:TablePress;font-weight:normal;font-size:14px;-webkit-font-smoothing:antialiased;position:absolute;top:0;bottom:0;left:auto;right:-14px;margin:auto;height:14px;line-height:1;padding:1px 0 0}.dataTables_wrapper .sorting div:after{content:"\f0dc"}.dataTables_wrapper .sorting_asc div:after{content:"\f0d8";padding:0 0 1px}.dataTables_wrapper .sorting_desc div:after{content:"\f0d7"}.dataTables_wrapper .sorting:hover,.dataTables_wrapper .sorting_asc,.dataTables_wrapper .sorting_desc{background-color:#049cdb}.dataTables_wrapper:after{content:".";display:block;clear:both;visibility:hidden;line-height:0;height:0}.dataTables_wrapper label input,.dataTables_wrapper label select{display:inline;margin:2px;width:auto}.tablepress-rtl td,.tablepress-rtl th{text-align:right}.dataTables_wrapper .tablepress-rtl .sorting,.dataTables_wrapper .tablepress-rtl .sorting_asc,.dataTables_wrapper .tablepress-rtl .sorting_desc{padding-right:8px;padding-left:20px}.dataTables_wrapper .tablepress-rtl .sorting div:before,.dataTables_wrapper .tablepress-rtl .sorting_asc div:before,.dataTables_wrapper .tablepress-rtl .sorting_desc div:before{left:-14px;right:auto}.dataTables_wrapper .tablepress-rtl .sorting div:after,.dataTables_wrapper .tablepress-rtl .sorting_asc div:after,.dataTables_wrapper .tablepress-rtl .sorting_desc div:after{content:""}.dataTables_wrapper .tablepress-rtl .sorting div:before{content:"\f0dc"}.dataTables_wrapper .tablepress-rtl .sorting_asc div:before{content:"\f0d8"}.dataTables_wrapper .tablepress-rtl .sorting_desc div:before{content:"\f0d7"}
Maddy1984/noesis
wp-content/plugins/tablepress/css/default.min.css
CSS
gpl-2.0
7,026
<?php
if( !defined( '_VALID_MOS' ) && !defined( '_JEXEC' ) ) die( 'Direct Access to '.basename(__FILE__).' is not allowed.' );
/**
 * This is the page that is shown when the order has been placed.
 * It is used to thank the customer for her/his order and show a link
 * to the order details.
 *
 * Relies on variables provided by the including checkout page:
 * $db (current order record), $auth, $sess, $user, $order_id.
 *
 * @version $Id: checkout.thankyou.tpl.php 1364 2008-04-09 16:44:28Z soeren_nb $
 * @package VirtueMart
 * @subpackage themes
 * @copyright Copyright (C) 2004-2008 soeren - All rights reserved.
 * @license http://www.gnu.org/copyleft/gpl.html GNU/GPL, see LICENSE.php
 * VirtueMart is free software. This version may have been modified pursuant
 * to the GNU General Public License, and as distributed it includes or
 * is derivative of works licensed under the GNU General Public License or
 * other free or open source software licenses.
 * See /administrator/components/com_virtuemart/COPYRIGHT.php for copyright notices and details.
 * http://virtuemart.net
 */
mm_showMyFileName( __FILE__ );

global $VM_LANG;
?>
<h3><?php echo $VM_LANG->_('PHPSHOP_THANKYOU') ?></h3>
<p>
<?php echo vmCommonHTML::imageTag( VM_THEMEURL .'images/button_ok.png', 'Success', 'center', '48', '48' ); ?>
<?php echo $VM_LANG->_('PHPSHOP_THANKYOU_SUCCESS')?>
<br /><br />
<?php echo $VM_LANG->_('PHPSHOP_EMAIL_SENDTO') .": <strong>". $user->user_email . '</strong>'; ?><br />
</p>
<!-- Begin Payment Information -->
<?php
// Anonymous sessions get no payment section or order link.
if( empty($auth['user_id'])) {
    return;
}
// Only show the payment extra-info form while the order is Pending ("P").
if ($db->f("order_status") == "P" ) {
    // Copy the db object to prevent it gets altered
    $db_temp = ps_DB::_clone( $db );
    /** Start printing out HTML Form code (Payment Extra Info) **/
?>
<br />
<table width="100%">
<tr>
<td width="100%" align="center">
<?php
    /**
     * PLEASE DON'T CHANGE THIS SECTION UNLESS YOU KNOW WHAT YOU'RE DOING
     */
    // Try to get PayPal/PayMate/Worldpay/whatever Configuration File
    @include( CLASSPATH."payment/".$db->f("payment_class").".cfg.php" );

    $vmLogger->debug('Beginning to parse the payment extra info code...' );
    // Here's the place where the Payment Extra Form Code is included
    // Thanks to Steve for this solution (why make it complicated...?)
    // SECURITY NOTE(review): this eval() executes PHP stored in the
    // payment_extrainfo DB column. It is trusted shop-admin content by
    // design, but anyone who can write that column can execute
    // arbitrary code here — confirm write access to it is restricted.
    if( eval('?>' . $db->f("payment_extrainfo") . '<?php ') === false ) {
        $vmLogger->debug( "Error: The code of the payment method ".$db->f( 'payment_method_name').' ('.$db->f('payment_method_code').') '
            .'contains a Parse Error!<br />Please correct that first' );
    } else {
        $vmLogger->debug('Successfully parsed the payment extra info code.' );
    }
    // END printing out HTML Form code (Payment Extra Info)
?>
</td>
</tr>
</table>
<br />
<?php
    // Restore the order record the eval'd snippet may have clobbered.
    $db = $db_temp;
} ?>
<p>
<a href="<?php $sess->purl(SECUREURL.basename($_SERVER['PHP_SELF'])."?page=account.order_details&order_id=". $order_id) ?>" onclick="if( parent.parent.location ) { parent.parent.location = this.href.replace(/index2.php/, 'index.php' ); };">
<?php echo $VM_LANG->_('PHPSHOP_ORDER_LINK') ?>
</a>
</p>
viollarr/alab
components/com_virtuemart/themes/default/templates/pages/checkout.thankyou.tpl.php
PHP
gpl-2.0
3,091
/**** Isotope Filtering ****/ .isotope-item { z-index: 2; } .isotope-hidden.isotope-item { pointer-events: none; z-index: 1; } /**** Isotope CSS3 transitions ****/ .isotope, .isotope .isotope-item { -webkit-transition-duration: 0.8s; -moz-transition-duration: 0.8s; -ms-transition-duration: 0.8s; -o-transition-duration: 0.8s; transition-duration: 0.8s; } .isotope { -webkit-transition-property: height, width; -moz-transition-property: height, width; -ms-transition-property: height, width; -o-transition-property: height, width; transition-property: height, width; } .isotope .isotope-item { -webkit-transition-property: -webkit-transform, opacity; -moz-transition-property: -moz-transform, opacity; -ms-transition-property: -ms-transform, opacity; -o-transition-property: top, left, opacity; transition-property: transform, opacity; } /**** disabling Isotope CSS3 transitions ****/ .isotope.no-transition, .isotope.no-transition .isotope-item, .isotope .isotope-item.no-transition { -webkit-transition-duration: 0s; -moz-transition-duration: 0s; -ms-transition-duration: 0s; -o-transition-duration: 0s; transition-duration: 0s; }
jolay/intelisis3
sites/all/libraries/isotope/isotope.css
CSS
gpl-2.0
1,303
// NAnt - A .NET build tool // Copyright (C) 2001-2003 Gerry Shaw // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA // // Gert Driesen (drieseng@users.sourceforge.net) using System; using System.Collections; namespace NAnt.Core.Types { /// <summary> /// Contains a collection of <see cref="EnvironmentVariable" /> elements. /// </summary> [Serializable()] public class EnvironmentVariableCollection : CollectionBase { #region Public Instance Constructors /// <summary> /// Initializes a new instance of the <see cref="EnvironmentVariableCollection"/> class. /// </summary> public EnvironmentVariableCollection() { } /// <summary> /// Initializes a new instance of the <see cref="EnvironmentVariableCollection"/> class /// with the specified <see cref="EnvironmentVariableCollection"/> instance. /// </summary> public EnvironmentVariableCollection(EnvironmentVariableCollection value) { AddRange(value); } /// <summary> /// Initializes a new instance of the <see cref="EnvironmentVariableCollection"/> class /// with the specified array of <see cref="EnvironmentVariable"/> instances. /// </summary> public EnvironmentVariableCollection(EnvironmentVariable[] value) { AddRange(value); } #endregion Public Instance Constructors #region Public Instance Properties /// <summary> /// Gets or sets the element at the specified index. 
/// </summary> /// <param name="index">The zero-based index of the element to get or set.</param> [System.Runtime.CompilerServices.IndexerName("Item")] public EnvironmentVariable this[int index] { get {return ((EnvironmentVariable)(base.List[index]));} set {base.List[index] = value;} } /// <summary> /// Gets the <see cref="EnvironmentVariable"/> with the specified name. /// </summary> /// <param name="name">The name of the <see cref="EnvironmentVariable"/> to get.</param> [System.Runtime.CompilerServices.IndexerName("Item")] public EnvironmentVariable this[string name] { get { if (name != null) { // try to locate instance by name foreach (EnvironmentVariable environmentVariable in base.List) { if (environmentVariable.VariableName == name) { return environmentVariable; } } } return null; } } #endregion Public Instance Properties #region Public Instance Methods /// <summary> /// Adds a <see cref="EnvironmentVariable"/> to the end of the collection. /// </summary> /// <param name="item">The <see cref="EnvironmentVariable"/> to be added to the end of the collection.</param> /// <returns>The position into which the new element was inserted.</returns> public int Add(EnvironmentVariable item) { return base.List.Add(item); } /// <summary> /// Adds the elements of a <see cref="EnvironmentVariable"/> array to the end of the collection. /// </summary> /// <param name="items">The array of <see cref="EnvironmentVariable"/> elements to be added to the end of the collection.</param> public void AddRange(EnvironmentVariable[] items) { for (int i = 0; (i < items.Length); i = (i + 1)) { Add(items[i]); } } /// <summary> /// Adds the elements of a <see cref="EnvironmentVariableCollection"/> to the end of the collection. 
/// </summary> /// <param name="items">The <see cref="EnvironmentVariableCollection"/> to be added to the end of the collection.</param> public void AddRange(EnvironmentVariableCollection items) { for (int i = 0; (i < items.Count); i = (i + 1)) { Add(items[i]); } } /// <summary> /// Determines whether a <see cref="EnvironmentVariable"/> is in the collection. /// </summary> /// <param name="item">The <see cref="EnvironmentVariable"/> to locate in the collection.</param> /// <returns> /// <see langword="true" /> if <paramref name="item"/> is found in the /// collection; otherwise, <see langword="false" />. /// </returns> public bool Contains(EnvironmentVariable item) { return base.List.Contains(item); } /// <summary> /// Determines whether a <see cref="EnvironmentVariable"/> with the specified /// value is in the collection. /// </summary> /// <param name="value">The argument value to locate in the collection.</param> /// <returns> /// <see langword="true" /> if a <see cref="EnvironmentVariable" /> with value /// <paramref name="value"/> is found in the collection; otherwise, /// <see langword="false" />. /// </returns> public bool Contains(string value) { return this[value] != null; } /// <summary> /// Copies the entire collection to a compatible one-dimensional array, starting at the specified index of the target array. /// </summary> /// <param name="array">The one-dimensional array that is the destination of the elements copied from the collection. The array must have zero-based indexing.</param> /// <param name="index">The zero-based index in <paramref name="array"/> at which copying begins.</param> public void CopyTo(EnvironmentVariable[] array, int index) { base.List.CopyTo(array, index); } /// <summary> /// Retrieves the index of a specified <see cref="EnvironmentVariable"/> object in the collection. 
/// </summary> /// <param name="item">The <see cref="EnvironmentVariable"/> object for which the index is returned.</param> /// <returns> /// The index of the specified <see cref="EnvironmentVariable"/>. If the <see cref="EnvironmentVariable"/> is not currently a member of the collection, it returns -1. /// </returns> public int IndexOf(EnvironmentVariable item) { return base.List.IndexOf(item); } /// <summary> /// Inserts a <see cref="EnvironmentVariable"/> into the collection at the specified index. /// </summary> /// <param name="index">The zero-based index at which <paramref name="item"/> should be inserted.</param> /// <param name="item">The <see cref="EnvironmentVariable"/> to insert.</param> public void Insert(int index, EnvironmentVariable item) { base.List.Insert(index, item); } /// <summary> /// Returns an enumerator that can iterate through the collection. /// </summary> /// <returns> /// A <see cref="EnvironmentVariableEnumerator"/> for the entire collection. /// </returns> public new EnvironmentVariableEnumerator GetEnumerator() { return new EnvironmentVariableEnumerator(this); } /// <summary> /// Removes a member from the collection. /// </summary> /// <param name="item">The <see cref="EnvironmentVariable"/> to remove from the collection.</param> public void Remove(EnvironmentVariable item) { base.List.Remove(item); } #endregion Public Instance Methods } /// <summary> /// Enumerates the <see cref="EnvironmentVariable"/> elements of a <see cref="EnvironmentVariableCollection"/>. /// </summary> public class EnvironmentVariableEnumerator : IEnumerator { #region Internal Instance Constructors /// <summary> /// Initializes a new instance of the <see cref="EnvironmentVariableEnumerator"/> class /// with the specified <see cref="EnvironmentVariableCollection"/>. 
/// </summary> /// <param name="arguments">The collection that should be enumerated.</param> internal EnvironmentVariableEnumerator(EnvironmentVariableCollection arguments) { IEnumerable temp = (IEnumerable) (arguments); _baseEnumerator = temp.GetEnumerator(); } #endregion Internal Instance Constructors #region Implementation of IEnumerator /// <summary> /// Gets the current element in the collection. /// </summary> /// <returns> /// The current element in the collection. /// </returns> public EnvironmentVariable Current { get { return (EnvironmentVariable) _baseEnumerator.Current; } } object IEnumerator.Current { get { return _baseEnumerator.Current; } } /// <summary> /// Advances the enumerator to the next element of the collection. /// </summary> /// <returns> /// <see langword="true" /> if the enumerator was successfully advanced /// to the next element; <see langword="false" /> if the enumerator has /// passed the end of the collection. /// </returns> public bool MoveNext() { return _baseEnumerator.MoveNext(); } bool IEnumerator.MoveNext() { return _baseEnumerator.MoveNext(); } /// <summary> /// Sets the enumerator to its initial position, which is before the /// first element in the collection. /// </summary> public void Reset() { _baseEnumerator.Reset(); } void IEnumerator.Reset() { _baseEnumerator.Reset(); } #endregion Implementation of IEnumerator #region Private Instance Fields private IEnumerator _baseEnumerator; #endregion Private Instance Fields } }
fhchina/nant
src/NAnt.Core/Types/EnvironmentVariableCollection.cs
C#
gpl-2.0
10,878
<?php
/*
 * This file is part of EC-CUBE
 *
 * Copyright(c) 2000-2011 LOCKON CO.,LTD. All Rights Reserved.
 *
 * http://www.lockon.co.jp/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

/**
 * @file
 *  Common functions that many EC-CUBE classes will need to reference.
 */

/**
 * Translate a message alias.
 *
 * @param string $string message alias
 * @param array $tokens parameters for translation (placeholder => value)
 * @param array $options options forwarded to the locale helper
 * @return string message to display
 */
function t($string, $tokens = array(), $options = array()) {
    if (method_exists('SC_Helper_Locale_Ex', 'get_locale')) {
        // Get a string of specified language which corresponds to the message alias.
        $translated = SC_Helper_Locale_Ex::get_locale($string, $options);
    } else {
        // Locale helper unavailable: fall back to the alias itself.
        $translated = $string;
    }
    // If parameters are set, substitute them into the message.
    if (empty($tokens)) {
        return $translated;
    } else {
        return strtr($translated, $tokens);
    }
}

/**
 * Translate a message alias (plural).
 *
 * @param integer $count number for detecting format
 * @param string $single message alias (single)
 * @param string $plural message alias (plural)
 * @param array $tokens parameters for translation
 * @param array $options options (may contain 'lang_code')
 * @return string message to display
 */
function t_plural($count, $single, $plural, $tokens = array(), $options = array()) {
    if (method_exists('SC_Helper_Locale_Ex', 'get_locale_plural')) {
        list($translated_single, $translated_plural) = SC_Helper_Locale_Ex::get_locale_plural($single, $plural, $options);
    } else {
        $translated_single = $single;
        $translated_plural = $plural;
        $options['lang_code'] = 'en-US';
    }
    // Guard against callers that never set a language code (the original
    // read $options['lang_code'] unconditionally, raising a notice and
    // passing NULL down to get_plural_index()).
    $lang_code = isset($options['lang_code']) ? $options['lang_code'] : 'en-US';

    if ($count == 1) {
        $return = $translated_single;
    } else {
        // Determine appropriate plural form.
        $index = get_plural_index($count, $lang_code);
        if ($index < 0) {
            // Unknown language: plural is the safest default.
            $return = $translated_plural;
        } else {
            switch ($index) {
                case 0:
                    // BUGFIX: the original case had no break and fell
                    // through to the plural branch, so plural index 0
                    // could never yield the singular form.
                    $return = $translated_single;
                    break;
                default:
                    $return = $translated_plural;
                    break;
            }
        }
    }

    // Add a counter to translation parameters.
    $tokens['T_COUNT'] = number_format($count);

    return strtr($return, $tokens);
}

/**
 * Determine appropriate plural form.
 *
 * Evaluates the gettext-style plural formula for the language and
 * memoizes the result per (lang_code, count).
 *
 * @param integer $count counter
 * @param string $lang_code language code
 * @return integer index (-1 when the language is unknown)
 */
function get_plural_index($count, $lang_code = 'en-US') {
    static $plural_indexes = array();

    if (!isset($plural_indexes[$lang_code][$count])) {
        // Get a formula
        $formula = get_plural_formula($lang_code);

        // If there is a plural formula for the language, evaluate it
        if (!empty($formula)) {
            $string = str_replace('nplurals', "\$total", $formula);
            $string = str_replace("n", $count, $string);
            $string = str_replace('plural', "\$plural", $string);

            $total = 0;
            $plural = 0;
            // NOTE(review): eval() here runs formulas shipped in
            // data/include/plural_forms.inc (trusted, local file) —
            // never feed it user-supplied text.
            eval("$string");
            if ($plural >= $total) $plural = $total - 1;
            $plural_indexes[$lang_code][$count] = $plural;
        // If there is no plural formula for English
        } elseif ($lang_code == 'en-US') {
            $plural_indexes[$lang_code][$count] = (int) ($count != 1);
        // Otherwise, return -1 (unknown).
        } else {
            $plural_indexes[$lang_code][$count] = -1;
        }
    }
    return $plural_indexes[$lang_code][$count];
}

/**
 * Get a formula to determine appropriate plural form.
 *
 * @param string $lang_code language code
 * @return string formula, or NULL when the language has none
 */
function get_plural_formula($lang_code) {
    static $plural_formulas = NULL;

    // Load the formula table exactly once.  The original used
    // @include_once, which returns bool(true) — not the array — when
    // the file was already included elsewhere, silently breaking every
    // lookup; plain include always yields the file's return value.
    if ($plural_formulas === NULL) {
        $plural_formulas = @include DATA_REALDIR . "include/plural_forms.inc";
        if (!is_array($plural_formulas)) {
            // Missing/broken data file: behave as "no formula known".
            $plural_formulas = array();
        }
    }

    return isset($plural_formulas[$lang_code]) ? $plural_formulas[$lang_code] : NULL;
}
doankhoi/eccube
data/include/common.php
PHP
gpl-2.0
4,806
/* $Id: d3d8_main.c $ */
/** @file
 * VBox D3D8 dll switcher
 */
/*
 * Copyright (C) 2009 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#include "d3d8.h"
#include "switcher.h"

/* Function-pointer types mirroring the d3d8.dll exports that this
 * switcher forwards.  NOTE(review): 'bool' is used here as a parameter
 * name; that only compiles as C without <stdbool.h> in scope — confirm
 * the build never switches this TU to C++/C23. */
typedef HRESULT (WINAPI *D3D8GetSWInfoProc)(void);
typedef void (WINAPI *DebugSetMuteProc)(void);
typedef IDirect3D8* (WINAPI *Direct3DCreate8Proc)(UINT SDKVersion);
typedef HRESULT (WINAPI *ValidatePixelShaderProc)(DWORD* pixelshader, DWORD* reserved1, BOOL bool, DWORD* toto);
typedef HRESULT (WINAPI *ValidateVertexShaderProc)(DWORD* vertexshader, DWORD* reserved1, DWORD* reserved2, BOOL bool, DWORD* toto);

/* Fallback stubs returned when no backing implementation DLL could be
 * loaded; each fails (or no-ops) harmlessly. */
static HRESULT WINAPI vboxD3D8GetSWInfoStub(void)
{
    return E_FAIL;
}

static void WINAPI vboxDebugSetMuteStub(void)
{
}

static IDirect3D8* WINAPI vboxDirect3DCreate8Stub(UINT SDKVersion)
{
    return NULL;
}

static HRESULT WINAPI vboxValidatePixelShaderStub(DWORD* pixelshader, DWORD* reserved1, BOOL bool, DWORD* toto)
{
    return E_FAIL;
}

static HRESULT WINAPI vboxValidateVertexShaderStub(DWORD* vertexshader, DWORD* reserved1, DWORD* reserved2, BOOL bool, DWORD* toto)
{
    return E_FAIL;
}

/* Dispatch table: the names of the two candidate backing DLLs plus one
 * resolved function pointer per forwarded export.  The pXxx field names
 * must match the export names because SW_FILLPROC pastes tokens. */
typedef struct _D3D8ExTag
{
    int initialized;                    /* set once the table has been filled */
    const char *vboxName;               /* VirtualBox-provided implementation */
    const char *msName;                 /* Microsoft implementation */
    D3D8GetSWInfoProc pD3D8GetSWInfo;
    DebugSetMuteProc pDebugSetMute;
    Direct3DCreate8Proc pDirect3DCreate8;
    ValidatePixelShaderProc pValidatePixelShader;
    ValidateVertexShaderProc pValidateVertexShader;
} D3D8Export;

static D3D8Export g_swd3d8 = {0, "VBoxD3D8.dll", "MSD3D8.dll",};

/* Resolve every forwarded export from the loaded DLL into g_swd3d8.
 * SW_FILLPROC is defined in switcher.h — presumably a GetProcAddress
 * wrapper that falls back to the vbox*Stub functions; confirm there. */
void FillD3DExports(HANDLE hDLL)
{
    SW_FILLPROC(g_swd3d8, hDLL, D3D8GetSWInfo);
    SW_FILLPROC(g_swd3d8, hDLL, DebugSetMute);
    SW_FILLPROC(g_swd3d8, hDLL, Direct3DCreate8);
    SW_FILLPROC(g_swd3d8, hDLL, ValidatePixelShader);
    SW_FILLPROC(g_swd3d8, hDLL, ValidateVertexShader);
}

/* The exports below are thin trampolines: SW_CHECKRET/SW_CHECKCALL
 * (switcher.h) appear to lazily initialize g_swd3d8 and bail out with
 * the given value on failure, after which the resolved pointer is
 * called.  Signatures must match d3d8.dll exactly. */

HRESULT WINAPI D3D8GetSWInfo(void)
{
    SW_CHECKRET(g_swd3d8, D3D8GetSWInfo, E_FAIL);
    return g_swd3d8.pD3D8GetSWInfo();
}

void WINAPI DebugSetMute(void)
{
    SW_CHECKCALL(g_swd3d8, DebugSetMute);
    g_swd3d8.pDebugSetMute();
}

IDirect3D8* WINAPI Direct3DCreate8(UINT SDKVersion)
{
    SW_CHECKRET(g_swd3d8, Direct3DCreate8, NULL);
    return g_swd3d8.pDirect3DCreate8(SDKVersion);
}

HRESULT WINAPI ValidatePixelShader(DWORD* pixelshader, DWORD* reserved1, BOOL bool, DWORD* toto)
{
    SW_CHECKRET(g_swd3d8, ValidatePixelShader, E_FAIL);
    return g_swd3d8.pValidatePixelShader(pixelshader, reserved1, bool, toto);
}

HRESULT WINAPI ValidateVertexShader(DWORD* vertexshader, DWORD* reserved1, DWORD* reserved2, BOOL bool, DWORD* toto)
{
    /* NOTE(review): no ';' after SW_CHECKRET here (unlike the other
     * exports) — only valid if the macro supplies its own terminator. */
    SW_CHECKRET(g_swd3d8, ValidateVertexShader, E_FAIL)
    return g_swd3d8.pValidateVertexShader(vertexshader, reserved1, reserved2, bool, toto);
}
VirtualMonitor/VirtualMonitor
src/VBox/Additions/WINNT/Graphics/Wine/switcher/d3d8_main.c
C
gpl-2.0
3,222
/* ** Write ahead logging implementation copyright Chris Mason 2000 ** ** The background commits make this code very interelated, and ** overly complex. I need to rethink things a bit....The major players: ** ** journal_begin -- call with the number of blocks you expect to log. ** If the current transaction is too ** old, it will block until the current transaction is ** finished, and then start a new one. ** Usually, your transaction will get joined in with ** previous ones for speed. ** ** journal_join -- same as journal_begin, but won't block on the current ** transaction regardless of age. Don't ever call ** this. Ever. There are only two places it should be ** called from, and they are both inside this file. ** ** journal_mark_dirty -- adds blocks into this transaction. clears any flags ** that might make them get sent to disk ** and then marks them BH_JDirty. Puts the buffer head ** into the current transaction hash. ** ** journal_end -- if the current transaction is batchable, it does nothing ** otherwise, it could do an async/synchronous commit, or ** a full flush of all log and real blocks in the ** transaction. ** ** flush_old_commits -- if the current transaction is too old, it is ended and ** commit blocks are sent to disk. Forces commit blocks ** to disk for all backgrounded commits that have been ** around too long. 
** -- Note, if you call this as an immediate flush from ** from within kupdate, it will ignore the immediate flag */ #include <linux/time.h> #include <linux/semaphore.h> #include <linux/vmalloc.h> #include <linux/reiserfs_fs.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/smp_lock.h> #include <linux/buffer_head.h> #include <linux/workqueue.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <asm/system.h> /* gets a struct reiserfs_journal_list * from a list head */ #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ j_list)) #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ j_working_list)) /* the number of mounted filesystems. This is used to decide when to ** start and kill the commit workqueue */ static int reiserfs_mounted_fs_count; static struct workqueue_struct *commit_wq; #define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit structs at 4k */ #define BUFNR 64 /*read ahead */ /* cnode stat bits. Move these into reiserfs_fs.h */ #define BLOCK_FREED 2 /* this block was freed, and can't be written. 
*/ #define BLOCK_FREED_HOLDER 3 /* this block was freed during this transaction, and can't be written */ #define BLOCK_NEEDS_FLUSH 4 /* used in flush_journal_list */ #define BLOCK_DIRTIED 5 /* journal list state bits */ #define LIST_TOUCHED 1 #define LIST_DIRTY 2 #define LIST_COMMIT_PENDING 4 /* someone will commit this list */ /* flags for do_journal_end */ #define FLUSH_ALL 1 /* flush commit and real blocks */ #define COMMIT_NOW 2 /* end and commit this transaction */ #define WAIT 4 /* wait for the log blocks to hit the disk */ static int do_journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long nblocks, int flags); static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int can_dirty(struct reiserfs_journal_cnode *cn); static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks); static int release_journal_dev(struct super_block *super, struct reiserfs_journal *journal); static int dirty_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl); static void flush_async_commits(struct work_struct *work); static void queue_log_writer(struct super_block *s); /* values for join in do_journal_begin_r */ enum { JBEGIN_REG = 0, /* regular journal begin */ JBEGIN_JOIN = 1, /* join the running transaction if at all possible */ JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */ }; static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks, int join); static void init_journal_hash(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); } /* ** clears BH_Dirty and sticks the buffer on the clean list. 
Called because I can't allow refile_buffer to ** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for ** more details. */ static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) { if (bh) { clear_buffer_dirty(bh); clear_buffer_journal_test(bh); } return 0; } static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block *sb) { struct reiserfs_bitmap_node *bn; static int id; bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS); if (!bn) { return NULL; } bn->data = kzalloc(sb->s_blocksize, GFP_NOFS); if (!bn->data) { kfree(bn); return NULL; } bn->id = id++; INIT_LIST_HEAD(&bn->list); return bn; } static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; struct list_head *entry = journal->j_bitmap_nodes.next; journal->j_used_bitmap_nodes++; repeat: if (entry != &journal->j_bitmap_nodes) { bn = list_entry(entry, struct reiserfs_bitmap_node, list); list_del(entry); memset(bn->data, 0, sb->s_blocksize); journal->j_free_bitmap_nodes--; return bn; } bn = allocate_bitmap_node(sb); if (!bn) { yield(); goto repeat; } return bn; } static inline void free_bitmap_node(struct super_block *sb, struct reiserfs_bitmap_node *bn) { struct reiserfs_journal *journal = SB_JOURNAL(sb); journal->j_used_bitmap_nodes--; if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) { kfree(bn->data); kfree(bn); } else { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } } static void allocate_bitmap_nodes(struct super_block *sb) { int i; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) { bn = allocate_bitmap_node(sb); if (bn) { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } else { break; /* this is ok, we'll try again when more are needed */ } } } 
/* Mark one block in a journal list's freed-block bitmap, allocating the
 * per-bitmap-group node on first use.  get_bitmap_node() never fails. */
static int set_bit_in_list_bitmap(struct super_block *sb,
				  b_blocknr_t block,
				  struct reiserfs_list_bitmap *jb)
{
	unsigned int bmap_nr = block / (sb->s_blocksize << 3);
	unsigned int bit_nr = block % (sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}

/* Release every bitmap node attached to this list bitmap back to the
 * pool.  The bitmaps pointer array itself is kept. */
static void cleanup_bitmap_list(struct super_block *sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < reiserfs_bmap_count(sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(sb, jb);
		vfree(jb->bitmaps);	/* array was vmalloc'ed in reiserfs_allocate_list_bitmaps */
		jb->bitmaps = NULL;
	}
	return 0;
}

/* Tear down the bitmap-node free pool (unmount path). */
static int free_bitmap_nodes(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *sb,
				   struct reiserfs_list_bitmap *jb_array,
				   unsigned int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vmalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(sb, "clm-2000", "unable to "
					 "allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
		memset(jb->bitmaps, 0, mem);
	}
	if (failed) {
		/* roll back everything allocated so far */
		free_list_bitmaps(sb, jb_array);
		return -1;
	}
	return 0;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_list_bitmap *jb = NULL;

	/* round-robin over the bitmap slots, up to three full passes */
	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			/* slot in use: flushing its commit list may free it */
			flush_commit_list(sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	if (jb->journal_list) {	/* double check to make sure if flushed correctly */
		return NULL;
	}
	jb->journal_list = jl;
	return jb;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* if last one, overwrite it after the if */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}

/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	reiserfs_check_lock_depth(sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}

/* Clear the "prepared for logging" buffer-head state bits. */
static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}

/* return a cnode with same dev, block number and size in table, or
null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
								  super_block
								  *sb,
								  struct
								  reiserfs_journal_cnode
								  **table,
								  long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;	/* walk the hash chain */
	}
	return (struct reiserfs_journal_cnode *)0;
}

/*
** this actually means 'can this block be reallocated yet?'.  If you set
** search_all, a block can only be allocated if it is not in the current
** transaction, was not freed by the current transaction, and has no chance
** of ever being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not
** in the current transaction.  Since deleting a block removes it from the
** current transaction, this case should never happen.  If you don't set
** search_all, make sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for
** find_forward.  when bl is rejected because it is set in a journal list
** bitmap, we search for the next zero bit in the bitmap that rejected bl.
** Then, we return that through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
*/
int reiserfs_in_journal(struct super_block *sb,
			unsigned int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will
	 ** be logged before use.  if we crash before the transaction that
	 ** freed it commits, this transaction won't have committed either,
	 ** and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				/* rejected: hint the next candidate bit */
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}

/* insert cn into table
 * (pushed on the front of its hash chain) */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}

/* lock the current transaction */
static inline void lock_journal(struct super_block *sb)
{
	PROC_INFO_INC(sb, journal.lock_journal);

	reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *sb)
{
	mutex_unlock(&SB_JOURNAL(sb)->j_mutex);
}

/* take a reference on a journal list */
static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

/* drop a reference; frees the list when the count reaches zero */
static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}

/*
** this used to be much more involved, and I'm keeping it just in case
** things get ugly again.  it gets called by flush_commit_list, and
** cleans up any data stored about blocks freed during a transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *sb,
					   struct reiserfs_journal_list *jl)
{

	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(sb, jb);
	}
	/* NOTE(review): jl->j_list_bitmap is dereferenced unconditionally
	 * below even though jb (the same pointer) was NULL-checked above;
	 * presumably j_list_bitmap is never NULL on this path — confirm. */
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}

/* Returns 1 while the transaction with this id has not yet been freed.
 * Only the oldest entry on j_journal_list is inspected: ids are freed
 * in order, so the oldest live id bounds all live ids. */
static int journal_list_still_alive(struct super_block *s,
				    unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}

/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason.  Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh.  If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final page_cache_release drop the page from the lru.
*/ static void release_buffer_page(struct buffer_head *bh) { struct page *page = bh->b_page; if (!page->mapping && trylock_page(page)) { page_cache_get(page); put_bh(bh); if (!page->mapping) try_to_free_buffers(page); unlock_page(page); page_cache_release(page); } else { put_bh(bh); } } static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; if (buffer_journaled(bh)) { reiserfs_warning(NULL, "clm-2084", "pinned buffer %lu:%s sent to disk", bh->b_blocknr, bdevname(bh->b_bdev, b)); } if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); release_buffer_page(bh); } static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) { if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); put_bh(bh); } static void submit_logged_buffer(struct buffer_head *bh) { get_bh(bh); bh->b_end_io = reiserfs_end_buffer_io_sync; clear_buffer_journal_new(bh); clear_buffer_dirty(bh); if (!test_clear_buffer_journal_test(bh)) BUG(); if (!buffer_uptodate(bh)) BUG(); submit_bh(WRITE, bh); } static void submit_ordered_buffer(struct buffer_head *bh) { get_bh(bh); bh->b_end_io = reiserfs_end_ordered_io; clear_buffer_dirty(bh); if (!buffer_uptodate(bh)) BUG(); submit_bh(WRITE, bh); } #define CHUNK_SIZE 32 struct buffer_chunk { struct buffer_head *bh[CHUNK_SIZE]; int nr; }; static void write_chunk(struct buffer_chunk *chunk) { int i; get_fs_excl(); for (i = 0; i < chunk->nr; i++) { submit_logged_buffer(chunk->bh[i]); } chunk->nr = 0; put_fs_excl(); } static void write_ordered_chunk(struct buffer_chunk *chunk) { int i; get_fs_excl(); for (i = 0; i < chunk->nr; i++) { submit_ordered_buffer(chunk->bh[i]); } chunk->nr = 0; put_fs_excl(); } static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh, spinlock_t * lock, void (fn) (struct buffer_chunk *)) { int ret = 0; BUG_ON(chunk->nr >= CHUNK_SIZE); chunk->bh[chunk->nr++] = bh; if (chunk->nr >= CHUNK_SIZE) { ret 
= 1; if (lock) spin_unlock(lock); fn(chunk); if (lock) spin_lock(lock); } return ret; } static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0); static struct reiserfs_jh *alloc_jh(void) { struct reiserfs_jh *jh; while (1) { jh = kmalloc(sizeof(*jh), GFP_NOFS); if (jh) { atomic_inc(&nr_reiserfs_jh); return jh; } yield(); } } /* * we want to free the jh when the buffer has been written * and waited on */ void reiserfs_free_jh(struct buffer_head *bh) { struct reiserfs_jh *jh; jh = bh->b_private; if (jh) { bh->b_private = NULL; jh->bh = NULL; list_del_init(&jh->list); kfree(jh); if (atomic_read(&nr_reiserfs_jh) <= 0) BUG(); atomic_dec(&nr_reiserfs_jh); put_bh(bh); } } static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh, int tail) { struct reiserfs_jh *jh; if (bh->b_private) { spin_lock(&j->j_dirty_buffers_lock); if (!bh->b_private) { spin_unlock(&j->j_dirty_buffers_lock); goto no_jh; } jh = bh->b_private; list_del_init(&jh->list); } else { no_jh: get_bh(bh); jh = alloc_jh(); spin_lock(&j->j_dirty_buffers_lock); /* buffer must be locked for __add_jh, should be able to have * two adds at the same time */ BUG_ON(bh->b_private); jh->bh = bh; bh->b_private = jh; } jh->jl = j->j_current_jl; if (tail) list_add_tail(&jh->list, &jh->jl->j_tail_bh_list); else { list_add_tail(&jh->list, &jh->jl->j_bh_list); } spin_unlock(&j->j_dirty_buffers_lock); return 0; } int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) { return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1); } int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) { return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0); } #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list) static int write_ordered_buffers(spinlock_t * lock, struct reiserfs_journal *j, struct reiserfs_journal_list *jl, struct list_head *list) { struct buffer_head *bh; struct reiserfs_jh *jh; int ret = j->j_errno; struct buffer_chunk chunk; struct list_head tmp; INIT_LIST_HEAD(&tmp); chunk.nr = 0; 
spin_lock(lock); while (!list_empty(list)) { jh = JH_ENTRY(list->next); bh = jh->bh; get_bh(bh); if (!trylock_buffer(bh)) { if (!buffer_dirty(bh)) { list_move(&jh->list, &tmp); goto loop_next; } spin_unlock(lock); if (chunk.nr) write_ordered_chunk(&chunk); wait_on_buffer(bh); cond_resched(); spin_lock(lock); goto loop_next; } /* in theory, dirty non-uptodate buffers should never get here, * but the upper layer io error paths still have a few quirks. * Handle them here as gracefully as we can */ if (!buffer_uptodate(bh) && buffer_dirty(bh)) { clear_buffer_dirty(bh); ret = -EIO; } if (buffer_dirty(bh)) { list_move(&jh->list, &tmp); add_to_chunk(&chunk, bh, lock, write_ordered_chunk); } else { reiserfs_free_jh(bh); unlock_buffer(bh); } loop_next: put_bh(bh); cond_resched_lock(lock); } if (chunk.nr) { spin_unlock(lock); write_ordered_chunk(&chunk); spin_lock(lock); } while (!list_empty(&tmp)) { jh = JH_ENTRY(tmp.prev); bh = jh->bh; get_bh(bh); reiserfs_free_jh(bh); if (buffer_locked(bh)) { spin_unlock(lock); wait_on_buffer(bh); spin_lock(lock); } if (!buffer_uptodate(bh)) { ret = -EIO; } /* ugly interaction with invalidatepage here. * reiserfs_invalidate_page will pin any buffer that has a valid * journal head from an older transaction. If someone else sets * our buffer dirty after we write it in the first loop, and * then someone truncates the page away, nobody will ever write * the buffer. We're safe if we write the page one last time * after freeing the journal header. 
*/ if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) { spin_unlock(lock); ll_rw_block(WRITE, 1, &bh); spin_lock(lock); } put_bh(bh); cond_resched_lock(lock); } spin_unlock(lock); return ret; } static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct reiserfs_journal_list *other_jl; struct reiserfs_journal_list *first_jl; struct list_head *entry; unsigned int trans_id = jl->j_trans_id; unsigned int other_trans_id; unsigned int first_trans_id; find_first: /* * first we walk backwards to find the oldest uncommitted transation */ first_jl = jl; entry = jl->j_list.prev; while (1) { other_jl = JOURNAL_LIST_ENTRY(entry); if (entry == &journal->j_journal_list || atomic_read(&other_jl->j_older_commits_done)) break; first_jl = other_jl; entry = other_jl->j_list.prev; } /* if we didn't find any older uncommitted transactions, return now */ if (first_jl == jl) { return 0; } first_trans_id = first_jl->j_trans_id; entry = &first_jl->j_list; while (1) { other_jl = JOURNAL_LIST_ENTRY(entry); other_trans_id = other_jl->j_trans_id; if (other_trans_id < trans_id) { if (atomic_read(&other_jl->j_commit_left) != 0) { flush_commit_list(s, other_jl, 0); /* list we were called with is gone, return */ if (!journal_list_still_alive(s, trans_id)) return 1; /* the one we just flushed is gone, this means all * older lists are also gone, so first_jl is no longer * valid either. Go back to the beginning. 
*/ if (!journal_list_still_alive (s, other_trans_id)) { goto find_first; } } entry = entry->next; if (entry == &journal->j_journal_list) return 0; } else { return 0; } } return 0; } static int reiserfs_async_progress_wait(struct super_block *s) { struct reiserfs_journal *j = SB_JOURNAL(s); if (atomic_read(&j->j_async_throttle)) { reiserfs_write_unlock(s); congestion_wait(BLK_RW_ASYNC, HZ / 10); reiserfs_write_lock(s); } return 0; } /* ** if this journal list still has commit blocks unflushed, send them to disk. ** ** log areas must be flushed in order (transaction 2 can't commit before transaction 1) ** Before the commit block can by written, every other log block must be safely on disk ** */ static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) { int i; b_blocknr_t bn; struct buffer_head *tbh = NULL; unsigned int trans_id = jl->j_trans_id; struct reiserfs_journal *journal = SB_JOURNAL(s); int retval = 0; int write_len; reiserfs_check_lock_depth(s, "flush_commit_list"); if (atomic_read(&jl->j_older_commits_done)) { return 0; } get_fs_excl(); /* before we can put our commit blocks on disk, we have to make sure everyone older than ** us is on disk too */ BUG_ON(jl->j_len <= 0); BUG_ON(trans_id == journal->j_trans_id); get_journal_list(jl); if (flushall) { if (flush_older_commits(s, jl) == 1) { /* list disappeared during flush_older_commits. return */ goto put_jl; } } /* make sure nobody is trying to flush this one at the same time */ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s); if (!journal_list_still_alive(s, trans_id)) { mutex_unlock(&jl->j_commit_mutex); goto put_jl; } BUG_ON(jl->j_trans_id == 0); /* this commit is done, exit */ if (atomic_read(&(jl->j_commit_left)) <= 0) { if (flushall) { atomic_set(&(jl->j_older_commits_done), 1); } mutex_unlock(&jl->j_commit_mutex); goto put_jl; } if (!list_empty(&jl->j_bh_list)) { int ret; /* * We might sleep in numerous places inside * write_ordered_buffers. 
Relax the write lock. */
	reiserfs_write_unlock(s);
	ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
				    journal, jl, &jl->j_bh_list);
	if (ret < 0 && retval == 0)
		retval = ret;
	reiserfs_write_lock(s);
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk. Try to write at least 256
	 * log blocks. later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0 ; i < write_len ; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh)) {
				/* drop the fs lock across the block-layer
				 * submission; it may sleep */
				reiserfs_write_unlock(s);
				ll_rw_block(WRITE, 1, &tbh);
				reiserfs_write_lock(s);
			}
			put_bh(tbh) ;
		}
	}
	atomic_dec(&journal->j_async_throttle);

	/* second pass: wait for every log block of this transaction to
	 * reach the disk before the commit block is written */
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);

		reiserfs_write_unlock(s);
		wait_on_buffer(tbh);
		reiserfs_write_lock(s);
		// since we're using ll_rw_blk above, it might have skipped over
		// a locked buffer.  Double check here
		//
		/* redundant, sync_dirty_buffer() checks */
		if (buffer_dirty(tbh)) {
			reiserfs_write_unlock(s);
			sync_dirty_buffer(tbh);
			reiserfs_write_lock(s);
		}
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601",
					 "buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	/* If there was a write error in the journal - we can't commit
	 * this transaction - it will be invalid and, if successful,
	 * will just end up propagating the write error out to
	 * the file system. */
	if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
		if (buffer_dirty(jl->j_commit_bh))
			BUG();
		mark_buffer_dirty(jl->j_commit_bh) ;
		reiserfs_write_unlock(s);
		if (reiserfs_barrier_flush(s))
			__sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
		else
			sync_dirty_buffer(jl->j_commit_bh);
		reiserfs_write_lock(s);
	}

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615", "buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	mutex_unlock(&jl->j_commit_mutex);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __func__);
	put_fs_excl();
	return retval;
}

/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	/* walk the hprev chain looking for another cnode for the same
	 * (sb, block) that is still attached to a journal list */
	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}

/*
 * Returns 0 if some journal list that also logged this block still has
 * commits outstanding (j_commit_left != 0); 1 once every such list has
 * finished committing.
 */
static int newer_jl_done(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
		    atomic_read(&cn->jlist->j_commit_left) != 0)
			return 0;
		cn = cn->hprev;
	}
	return 1;
}

static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);

/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	 ** to lock for each call to remove_journal_hash?
	 */
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(sb, "reiserfs-2201",
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ?
1 : 0, cn->state);
			}
			cn->state = 0;
			remove_journal_hash(sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(sb, last);
	}
	jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *sb,
					unsigned long offset,
					unsigned int trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			/* header buffer is under IO; drop the fs lock while
			 * we wait for it */
			reiserfs_write_unlock(sb);
			wait_on_buffer((journal->j_header_bh));
			reiserfs_write_lock(sb);
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(sb, "journal-699",
						 "buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		set_buffer_dirty(journal->j_header_bh);
		reiserfs_write_unlock(sb);

		/* use a flush/FUA write when barriers are enabled so the
		 * header cannot pass earlier log writes in the device cache */
		if (reiserfs_barrier_flush(sb))
			__sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
		else
			sync_dirty_buffer(journal->j_header_bh);

		reiserfs_write_lock(sb);
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(sb, "journal-837",
					 "IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}

/* thin wrapper; see _update_journal_header_block() above */
static int update_journal_header_block(struct super_block *sb,
				       unsigned long offset,
				       unsigned int trans_id)
{
	return _update_journal_header_block(sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned int trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* Did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}

/* take jl off the journal's working list, if it is queued there */
static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.
That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s, "clm-2048", "called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
	} else if (mutex_trylock(&journal->j_flush_mutex)) {
		/* trylock succeeding here means the caller lied about
		 * already holding j_flush_mutex */
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	get_fs_excl();

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	 ** the commit lists of any olders transactions
	 */
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s, "journal-844", "journal list is flushing, "
			       "wcount is not 0");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit.  Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		 ** don't clear BH_JDirty_wait if we aren't the one writing this
		 ** block to disk
		 */
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			 ** we are working with it
			 */
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		 ** sure they are committed, and don't try writing it to disk
		 */
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		 ** locked while it works, so we should never see a buffer here that
		 ** is not marked JDirty_wait
		 */
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s, "journal-813",
					 "BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer tranasction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s, "clm-2082",
					 "Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __func__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s, "journal-945",
						 "saved_bh->b_count < 0");
			}
		}
	}
	/* second pass: wait for every buffer submitted above */
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s, "journal-1011",
						       "cn->bh is NULL");
				}

				reiserfs_write_unlock(s);
				wait_on_buffer(cn->bh);
				reiserfs_write_lock(s);

				if (!cn->bh) {
					reiserfs_panic(s, "journal-1012",
						       "cn->bh is NULL");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s, "journal-949",
							 "buffer write failed");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				 ** check, otherwise we race against our flushpage routine
				 */
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* drop one ref for us */
				put_bh(cn->bh);
				/* drop one ref for journal_mark_dirty */
				release_buffer_page(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __func__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk.  This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
	 */
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure there
	 ** are no allocations going on while we are flushing journal lists.  So,
	 ** we only need to update the journal header block for the last list
	 ** being flushed
	 */
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len + 2) %
						SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __func__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		mutex_unlock(&journal->j_flush_mutex);
	put_fs_excl();
	return err;
}

/*
 * Returns 1 when the list has no real blocks left to write
 * (j_len == 0 or j_nonzerolen == 0); otherwise scans the cnodes and
 * returns 0 as soon as a buffer is found whose newer lists have not all
 * committed.  Note: it also returns 0 when the scan completes.
 */
static int test_transaction(struct super_block *s,
			    struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;

	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
		return 1;

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && !newer_jl_done(cn))
			return 0;
	      next:
		cn = cn->next;
		cond_resched();
	}
	return 0;
}

static int write_one_transaction(struct
super_block *s,
				 struct reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	/* number of buffers queued into chunk */
	int ret = 0;

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
	      next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		 ** buffer.  Only the most recent transaction with a buffer in
		 ** it is allowed to send that buffer to disk
		 */
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}

/*
 * Write dirty, writable buffers from consecutive journal lists, starting
 * at jl, until either num_trans lists have been processed or num_blocks
 * buffers have been written (whichever limit is non-zero).  Runs under
 * j_flush_mutex so no list can be deleted underneath us.
 */
static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned int *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned int orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_mutex held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left) ||
		    !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	if (chunk.nr) {
		write_chunk(&chunk);
	}

      done:
	mutex_unlock(&journal->j_flush_mutex);
	return ret;
}

/* for o_sync and fsync heavy applications, they tend to use
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned int trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	/* try to find a group of blocks we can flush across all the
	 ** transactions, but only bother if we've actually spanned
	 ** across multiple lists
	 */
	if (flush_jl != jl) {
		/* NOTE(review): the return value of kupdate_transactions()
		 * is assigned to 'ret' but never checked or propagated */
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	return 0;
}

/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
			 struct reiserfs_journal_cnode **table,
			 struct reiserfs_journal_list *jl,
			 unsigned long block, int remove_freed)
{
	struct reiserfs_journal_cnode *cur;
	struct reiserfs_journal_cnode **head;

	head = &(journal_hash(table, sb, block));
	if (!head) {
		return;
	}
	cur = *head;
	while (cur) {
		/* jl == NULL matches any list; BLOCK_FREED entries are only
		 * removed when remove_freed is set */
		if (cur->blocknr == block && cur->sb == sb
		    && (jl == NULL || jl == cur->jlist)
		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
			if (cur->hnext) {
				cur->hnext->hprev = cur->hprev;
			}
			if (cur->hprev) {
				cur->hprev->hnext = cur->hnext;
			} else {
				*head = cur->hnext;
			}
			cur->blocknr = 0;
			cur->sb = NULL;
			cur->state = 0;
			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
				atomic_dec(&(cur->jlist->j_nonzerolen));
			cur->bh = NULL;
			cur->jlist = NULL;
		}
		cur = cur->hnext;
	}
}

/* free all in-memory journal state; called on unmount after the log is quiet */
static void free_journal_ram(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	kfree(journal->j_current_jl);
	journal->j_num_lists--;

	vfree(journal->j_cnode_free_orig);
	free_list_bitmaps(sb, journal->j_list_bitmap);
	free_bitmap_nodes(sb);	/* must be after free_list_bitmaps */
	if (journal->j_header_bh) {
		brelse(journal->j_header_bh);
	}
	/* j_header_bh is on the journal dev, make sure not to release the journal
	 * dev until we brelse j_header_bh
	 */
	release_journal_dev(sb, journal);
	vfree(journal);
}

/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, int error)
{
	struct reiserfs_transaction_handle myth;
	int flushed = 0;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	/* we only want to flush out transactions if we were called with error == 0
	 */
	if (!error && !(sb->s_flags & MS_RDONLY)) {
		/* end the current trans */
		BUG_ON(!th->t_trans_id);
		do_journal_end(th, sb, 10, FLUSH_ALL);

		/* make sure something gets logged to force our way into the flush code */
		if (!journal_join(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb, SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
			flushed = 1;
		}
	}

	/* this also catches errors during the do_journal_end above */
	if (!error && reiserfs_is_journal_aborted(journal)) {
		memset(&myth, 0, sizeof(myth));
		if (!journal_join_abort(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb, SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
		}
	}

	reiserfs_mounted_fs_count--;
	/* wait for all commits to finish */
	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);

	/*
	 * We must release the write lock here because
	 * the workqueue job (flush_async_commit) needs this lock
	 */
	reiserfs_write_unlock(sb);
	flush_workqueue(commit_wq);

	if (!reiserfs_mounted_fs_count) {
		destroy_workqueue(commit_wq);
		commit_wq = NULL;
	}

	free_journal_ram(sb);

	reiserfs_write_lock(sb);

	return 0;
}

/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th,
		    struct super_block *sb)
{
	return do_journal_release(th, sb, 0);
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
			  struct super_block *sb)
{
	return do_journal_release(th, sb, 1);
}

/* compares description block with commit block.
   returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *sb,
				       struct reiserfs_journal_desc *desc,
				       struct reiserfs_journal_commit *commit)
{
	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0) {
		return 1;
	}
	return 0;
}

/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *sb,
					struct buffer_head *d_bh,
					unsigned int *oldest_invalid_trans_id,
					unsigned long *newest_mount_id)
{
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;
	unsigned long offset;

	if (!d_bh)
		return 0;

	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
	if (get_desc_trans_len(desc) > 0
	    && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
		if (oldest_invalid_trans_id && *oldest_invalid_trans_id
		    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-986: transaction "
				       "is valid returning because trans_id %d is greater than "
				       "oldest_invalid %lu",
				       get_desc_trans_id(desc),
				       *oldest_invalid_trans_id);
			return 0;
		}
		if (newest_mount_id
		    && *newest_mount_id > get_desc_mount_id(desc)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-1087: transaction "
				       "is valid returning because mount_id %d is less than "
				       "newest_mount_id %lu",
				       get_desc_mount_id(desc),
				       *newest_mount_id);
			return -1;
		}
		if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
			reiserfs_warning(sb, "journal-2018",
					 "Bad transaction length %d "
					 "encountered, ignoring transaction",
					 get_desc_trans_len(desc));
			return -1;
		}
		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);

		/* ok, we have a journal description block, lets see if the transaction was valid */
c_bh =
		    journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				  ((offset + get_desc_trans_len(desc) + 1) %
				   SB_ONDISK_JOURNAL_SIZE(sb)));
		if (!c_bh)
			return 0;
		commit = (struct reiserfs_journal_commit *)c_bh->b_data;
		if (journal_compare_desc_commit(sb, desc, commit)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal_transaction_is_valid, commit offset %ld had bad "
				       "time %d or length %d",
				       c_bh->b_blocknr -
				       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
				       get_commit_trans_id(commit),
				       get_commit_trans_len(commit));
			brelse(c_bh);
			if (oldest_invalid_trans_id) {
				*oldest_invalid_trans_id =
				    get_desc_trans_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1004: "
					       "transaction_is_valid setting oldest invalid trans_id "
					       "to %d",
					       get_desc_trans_id(desc));
			}
			return -1;
		}
		brelse(c_bh);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1006: found valid "
			       "transaction start offset %llu, len %d id %d",
			       d_bh->b_blocknr -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_desc_trans_len(desc),
			       get_desc_trans_id(desc));
		return 1;
	} else {
		return 0;
	}
}

/* release num buffer heads from the array */
static void brelse_array(struct buffer_head **heads, int num)
{
	int i;
	for (i = 0; i < num; i++) {
		brelse(heads[i]);
	}
}

/*
** given the start, and values for the oldest acceptable transactions,
** this either reads in and replays a transaction, or returns because the transaction
** is invalid, or too old.
*/
static int journal_read_transaction(struct super_block *sb,
				    unsigned long cur_dblock,
				    unsigned long oldest_start,
				    unsigned int oldest_trans_id,
				    unsigned long newest_mount_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	unsigned int trans_id = 0;
	struct buffer_head *c_bh;
	struct buffer_head *d_bh;
	struct buffer_head **log_blocks = NULL;
	struct buffer_head **real_blocks = NULL;
	unsigned int trans_offset;
	int i;
	int trans_half;

	d_bh = journal_bread(sb, cur_dblock);
	if (!d_bh)
		return 1;
	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
	trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: "
		       "journal_read_transaction, offset %llu, len %d mount_id %d",
		       d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
		       get_desc_trans_len(desc), get_desc_mount_id(desc));
	if (get_desc_trans_id(desc) < oldest_trans_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: "
			       "journal_read_trans skipping because %lu is too old",
			       cur_dblock -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb));
		brelse(d_bh);
		return 1;
	}
	if (get_desc_mount_id(desc) != newest_mount_id) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
			       "journal_read_trans skipping because %d is != "
			       "newest_mount_id %lu", get_desc_mount_id(desc),
			       newest_mount_id);
		brelse(d_bh);
		return 1;
	}
	c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			     ((trans_offset + get_desc_trans_len(desc) + 1) %
			      SB_ONDISK_JOURNAL_SIZE(sb)));
	if (!c_bh) {
		brelse(d_bh);
		return 1;
	}
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	if (journal_compare_desc_commit(sb, desc, commit)) {
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal_read_transaction, "
			       "commit offset %llu had bad time %d or length %d",
			       c_bh->b_blocknr -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_commit_trans_id(commit),
			       get_commit_trans_len(commit));
		brelse(c_bh);
		brelse(d_bh);
		return 1;
	}

	if (bdev_read_only(sb->s_bdev)) {
		reiserfs_warning(sb, "clm-2076",
				 "device is readonly, unable to replay log");
		brelse(c_bh);
		brelse(d_bh);
		return -EROFS;
	}

	trans_id = get_desc_trans_id(desc);
	/* now we know we've got a good transaction, and it was inside the valid time ranges */
	log_blocks = kmalloc(get_desc_trans_len(desc) *
			     sizeof(struct buffer_head *), GFP_NOFS);
	real_blocks = kmalloc(get_desc_trans_len(desc) *
			      sizeof(struct buffer_head *), GFP_NOFS);
	if (!log_blocks || !real_blocks) {
		brelse(c_bh);
		brelse(d_bh);
		kfree(log_blocks);
		kfree(real_blocks);
		reiserfs_warning(sb, "journal-1169",
				 "kmalloc failed, unable to mount FS");
		return -1;
	}
	/* get all the buffer heads */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		log_blocks[i] =
		    journal_getblk(sb,
				   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				   (trans_offset + 1 + i) %
				   SB_ONDISK_JOURNAL_SIZE(sb));
		/* the first trans_half real block numbers live in the desc
		 * block, the rest in the commit block */
		if (i < trans_half) {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(desc->j_realblock[i]));
		} else {
			real_blocks[i] =
			    sb_getblk(sb,
				      le32_to_cpu(commit->
						  j_realblock[i - trans_half]));
		}
		if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
			reiserfs_warning(sb, "journal-1207",
					 "REPLAY FAILURE fsck required! "
					 "Block to replay is outside of "
					 "filesystem");
			goto abort_replay;
		}
		/* make sure we don't try to replay onto log or reserved area */
		if (is_block_in_log_or_reserved_area
		    (sb, real_blocks[i]->b_blocknr)) {
			reiserfs_warning(sb, "journal-1204",
					 "REPLAY FAILURE fsck required! "
					 "Trying to replay onto a log block");
		      abort_replay:
			brelse_array(log_blocks, i);
			brelse_array(real_blocks, i);
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
	}
	/* read in the log blocks, memcpy to the corresponding real block */
	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
	for (i = 0; i < get_desc_trans_len(desc); i++) {

		reiserfs_write_unlock(sb);
		wait_on_buffer(log_blocks[i]);
		reiserfs_write_lock(sb);

		if (!buffer_uptodate(log_blocks[i])) {
			reiserfs_warning(sb, "journal-1212",
					 "REPLAY FAILURE fsck required! "
					 "buffer write failed");
			brelse_array(log_blocks + i,
				     get_desc_trans_len(desc) - i);
			brelse_array(real_blocks, get_desc_trans_len(desc));
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
		memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
		       real_blocks[i]->b_size);
		set_buffer_uptodate(real_blocks[i]);
		brelse(log_blocks[i]);
	}
	/* flush out the real blocks */
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		set_buffer_dirty(real_blocks[i]);
		write_dirty_buffer(real_blocks[i], WRITE);
	}
	for (i = 0; i < get_desc_trans_len(desc); i++) {
		wait_on_buffer(real_blocks[i]);
		if (!buffer_uptodate(real_blocks[i])) {
			reiserfs_warning(sb, "journal-1226",
					 "REPLAY FAILURE, fsck required! "
					 "buffer write failed");
			brelse_array(real_blocks + i,
				     get_desc_trans_len(desc) - i);
			brelse(c_bh);
			brelse(d_bh);
			kfree(log_blocks);
			kfree(real_blocks);
			return -1;
		}
		brelse(real_blocks[i]);
	}
	cur_dblock =
	    SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
	    ((trans_offset + get_desc_trans_len(desc) + 2) %
	     SB_ONDISK_JOURNAL_SIZE(sb));
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "journal-1095: setting journal " "start to offset %ld",
		       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));

	/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	journal->j_last_flush_trans_id = trans_id;
	journal->j_trans_id = trans_id + 1;
	/* check for trans_id overflow */
	if (journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	brelse(c_bh);
	brelse(d_bh);
	kfree(log_blocks);
	kfree(real_blocks);
	return 0;
}

/* This function reads blocks starting from block and to max_block of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
static struct buffer_head *reiserfs_breada(struct block_device *dev,
					   b_blocknr_t block, int bufsize,
					   b_blocknr_t max_block)
{
	struct buffer_head *bhlist[BUFNR];
	unsigned int blocks = BUFNR;
	struct buffer_head *bh;
	int i, j;

	bh = __getblk(dev, block, bufsize);
	if (buffer_uptodate(bh))
		return (bh);

	if (block + BUFNR > max_block) {
		blocks = max_block - block;
	}
	bhlist[0] = bh;
	j = 1;
	/* gather read-ahead buffers until one is already up to date */
	for (i = 1; i < blocks; i++) {
		bh = __getblk(dev, block + i, bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		} else
			bhlist[j++] = bh;
	}
	ll_rw_block(READ, j, bhlist);
	/* only the first buffer is returned; drop the read-ahead refs now */
	for (i = 1; i < j; i++)
		brelse(bhlist[i]);
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
*/
/*
 * Scan and replay the on-disk journal at mount time.
 * Returns 0 on success, 1 if the journal header cannot be read, or a
 * negative value if replay/header update fails (caller aborts the mount).
 */
static int journal_read(struct super_block *sb)
{
    struct reiserfs_journal *journal = SB_JOURNAL(sb);
    struct reiserfs_journal_desc *desc;
    unsigned int oldest_trans_id = 0;
    unsigned int oldest_invalid_trans_id = 0;
    time_t start;
    unsigned long oldest_start = 0;
    unsigned long cur_dblock = 0;
    unsigned long newest_mount_id = 9;
    struct buffer_head *d_bh;
    struct reiserfs_journal_header *jh;
    int valid_journal_header = 0;
    int replay_count = 0;
    int continue_replay = 1;
    int ret;
    char b[BDEVNAME_SIZE];

    cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
    reiserfs_info(sb, "checking transaction log (%s)\n",
                  bdevname(journal->j_dev_bd, b));
    start = get_seconds();

    /* step 1, read in the journal header block. Check the transaction it says
     ** is the first unflushed, and if that transaction is not valid,
     ** replay is done
     */
    journal->j_header_bh = journal_bread(sb,
                                         SB_ONDISK_JOURNAL_1st_BLOCK(sb)
                                         + SB_ONDISK_JOURNAL_SIZE(sb));
    if (!journal->j_header_bh) {
        return 1;
    }
    jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
    if (le32_to_cpu(jh->j_first_unflushed_offset) <
        SB_ONDISK_JOURNAL_SIZE(sb)
        && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
        oldest_start =
            SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
            le32_to_cpu(jh->j_first_unflushed_offset);
        oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
        newest_mount_id = le32_to_cpu(jh->j_mount_id);
        reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                       "journal-1153: found in "
                       "header: first_unflushed_offset %d, last_flushed_trans_id "
                       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
                       le32_to_cpu(jh->j_last_flush_trans_id));
        valid_journal_header = 1;

        /* now, we try to read the first unflushed offset. If it is not valid,
         ** there is nothing more we can do, and it makes no sense to read
         ** through the whole log.
         */
        d_bh =
            journal_bread(sb,
                          SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
                          le32_to_cpu(jh->j_first_unflushed_offset));
        ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
        if (!ret) {
            continue_replay = 0;
        }
        brelse(d_bh);
        goto start_log_replay;
    }

    /* ok, there are transactions that need to be replayed. start with the
     ** first log block, find all the valid transactions, and pick out the
     ** oldest.
     */
    while (continue_replay
           && cur_dblock <
           (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
            SB_ONDISK_JOURNAL_SIZE(sb))) {
        /* Note that it is required for blocksize of primary fs device and
           journal device to be the same */
        d_bh =
            reiserfs_breada(journal->j_dev_bd, cur_dblock,
                            sb->s_blocksize,
                            SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
                            SB_ONDISK_JOURNAL_SIZE(sb));
        ret =
            journal_transaction_is_valid(sb, d_bh,
                                         &oldest_invalid_trans_id,
                                         &newest_mount_id);
        if (ret == 1) {
            desc = (struct reiserfs_journal_desc *)d_bh->b_data;
            if (oldest_start == 0) {    /* init all oldest_ values */
                oldest_trans_id = get_desc_trans_id(desc);
                oldest_start = d_bh->b_blocknr;
                newest_mount_id = get_desc_mount_id(desc);
                reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                               "journal-1179: Setting "
                               "oldest_start to offset %llu, trans_id %lu",
                               oldest_start -
                               SB_ONDISK_JOURNAL_1st_BLOCK
                               (sb), oldest_trans_id);
            } else if (oldest_trans_id > get_desc_trans_id(desc)) {
                /* one we just read was older */
                oldest_trans_id = get_desc_trans_id(desc);
                oldest_start = d_bh->b_blocknr;
                reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                               "journal-1180: Resetting "
                               "oldest_start to offset %lu, trans_id %lu",
                               oldest_start -
                               SB_ONDISK_JOURNAL_1st_BLOCK
                               (sb), oldest_trans_id);
            }
            if (newest_mount_id < get_desc_mount_id(desc)) {
                newest_mount_id = get_desc_mount_id(desc);
                reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                               "journal-1299: Setting "
                               "newest_mount_id to %d",
                               get_desc_mount_id(desc));
            }
            /* skip past this transaction: desc block + log blocks + commit */
            cur_dblock += get_desc_trans_len(desc) + 2;
        } else {
            cur_dblock++;
        }
        brelse(d_bh);
    }

      start_log_replay:
    cur_dblock = oldest_start;
    if (oldest_trans_id) {
        reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                       "journal-1206: Starting replay "
                       "from offset %llu, trans_id %lu",
                       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
                       oldest_trans_id);

    }
    replay_count = 0;
    /* replay transactions in order until one is too old or invalid */
    while (continue_replay && oldest_trans_id > 0) {
        ret =
            journal_read_transaction(sb, cur_dblock, oldest_start,
                                     oldest_trans_id, newest_mount_id);
        if (ret < 0) {
            return ret;
        } else if (ret != 0) {
            break;
        }
        cur_dblock =
            SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
        replay_count++;
        if (cur_dblock == oldest_start)
            break;
    }

    if (oldest_trans_id == 0) {
        reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                       "journal-1225: No valid " "transactions found");
    }
    /* j_start does not get set correctly if we don't replay any transactions.
     ** if we had a valid journal_header, set j_start to the first unflushed
     ** transaction value, copy the trans_id from the header
     */
    if (valid_journal_header && replay_count == 0) {
        journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
        journal->j_trans_id =
            le32_to_cpu(jh->j_last_flush_trans_id) + 1;
        /* check for trans_id overflow */
        if (journal->j_trans_id == 0)
            journal->j_trans_id = 10;
        journal->j_last_flush_trans_id =
            le32_to_cpu(jh->j_last_flush_trans_id);
        journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
    } else {
        journal->j_mount_id = newest_mount_id + 1;
    }
    reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                   "newest_mount_id to %lu", journal->j_mount_id);
    journal->j_first_unflushed_offset = journal->j_start;
    if (replay_count > 0) {
        reiserfs_info(sb,
                      "replayed %d transactions in %lu seconds\n",
                      replay_count, get_seconds() - start);
    }
    /* persist the new journal state unless we are mounting read-only */
    if (!bdev_read_only(sb->s_bdev) &&
        _update_journal_header_block(sb, journal->j_start,
                                     journal->j_last_flush_trans_id)) {
        /* replay failed, caller must call free_journal_ram and abort
         ** the mount
         */
        return -1;
    }
    return 0;
}

/* Allocate and initialize one journal list (one transaction's bookkeeping). */
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
    struct reiserfs_journal_list *jl;
    /* __GFP_NOFAIL: allocation may block but never returns NULL */
    jl = kzalloc(sizeof(struct reiserfs_journal_list),
                 GFP_NOFS | __GFP_NOFAIL);
    INIT_LIST_HEAD(&jl->j_list);
    INIT_LIST_HEAD(&jl->j_working_list);
    INIT_LIST_HEAD(&jl->j_tail_bh_list);
    INIT_LIST_HEAD(&jl->j_bh_list);
    mutex_init(&jl->j_commit_mutex);
    SB_JOURNAL(s)->j_num_lists++;
    /* returned with a reference held by the caller */
    get_journal_list(jl);
    return jl;
}

/* Set up the first (current) journal list for a freshly initialized journal. */
static void journal_list_init(struct super_block *sb)
{
    SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
}

/*
 * Drop our claim on and close the journal block device, if one is open.
 * Returns the blkdev_put() result (0 on success); logs a warning on failure.
 */
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal)
{
    int result;

    result = 0;

    if (journal->j_dev_bd != NULL) {
        /* only release the claim when the journal is on a separate device */
        if (journal->j_dev_bd->bd_dev != super->s_dev)
            bd_release(journal->j_dev_bd);
        result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
        journal->j_dev_bd = NULL;
    }

    if (result != 0) {
        reiserfs_warning(super, "sh-457",
                         "Cannot release journal device: %i", result);
    }
    return result;
}

/*
 * Open the block device that holds the journal.  If @jdev_name is empty the
 * device number stored in the superblock (or the fs device itself) is used;
 * otherwise @jdev_name is opened exclusively.  Returns 0 or a negative errno.
 */
static int journal_init_dev(struct super_block *super,
                            struct reiserfs_journal *journal,
                            const char *jdev_name)
{
    int result;
    dev_t jdev;
    fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
    char b[BDEVNAME_SIZE];

    result = 0;

    journal->j_dev_bd = NULL;
    jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
        new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

    if (bdev_read_only(super->s_bdev))
        blkdev_mode = FMODE_READ;

    /* there is no "jdev" option and journal is on separate device */
    if ((!jdev_name || !jdev_name[0])) {
        journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
        journal->j_dev_mode = blkdev_mode;
        if (IS_ERR(journal->j_dev_bd)) {
            result = PTR_ERR(journal->j_dev_bd);
            journal->j_dev_bd = NULL;
            reiserfs_warning(super, "sh-458",
                             "cannot init journal device '%s': %i",
                             __bdevname(jdev, b), result);
            return result;
        } else if (jdev != super->s_dev) {
            /* separate journal device: claim it so nobody else opens it */
            result = bd_claim(journal->j_dev_bd, journal);
            if (result) {
                blkdev_put(journal->j_dev_bd, blkdev_mode);
                return result;
            }

            set_blocksize(journal->j_dev_bd, super->s_blocksize);
        }

        return 0;
    }

    journal->j_dev_mode = blkdev_mode;
    journal->j_dev_bd = open_bdev_exclusive(jdev_name, blkdev_mode, journal);
    if (IS_ERR(journal->j_dev_bd)) {
        result = PTR_ERR(journal->j_dev_bd);
        journal->j_dev_bd = NULL;
        reiserfs_warning(super,
                         "journal_init_dev: Cannot open '%s': %i",
                         jdev_name, result);
        return result;
    }

    set_blocksize(journal->j_dev_bd, super->s_blocksize);
    reiserfs_info(super,
                  "journal_init_dev: journal device: %s\n",
                  bdevname(journal->j_dev_bd, b));
    return 0;
}

/**
 * When creating/tuning a file system user can assign some
 * journal params within boundaries which depend on the ratio
 * blocksize/standard_blocksize.
 *
 * For blocks >= standard_blocksize transaction size should
 * be not less then JOURNAL_TRANS_MIN_DEFAULT, and not more
 * then JOURNAL_TRANS_MAX_DEFAULT.
 *
 * For blocks < standard_blocksize these boundaries should be
 * decreased proportionally.
 */
#define REISERFS_STANDARD_BLKSIZE (4096)

static int check_advise_trans_params(struct super_block *sb,
                                     struct reiserfs_journal *journal)
{
    if (journal->j_trans_max) {
        /* Non-default journal params.  Do sanity check for them.
*/
        int ratio = 1;
        /* smaller blocks scale the allowed trans-size window down */
        if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
            ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

        if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
            journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
            SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
            JOURNAL_MIN_RATIO) {
            reiserfs_warning(sb, "sh-462",
                             "bad transaction max size (%u). "
                             "FSCK?", journal->j_trans_max);
            return 1;
        }
        /* max_batch must keep the mkfs-time proportion to trans_max */
        if (journal->j_max_batch != (journal->j_trans_max) *
            JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
            reiserfs_warning(sb, "sh-463",
                             "bad transaction max batch (%u). "
                             "FSCK?", journal->j_max_batch);
            return 1;
        }
    } else {
        /* Default journal params.
           The file system was created by old version
           of mkreiserfs, so some fields contain zeros,
           and we need to advise proper values for them */
        if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
            reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
                             sb->s_blocksize);
            return 1;
        }
        journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
        journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
        journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
    }
    return 0;
}

/*
 * must be called once on fs mount.  calls journal_read for you
 */
int journal_init(struct super_block *sb, const char *j_dev_name,
                 int old_format, unsigned int commit_max_age)
{
    /* two cnodes per journal block: one for the log copy, one for the
       real-location copy */
    int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
    struct buffer_head *bhjh;
    struct reiserfs_super_block *rs;
    struct reiserfs_journal_header *jh;
    struct reiserfs_journal *journal;
    struct reiserfs_journal_list *jl;
    char b[BDEVNAME_SIZE];
    int ret;

    /*
     * Unlock here to avoid various RECLAIM-FS-ON <-> IN-RECLAIM-FS
     * dependency inversion warnings.
*/ reiserfs_write_unlock(sb); journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal)); if (!journal) { reiserfs_warning(sb, "journal-1256", "unable to get memory for journal structure"); reiserfs_write_lock(sb); return 1; } memset(journal, 0, sizeof(struct reiserfs_journal)); INIT_LIST_HEAD(&journal->j_bitmap_nodes); INIT_LIST_HEAD(&journal->j_prealloc_list); INIT_LIST_HEAD(&journal->j_working_list); INIT_LIST_HEAD(&journal->j_journal_list); journal->j_persistent_trans = 0; ret = reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap, reiserfs_bmap_count(sb)); reiserfs_write_lock(sb); if (ret) goto free_and_return; allocate_bitmap_nodes(sb); /* reserved for journal area support */ SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ? REISERFS_OLD_DISK_OFFSET_IN_BYTES / sb->s_blocksize + reiserfs_bmap_count(sb) + 1 : REISERFS_DISK_OFFSET_IN_BYTES / sb->s_blocksize + 2); /* Sanity check to see is the standard journal fitting withing first bitmap (actual for small blocksizes) */ if (!SB_ONDISK_JOURNAL_DEVICE(sb) && (SB_JOURNAL_1st_RESERVED_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) { reiserfs_warning(sb, "journal-1393", "journal does not fit for area addressed " "by first of bitmap blocks. It starts at " "%u and its size is %u. Block size %ld", SB_JOURNAL_1st_RESERVED_BLOCK(sb), SB_ONDISK_JOURNAL_SIZE(sb), sb->s_blocksize); goto free_and_return; } /* * We need to unlock here to avoid creating the following * dependency: * reiserfs_lock -> sysfs_mutex * Because the reiserfs mmap path creates the following dependency: * mm->mmap -> reiserfs_lock, hence we have * mm->mmap -> reiserfs_lock ->sysfs_mutex * This would ends up in a circular dependency with sysfs readdir path * which does sysfs_mutex -> mm->mmap_sem * This is fine because the reiserfs lock is useless in mount path, * at least until we call journal_begin. We keep it for paranoid * reasons. 
*/ reiserfs_write_unlock(sb); if (journal_init_dev(sb, journal, j_dev_name) != 0) { reiserfs_write_lock(sb); reiserfs_warning(sb, "sh-462", "unable to initialize jornal device"); goto free_and_return; } reiserfs_write_lock(sb); rs = SB_DISK_SUPER_BLOCK(sb); /* read journal header */ bhjh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb)); if (!bhjh) { reiserfs_warning(sb, "sh-459", "unable to read journal header"); goto free_and_return; } jh = (struct reiserfs_journal_header *)(bhjh->b_data); /* make sure that journal matches to the super block */ if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) { reiserfs_warning(sb, "sh-460", "journal header magic %x (device %s) does " "not match to magic found in super block %x", jh->jh_journal.jp_journal_magic, bdevname(journal->j_dev_bd, b), sb_jp_journal_magic(rs)); brelse(bhjh); goto free_and_return; } journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max); journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch); journal->j_max_commit_age = le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age); journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE; if (check_advise_trans_params(sb, journal) != 0) goto free_and_return; journal->j_default_max_commit_age = journal->j_max_commit_age; if (commit_max_age != 0) { journal->j_max_commit_age = commit_max_age; journal->j_max_trans_age = commit_max_age; } reiserfs_info(sb, "journal params: device %s, size %u, " "journal first block %u, max trans len %u, max batch %u, " "max commit age %u, max trans age %u\n", bdevname(journal->j_dev_bd, b), SB_ONDISK_JOURNAL_SIZE(sb), SB_ONDISK_JOURNAL_1st_BLOCK(sb), journal->j_trans_max, journal->j_max_batch, journal->j_max_commit_age, journal->j_max_trans_age); brelse(bhjh); journal->j_list_bitmap_index = 0; journal_list_init(sb); memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); 
INIT_LIST_HEAD(&journal->j_dirty_buffers); spin_lock_init(&journal->j_dirty_buffers_lock); journal->j_start = 0; journal->j_len = 0; journal->j_len_alloc = 0; atomic_set(&(journal->j_wcount), 0); atomic_set(&(journal->j_async_throttle), 0); journal->j_bcount = 0; journal->j_trans_start_time = 0; journal->j_last = NULL; journal->j_first = NULL; init_waitqueue_head(&(journal->j_join_wait)); mutex_init(&journal->j_mutex); mutex_init(&journal->j_flush_mutex); journal->j_trans_id = 10; journal->j_mount_id = 10; journal->j_state = 0; atomic_set(&(journal->j_jlock), 0); reiserfs_write_unlock(sb); journal->j_cnode_free_list = allocate_cnodes(num_cnodes); reiserfs_write_lock(sb); journal->j_cnode_free_orig = journal->j_cnode_free_list; journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0; journal->j_cnode_used = 0; journal->j_must_wait = 0; if (journal->j_cnode_free == 0) { reiserfs_warning(sb, "journal-2004", "Journal cnode memory " "allocation failed (%ld bytes). Journal is " "too large for available memory. Usually " "this is due to a journal that is too large.", sizeof (struct reiserfs_journal_cnode) * num_cnodes); goto free_and_return; } init_journal_hash(sb); jl = journal->j_current_jl; jl->j_list_bitmap = get_list_bitmap(sb, jl); if (!jl->j_list_bitmap) { reiserfs_warning(sb, "journal-2005", "get_list_bitmap failed for journal list 0"); goto free_and_return; } if (journal_read(sb) < 0) { reiserfs_warning(sb, "reiserfs-2006", "Replay Failure, unable to mount"); goto free_and_return; } reiserfs_mounted_fs_count++; if (reiserfs_mounted_fs_count <= 1) { reiserfs_write_unlock(sb); commit_wq = create_workqueue("reiserfs"); reiserfs_write_lock(sb); } INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); journal->j_work_sb = sb; return 0; free_and_return: free_journal_ram(sb); return 1; } /* ** test for a polite end of the current transaction. 
Used by file_write, and should
** be used by delete to make sure they don't write more than can fit inside a
** single transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
                                   int new_alloc)
{
    struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
    time_t now = get_seconds();
    /* cannot restart while nested */
    BUG_ON(!th->t_trans_id);
    if (th->t_refcount > 1)
        return 0;
    /* returns 1 when the caller should end the handle and start a new one */
    if (journal->j_must_wait > 0 ||
        (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
        atomic_read(&(journal->j_jlock)) ||
        (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
        journal->j_cnode_free < (journal->j_trans_max * 3)) {
        return 1;
    }
    /* protected by the BKL here */
    journal->j_len_alloc += new_alloc;
    th->t_blocks_allocated += new_alloc ;
    return 0;
}

/* this must be called inside a transaction, and requires the
** kernel_lock to be held */
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
{
    struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
    BUG_ON(!th->t_trans_id);
    journal->j_must_wait = 1;
    set_bit(J_WRITERS_BLOCKED, &journal->j_state);
    return;
}

/* this must be called without a transaction started, and does not
** require BKL */
void reiserfs_allow_writes(struct super_block *s)
{
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
    wake_up(&journal->j_join_wait);
}

/* this must be called without a transaction started, and does not
** require BKL */
void reiserfs_wait_on_write_block(struct super_block *s)
{
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    wait_event(journal->j_join_wait,
               !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
}

/* Sleep exactly once on j_join_wait, dropping the write lock while asleep. */
static void queue_log_writer(struct super_block *s)
{
    wait_queue_t wait;
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    set_bit(J_WRITERS_QUEUED, &journal->j_state);

    /*
     * we don't want to use wait_event here because
     * we only want to wait once.
     */
    init_waitqueue_entry(&wait, current);
    add_wait_queue(&journal->j_join_wait, &wait);
    set_current_state(TASK_UNINTERRUPTIBLE);
    /* re-check after going on the queue: the waker may have already run */
    if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
        reiserfs_write_unlock(s);
        schedule();
        reiserfs_write_lock(s);
    }
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&journal->j_join_wait, &wait);
}

/* Wake writers parked by queue_log_writer(), if any. */
static void wake_queued_writers(struct super_block *s)
{
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
        wake_up(&journal->j_join_wait);
}

/*
 * Poll until transaction @trans_id has ended or has stopped gathering new
 * writers (its bcount stabilizes), yielding the write lock while waiting.
 */
static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
{
    struct reiserfs_journal *journal = SB_JOURNAL(sb);
    unsigned long bcount = journal->j_bcount;
    while (1) {
        reiserfs_write_unlock(sb);
        schedule_timeout_uninterruptible(1);
        reiserfs_write_lock(sb);
        journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
        while ((atomic_read(&journal->j_wcount) > 0 ||
                atomic_read(&journal->j_jlock)) &&
               journal->j_trans_id == trans_id) {
            queue_log_writer(sb);
        }
        if (journal->j_trans_id != trans_id)
            break;
        /* no new writers joined during the tick: stop waiting */
        if (bcount == journal->j_bcount)
            break;
        bcount = journal->j_bcount;
    }
}

/* join == true if you must join an existing transaction.
** join == false if you can deal with waiting for others to finish
**
** this will block until the transaction is joinable. send the number of
** blocks you expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block *sb, unsigned long nblocks,
                              int join)
{
    time_t now = get_seconds();
    unsigned int old_trans_id;
    struct reiserfs_journal *journal = SB_JOURNAL(sb);
    struct reiserfs_transaction_handle myth;
    int sched_count = 0;
    int retval;

    reiserfs_check_lock_depth(sb, "journal_begin");
    BUG_ON(nblocks > journal->j_trans_max);

    PROC_INFO_INC(sb, journal.journal_being);
    /* set here for journal_join */
    th->t_refcount = 1;
    th->t_super = sb;

      relock:
    lock_journal(sb);
    if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
        unlock_journal(sb);
        retval = journal->j_errno;
        goto out_fail;
    }
    journal->j_bcount++;

    if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
        unlock_journal(sb);
        reiserfs_write_unlock(sb);
        reiserfs_wait_on_write_block(sb);
        reiserfs_write_lock(sb);
        PROC_INFO_INC(sb, journal.journal_relock_writers);
        goto relock;
    }
    now = get_seconds();

    /* if there is no room in the journal OR
     ** if this transaction is too old, and we weren't called joinable,
     ** wait for it to finish before beginning we don't sleep if there
     ** aren't other writers
     */
    if ((!join && journal->j_must_wait > 0) ||
        (!join
         && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
        || (!join && atomic_read(&journal->j_wcount) > 0
            && journal->j_trans_start_time > 0
            && (now - journal->j_trans_start_time) >
            journal->j_max_trans_age) || (!join
                                          && atomic_read(&journal->j_jlock))
        || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

        old_trans_id = journal->j_trans_id;
        unlock_journal(sb);    /* allow others to finish this transaction */

        if (!join && (journal->j_len_alloc + nblocks + 2) >=
            journal->j_max_batch &&
            ((journal->j_len + nblocks + 2) * 100) <
            (journal->j_len_alloc * 75)) {
            if (atomic_read(&journal->j_wcount) > 10) {
                sched_count++;
                queue_log_writer(sb);
                goto relock;
            }
        }
        /* don't mess with joining the transaction if all we have to do is
         * wait for someone else to do a commit
         */
        if (atomic_read(&journal->j_jlock)) {
            while (journal->j_trans_id == old_trans_id &&
                   atomic_read(&journal->j_jlock)) {
                queue_log_writer(sb);
            }
            goto relock;
        }
        /* end the old transaction ourselves via a temporary handle */
        retval = journal_join(&myth, sb, 1);
        if (retval)
            goto out_fail;

        /* someone might have ended the transaction while we joined */
        if (old_trans_id != journal->j_trans_id) {
            retval = do_journal_end(&myth, sb, 1, 0);
        } else {
            retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
        }

        if (retval)
            goto out_fail;

        PROC_INFO_INC(sb, journal.journal_relock_wcount);
        goto relock;
    }
    /* we are the first writer, set trans_id */
    if (journal->j_trans_start_time == 0) {
        journal->j_trans_start_time = get_seconds();
    }
    atomic_inc(&(journal->j_wcount));
    journal->j_len_alloc += nblocks;
    th->t_blocks_logged = 0;
    th->t_blocks_allocated = nblocks;
    th->t_trans_id = journal->j_trans_id;
    unlock_journal(sb);
    INIT_LIST_HEAD(&th->t_list);
    get_fs_excl();
    return 0;

      out_fail:
    memset(th, 0, sizeof(*th));
    /* Re-set th->t_super, so we can properly keep track of how many
     * persistent transactions there are. We need to do this so if this
     * call is part of a failed restart_transaction, we can free it later */
    th->t_super = sb;
    return retval;
}

struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
                                                                    super_block
                                                                    *s,
                                                                    int nblocks)
{
    int ret;
    struct reiserfs_transaction_handle *th;

    /* if we're nesting into an existing transaction.
It will be
     ** persistent on its own
     */
    if (reiserfs_transaction_running(s)) {
        th = current->journal_info;
        th->t_refcount++;
        BUG_ON(th->t_refcount < 2);

        return th;
    }
    th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
    if (!th)
        return NULL;
    ret = journal_begin(th, s, nblocks);
    if (ret) {
        kfree(th);
        return NULL;
    }

    SB_JOURNAL(s)->j_persistent_trans++;
    return th;
}

/* End a handle from reiserfs_persistent_transaction(); frees it when the
   refcount drops to zero.  Returns journal_end()'s result or -EIO. */
int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
{
    struct super_block *s = th->t_super;
    int ret = 0;
    if (th->t_trans_id)
        ret = journal_end(th, th->t_super, th->t_blocks_allocated);
    else
        ret = -EIO;
    if (th->t_refcount == 0) {
        SB_JOURNAL(s)->j_persistent_trans--;
        kfree(th);
    }
    return ret;
}

/* Begin a handle that joins the current transaction unconditionally. */
static int journal_join(struct reiserfs_transaction_handle *th,
                        struct super_block *sb, unsigned long nblocks)
{
    struct reiserfs_transaction_handle *cur_th = current->journal_info;

    /* this keeps do_journal_end from NULLing out the current->journal_info
     ** pointer
     */
    th->t_handle_save = cur_th;
    BUG_ON(cur_th && cur_th->t_refcount > 1);
    return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
}

/* Like journal_join(), but allowed even after the journal has aborted. */
int journal_join_abort(struct reiserfs_transaction_handle *th,
                       struct super_block *sb, unsigned long nblocks)
{
    struct reiserfs_transaction_handle *cur_th = current->journal_info;

    /* this keeps do_journal_end from NULLing out the current->journal_info
     ** pointer
     */
    th->t_handle_save = cur_th;
    BUG_ON(cur_th && cur_th->t_refcount > 1);
    return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
}

int journal_begin(struct reiserfs_transaction_handle *th,
                  struct super_block *sb, unsigned long nblocks)
{
    struct reiserfs_transaction_handle *cur_th = current->journal_info;
    int ret;

    th->t_handle_save = NULL;
    if (cur_th) {
        /* we are nesting into the current transaction */
        if (cur_th->t_super == sb) {
            BUG_ON(!cur_th->t_refcount);
            cur_th->t_refcount++;
            memcpy(th, cur_th, sizeof(*th));
            if (th->t_refcount <= 1)
                reiserfs_warning(sb, "reiserfs-2005",
                                 "BAD: refcount <= 1, but "
                                 "journal_info != 0");
            return 0;
        } else {
            /* we've ended up with a handle from a different filesystem.
             ** save it and restore on journal_end. This should never
             ** really happen...
             */
            reiserfs_warning(sb, "clm-2100",
                             "nesting info a different FS");
            th->t_handle_save = current->journal_info;
            current->journal_info = th;
        }
    } else {
        current->journal_info = th;
    }
    ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
    BUG_ON(current->journal_info != th);

    /* I guess this boils down to being the reciprocal of clm-2100 above.
     * If do_journal_begin_r fails, we need to put it back, since journal_end
     * won't be called to do it. */
    if (ret)
        current->journal_info = th->t_handle_save;
    else
        BUG_ON(!th->t_refcount);

    return ret;
}

/*
** puts bh into the current transaction. If it was already there, reorders
** removes the old pointers from the hash, and puts new ones in (to make
** sure replay happen in the right order).
**
** if it was dirty, cleans and files onto the clean list. I can't let it
** be dirty again until the transaction is committed.
**
** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
                       struct super_block *sb, struct buffer_head *bh)
{
    struct reiserfs_journal *journal = SB_JOURNAL(sb);
    struct reiserfs_journal_cnode *cn = NULL;
    int count_already_incd = 0;
    int prepared = 0;
    BUG_ON(!th->t_trans_id);

    PROC_INFO_INC(sb, journal.mark_dirty);
    if (th->t_trans_id != journal->j_trans_id) {
        reiserfs_panic(th->t_super, "journal-1577",
                       "handle trans id %ld != current trans id %ld",
                       th->t_trans_id, journal->j_trans_id);
    }

    sb->s_dirt = 1;

    prepared = test_clear_buffer_journal_prepared(bh);
    clear_buffer_journal_restore_dirty(bh);
    /* already in this transaction, we are done */
    if (buffer_journaled(bh)) {
        PROC_INFO_INC(sb, journal.mark_dirty_already);
        return 0;
    }

    /* this must be turned into a panic instead of a warning.
We can't allow
     ** a dirty or journal_dirty or locked buffer to be logged, as some
     ** changes could get to disk too early. NOT GOOD.
     */
    if (!prepared || buffer_dirty(bh)) {
        reiserfs_warning(sb, "journal-1777",
                         "buffer %llu bad state "
                         "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
                         (unsigned long long)bh->b_blocknr,
                         prepared ? ' ' : '!',
                         buffer_locked(bh) ? ' ' : '!',
                         buffer_dirty(bh) ? ' ' : '!',
                         buffer_journal_dirty(bh) ? ' ' : '!');
    }

    if (atomic_read(&(journal->j_wcount)) <= 0) {
        reiserfs_warning(sb, "journal-1409",
                         "returning because j_wcount was %d",
                         atomic_read(&(journal->j_wcount)));
        return 1;
    }
    /* this error means I've screwed up, and we've overflowed the transaction.
     ** Nothing can be done here, except make the FS readonly or panic.
     */
    if (journal->j_len >= journal->j_trans_max) {
        reiserfs_panic(th->t_super, "journal-1413",
                       "j_len (%lu) is too big",
                       journal->j_len);
    }

    if (buffer_journal_dirty(bh)) {
        count_already_incd = 1;
        PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
        clear_buffer_journal_dirty(bh);
    }

    if (journal->j_len > journal->j_len_alloc) {
        journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
    }

    set_buffer_journaled(bh);

    /* now put this guy on the end */
    if (!cn) {
        cn = get_cnode(sb);
        if (!cn) {
            reiserfs_panic(sb, "journal-4", "get_cnode failed!");
        }

        if (th->t_blocks_logged == th->t_blocks_allocated) {
            th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
            journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
        }
        th->t_blocks_logged++;
        journal->j_len++;

        cn->bh = bh;
        cn->blocknr = bh->b_blocknr;
        cn->sb = sb;
        cn->jlist = NULL;
        insert_journal_hash(journal->j_hash_table, cn);
        if (!count_already_incd) {
            /* hold a ref on the buffer while it is in the transaction */
            get_bh(bh);
        }
    }
    cn->next = NULL;
    cn->prev = journal->j_last;
    cn->bh = bh;
    /* append the cnode to the transaction's ordered list */
    if (journal->j_last) {
        journal->j_last->next = cn;
        journal->j_last = cn;
    } else {
        journal->j_first = cn;
        journal->j_last = cn;
    }
    return 0;
}

int journal_end(struct reiserfs_transaction_handle *th,
                struct super_block *sb, unsigned long nblocks)
{
    if (!current->journal_info && th->t_refcount > 1)
        reiserfs_warning(sb, "REISER-NESTING",
                         "th NULL, refcount %d", th->t_refcount);

    if (!th->t_trans_id) {
        WARN_ON(1);
        return -EIO;
    }

    th->t_refcount--;
    if (th->t_refcount > 0) {
        /* nested handle: just fold our state back into the outer one */
        struct reiserfs_transaction_handle *cur_th =
            current->journal_info;

        /* we aren't allowed to close a nested transaction on a different
         ** filesystem from the one in the task struct
         */
        BUG_ON(cur_th->t_super != th->t_super);

        if (th != cur_th) {
            memcpy(current->journal_info, th, sizeof(*th));
            th->t_trans_id = 0;
        }
        return 0;
    } else {
        return do_journal_end(th, sb, nblocks, 0);
    }
}

/* removes from the current transaction, relsing and descrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and relsed the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *sb,
                                   b_blocknr_t blocknr, int already_cleaned)
{
    struct buffer_head *bh;
    struct reiserfs_journal_cnode *cn;
    struct reiserfs_journal *journal = SB_JOURNAL(sb);
    int ret = 0;

    cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
    if (!cn || !cn->bh) {
        return ret;
    }
    bh = cn->bh;
    /* unlink the cnode from the transaction's doubly-linked list */
    if (cn->prev) {
        cn->prev->next = cn->next;
    }
    if (cn->next) {
        cn->next->prev = cn->prev;
    }
    if (cn == journal->j_first) {
        journal->j_first = cn->next;
    }
    if (cn == journal->j_last) {
        journal->j_last = cn->prev;
    }
    if (bh)
        remove_journal_hash(sb, journal->j_hash_table, NULL,
                            bh->b_blocknr, 0);
    clear_buffer_journaled(bh);    /* don't log this one */

    if (!already_cleaned) {
        clear_buffer_journal_dirty(bh);
        clear_buffer_dirty(bh);
        clear_buffer_journal_test(bh);
        put_bh(bh);
        if (atomic_read(&(bh->b_count)) < 0) {
            reiserfs_warning(sb, "journal-1752",
                             "b_count < 0");
        }
        ret = 1;
    }
    journal->j_len--;
    journal->j_len_alloc--;
    free_cnode(sb, cn);
    return ret;
}

/*
** for any cnode in a journal list, it can only be dirtied of all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed
** to dirty, and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after
** flush_commit_list has gotten all the log blocks for a given
** transaction on disk
**
*/
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
    struct super_block *sb = cn->sb;
    b_blocknr_t blocknr = cn->blocknr;
    struct reiserfs_journal_cnode *cur = cn->hprev;
    int can_dirty = 1;

    /* first test hprev. These are all newer than cn, so any node here
     ** with the same block number and dev means this node can't be sent
     ** to disk right now.
     */
    while (cur && can_dirty) {
        if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
            cur->blocknr == blocknr) {
            can_dirty = 0;
        }
        cur = cur->hprev;
    }
    /* then test hnext. These are all older than cn. As long as they
     ** are committed to the log, it is safe to write cn to disk
     */
    cur = cn->hnext;
    while (cur && can_dirty) {
        if (cur->jlist && cur->jlist->j_len > 0 &&
            atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
            cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
            can_dirty = 0;
        }
        cur = cur->hnext;
    }
    return can_dirty;
}

/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
                     struct super_block *sb, unsigned long nblocks)
{
    struct reiserfs_journal *journal = SB_JOURNAL(sb);

    BUG_ON(!th->t_trans_id);
    /* you can sync while nested, very, very bad */
    BUG_ON(th->t_refcount > 1);
    if (journal->j_len == 0) {
        /* make sure there is something to commit: log the superblock buffer */
        reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
        journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
    }
    return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
}

/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(struct work_struct *work)
{
    struct reiserfs_journal *journal =
        container_of(work, struct reiserfs_journal, j_work.work);
    struct super_block *sb = journal->j_work_sb;
    struct reiserfs_journal_list *jl;
    struct list_head *entry;

    reiserfs_write_lock(sb);
    if (!list_empty(&journal->j_journal_list)) {
        /* last entry is the youngest, commit it and you get everything */
        entry = journal->j_journal_list.prev;
        jl = JOURNAL_LIST_ENTRY(entry);
        flush_commit_list(sb, jl, 1);
    }
    reiserfs_write_unlock(sb);
}

/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
int reiserfs_flush_old_commits(struct super_block *sb)
{
    time_t now;
    struct reiserfs_transaction_handle th;
    struct reiserfs_journal *journal = SB_JOURNAL(sb);

    now = get_seconds();
    /* safety check so we don't flush while we are replaying the log during
     * mount
     */
    if (list_empty(&journal->j_journal_list)) {
        return 0;
    }

    /* check the current transaction. If there are no writers, and it is
     * too old, finish it, and force the commit blocks to disk
     */
    if (atomic_read(&journal->j_wcount) <= 0 &&
        journal->j_trans_start_time > 0 &&
        journal->j_len > 0 &&
        (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
        if (!journal_join(&th, sb, 1)) {
            reiserfs_prepare_for_journal(sb,
                                         SB_BUFFER_WITH_SB(sb), 1);
            journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));

            /* we're only being called from kreiserfsd, it makes no sense to
             ** do an async commit so that kreiserfsd can do it later
             */
            do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
        }
    }
    return sb->s_dirt;
}

/*
** returns 0 if do_journal_end should return right away, returns 1 if
** do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will
** wait on j_join_wait until all the writers are done. By the time it wakes
** up, the transaction it was called has already ended, so it just flushes
** the commit list and returns 0.
**
** Won't batch when flush or commit_now is set. Also won't batch when
** others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still
** writers in the log.
*/ static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks, int flags) { time_t now; int flush = flags & FLUSH_ALL; int commit_now = flags & COMMIT_NOW; int wait_on_commit = flags & WAIT; struct reiserfs_journal_list *jl; struct reiserfs_journal *journal = SB_JOURNAL(sb); BUG_ON(!th->t_trans_id); if (th->t_trans_id != journal->j_trans_id) { reiserfs_panic(th->t_super, "journal-1577", "handle trans id %ld != current trans id %ld", th->t_trans_id, journal->j_trans_id); } journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged); if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */ atomic_dec(&(journal->j_wcount)); } /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released ** will be dealt with by next transaction that actually writes something, but should be taken ** care of in this trans */ BUG_ON(journal->j_len == 0); /* if wcount > 0, and we are called to with flush or commit_now, ** we wait on j_join_wait. We will wake up when the last writer has ** finished the transaction, and started it on its way to the disk. ** Then, we flush the commit or journal list, and just return 0 ** because the rest of journal end was already done for this transaction. 
*/ if (atomic_read(&(journal->j_wcount)) > 0) { if (flush || commit_now) { unsigned trans_id; jl = journal->j_current_jl; trans_id = jl->j_trans_id; if (wait_on_commit) jl->j_state |= LIST_COMMIT_PENDING; atomic_set(&(journal->j_jlock), 1); if (flush) { journal->j_next_full_flush = 1; } unlock_journal(sb); /* sleep while the current transaction is still j_jlocked */ while (journal->j_trans_id == trans_id) { if (atomic_read(&journal->j_jlock)) { queue_log_writer(sb); } else { lock_journal(sb); if (journal->j_trans_id == trans_id) { atomic_set(&(journal->j_jlock), 1); } unlock_journal(sb); } } BUG_ON(journal->j_trans_id == trans_id); if (commit_now && journal_list_still_alive(sb, trans_id) && wait_on_commit) { flush_commit_list(sb, jl, 1); } return 0; } unlock_journal(sb); return 0; } /* deal with old transactions where we are the last writers */ now = get_seconds(); if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) { commit_now = 1; journal->j_next_async_flush = 1; } /* don't batch when someone is waiting on j_join_wait */ /* don't batch when syncing the commit or flushing the whole trans */ if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock))) && !flush && !commit_now && (journal->j_len < journal->j_max_batch) && journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) { journal->j_bcount++; unlock_journal(sb); return 0; } if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) { reiserfs_panic(sb, "journal-003", "j_start (%ld) is too high", journal->j_start); } return 1; } /* ** Does all the work that makes deleting blocks safe. ** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on. ** ** otherwise: ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes ** before this transaction has finished. 
** ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with ** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash, ** the block can't be reallocated yet. ** ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list. */ int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *sb, b_blocknr_t blocknr) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn = NULL; struct buffer_head *bh = NULL; struct reiserfs_list_bitmap *jb = NULL; int cleaned = 0; BUG_ON(!th->t_trans_id); cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr); if (cn && cn->bh) { bh = cn->bh; get_bh(bh); } /* if it is journal new, we just remove it from this transaction */ if (bh && buffer_journal_new(bh)) { clear_buffer_journal_new(bh); clear_prepared_bits(bh); reiserfs_clean_and_file_buffer(bh); cleaned = remove_from_transaction(sb, blocknr, cleaned); } else { /* set the bit for this block in the journal bitmap for this transaction */ jb = journal->j_current_jl->j_list_bitmap; if (!jb) { reiserfs_panic(sb, "journal-1702", "journal_list_bitmap is NULL"); } set_bit_in_list_bitmap(sb, blocknr, jb); /* Note, the entire while loop is not allowed to schedule. 
*/ if (bh) { clear_prepared_bits(bh); reiserfs_clean_and_file_buffer(bh); } cleaned = remove_from_transaction(sb, blocknr, cleaned); /* find all older transactions with this block, make sure they don't try to write it out */ cn = get_journal_hash_dev(sb, journal->j_list_hash_table, blocknr); while (cn) { if (sb == cn->sb && blocknr == cn->blocknr) { set_bit(BLOCK_FREED, &cn->state); if (cn->bh) { if (!cleaned) { /* remove_from_transaction will brelse the buffer if it was ** in the current trans */ clear_buffer_journal_dirty(cn-> bh); clear_buffer_dirty(cn->bh); clear_buffer_journal_test(cn-> bh); cleaned = 1; put_bh(cn->bh); if (atomic_read (&(cn->bh->b_count)) < 0) { reiserfs_warning(sb, "journal-2138", "cn->bh->b_count < 0"); } } if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */ atomic_dec(& (cn->jlist-> j_nonzerolen)); } cn->bh = NULL; } } cn = cn->hnext; } } if (bh) release_buffer_page(bh); /* get_hash grabs the buffer */ return 0; } void reiserfs_update_inode_transaction(struct inode *inode) { struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb); REISERFS_I(inode)->i_jl = journal->j_current_jl; REISERFS_I(inode)->i_trans_id = journal->j_trans_id; } /* * returns -1 on error, 0 if no commits/barriers were done and 1 * if a transaction was actually committed and the barrier was done */ static int __commit_trans_jl(struct inode *inode, unsigned long id, struct reiserfs_journal_list *jl) { struct reiserfs_transaction_handle th; struct super_block *sb = inode->i_sb; struct reiserfs_journal *journal = SB_JOURNAL(sb); int ret = 0; /* is it from the current transaction, or from an unknown transaction? 
*/ if (id == journal->j_trans_id) { jl = journal->j_current_jl; /* try to let other writers come in and grow this transaction */ let_transaction_grow(sb, id); if (journal->j_trans_id != id) { goto flush_commit_only; } ret = journal_begin(&th, sb, 1); if (ret) return ret; /* someone might have ended this transaction while we joined */ if (journal->j_trans_id != id) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)); ret = journal_end(&th, sb, 1); goto flush_commit_only; } ret = journal_end_sync(&th, sb, 1); if (!ret) ret = 1; } else { /* this gets tricky, we have to make sure the journal list in * the inode still exists. We know the list is still around * if we've got a larger transaction id than the oldest list */ flush_commit_only: if (journal_list_still_alive(inode->i_sb, id)) { /* * we only set ret to 1 when we know for sure * the barrier hasn't been started yet on the commit * block. */ if (atomic_read(&jl->j_commit_left) > 1) ret = 1; flush_commit_list(sb, jl, 1); if (journal->j_errno) ret = journal->j_errno; } } /* otherwise the list is gone, and long since committed */ return ret; } int reiserfs_commit_for_inode(struct inode *inode) { unsigned int id = REISERFS_I(inode)->i_trans_id; struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl; /* for the whole inode, assume unset id means it was * changed in the current transaction. 
More conservative */ if (!id || !jl) { reiserfs_update_inode_transaction(inode); id = REISERFS_I(inode)->i_trans_id; /* jl will be updated in __commit_trans_jl */ } return __commit_trans_jl(inode, id, jl); } void reiserfs_restore_prepared_buffer(struct super_block *sb, struct buffer_head *bh) { struct reiserfs_journal *journal = SB_JOURNAL(sb); PROC_INFO_INC(sb, journal.restore_prepared); if (!bh) { return; } if (test_clear_buffer_journal_restore_dirty(bh) && buffer_journal_dirty(bh)) { struct reiserfs_journal_cnode *cn; cn = get_journal_hash_dev(sb, journal->j_list_hash_table, bh->b_blocknr); if (cn && can_dirty(cn)) { set_buffer_journal_test(bh); mark_buffer_dirty(bh); } } clear_buffer_journal_prepared(bh); } extern struct tree_balance *cur_tb; /* ** before we can change a metadata block, we have to make sure it won't ** be written to disk while we are altering it. So, we must: ** clean it ** wait on it. ** */ int reiserfs_prepare_for_journal(struct super_block *sb, struct buffer_head *bh, int wait) { PROC_INFO_INC(sb, journal.prepare); if (!trylock_buffer(bh)) { if (!wait) return 0; lock_buffer(bh); } set_buffer_journal_prepared(bh); if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) { clear_buffer_journal_test(bh); set_buffer_journal_restore_dirty(bh); } unlock_buffer(bh); return 1; } static void flush_old_journal_lists(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct reiserfs_journal_list *jl; struct list_head *entry; time_t now = get_seconds(); while (!list_empty(&journal->j_journal_list)) { entry = journal->j_journal_list.next; jl = JOURNAL_LIST_ENTRY(entry); /* this check should always be run, to send old lists to disk */ if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) && atomic_read(&jl->j_commit_left) == 0 && test_transaction(s, jl)) { flush_used_journal_lists(s, jl); } else { break; } } } /* ** long and ugly. 
If flush, will not return until all commit ** blocks and all real buffers in the trans are on disk. ** If no_async, won't return until all commit blocks are on disk. ** ** keep reading, there are comments as you go along ** ** If the journal is aborted, we just clean up. Things like flushing ** journal lists, etc just won't happen. */ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks, int flags) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn, *next, *jl_cn; struct reiserfs_journal_cnode *last_cn = NULL; struct reiserfs_journal_desc *desc; struct reiserfs_journal_commit *commit; struct buffer_head *c_bh; /* commit bh */ struct buffer_head *d_bh; /* desc bh */ int cur_write_start = 0; /* start index of current log write */ int old_start; int i; int flush; int wait_on_commit; struct reiserfs_journal_list *jl, *temp_jl; struct list_head *entry, *safe; unsigned long jindex; unsigned int commit_trans_id; int trans_half; BUG_ON(th->t_refcount > 1); BUG_ON(!th->t_trans_id); /* protect flush_older_commits from doing mistakes if the transaction ID counter gets overflowed. 
*/ if (th->t_trans_id == ~0U) flags |= FLUSH_ALL | COMMIT_NOW | WAIT; flush = flags & FLUSH_ALL; wait_on_commit = flags & WAIT; put_fs_excl(); current->journal_info = th->t_handle_save; reiserfs_check_lock_depth(sb, "journal end"); if (journal->j_len == 0) { reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1); journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb)); } lock_journal(sb); if (journal->j_next_full_flush) { flags |= FLUSH_ALL; flush = 1; } if (journal->j_next_async_flush) { flags |= COMMIT_NOW | WAIT; wait_on_commit = 1; } /* check_journal_end locks the journal, and unlocks if it does not return 1 ** it tells us if we should continue with the journal_end, or just return */ if (!check_journal_end(th, sb, nblocks, flags)) { sb->s_dirt = 1; wake_queued_writers(sb); reiserfs_async_progress_wait(sb); goto out; } /* check_journal_end might set these, check again */ if (journal->j_next_full_flush) { flush = 1; } /* ** j must wait means we have to flush the log blocks, and the real blocks for ** this transaction */ if (journal->j_must_wait > 0) { flush = 1; } #ifdef REISERFS_PREALLOCATE /* quota ops might need to nest, setup the journal_info pointer for them * and raise the refcount so that it is > 0. */ current->journal_info = th; th->t_refcount++; reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into * the transaction */ th->t_refcount--; current->journal_info = th->t_handle_save; #endif /* setup description block */ d_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start); set_buffer_uptodate(d_bh); desc = (struct reiserfs_journal_desc *)(d_bh)->b_data; memset(d_bh->b_data, 0, d_bh->b_size); memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8); set_desc_trans_id(desc, journal->j_trans_id); /* setup commit block. 
Don't write (keep it clean too) this one until after everyone else is written */ c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((journal->j_start + journal->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(sb))); commit = (struct reiserfs_journal_commit *)c_bh->b_data; memset(c_bh->b_data, 0, c_bh->b_size); set_commit_trans_id(commit, journal->j_trans_id); set_buffer_uptodate(c_bh); /* init this journal list */ jl = journal->j_current_jl; /* we lock the commit before doing anything because * we want to make sure nobody tries to run flush_commit_list until * the new transaction is fully setup, and we've already flushed the * ordered bh list */ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb); /* save the transaction id in case we need to commit it later */ commit_trans_id = jl->j_trans_id; atomic_set(&jl->j_older_commits_done, 0); jl->j_trans_id = journal->j_trans_id; jl->j_timestamp = journal->j_trans_start_time; jl->j_commit_bh = c_bh; jl->j_start = journal->j_start; jl->j_len = journal->j_len; atomic_set(&jl->j_nonzerolen, journal->j_len); atomic_set(&jl->j_commit_left, journal->j_len + 2); jl->j_realblock = NULL; /* The ENTIRE FOR LOOP MUST not cause schedule to occur. 
** for each real block, add it to the journal list hash, ** copy into real block index array in the commit or desc block */ trans_half = journal_trans_half(sb->s_blocksize); for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) { if (buffer_journaled(cn->bh)) { jl_cn = get_cnode(sb); if (!jl_cn) { reiserfs_panic(sb, "journal-1676", "get_cnode returned NULL"); } if (i == 0) { jl->j_realblock = jl_cn; } jl_cn->prev = last_cn; jl_cn->next = NULL; if (last_cn) { last_cn->next = jl_cn; } last_cn = jl_cn; /* make sure the block we are trying to log is not a block of journal or reserved area */ if (is_block_in_log_or_reserved_area (sb, cn->bh->b_blocknr)) { reiserfs_panic(sb, "journal-2332", "Trying to log block %lu, " "which is a log block", cn->bh->b_blocknr); } jl_cn->blocknr = cn->bh->b_blocknr; jl_cn->state = 0; jl_cn->sb = sb; jl_cn->bh = cn->bh; jl_cn->jlist = jl; insert_journal_hash(journal->j_list_hash_table, jl_cn); if (i < trans_half) { desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr); } else { commit->j_realblock[i - trans_half] = cpu_to_le32(cn->bh->b_blocknr); } } else { i--; } } set_desc_trans_len(desc, journal->j_len); set_desc_mount_id(desc, journal->j_mount_id); set_desc_trans_id(desc, journal->j_trans_id); set_commit_trans_len(commit, journal->j_len); /* special check in case all buffers in the journal were marked for not logging */ BUG_ON(journal->j_len == 0); /* we're about to dirty all the log blocks, mark the description block * dirty now too. Don't mark the commit block dirty until all the * others are on disk */ mark_buffer_dirty(d_bh); /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */ cur_write_start = journal->j_start; cn = journal->j_first; jindex = 1; /* start at one so we don't get the desc again */ while (cn) { clear_buffer_journal_new(cn->bh); /* copy all the real blocks into log area. 
dirty log blocks */ if (buffer_journaled(cn->bh)) { struct buffer_head *tmp_bh; char *addr; struct page *page; tmp_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(sb))); set_buffer_uptodate(tmp_bh); page = cn->bh->b_page; addr = kmap(page); memcpy(tmp_bh->b_data, addr + offset_in_page(cn->bh->b_data), cn->bh->b_size); kunmap(page); mark_buffer_dirty(tmp_bh); jindex++; set_buffer_journal_dirty(cn->bh); clear_buffer_journaled(cn->bh); } else { /* JDirty cleared sometime during transaction. don't log this one */ reiserfs_warning(sb, "journal-2048", "BAD, buffer in journal hash, " "but not JDirty!"); brelse(cn->bh); } next = cn->next; free_cnode(sb, cn); cn = next; reiserfs_write_unlock(sb); cond_resched(); reiserfs_write_lock(sb); } /* we are done with both the c_bh and d_bh, but ** c_bh must be written after all other commit blocks, ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1. */ journal->j_current_jl = alloc_journal_list(sb); /* now it is safe to insert this transaction on the main list */ list_add_tail(&jl->j_list, &journal->j_journal_list); list_add_tail(&jl->j_working_list, &journal->j_working_list); journal->j_num_work_lists++; /* reset journal values for the next transaction */ old_start = journal->j_start; journal->j_start = (journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(sb); atomic_set(&(journal->j_wcount), 0); journal->j_bcount = 0; journal->j_last = NULL; journal->j_first = NULL; journal->j_len = 0; journal->j_trans_start_time = 0; /* check for trans_id overflow */ if (++journal->j_trans_id == 0) journal->j_trans_id = 10; journal->j_current_jl->j_trans_id = journal->j_trans_id; journal->j_must_wait = 0; journal->j_len_alloc = 0; journal->j_next_full_flush = 0; journal->j_next_async_flush = 0; init_journal_hash(sb); // make sure reiserfs_add_jh sees the new current_jl before we // write out the tails smp_mb(); /* tail conversion targets have to hit the 
disk before we end the * transaction. Otherwise a later transaction might repack the tail * before this transaction commits, leaving the data block unflushed and * clean, if we crash before the later transaction commits, the data block * is lost. */ if (!list_empty(&jl->j_tail_bh_list)) { reiserfs_write_unlock(sb); write_ordered_buffers(&journal->j_dirty_buffers_lock, journal, jl, &jl->j_tail_bh_list); reiserfs_write_lock(sb); } BUG_ON(!list_empty(&jl->j_tail_bh_list)); mutex_unlock(&jl->j_commit_mutex); /* honor the flush wishes from the caller, simple commits can ** be done outside the journal lock, they are done below ** ** if we don't flush the commit list right now, we put it into ** the work queue so the people waiting on the async progress work ** queue don't wait for this proc to flush journal lists and such. */ if (flush) { flush_commit_list(sb, jl, 1); flush_journal_list(sb, jl, 1); } else if (!(jl->j_state & LIST_COMMIT_PENDING)) queue_delayed_work(commit_wq, &journal->j_work, HZ / 10); /* if the next transaction has any chance of wrapping, flush ** transactions that might get overwritten. If any journal lists are very ** old flush them as well. 
*/ first_jl: list_for_each_safe(entry, safe, &journal->j_journal_list) { temp_jl = JOURNAL_LIST_ENTRY(entry); if (journal->j_start <= temp_jl->j_start) { if ((journal->j_start + journal->j_trans_max + 1) >= temp_jl->j_start) { flush_used_journal_lists(sb, temp_jl); goto first_jl; } else if ((journal->j_start + journal->j_trans_max + 1) < SB_ONDISK_JOURNAL_SIZE(sb)) { /* if we don't cross into the next transaction and we don't * wrap, there is no way we can overlap any later transactions * break now */ break; } } else if ((journal->j_start + journal->j_trans_max + 1) > SB_ONDISK_JOURNAL_SIZE(sb)) { if (((journal->j_start + journal->j_trans_max + 1) % SB_ONDISK_JOURNAL_SIZE(sb)) >= temp_jl->j_start) { flush_used_journal_lists(sb, temp_jl); goto first_jl; } else { /* we don't overlap anything from out start to the end of the * log, and our wrapped portion doesn't overlap anything at * the start of the log. We can break */ break; } } } flush_old_journal_lists(sb); journal->j_current_jl->j_list_bitmap = get_list_bitmap(sb, journal->j_current_jl); if (!(journal->j_current_jl->j_list_bitmap)) { reiserfs_panic(sb, "journal-1996", "could not get a list bitmap"); } atomic_set(&(journal->j_jlock), 0); unlock_journal(sb); /* wake up any body waiting to join. */ clear_bit(J_WRITERS_QUEUED, &journal->j_state); wake_up(&(journal->j_join_wait)); if (!flush && wait_on_commit && journal_list_still_alive(sb, commit_trans_id)) { flush_commit_list(sb, jl, 1); } out: reiserfs_check_lock_depth(sb, "journal end2"); memset(th, 0, sizeof(*th)); /* Re-set th->t_super, so we can properly keep track of how many * persistent transactions there are. 
We need to do this so if this * call is part of a failed restart_transaction, we can free it later */ th->t_super = sb; return journal->j_errno; } /* Send the file system read only and refuse new transactions */ void reiserfs_abort_journal(struct super_block *sb, int errno) { struct reiserfs_journal *journal = SB_JOURNAL(sb); if (test_bit(J_ABORTED, &journal->j_state)) return; if (!journal->j_errno) journal->j_errno = errno; sb->s_flags |= MS_RDONLY; set_bit(J_ABORTED, &journal->j_state); #ifdef CONFIG_REISERFS_CHECK dump_stack(); #endif }
GaloisInc/linux-deadline
fs/reiserfs/journal.c
C
gpl-2.0
125,360
/* Kernel module to match connection tracking byte counter. * GPL (C) 2002 Martin Devera (devik@cdi.cz). */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_connbytes.h> #include <net/netfilter/nf_conntrack.h> #include <asm/div64.h> #include <asm/bitops.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection"); MODULE_ALIAS("ipt_connbytes"); static bool match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct xt_match *match, const void *matchinfo, int offset, unsigned int protoff, bool *hotdrop) { const struct xt_connbytes_info *sinfo = matchinfo; const struct nf_conn *ct; enum ip_conntrack_info ctinfo; u_int64_t what = 0; /* initialize to make gcc happy */ u_int64_t bytes = 0; u_int64_t pkts = 0; const struct ip_conntrack_counter *counters; ct = nf_ct_get(skb, &ctinfo); if (!ct) return false; counters = ct->counters; switch (sinfo->what) { case XT_CONNBYTES_PKTS: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: what = counters[IP_CT_DIR_ORIGINAL].packets; break; case XT_CONNBYTES_DIR_REPLY: what = counters[IP_CT_DIR_REPLY].packets; break; case XT_CONNBYTES_DIR_BOTH: what = counters[IP_CT_DIR_ORIGINAL].packets; what += counters[IP_CT_DIR_REPLY].packets; break; } break; case XT_CONNBYTES_BYTES: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: what = counters[IP_CT_DIR_ORIGINAL].bytes; break; case XT_CONNBYTES_DIR_REPLY: what = counters[IP_CT_DIR_REPLY].bytes; break; case XT_CONNBYTES_DIR_BOTH: what = counters[IP_CT_DIR_ORIGINAL].bytes; what += counters[IP_CT_DIR_REPLY].bytes; break; } break; case XT_CONNBYTES_AVGPKT: switch (sinfo->direction) { case XT_CONNBYTES_DIR_ORIGINAL: bytes = counters[IP_CT_DIR_ORIGINAL].bytes; pkts = counters[IP_CT_DIR_ORIGINAL].packets; break; case XT_CONNBYTES_DIR_REPLY: bytes = 
counters[IP_CT_DIR_REPLY].bytes; pkts = counters[IP_CT_DIR_REPLY].packets; break; case XT_CONNBYTES_DIR_BOTH: bytes = counters[IP_CT_DIR_ORIGINAL].bytes + counters[IP_CT_DIR_REPLY].bytes; pkts = counters[IP_CT_DIR_ORIGINAL].packets + counters[IP_CT_DIR_REPLY].packets; break; } if (pkts != 0) what = div64_64(bytes, pkts); break; } if (sinfo->count.to) return what <= sinfo->count.to && what >= sinfo->count.from; else return what >= sinfo->count.from; } static bool check(const char *tablename, const void *ip, const struct xt_match *match, void *matchinfo, unsigned int hook_mask) { const struct xt_connbytes_info *sinfo = matchinfo; if (sinfo->what != XT_CONNBYTES_PKTS && sinfo->what != XT_CONNBYTES_BYTES && sinfo->what != XT_CONNBYTES_AVGPKT) return false; if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL && sinfo->direction != XT_CONNBYTES_DIR_REPLY && sinfo->direction != XT_CONNBYTES_DIR_BOTH) return false; if (nf_ct_l3proto_try_module_get(match->family) < 0) { printk(KERN_WARNING "can't load conntrack support for " "proto=%d\n", match->family); return false; } return true; } static void destroy(const struct xt_match *match, void *matchinfo) { nf_ct_l3proto_module_put(match->family); } static struct xt_match xt_connbytes_match[] __read_mostly = { { .name = "connbytes", .family = AF_INET, .checkentry = check, .match = match, .destroy = destroy, .matchsize = sizeof(struct xt_connbytes_info), .me = THIS_MODULE }, { .name = "connbytes", .family = AF_INET6, .checkentry = check, .match = match, .destroy = destroy, .matchsize = sizeof(struct xt_connbytes_info), .me = THIS_MODULE }, }; static int __init xt_connbytes_init(void) { return xt_register_matches(xt_connbytes_match, ARRAY_SIZE(xt_connbytes_match)); } static void __exit xt_connbytes_fini(void) { xt_unregister_matches(xt_connbytes_match, ARRAY_SIZE(xt_connbytes_match)); } module_init(xt_connbytes_init); module_exit(xt_connbytes_fini);
xmyth/linux-mips-osolution
net/netfilter/xt_connbytes.c
C
gpl-2.0
4,221
/** * @license Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved. * For licensing, see LICENSE.html or http://ckeditor.com/license */ (function() { function noBlockLeft( bqBlock ) { for ( var i = 0, length = bqBlock.getChildCount(), child; i < length && ( child = bqBlock.getChild( i ) ); i++ ) { if ( child.type == CKEDITOR.NODE_ELEMENT && child.isBlockBoundary() ) return false; } return true; } var commandObject = { exec: function( editor ) { var state = editor.getCommand( 'blockquote' ).state, selection = editor.getSelection(), range = selection && selection.getRanges( true )[ 0 ]; if ( !range ) return; var bookmarks = selection.createBookmarks(); // Kludge for #1592: if the bookmark nodes are in the beginning of // blockquote, then move them to the nearest block element in the // blockquote. if ( CKEDITOR.env.ie ) { var bookmarkStart = bookmarks[ 0 ].startNode, bookmarkEnd = bookmarks[ 0 ].endNode, cursor; if ( bookmarkStart && bookmarkStart.getParent().getName() == 'blockquote' ) { cursor = bookmarkStart; while ( ( cursor = cursor.getNext() ) ) { if ( cursor.type == CKEDITOR.NODE_ELEMENT && cursor.isBlockBoundary() ) { bookmarkStart.move( cursor, true ); break; } } } if ( bookmarkEnd && bookmarkEnd.getParent().getName() == 'blockquote' ) { cursor = bookmarkEnd; while ( ( cursor = cursor.getPrevious() ) ) { if ( cursor.type == CKEDITOR.NODE_ELEMENT && cursor.isBlockBoundary() ) { bookmarkEnd.move( cursor ); break; } } } } var iterator = range.createIterator(), block; iterator.enlargeBr = editor.config.enterMode != CKEDITOR.ENTER_BR; if ( state == CKEDITOR.TRISTATE_OFF ) { var paragraphs = []; while ( ( block = iterator.getNextParagraph() ) ) paragraphs.push( block ); // If no paragraphs, create one from the current selection position. if ( paragraphs.length < 1 ) { var para = editor.document.createElement( editor.config.enterMode == CKEDITOR.ENTER_P ? 
'p' : 'div' ), firstBookmark = bookmarks.shift(); range.insertNode( para ); para.append( new CKEDITOR.dom.text( '\ufeff', editor.document ) ); range.moveToBookmark( firstBookmark ); range.selectNodeContents( para ); range.collapse( true ); firstBookmark = range.createBookmark(); paragraphs.push( para ); bookmarks.unshift( firstBookmark ); } // Make sure all paragraphs have the same parent. var commonParent = paragraphs[ 0 ].getParent(), tmp = []; for ( var i = 0; i < paragraphs.length; i++ ) { block = paragraphs[ i ]; commonParent = commonParent.getCommonAncestor( block.getParent() ); } // The common parent must not be the following tags: table, tbody, tr, ol, ul. var denyTags = { table:1,tbody:1,tr:1,ol:1,ul:1 }; while ( denyTags[ commonParent.getName() ] ) commonParent = commonParent.getParent(); // Reconstruct the block list to be processed such that all resulting blocks // satisfy parentNode.equals( commonParent ). var lastBlock = null; while ( paragraphs.length > 0 ) { block = paragraphs.shift(); while ( !block.getParent().equals( commonParent ) ) block = block.getParent(); if ( !block.equals( lastBlock ) ) tmp.push( block ); lastBlock = block; } // If any of the selected blocks is a blockquote, remove it to prevent // nested blockquotes. while ( tmp.length > 0 ) { block = tmp.shift(); if ( block.getName() == 'blockquote' ) { var docFrag = new CKEDITOR.dom.documentFragment( editor.document ); while ( block.getFirst() ) { docFrag.append( block.getFirst().remove() ); paragraphs.push( docFrag.getLast() ); } docFrag.replace( block ); } else paragraphs.push( block ); } // Now we have all the blocks to be included in a new blockquote node. 
var bqBlock = editor.document.createElement( 'blockquote' ); bqBlock.insertBefore( paragraphs[ 0 ] ); while ( paragraphs.length > 0 ) { block = paragraphs.shift(); bqBlock.append( block ); } } else if ( state == CKEDITOR.TRISTATE_ON ) { var moveOutNodes = [], database = {}; while ( ( block = iterator.getNextParagraph() ) ) { var bqParent = null, bqChild = null; while ( block.getParent() ) { if ( block.getParent().getName() == 'blockquote' ) { bqParent = block.getParent(); bqChild = block; break; } block = block.getParent(); } // Remember the blocks that were recorded down in the moveOutNodes array // to prevent duplicates. if ( bqParent && bqChild && !bqChild.getCustomData( 'blockquote_moveout' ) ) { moveOutNodes.push( bqChild ); CKEDITOR.dom.element.setMarker( database, bqChild, 'blockquote_moveout', true ); } } CKEDITOR.dom.element.clearAllMarkers( database ); var movedNodes = [], processedBlockquoteBlocks = []; database = {}; while ( moveOutNodes.length > 0 ) { var node = moveOutNodes.shift(); bqBlock = node.getParent(); // If the node is located at the beginning or the end, just take it out // without splitting. Otherwise, split the blockquote node and move the // paragraph in between the two blockquote nodes. if ( !node.getPrevious() ) node.remove().insertBefore( bqBlock ); else if ( !node.getNext() ) node.remove().insertAfter( bqBlock ); else { node.breakParent( node.getParent() ); processedBlockquoteBlocks.push( node.getNext() ); } // Remember the blockquote node so we can clear it later (if it becomes empty). if ( !bqBlock.getCustomData( 'blockquote_processed' ) ) { processedBlockquoteBlocks.push( bqBlock ); CKEDITOR.dom.element.setMarker( database, bqBlock, 'blockquote_processed', true ); } movedNodes.push( node ); } CKEDITOR.dom.element.clearAllMarkers( database ); // Clear blockquote nodes that have become empty. 
for ( i = processedBlockquoteBlocks.length - 1; i >= 0; i-- ) { bqBlock = processedBlockquoteBlocks[ i ]; if ( noBlockLeft( bqBlock ) ) bqBlock.remove(); } if ( editor.config.enterMode == CKEDITOR.ENTER_BR ) { var firstTime = true; while ( movedNodes.length ) { node = movedNodes.shift(); if ( node.getName() == 'div' ) { docFrag = new CKEDITOR.dom.documentFragment( editor.document ); var needBeginBr = firstTime && node.getPrevious() && !( node.getPrevious().type == CKEDITOR.NODE_ELEMENT && node.getPrevious().isBlockBoundary() ); if ( needBeginBr ) docFrag.append( editor.document.createElement( 'br' ) ); var needEndBr = node.getNext() && !( node.getNext().type == CKEDITOR.NODE_ELEMENT && node.getNext().isBlockBoundary() ); while ( node.getFirst() ) node.getFirst().remove().appendTo( docFrag ); if ( needEndBr ) docFrag.append( editor.document.createElement( 'br' ) ); docFrag.replace( node ); firstTime = false; } } } } selection.selectBookmarks( bookmarks ); editor.focus(); }, refresh: function( editor, path ) { // Check if inside of blockquote. var firstBlock = path.block || path.blockLimit; this.setState( editor.elementPath( firstBlock ).contains( 'blockquote', 1 ) ? CKEDITOR.TRISTATE_ON : CKEDITOR.TRISTATE_OFF ); }, context: 'blockquote' }; CKEDITOR.plugins.add( 'blockquote', { lang: 'af,ar,bg,bn,bs,ca,cs,cy,da,de,el,en-au,en-ca,en-gb,en,eo,es,et,eu,fa,fi,fo,fr-ca,fr,gl,gu,he,hi,hr,hu,is,it,ja,ka,km,ko,ku,lt,lv,mk,mn,ms,nb,nl,no,pl,pt-br,pt,ro,ru,sk,sl,sr-latn,sr,sv,th,tr,ug,uk,vi,zh-cn,zh', // %REMOVE_LINE_CORE% icons: 'blockquote', // %REMOVE_LINE_CORE% init: function( editor ) { if ( editor.blockless ) return; editor.addCommand( 'blockquote', commandObject ); editor.ui.addButton && editor.ui.addButton( 'Blockquote', { label: editor.lang.blockquote.toolbar, command: 'blockquote', toolbar: 'blocks,10' }); } }); })();
nagyistoce/rt
devel/third-party/ckeditor/plugins/blockquote/plugin.js
JavaScript
gpl-2.0
8,486
<?php //======================================================================= // File: JPGRAPH_ICONPLOT.PHP // Description: PHP4 Graph Plotting library. Extension module. // Created: 2004-02-18 // Author: Johan Persson (johanp@aditus.nu) // Ver: $Id: jpgraph_iconplot.php 521 2006-02-08 21:22:09Z ljp $ // // Copyright (c) Aditus Consulting. All rights reserved. //======================================================================== //=================================================== // CLASS IconPlot // Description: Make it possible to add a (small) image // to the graph //=================================================== class IconPlot { var $iHorAnchor='left',$iVertAnchor='top'; var $iX=0,$iY=0; var $iFile=''; var $iScale=1.0,$iMix=100; var $iAnchors = array('left','right','top','bottom','center'); var $iCountryFlag='',$iCountryStdSize=3; var $iScalePosY=null,$iScalePosX=null; var $iImgString=''; function IconPlot($aFile="",$aX=0,$aY=0,$aScale=1.0,$aMix=100) { $this->iFile = $aFile; $this->iX=$aX; $this->iY=$aY; $this->iScale= $aScale; if( $aMix < 0 || $aMix > 100 ) { JpGraphError::RaiseL(8001); //('Mix value for icon must be between 0 and 100.'); } $this->iMix = $aMix ; } function CreateFromString($aStr) { $this->iImgString = $aStr; } function SetCountryFlag($aFlag,$aX=0,$aY=0,$aScale=1.0,$aMix=100,$aStdSize=3) { $this->iCountryFlag = $aFlag; $this->iX=$aX; $this->iY=$aY; $this->iScale= $aScale; if( $aMix < 0 || $aMix > 100 ) { JpGraphError::RaiseL(8001);//'Mix value for icon must be between 0 and 100.'); } $this->iMix = $aMix; $this->iCountryStdSize = $aStdSize; } function SetPos($aX,$aY) { $this->iX=$aX; $this->iY=$aY; } function SetScalePos($aX,$aY) { $this->iScalePosX = $aX; $this->iScalePosY = $aY; } function SetScale($aScale) { $this->iScale = $aScale; } function SetMix($aMix) { if( $aMix < 0 || $aMix > 100 ) { JpGraphError::RaiseL(8001);//('Mix value for icon must be between 0 and 100.'); } $this->iMix = $aMix ; } function 
SetAnchor($aXAnchor='left',$aYAnchor='center') { if( !in_array($aXAnchor,$this->iAnchors) || !in_array($aYAnchor,$this->iAnchors) ) { JpGraphError::RaiseL(8002);//("Anchor position for icons must be one of 'top', 'bottom', 'left', 'right' or 'center'"); } $this->iHorAnchor=$aXAnchor; $this->iVertAnchor=$aYAnchor; } function PreStrokeAdjust($aGraph) { // Nothing to do ... } function DoLegend($aGraph) { // Nothing to do ... } function Max() { return array(false,false); } // The next four function are framework function tht gets called // from Gantt and is not menaiungfull in the context of Icons but // they must be implemented to avoid errors. function GetMaxDate() { return false; } function GetMinDate() { return false; } function GetLineNbr() { return 0; } function GetAbsHeight() {return 0; } function Min() { return array(false,false); } function StrokeMargin(&$aImg) { return true; } function Stroke(&$aImg,$axscale,$ayscale) { $this->StrokeWithScale($aImg,$axscale,$ayscale); } function StrokeWithScale(&$aImg,$axscale,$ayscale) { if( $this->iScalePosX === null || $this->iScalePosY === null ) { $this->_Stroke($aImg); } else { $this->_Stroke($aImg, round($axscale->Translate($this->iScalePosX)), round($ayscale->Translate($this->iScalePosY))); } } function GetWidthHeight() { $dummy=0; return $this->_Stroke($dummy,null,null,true); } function _Stroke(&$aImg,$x=null,$y=null,$aReturnWidthHeight=false) { if( $this->iFile != '' && $this->iCountryFlag != '' ) { JpGraphError::RaiseL(8003);//('It is not possible to specify both an image file and a country flag for the same icon.'); } if( $this->iFile != '' ) { $gdimg = Graph::LoadBkgImage('',$this->iFile); } elseif( $this->iImgString != '') { $gdimg = Image::CreateFromString($this->iImgString); } else { if( ! 
class_exists('FlagImages') ) { JpGraphError::RaiseL(8004);//('In order to use Country flags as icons you must include the "jpgraph_flags.php" file.'); } $fobj = new FlagImages($this->iCountryStdSize); $dummy=''; $gdimg = $fobj->GetImgByName($this->iCountryFlag,$dummy); } $iconw = imagesx($gdimg); $iconh = imagesy($gdimg); if( $aReturnWidthHeight ) { return array(round($iconw*$this->iScale),round($iconh*$this->iScale)); } if( $x !== null && $y !== null ) { $this->iX = $x; $this->iY = $y; } if( $this->iX >= 0 && $this->iX <= 1.0 ) { $w = imagesx($aImg->img); $this->iX = round($w*$this->iX); } if( $this->iY >= 0 && $this->iY <= 1.0 ) { $h = imagesy($aImg->img); $this->iY = round($h*$this->iY); } if( $this->iHorAnchor == 'center' ) $this->iX -= round($iconw*$this->iScale/2); if( $this->iHorAnchor == 'right' ) $this->iX -= round($iconw*$this->iScale); if( $this->iVertAnchor == 'center' ) $this->iY -= round($iconh*$this->iScale/2); if( $this->iVertAnchor == 'bottom' ) $this->iY -= round($iconh*$this->iScale); $aImg->CopyMerge($gdimg,$this->iX,$this->iY,0,0, round($iconw*$this->iScale),round($iconh*$this->iScale), $iconw,$iconh, $this->iMix); } } ?>
kallioli/php-syslog-ng
html/includes/jpgraph/jpgraph_iconplot.php
PHP
gpl-2.0
5,373
/* Copyright (C) 1997-2019 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely. */ #include "testnative.h" #ifdef TEST_NATIVE_WINDOWS static void *CreateWindowNative(int w, int h); static void DestroyWindowNative(void *window); NativeWindowFactory WindowsWindowFactory = { "windows", CreateWindowNative, DestroyWindowNative }; LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam) { switch (msg) { case WM_CLOSE: DestroyWindow(hwnd); break; case WM_DESTROY: PostQuitMessage(0); break; default: return DefWindowProc(hwnd, msg, wParam, lParam); } return 0; } static void * CreateWindowNative(int w, int h) { HWND hwnd; WNDCLASS wc; wc.style = 0; wc.lpfnWndProc = WndProc; wc.cbClsExtra = 0; wc.cbWndExtra = 0; wc.hInstance = GetModuleHandle(NULL); wc.hIcon = LoadIcon(NULL, IDI_APPLICATION); wc.hCursor = LoadCursor(NULL, IDC_ARROW); wc.hbrBackground = (HBRUSH) (COLOR_WINDOW + 1); wc.lpszMenuName = NULL; wc.lpszClassName = "SDL Test"; if (!RegisterClass(&wc)) { MessageBox(NULL, "Window Registration Failed!", "Error!", MB_ICONEXCLAMATION | MB_OK); return 0; } hwnd = CreateWindow("SDL Test", "", WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, w, h, NULL, NULL, GetModuleHandle(NULL), NULL); if (hwnd == NULL) { MessageBox(NULL, "Window Creation Failed!", "Error!", MB_ICONEXCLAMATION | MB_OK); return 0; } ShowWindow(hwnd, SW_SHOW); return hwnd; } static void DestroyWindowNative(void *window) { DestroyWindow((HWND) window); } #endif
h4mu/rott94
android/app/jni/SDL/test/testnativew32.c
C
gpl-2.0
2,073
<?php /** * Code related to the lastlogins-failed.php interface. * * PHP version 5 * * @category Library * @package Sucuri * @subpackage SucuriScanner * @author Daniel Cid <dcid@sucuri.net> * @copyright 2010-2018 Sucuri Inc. * @license https://www.gnu.org/licenses/gpl-2.0.txt GPL2 * @link https://wordpress.org/plugins/sucuri-scanner */ if (!defined('SUCURISCAN_INIT') || SUCURISCAN_INIT !== true) { if (!headers_sent()) { /* Report invalid access if possible. */ header('HTTP/1.1 403 Forbidden'); } exit(1); } /** * Print a list with the failed logins occurred during the last hour. * * @return string A list with the failed logins occurred during the last hour. */ function sucuriscan_failed_logins_panel() { $template_variables = array( 'FailedLogins.List' => '', 'FailedLogins.Total' => '', 'FailedLogins.MaxFailedLogins' => 0, 'FailedLogins.NoItemsVisibility' => 'visible', 'FailedLogins.WarningVisibility' => 'visible', 'FailedLogins.PaginationLinks' => '', 'FailedLogins.PaginationVisibility' => 'hidden', ); // Define variables for the pagination. $page_number = SucuriScanTemplate::pageNumber(); $max_per_page = SUCURISCAN_MAX_PAGINATION_BUTTONS; $page_offset = ($page_number - 1) * $max_per_page; $page_limit = ($page_offset + $max_per_page); // Clear failed login logins when delete button is pressed. 
if (SucuriScanInterface::checkNonce() && SucuriScanRequest::post(':delete_failedlogins')) { SucuriScanEvent::clearLastLogs('sucuri-failedlogins.php'); } $max_failed_logins = SucuriScanOption::getOption(':maximum_failed_logins'); $notify_bruteforce_attack = SucuriScanOption::getOption(':notify_bruteforce_attack'); $failed_logins = sucuriscan_get_all_failed_logins($page_offset, $max_per_page); if ($failed_logins) { $counter = 0; for ($key = $page_offset; $key < $page_limit; $key++) { if (array_key_exists($key, $failed_logins['entries'])) { $login_data = $failed_logins['entries'][ $key ]; if (!is_array($login_data)) { continue; } $template_variables['FailedLogins.List'] .= SucuriScanTemplate::getSnippet( 'lastlogins-failedlogins', array( 'FailedLogins.Num' => $login_data['attempt_count'], 'FailedLogins.Username' => $login_data['user_login'], 'FailedLogins.RemoteAddr' => $login_data['remote_addr'], 'FailedLogins.UserAgent' => $login_data['user_agent'], 'FailedLogins.Datetime' => SucuriScan::datetime($login_data['attempt_time']), ) ); $counter++; } } if ($counter > 0) { $template_variables['FailedLogins.NoItemsVisibility'] = 'hidden'; } $template_variables['FailedLogins.PaginationLinks'] = SucuriScanTemplate::pagination( '%%SUCURI.URL.Lastlogins%%#failed', $failed_logins['count'], $max_per_page ); if ($failed_logins['count'] > $max_per_page) { $template_variables['FailedLogins.PaginationVisibility'] = 'visible'; } } $template_variables['FailedLogins.MaxFailedLogins'] = $max_failed_logins; if ($notify_bruteforce_attack == 'enabled') { $template_variables['FailedLogins.WarningVisibility'] = 'hidden'; } return SucuriScanTemplate::getSection('lastlogins-failedlogins', $template_variables); } /** * Find the full path of the file where the information of the failed logins * will be stored, it will be created automatically if does not exists (and if * the destination folder has permissions to write). This method can also be * used to reset the content of the datastore file. 
* * @see sucuriscan_reset_failed_logins() * * @param bool $get_old_logs Whether the old logs will be retrieved or not. * @param bool $reset Whether the file will be resetted or not. * @return string|false Absolute path to the file. */ function sucuriscan_failed_logins_datastore_path($get_old_logs = false, $reset = false) { $file_name = $get_old_logs ? 'sucuri-oldfailedlogins.php' : 'sucuri-failedlogins.php'; $datastore_path = SucuriScan::dataStorePath($file_name); $default_content = sucuriscan_failed_logins_default_content(); // Create the file if it does not exists. if (!file_exists($datastore_path) || $reset) { @file_put_contents($datastore_path, $default_content, LOCK_EX); } // Return the datastore path if the file exists (or was created). if (is_readable($datastore_path)) { return $datastore_path; } return false; } /** * Default content of the datastore file where the failed logins are being kept. * * @return string Default content of the file. */ function sucuriscan_failed_logins_default_content() { return "<?php exit(0); ?>\n"; } /** * Returns failed logins data including old entries. * * @param int $offset Initial index to start the array. * @param int $limit Number of items in the returned array. * @return array|false Failed logins data. */ function sucuriscan_get_all_failed_logins($offset = 0, $limit = -1) { $all = array(); $new = sucuriscan_get_failed_logins(); $old = sucuriscan_get_failed_logins(true, $offset, $limit); if ($new && $old) { // Merge the new and old failed logins. 
$all = array(); $all['first_attempt'] = $old['first_attempt']; $all['last_attempt'] = $new['last_attempt']; $all['count'] = $new['count'] + $old['count']; $all['diff_time'] = abs($all['last_attempt'] - $all['first_attempt']); $all['entries'] = array_merge($new['entries'], $old['entries']); return $all; } elseif ($new && !$old) { return $new; } elseif (!$new && $old) { return $old; } return false; } /** * Read and parse the content of the datastore file where the failed logins are * being kept. This method will also calculate the difference in time between * the first and last login attempt registered in the file to later decide if * there is a brute-force attack in progress (and send an email notification * with the report) or reset the file after considering it a normal behavior of * the site. * * @param bool $get_old_logs Whether the old logs will be retrieved or not. * @param int $offset Array index from where to start collecting the data. * @param int $limit Number of items to insert into the returned array. * @return array|false Information and entries gathered from the failed logins datastore file. */ function sucuriscan_get_failed_logins($get_old_logs = false, $offset = 0, $limit = -1) { $datastore_path = sucuriscan_failed_logins_datastore_path($get_old_logs); if (!$datastore_path) { return false; } $lines = SucuriScanFileInfo::fileLines($datastore_path); if (!$lines) { return false; } $failed_logins = array( 'count' => 0, 'first_attempt' => 0, 'last_attempt' => 0, 'diff_time' => 0, 'entries' => array(), ); // Read and parse all the entries found in the datastore file. $initial = count($lines) - 1; $processed = 0; // Start from the newest entry in the file. for ($key = $initial; $key >= 0; $key--) { $line = trim($lines[ $key ]); // Skip lines that are clearly not JSON-encoded. if (substr($line, 0, 1) !== '{') { continue; } // Reduce the memory allocation by skipping unnecessary lines (LEFT). 
if ($limit > 0 && $failed_logins['count'] < $offset) { $failed_logins['entries'][] = $line; $failed_logins['count'] += 1; continue; } // Reduce the memory allocation by skipping unnecessary lines (RIGHT). if ($limit > 0 && $processed > $limit) { $failed_logins['entries'][] = $line; $failed_logins['count'] += 1; continue; } // Decode data only if necessary. $login_data = @json_decode($line, true); $processed++; /* count decoded data */ if (is_array($login_data)) { $login_data['attempt_count'] = ( $key + 1 ); if (!$login_data['user_agent']) { $login_data['user_agent'] = __('Unknown', 'sucuri-scanner'); } if (!isset($login_data['user_password'])) { $login_data['user_password'] = ''; } $failed_logins['entries'][] = $login_data; $failed_logins['count'] += 1; } } // Stop if the there is no data. if ($failed_logins['count'] <= 0) { return false; } // Calculate the different time between the first and last attempt. $idx = abs($failed_logins['count'] - 1); $last = $failed_logins['entries'][$idx]; $first = $failed_logins['entries'][0]; if (!is_array($last)) { /* In case the JSON is not decoded yet */ $last = @json_decode($last, true); } if (!is_array($first)) { /* In case the JSON is not decoded yet */ $first = @json_decode($first, true); } $failed_logins['last_attempt'] = $last['attempt_time']; $failed_logins['first_attempt'] = $first['attempt_time']; $failed_logins['diff_time'] = abs($last['attempt_time'] - $first['attempt_time']); return $failed_logins; } /** * Add a new entry in the datastore file where the failed logins are being kept, * this entry will contain the username, timestamp of the login attempt, remote * address of the computer sending the request, and the user-agent. * * @param string $user_login Information from the current failed login event. * @return bool True if the information was saved, false otherwise. 
*/ function sucuriscan_log_failed_login($user_login = '') { $storage = sucuriscan_failed_logins_datastore_path(); if (!$storage) { return false; } $login_data = json_encode( array( 'user_login' => $user_login, 'attempt_time' => time(), 'remote_addr' => SucuriScan::getRemoteAddr(), 'user_agent' => SucuriScan::getUserAgent(), ) ); return (bool) @file_put_contents( $storage, $login_data . "\n", FILE_APPEND ); } /** * Read and parse all the entries in the datastore file where the failed logins * are being kept, this will loop through all these items and generate a table * in HTML code to send as a report via email according to the plugin settings * for the email alerts. * * @param array $failed_logins Information gathered from the failed logins. * @return bool Whether the report was sent via email or not. */ function sucuriscan_report_failed_logins($failed_logins = array()) { if (!$failed_logins || !isset($failed_logins['count']) || $failed_logins['count'] < 1 ) { return false; } $mail_content = ''; $prettify_mails = SucuriScanMail::prettifyMails(); if ($prettify_mails) { $table_html = '<table border="1" cellspacing="0" cellpadding="0">'; // Add the table headers. $table_html .= '<thead>'; $table_html .= '<tr>'; $table_html .= '<th>' . __('Username', 'sucuri-scanner') . '</th>'; $table_html .= '<th>' . __('Password', 'sucuri-scanner') . '</th>'; $table_html .= '<th>' . __('IP Address', 'sucuri-scanner') . '</th>'; $table_html .= '<th>' . __('Attempt Timestamp', 'sucuri-scanner') . '</th>'; $table_html .= '<th>' . __('Attempt Date/Time', 'sucuri-scanner') . '</th>'; $table_html .= '</tr>'; $table_html .= '</thead>'; $table_html .= '<tbody>'; } foreach ($failed_logins['entries'] as $login_data) { $login_data['attempt_date'] = SucuriScan::datetime($login_data['attempt_time']); if ($prettify_mails) { $table_html .= '<tr>'; $table_html .= '<td>' . esc_attr($login_data['user_login']) . '</td>'; $table_html .= '<td>' . esc_attr($login_data['user_password']) . 
'</td>'; $table_html .= '<td>' . esc_attr($login_data['remote_addr']) . '</td>'; $table_html .= '<td>' . esc_attr($login_data['attempt_time']) . '</td>'; $table_html .= '<td>' . esc_attr($login_data['attempt_date']) . '</td>'; $table_html .= '</tr>'; } else { $mail_content .= "\n"; $mail_content .= __('Username', 'sucuri-scanner') . ":\x20" . $login_data['user_login'] . "\n"; $mail_content .= __('Password', 'sucuri-scanner') . ":\x20" . $login_data['user_password'] . "\n"; $mail_content .= __('IP Address', 'sucuri-scanner') . ":\x20" . $login_data['remote_addr'] . "\n"; $mail_content .= __('Attempt Timestamp', 'sucuri-scanner') . ":\x20" . $login_data['attempt_time'] . "\n"; $mail_content .= __('Attempt Date/Time', 'sucuri-scanner') . ":\x20" . $login_data['attempt_date'] . "\n"; } } if ($prettify_mails) { $table_html .= '</tbody>'; $table_html .= '</table>'; $mail_content = $table_html; } if (SucuriScanEvent::notifyEvent('bruteforce_attack', $mail_content)) { sucuriscan_reset_failed_logins(); return true; } return false; } /** * Remove all the entries in the datastore file where the failed logins are * being kept. The execution of this method will not delete the file (which is * likely the best move) but rather will clean its content and append the * default code defined by another method above. * * @return bool Whether the datastore file was resetted or not. */ function sucuriscan_reset_failed_logins() { $datastore_path = SucuriScan::dataStorePath('sucuri-failedlogins.php'); $datastore_backup_path = sucuriscan_failed_logins_datastore_path(true, false); $default_content = sucuriscan_failed_logins_default_content(); $current_content = @file_get_contents($datastore_path); $current_content = str_replace($default_content, '', $current_content); @file_put_contents( $datastore_backup_path, $current_content, FILE_APPEND ); return (bool) sucuriscan_failed_logins_datastore_path(false, true); }
Sucuri/sucuri-wordpress-plugin
src/lastlogins-failed.php
PHP
gpl-2.0
14,659
// ScrollTo Plugin 1.4.2 | Copyright (c) 2007-2009 Ariel Flesler | GPL/MIT License ;(function(d){var k=d.scrollTo=function(a,i,e){d(window).scrollTo(a,i,e)};k.defaults={axis:'xy',duration:parseFloat(d.fn.jquery)>=1.3?0:1};k.window=function(a){return d(window)._scrollable()};d.fn._scrollable=function(){return this.map(function(){var a=this,i=!a.nodeName||d.inArray(a.nodeName.toLowerCase(),['iframe','#document','html','body'])!=-1;if(!i)return a;var e=(a.contentWindow||a).document||a.ownerDocument||a;return d.browser.safari||e.compatMode=='BackCompat'?e.body:e.documentElement})};d.fn.scrollTo=function(n,j,b){if(typeof j=='object'){b=j;j=0}if(typeof b=='function')b={onAfter:b};if(n=='max')n=9e9;b=d.extend({},k.defaults,b);j=j||b.speed||b.duration;b.queue=b.queue&&b.axis.length>1;if(b.queue)j/=2;b.offset=p(b.offset);b.over=p(b.over);return this._scrollable().each(function(){var q=this,r=d(q),f=n,s,g={},u=r.is('html,body');switch(typeof f){case'number':case'string':if(/^([+-]=)?\d+(\.\d+)?(px|%)?$/.test(f)){f=p(f);break}f=d(f,this);case'object':if(f.is||f.style)s=(f=d(f)).offset()}d.each(b.axis.split(''),function(a,i){var e=i=='x'?'Left':'Top',h=e.toLowerCase(),c='scroll'+e,l=q[c],m=k.max(q,i);if(s){g[c]=s[h]+(u?0:l-r.offset()[h]);if(b.margin){g[c]-=parseInt(f.css('margin'+e))||0;g[c]-=parseInt(f.css('border'+e+'Width'))||0}g[c]+=b.offset[h]||0;if(b.over[h])g[c]+=f[i=='x'?'width':'height']()*b.over[h]}else{var o=f[h];g[c]=o.slice&&o.slice(-1)=='%'?parseFloat(o)/100*m:o}if(/^\d+$/.test(g[c]))g[c]=g[c]<=0?0:Math.min(g[c],m);if(!a&&b.queue){if(l!=g[c])t(b.onAfterFirst);delete g[c]}});t(b.onAfter);function t(a){r.animate(g,j,b.easing,a&&function(){a.call(this,n,b)})}}).end()};k.max=function(a,i){var e=i=='x'?'Width':'Height',h='scroll'+e;if(!d(a).is('html,body'))return a[h]-d(a)[e.toLowerCase()]();var c='client'+e,l=a.ownerDocument.documentElement,m=a.ownerDocument.body;return Math.max(l[h],m[h])-Math.min(l[c],m[c])};function p(a){return typeof 
a=='object'?a:{top:a,left:a}}})(jQuery); jetpack = { numModules: 0, container: null, arrow: null, linkClicked: null, resizeTimeout: null, resizeTimer: null, shadowTimer: null, statusText: null, isRTL: !( 'undefined' == typeof isRtl || !isRtl ), didDebug: false, init: function() { jetpack.numModules = jQuery( 'div.jetpack-module' ).not( '.placeholder' ).size(); jetpack.container = jQuery( 'div.module-container' ); jetpack.level_modules(); jetpack.level_placeholders(); jetpack.level_placeholders_on_resize(); jQuery( 'a.more-info-link', 'div.jetpack-module' ).bind( 'click', function(e) { e.preventDefault(); jetpack.hide_shadows(); if ( jetpack.linkClicked && jetpack.linkClicked.parents( 'div.jetpack-module' ).attr( 'id' ) == jQuery(this).parents( 'div.jetpack-module' ).attr( 'id' ) ) { jetpack.close_learn_more( function() { jetpack.show_shadows(); } ); } else { jetpack.linkClicked = jQuery(this); jetpack.insert_learn_more( jQuery(this).parents( 'div.jetpack-module' ), function() { jetpack.show_shadows(); } ); jQuery( 'a.jetpack-deactivate-button' ).hide(); jQuery( 'a.jetpack-configure-button' ).show(); jetpack.linkClicked.parents( 'div.jetpack-module' ).children( '.jetpack-module-actions' ).children( 'a.jetpack-deactivate-button' ).show(); jetpack.linkClicked.parents( 'div.jetpack-module' ).children( '.jetpack-module-actions' ).children( 'a.jetpack-configure-button' ).hide(); } } ); jQuery( window ).bind( 'resize', function() { jetpack.hide_shadows(); clearTimeout( jetpack.shadowTimer ); jetpack.shadowTimer = setTimeout( function() { jetpack.show_shadows(); }, 200 ); }); jQuery( 'a#jp-debug' ).bind( 'click', function(e) { e.preventDefault(); if ( !jetpack.didDebug ) { jetpack.didDebug = true; jQuery( '#jetpack-configuration' ).load( this.href, function() { jQuery.scrollTo( 'max', 'fast' ); } ); } jetpack.toggle_debug(); }); var widerWidth = 0; jQuery( '#jp-disconnect a' ).click( function() { if ( confirm( jetpackL10n.ays_disconnect ) ) { jQuery( this ).addClass( 
'clicked' ).css( { "background-image": 'url( ' + userSettings.url + 'wp-admin/images/wpspin_light.gif )', "background-position": '9px 5px', "background-size": '16px 16px' } ).unbind( 'click' ).click( function() { return false; } ); } else { return false; } } ); jQuery( '#jp-unlink a' ).click( function() { if ( confirm( jetpackL10n.ays_unlink ) ) { jQuery( this ).css( { "background-image": 'url( ' + userSettings.url + 'wp-admin/images/wpspin_light.gif )', "background-position": '9px 5px', "background-size": '16px 16px' } ).unbind( 'click' ).click( function() { return false; } ); } else { return false; } } ); }, level_modules: function() { var max_height = 0; // Get the tallest module card and set them all to be that tall. jQuery( 'div.jetpack-module', 'div.module-container' ).each( function() { max_height = Math.max( max_height, jQuery(this).height() ); } ).height( max_height ); }, level_placeholders: function( w ) { jQuery( 'div.placeholder' ).show(); var containerWidth = jetpack.container.width(), needed = 4 * parseInt( containerWidth / 242, 10 ) - jetpack.numModules if ( jetpack.numModules * 242 > containerWidth ) jQuery( 'div.placeholder' ).slice( needed ).hide(); else jQuery( 'div.placeholder' ).hide(); }, level_placeholders_on_resize: function() { jQuery( window ).bind( 'resize', function() { if ( jetpack.resizeTimer ) { return; } jetpack.resizeTimer = setTimeout( function() { jetpack.resizeTimer = false; jetpack.level_placeholders(); jetpack.level_placeholders_on_resize(); }, 100 ); } ); }, insert_learn_more: function( card, callback ) { var perRow = parseInt( jetpack.container.width() / 242, 10 ), cardPosition = 0, cardRow = 0, learnMoreOffset = jetpack.isRTL ? 144 : 28; // Get the position of the card clicked. 
jQuery( 'div.jetpack-module', 'div.module-container' ).each( function( i, el ) { if ( jQuery(el).attr('id') == jQuery(card).attr('id') ) cardPosition = i; } ); cardRow = 1 + parseInt( cardPosition / perRow, 10 ); // Insert the more info box after the last item of the row. jQuery( 'div.jetpack-module', 'div.module-container' ).each( function( i, el ) { if ( i + 1 == ( perRow * cardRow ) ) { // More info box already exists. if ( jQuery( 'div.more-info' ).length ) { if ( jQuery( el ).next().hasClass( 'more-info' ) ) { jQuery( 'div.more-info div.jp-content' ).fadeOut( 100 ); jetpack.learn_more_content( jQuery(card).attr( 'id' ) ); jQuery( window ).scrollTo( ( jQuery( 'div.more-info' ).prev().offset().top ) - 70, 600, function() { if ( typeof callback == 'function' ) callback.call( this ); } ); } else { jQuery( 'div.more-info div.jp-content' ).hide(); jQuery( 'div.more-info' ).slideUp( 200, function() { jQuery(this).detach().insertAfter( el ); jQuery( 'div.more-info div.jp-content' ).hide(); jetpack.learn_more_content( jQuery(card).attr( 'id' ) ); jQuery( 'div.more-info' ).slideDown( 300 ); jQuery( window ).scrollTo( ( jQuery( 'div.more-info' ).prev().offset().top ) - 70, 600, function() { if ( typeof callback == 'function' ) callback.call( this ); } ); } ); } // More info box does not exist. } else { // Insert the box. 
jQuery( el ).after( '<div id="message" class="more-info jetpack-message"><div class="arrow"></div><div class="jp-content"></div><div class="jp-close">&times;</div><div class="clear"></div></div>' ); // Show the box jQuery( 'div.more-info', 'div.module-container' ).hide().slideDown( 400, function() { // Load the content and scroll to it jetpack.learn_more_content( jQuery(card).attr( 'id' ) ); jQuery( window ).scrollTo( ( jQuery( 'div.more-info' ).prev().offset().top ) - 70, 600 ); if ( typeof callback == 'function' ) callback.call( this ); } ); jQuery( 'div.more-info' ).children( 'div.arrow' ).animate( { left: jQuery(card).offset().left - jetpack.container.offset().left + learnMoreOffset + 'px' }, 300 ); } jQuery( 'div.more-info' ).children( 'div.arrow' ).animate( { left: jQuery(card).offset().left - jetpack.container.offset().left + learnMoreOffset + 'px' }, 300 ); return; } } ); // Listen for resize jQuery( window ).bind( 'resize', function() { jetpack.reposition_learn_more( card ); jetpack.level_placeholders_on_resize(); } ); // Listen for close. jQuery( 'div.more-info div.jp-close' ).unbind( 'click' ).bind( 'click', function() { jetpack.close_learn_more(); } ); }, reposition_learn_more: function( card ) { var perRow = parseInt( jetpack.container.width() / 242, 10 ); var cardPosition = 0; // Get the position of the card clicked. 
jQuery( 'div.jetpack-module', 'div.module-container' ).each( function( i, el ) { if ( jQuery(el).attr('id') == jQuery(card).attr('id') ) cardPosition = i; } ); var cardRow = 1 + parseInt( cardPosition / perRow, 10 ); jQuery( 'div.jetpack-module', 'div.module-container' ).each( function( i, el ) { if ( i + 1 == ( perRow * cardRow ) ) { jQuery( 'div.more-info' ).detach().insertAfter( el ); jQuery( 'div.more-info' ).children( 'div.arrow' ).css( { left: jQuery(card).offset().left - jetpack.container.offset().left + 28 + 'px' }, 300 ); } } ); }, learn_more_content: function( module_id ) { response = jQuery( '#jp-more-info-' + module_id ).html(); jQuery( 'div.more-info div.jp-content' ).html( response ).hide().fadeIn( 300 ); }, close_learn_more: function( callback ) { jQuery( 'div.more-info div.jp-content' ).hide(); jQuery( 'div.more-info' ).slideUp( 200, function() { jQuery( this ).remove(); jQuery( 'a.jetpack-deactivate-button' ).hide(); jetpack.linkClicked.parents( 'div.jetpack-module' ).children( '.jetpack-module-actions' ).children( 'a.jetpack-configure-button' ).show(); jetpack.linkClicked = null; if ( typeof callback == 'function' ) callback.call( this ); } ); }, toggle_debug: function() { jQuery('div#jetpack-configuration').toggle( 0, function() { if ( jQuery( this ).is( ':visible' ) ) { jQuery.scrollTo( 'max', 'fast' ); } } ); }, hide_shadows: function() { jQuery( 'div.jetpack-module, div.more-info' ).css( { '-webkit-box-shadow': 'none' } ); }, show_shadows: function() { jQuery( 'div.jetpack-module' ).css( { '-webkit-box-shadow': 'inset 0 1px 0 #fff, inset 0 0 20px rgba(0,0,0,0.05), 0 1px 2px rgba( 0,0,0,0.1 )' } ); jQuery( 'div.more-info' ).css( { '-webkit-box-shadow': 'inset 0 0 20px rgba(0,0,0,0.05), 0 1px 2px rgba( 0,0,0,0.1 )' } ); } } jQuery( function() { jetpack.init(); } );
cowdinosaur/kita.sg
wp-content/plugins/jetpack/_inc/jetpack.js
JavaScript
gpl-2.0
10,858
#uppsite-wrapper { border-bottom: 1px solid #cccccc; display: inline-block; width: 100%; } #toplevel_page_uppsite-settings .wp-menu-image, #toplevel_page_uppsite-setup .wp-menu-image, #wpadminbar #wp-admin-bar-uppsite-mobile > .ab-item .ab-icon { background-image: url(../images/wp-admin-uppsite-icon.png); background-repeat: no-repeat; } .icon32.icon-uppsite, #icon-uppsite { background-image: url(../images/wp-admin-uppsite-icon-big.png); background-repeat: no-repeat; } @media only screen and (-webkit-min-device-pixel-ratio: 1.5) { #toplevel_page_uppsite-settings .wp-menu-image, #toplevel_page_uppsite-setup .wp-menu-image, #wpadminbar #wp-admin-bar-uppsite-mobile > .ab-item .ab-icon { background-image: url(../images/wp-admin-uppsite-icon@2x.png); background-size: 16px 48px; } .icon32.icon-uppsite #icon-uppsite { background-image: url(../images/wp-admin-uppsite-icon-big@2x.png); background-size: 30px 31px; } } #toplevel_page_uppsite-settings .wp-menu-image, #toplevel_page_uppsite-setup .wp-menu-image { background-position:7px -26px !important; } #toplevel_page_uppsite-settings.current .wp-menu-image, #toplevel_page_uppsite-settings.wp-has-current-submenu .wp-menu-image, #toplevel_page_uppsite-settings:hover .wp-menu-image, #toplevel_page_uppsite-setup.current .wp-menu-image, #toplevel_page_uppsite-setup.wp-has-current-submenu .wp-menu-image, #toplevel_page_uppsite-setup:hover .wp-menu-image { background-position:7px 6px !important; } #wpadminbar #wp-admin-bar-uppsite-mobile > .ab-item .ab-icon { background-position:0px 16px !important; } #wpadminbar #wp-admin-bar-uppsite-mobile:hover > .ab-item .ab-icon { background-position:0px 0px !important; } .uppsite-wrap .subsubsub { float: right; margin: 16px 0 0; } .uppsite-wrap .subsubsub a { padding: 0 .2em; } .uppsite-wrap h2 { float: left; } #wpbody-content { padding-bottom: 37px; } /* Changes for dashboard footer */ body[class*='version-3-'][class*='uppsite'] #wpcontent { margin-left: 146px; } 
body[class*='version-3-'][class*='uppsite'] #uppsite-wrapper { display: block; width: auto; margin: 0 20px; overflow: hidden; } body[class*='version-3-'][class*='uppsite'] .uppsite-wrap { margin: 0; } body[class*='version-3-'][class*='uppsite'] #uppsiteFrame { margin-top: -10px !important; } @media only screen and (max-width: 900px) { body[class*='version-3-'][class*='uppsite'] #wpcontent { margin-left: 33px; } }
mit-social-computing/youarehereweb
wp-content/plugins/app-your-wordpress-uppsite/admin/css/uppsite.css
CSS
gpl-2.0
2,534
/* display single iframes as stillimages * iframes can be created with the 'convert' tool from imagemagick * and mpeg2encode from ftp.mpeg.org, and must have a supported * size, e.g. 702x576: * $ convert -sample 702x576\! test.jpg test.mpg * * or more advanced using netpbm and mpeg2enc (not mpeg2encode) : * $ cat image.jpg | jpegtopnm | pnmscale -xsize=704 -ysize=576 |\ * ppmntsc --pal | ppmtoy4m -F 25:1 -A 4:3 -S 420mpeg2 |\ * mpeg2enc -f 7 -T 90 -F 3 -np -a 2 -o "image.mpg" * */ #include <sys/ioctl.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <time.h> #include <unistd.h> #include <linux/types.h> #include <linux/dvb/video.h> static const char *usage_string = "\n\t" "usage: %s <still.mpg> [still.mpg ...]\n" "\n\t" "to use another videodev than the first, set the environment variable VIDEODEV\n\t" "e.g.[user@linux]$ export VIDEODEV=\"/dev/dvb/adapter1/video1\".\n\t" "to display the image <n> seconds, instead of 10, set the variable SSTIME\n\t" "e.g. [user@linux]$ export SSTIME=\"60\" to display the still for 1 minute.\n\t" "this options can be set in the same line as the %s command:\n\t" "e.g. 
$ SSTIME=25 VIDEODEV=/dev/dvb/adapter1/video1 %s ...\n"; int main (int argc, char **argv) { int fd; int filefd; struct stat st; struct video_still_picture sp; char *videodev = "/dev/dvb/adapter0/video0"; char *env_sstime; int i = 1; int tsec = 10; if (argc < 2) { fprintf (stderr, usage_string, argv[0],argv[0],argv[0]); return -1; } if (getenv ("VIDEODEV")) videodev = getenv("VIDEODEV"); if (getenv ("SSTIME")) { env_sstime = getenv("SSTIME"); tsec = atoi(env_sstime); } if ((fd = open(videodev, O_RDWR)) < 0) { perror(videodev); return -1; } next_pic: printf("I-frame : '%s'\n", argv[i]); if ((filefd = open(argv[i], O_RDONLY)) < 0) { perror(argv[i]); return -1; } fstat(filefd, &st); sp.iFrame = (char *) malloc (st.st_size); sp.size = st.st_size; printf("I-frame size: %d\n", sp.size); if (!sp.iFrame) { fprintf (stderr, "No memory for I-Frame\n"); return -1; } printf ("read: %d bytes\n", (int) read(filefd, sp.iFrame, sp.size)); close(filefd); if ((ioctl(fd, VIDEO_STILLPICTURE, &sp) < 0)) { perror("ioctl VIDEO_STILLPICTURE"); return -1; } free(sp.iFrame); printf("Display image %d seconds ...\n",tsec); sleep(tsec); printf("Done.\n"); if (argc > ++i) goto next_pic; return 0; }
openpli-arm/dvb-apps
test/test_stillimage.c
C
gpl-2.0
2,505
<?php /** * The template for displaying all pages * * This is the template that displays all pages by default. * Please note that this is the WordPress construct of pages and that other * 'pages' on your WordPress site will use a different template. * * @package WordPress * @subpackage Twenty_Thirteen * @since Twenty Thirteen 1.0 */ get_header(); ?> <div id="primary" class="content-area"> <div id="content" class="site-content" role="main"> <?php /* The loop */ ?> <?php while ( have_posts() ) : the_post(); ?> <article id="post-<?php the_ID(); ?>" <?php post_class(); ?>> <header class="entry-header"> <?php if ( has_post_thumbnail() && ! post_password_required() ) : ?> <div class="entry-thumbnail"> <?php the_post_thumbnail(); ?> </div> <?php endif; ?> <h1 class="entry-title"><?php the_title(); ?></h1> </header><!-- .entry-header --> <div class="entry-content"> <?php the_content(); ?> <?php wp_link_pages( array( 'before' => '<div class="page-links"><span class="page-links-title">' . __( 'Pages:', 'twentythirteen' ) . '</span>', 'after' => '</div>', 'link_before' => '<span>', 'link_after' => '</span>' ) ); ?> </div><!-- .entry-content --> <footer class="entry-meta"> <?php edit_post_link( __( 'Edit', 'twentythirteen' ), '<span class="edit-link">', '</span>' ); ?> </footer><!-- .entry-meta --> </article><!-- #post --> <?php comments_template(); ?> <?php endwhile; ?> </div><!-- #content --> </div><!-- #primary --> <?php get_sidebar(); ?> <?php get_footer(); ?>
AASP/cosmic-wordpress
wp-content/themes/twentythirteen/page.php
PHP
gpl-2.0
1,609
from django import forms from django.template import Context from django.template.loader import get_template from django import template from bootstrapform import config register = template.Library() @register.filter def bootstrap(element): markup_classes = {'label': '', 'value': '', 'single_value': ''} return render(element, markup_classes) @register.filter def bootstrap_inline(element): markup_classes = {'label': 'sr-only', 'value': '', 'single_value': ''} return render(element, markup_classes) @register.filter def bootstrap_horizontal(element, label_cols={}): if not label_cols: label_cols = 'col-sm-2 col-lg-2' markup_classes = {'label': label_cols, 'value': '', 'single_value': ''} for cl in label_cols.split(' '): splited_class = cl.split('-') try: value_nb_cols = int(splited_class[-1]) except ValueError: value_nb_cols = config.BOOTSTRAP_COLUMN_COUNT if value_nb_cols >= config.BOOTSTRAP_COLUMN_COUNT: splited_class[-1] = config.BOOTSTRAP_COLUMN_COUNT else: offset_class = cl.split('-') offset_class[-1] = 'offset-' + str(value_nb_cols) splited_class[-1] = str(config.BOOTSTRAP_COLUMN_COUNT - value_nb_cols) markup_classes['single_value'] += ' ' + '-'.join(offset_class) markup_classes['single_value'] += ' ' + '-'.join(splited_class) markup_classes['value'] += ' ' + '-'.join(splited_class) return render(element, markup_classes) def add_input_classes(field): if not is_checkbox(field) and not is_multiple_checkbox(field) and not is_radio(field) \ and not is_file(field): field_classes = field.field.widget.attrs.get('class', '') field_classes += ' form-control' field.field.widget.attrs['class'] = field_classes def render(element, markup_classes): element_type = element.__class__.__name__.lower() if element_type == 'boundfield': add_input_classes(element) template = get_template("bootstrapform/field.html") context = Context({'field': element, 'classes': markup_classes}) else: has_management = getattr(element, 'management_form', None) if has_management: for form in 
element.forms: for field in form.visible_fields(): add_input_classes(field) template = get_template("bootstrapform/formset.html") context = Context({'formset': element, 'classes': markup_classes}) else: for field in element.visible_fields(): add_input_classes(field) template = get_template("bootstrapform/form.html") context = Context({'form': element, 'classes': markup_classes}) return template.render(context) @register.filter def is_checkbox(field): return isinstance(field.field.widget, forms.CheckboxInput) @register.filter def is_multiple_checkbox(field): return isinstance(field.field.widget, forms.CheckboxSelectMultiple) @register.filter def is_radio(field): return isinstance(field.field.widget, forms.RadioSelect) @register.filter def is_file(field): return isinstance(field.field.widget, forms.FileInput)
MickaelBergem/UpTimeDelay
bootstrapform/templatetags/bootstrap.py
Python
gpl-2.0
3,284
/* * Copyright (c) 2001 Vojtech Pavlik * * CATC EL1210A NetMate USB Ethernet driver * * Sponsored by SuSE * * Based on the work of * Donald Becker * * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002 * - adds support for Belkin F5U011 */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <asm/uaccess.h> #undef DEBUG #include <linux/usb.h> /* * Version information. */ #define DRIVER_VERSION "v2.8" #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>" #define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver" #define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char driver_name[] = "catc"; /* * Some defines. 
*/ #define STATS_UPDATE (HZ) /* Time between stats updates */ #define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */ #define PKT_SZ 1536 /* Max Ethernet packet size */ #define RX_MAX_BURST 15 /* Max packets per rx buffer (> 0, < 16) */ #define TX_MAX_BURST 15 /* Max full sized packets per tx buffer (> 0) */ #define CTRL_QUEUE 16 /* Max control requests in flight (power of two) */ #define RX_PKT_SZ 1600 /* Max size of receive packet for F5U011 */ /* * Control requests. */ enum control_requests { ReadMem = 0xf1, GetMac = 0xf2, Reset = 0xf4, SetMac = 0xf5, SetRxMode = 0xf5, /* F5U011 only */ WriteROM = 0xf8, SetReg = 0xfa, GetReg = 0xfb, WriteMem = 0xfc, ReadROM = 0xfd, }; /* * Registers. */ enum register_offsets { TxBufCount = 0x20, RxBufCount = 0x21, OpModes = 0x22, TxQed = 0x23, RxQed = 0x24, MaxBurst = 0x25, RxUnit = 0x60, EthStatus = 0x61, StationAddr0 = 0x67, EthStats = 0x69, LEDCtrl = 0x81, }; enum eth_stats { TxSingleColl = 0x00, TxMultiColl = 0x02, TxExcessColl = 0x04, RxFramErr = 0x06, }; enum op_mode_bits { Op3MemWaits = 0x03, OpLenInclude = 0x08, OpRxMerge = 0x10, OpTxMerge = 0x20, OpWin95bugfix = 0x40, OpLoopback = 0x80, }; enum rx_filter_bits { RxEnable = 0x01, RxPolarity = 0x02, RxForceOK = 0x04, RxMultiCast = 0x08, RxPromisc = 0x10, AltRxPromisc = 0x20, /* F5U011 uses different bit */ }; enum led_values { LEDFast = 0x01, LEDSlow = 0x02, LEDFlash = 0x03, LEDPulse = 0x04, LEDLink = 0x08, }; enum link_status { LinkNoChange = 0, LinkGood = 1, LinkBad = 2 }; /* * The catc struct. 
*/ #define CTRL_RUNNING 0 #define RX_RUNNING 1 #define TX_RUNNING 2 struct catc { struct net_device *netdev; struct usb_device *usbdev; unsigned long flags; unsigned int tx_ptr, tx_idx; unsigned int ctrl_head, ctrl_tail; spinlock_t tx_lock, ctrl_lock; u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)]; u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)]; u8 irq_buf[2]; u8 ctrl_buf[64]; struct usb_ctrlrequest ctrl_dr; struct timer_list timer; u8 stats_buf[8]; u16 stats_vals[4]; unsigned long last_stats; u8 multicast[64]; struct ctrl_queue { u8 dir; u8 request; u16 value; u16 index; void *buf; int len; void (*callback)(struct catc *catc, struct ctrl_queue *q); } ctrl_queue[CTRL_QUEUE]; struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb; u8 is_f5u011; /* Set if device is an F5U011 */ u8 rxmode[2]; /* Used for F5U011 */ atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */ }; /* * Useful macros. */ #define catc_get_mac(catc, mac) catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6) #define catc_reset(catc) catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0) #define catc_set_reg(catc, reg, val) catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0) #define catc_get_reg(catc, reg, buf) catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1) #define catc_write_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size) #define catc_read_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size) #define f5u011_rxmode(catc, rxmode) catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2) #define f5u011_rxmode_async(catc, rxmode) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL) #define f5u011_mchash_async(catc, hash) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL) #define catc_set_reg_async(catc, reg, val) catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL) #define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 
1, cb) #define catc_write_mem_async(catc, addr, buf, size) catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL) /* * Receive routines. */ static void catc_rx_done(struct urb *urb) { struct catc *catc = urb->context; u8 *pkt_start = urb->transfer_buffer; struct sk_buff *skb; int pkt_len, pkt_offset = 0; int status = urb->status; if (!catc->is_f5u011) { clear_bit(RX_RUNNING, &catc->flags); pkt_offset = 2; } if (status) { dbg("rx_done, status %d, length %d", status, urb->actual_length); return; } do { if(!catc->is_f5u011) { pkt_len = le16_to_cpup((__le16*)pkt_start); if (pkt_len > urb->actual_length) { catc->netdev->stats.rx_length_errors++; catc->netdev->stats.rx_errors++; break; } } else { pkt_len = urb->actual_length; } if (!(skb = dev_alloc_skb(pkt_len))) return; skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, catc->netdev); netif_rx(skb); catc->netdev->stats.rx_packets++; catc->netdev->stats.rx_bytes += pkt_len; /* F5U011 only does one packet per RX */ if (catc->is_f5u011) break; pkt_start += (((pkt_len + 1) >> 6) + 1) << 6; } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length); if (catc->is_f5u011) { if (atomic_read(&catc->recq_sz)) { int state; atomic_dec(&catc->recq_sz); dbg("getting extra packet"); urb->dev = catc->usbdev; if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { dbg("submit(rx_urb) status %d", state); } } else { clear_bit(RX_RUNNING, &catc->flags); } } } static void catc_irq_done(struct urb *urb) { struct catc *catc = urb->context; u8 *data = urb->transfer_buffer; int status = urb->status; unsigned int hasdata = 0, linksts = LinkNoChange; int res; if (!catc->is_f5u011) { hasdata = data[1] & 0x80; if (data[1] & 0x40) linksts = LinkGood; else if (data[1] & 0x20) linksts = LinkBad; } else { hasdata = (unsigned int)(be16_to_cpup((__be16*)data) & 0x0fff); if (data[0] == 0x90) linksts = LinkGood; else if (data[0] == 0xA0) linksts = LinkBad; } 
switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]); goto resubmit; } if (linksts == LinkGood) { netif_carrier_on(catc->netdev); dbg("link ok"); } if (linksts == LinkBad) { netif_carrier_off(catc->netdev); dbg("link bad"); } if (hasdata) { if (test_and_set_bit(RX_RUNNING, &catc->flags)) { if (catc->is_f5u011) atomic_inc(&catc->recq_sz); } else { catc->rx_urb->dev = catc->usbdev; if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) { err("submit(rx_urb) status %d", res); } } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res) err ("can't resubmit intr, %s-%s, status %d", catc->usbdev->bus->bus_name, catc->usbdev->devpath, res); } /* * Transmit routines. */ static int catc_tx_run(struct catc *catc) { int status; if (catc->is_f5u011) catc->tx_ptr = (catc->tx_ptr + 63) & ~63; catc->tx_urb->transfer_buffer_length = catc->tx_ptr; catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx]; catc->tx_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0) err("submit(tx_urb), status %d", status); catc->tx_idx = !catc->tx_idx; catc->tx_ptr = 0; catc->netdev->trans_start = jiffies; return status; } static void catc_tx_done(struct urb *urb) { struct catc *catc = urb->context; unsigned long flags; int r, status = urb->status; if (status == -ECONNRESET) { dbg("Tx Reset."); urb->status = 0; catc->netdev->trans_start = jiffies; catc->netdev->stats.tx_errors++; clear_bit(TX_RUNNING, &catc->flags); netif_wake_queue(catc->netdev); return; } if (status) { dbg("tx_done, status %d, length %d", status, urb->actual_length); return; } spin_lock_irqsave(&catc->tx_lock, flags); if (catc->tx_ptr) { r = catc_tx_run(catc); if (unlikely(r < 0)) clear_bit(TX_RUNNING, &catc->flags); } else { clear_bit(TX_RUNNING, &catc->flags); } netif_wake_queue(catc->netdev); 
spin_unlock_irqrestore(&catc->tx_lock, flags); } static netdev_tx_t catc_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); unsigned long flags; int r = 0; char *tx_buf; spin_lock_irqsave(&catc->tx_lock, flags); catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; if (catc->is_f5u011) *(__be16 *)tx_buf = cpu_to_be16(skb->len); else *(__le16 *)tx_buf = cpu_to_le16(skb->len); skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); catc->tx_ptr += skb->len + 2; if (!test_and_set_bit(TX_RUNNING, &catc->flags)) { r = catc_tx_run(catc); if (r < 0) clear_bit(TX_RUNNING, &catc->flags); } if ((catc->is_f5u011 && catc->tx_ptr) || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2)))) netif_stop_queue(netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); if (r >= 0) { catc->netdev->stats.tx_bytes += skb->len; catc->netdev->stats.tx_packets++; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static void catc_tx_timeout(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); dev_warn(&netdev->dev, "Transmit timed out.\n"); usb_unlink_urb(catc->tx_urb); } /* * Control messages. */ static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len) { int retval = usb_control_msg(catc->usbdev, dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0), request, 0x40 | dir, value, index, buf, len, 1000); return retval < 0 ? retval : 0; } static void catc_ctrl_run(struct catc *catc) { struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail; struct usb_device *usbdev = catc->usbdev; struct urb *urb = catc->ctrl_urb; struct usb_ctrlrequest *dr = &catc->ctrl_dr; int status; dr->bRequest = q->request; dr->bRequestType = 0x40 | q->dir; dr->wValue = cpu_to_le16(q->value); dr->wIndex = cpu_to_le16(q->index); dr->wLength = cpu_to_le16(q->len); urb->pipe = q->dir ? 
usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0); urb->transfer_buffer_length = q->len; urb->transfer_buffer = catc->ctrl_buf; urb->setup_packet = (void *) dr; urb->dev = usbdev; if (!q->dir && q->buf && q->len) memcpy(catc->ctrl_buf, q->buf, q->len); if ((status = usb_submit_urb(catc->ctrl_urb, GFP_KERNEL))) err("submit(ctrl_urb) status %d", status); } static void catc_ctrl_done(struct urb *urb) { struct catc *catc = urb->context; struct ctrl_queue *q; unsigned long flags; int status = urb->status; if (status) dbg("ctrl_done, status %d, len %d.", status, urb->actual_length); spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_tail; if (q->dir) { if (q->buf && q->len) memcpy(q->buf, catc->ctrl_buf, q->len); else q->buf = catc->ctrl_buf; } if (q->callback) q->callback(catc, q); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head != catc->ctrl_tail) catc_ctrl_run(catc); else clear_bit(CTRL_RUNNING, &catc->flags); spin_unlock_irqrestore(&catc->ctrl_lock, flags); } static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q)) { struct ctrl_queue *q; int retval = 0; unsigned long flags; spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_head; q->dir = dir; q->request = request; q->value = value; q->index = index; q->buf = buf; q->len = len; q->callback = callback; catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head == catc->ctrl_tail) { err("ctrl queue full"); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); retval = -1; } if (!test_and_set_bit(CTRL_RUNNING, &catc->flags)) catc_ctrl_run(catc); spin_unlock_irqrestore(&catc->ctrl_lock, flags); return retval; } /* * Statistics. 
*/ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q) { int index = q->index - EthStats; u16 data, last; catc->stats_buf[index] = *((char *)q->buf); if (index & 1) return; data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1]; last = catc->stats_vals[index >> 1]; switch (index) { case TxSingleColl: case TxMultiColl: catc->netdev->stats.collisions += data - last; break; case TxExcessColl: catc->netdev->stats.tx_aborted_errors += data - last; catc->netdev->stats.tx_errors += data - last; break; case RxFramErr: catc->netdev->stats.rx_frame_errors += data - last; catc->netdev->stats.rx_errors += data - last; break; } catc->stats_vals[index >> 1] = data; } static void catc_stats_timer(unsigned long data) { struct catc *catc = (void *) data; int i; for (i = 0; i < 8; i++) catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done); mod_timer(&catc->timer, jiffies + STATS_UPDATE); } /* * Receive modes. Broadcast, Multicast, Promisc. */ static void catc_multicast(unsigned char *addr, u8 *multicast) { u32 crc; crc = ether_crc_le(6, addr); multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } static void catc_set_multicast_list(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); struct dev_mc_list *mc; u8 broadcast[6]; u8 rx = RxEnable | RxPolarity | RxMultiCast; memset(broadcast, 0xff, 6); memset(catc->multicast, 0, 64); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); if (netdev->flags & IFF_PROMISC) { memset(catc->multicast, 0xff, 64); rx |= (!catc->is_f5u011) ? 
RxPromisc : AltRxPromisc; } if (netdev->flags & IFF_ALLMULTI) { memset(catc->multicast, 0xff, 64); } else { netdev_for_each_mc_addr(mc, netdev) { u32 crc = ether_crc_le(6, mc->dmi_addr); if (!catc->is_f5u011) { catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } else { catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7); } } } if (!catc->is_f5u011) { catc_set_reg_async(catc, RxUnit, rx); catc_write_mem_async(catc, 0xfa80, catc->multicast, 64); } else { f5u011_mchash_async(catc, catc->multicast); if (catc->rxmode[0] != rx) { catc->rxmode[0] = rx; dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]); f5u011_rxmode_async(catc, catc->rxmode); } } } static void catc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct catc *catc = netdev_priv(dev); strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); usb_make_path (catc->usbdev, info->bus_info, sizeof info->bus_info); } static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct catc *catc = netdev_priv(dev); if (!catc->is_f5u011) return -EOPNOTSUPP; cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP; cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP; cmd->speed = SPEED_10; cmd->duplex = DUPLEX_HALF; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 1; cmd->maxrxpkt = 1; return 0; } static const struct ethtool_ops ops = { .get_drvinfo = catc_get_drvinfo, .get_settings = catc_get_settings, .get_link = ethtool_op_get_link }; /* * Open, close. 
*/ static int catc_open(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); int status; catc->irq_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) { err("submit(irq_urb) status %d", status); return -1; } netif_start_queue(netdev); if (!catc->is_f5u011) mod_timer(&catc->timer, jiffies + STATS_UPDATE); return 0; } static int catc_stop(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); netif_stop_queue(netdev); if (!catc->is_f5u011) del_timer_sync(&catc->timer); usb_kill_urb(catc->rx_urb); usb_kill_urb(catc->tx_urb); usb_kill_urb(catc->irq_urb); usb_kill_urb(catc->ctrl_urb); return 0; } static const struct net_device_ops catc_netdev_ops = { .ndo_open = catc_open, .ndo_stop = catc_stop, .ndo_start_xmit = catc_start_xmit, .ndo_tx_timeout = catc_tx_timeout, .ndo_set_multicast_list = catc_set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* * USB probe, disconnect. 
*/ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; u8 broadcast[6]; int i, pktsz; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { err("Can't set altsetting 1."); return -EIO; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) return -ENOMEM; catc = netdev_priv(netdev); netdev->netdev_ops = &catc_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(netdev, &ops); catc->usbdev = usbdev; catc->netdev = netdev; spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); init_timer(&catc->timer); catc->timer.data = (long) catc; catc->timer.function = catc_stats_timer; catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { err("No free urbs available."); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return -ENOMEM; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { dbg("Testing for f5u011"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; } else { pktsz = RX_MAX_BURST * (PKT_SZ + 2); } usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0), NULL, NULL, 0, catc_ctrl_done, catc); usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1), NULL, 0, catc_tx_done, catc); usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1), catc->rx_buf, pktsz, catc_rx_done, catc); usb_fill_int_urb(catc->irq_urb, usbdev, 
usb_rcvintpipe(usbdev, 2), catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { dbg("Checking memory size\n"); i = 0x12345678; catc_write_mem(catc, 0x7a80, &i, 4); i = 0x87654321; catc_write_mem(catc, 0xfa80, &i, 4); catc_read_mem(catc, 0x7a80, &i, 4); switch (i) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); dbg("64k Memory\n"); break; default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); dbg("32k Memory\n"); break; } dbg("Getting MAC from SEEROM."); catc_get_mac(catc, netdev->dev_addr); dbg("Setting MAC into registers."); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); dbg("Filling the multicast list."); memset(broadcast, 0xff, 6); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); dbg("Clearing error counters."); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; dbg("Enabling."); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); } else { dbg("Performing reset\n"); catc_reset(catc); catc_get_mac(catc, netdev->dev_addr); dbg("Setting RX Mode"); catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } dbg("Init done."); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? 
"Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); if (register_netdev(netdev) != 0) { usb_set_intfdata(intf, NULL); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return -EIO; } return 0; } static void catc_disconnect(struct usb_interface *intf) { struct catc *catc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (catc) { unregister_netdev(catc->netdev); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(catc->netdev); } } /* * Module functions and tables. */ static struct usb_device_id catc_id_table [] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ { } }; MODULE_DEVICE_TABLE(usb, catc_id_table); static struct usb_driver catc_driver = { .name = driver_name, .probe = catc_probe, .disconnect = catc_disconnect, .id_table = catc_id_table, }; static int __init catc_init(void) { int result = usb_register(&catc_driver); if (result == 0) printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return result; } static void __exit catc_exit(void) { usb_deregister(&catc_driver); } module_init(catc_init); module_exit(catc_exit);
grate-driver/linux-2.6
drivers/net/usb/catc.c
C
gpl-2.0
23,979
#include "blaswrap.h" #include "f2c.h" /* Subroutine */ int stgsyl_(char *trans, integer *ijob, integer *m, integer * n, real *a, integer *lda, real *b, integer *ldb, real *c__, integer * ldc, real *d__, integer *ldd, real *e, integer *lde, real *f, integer *ldf, real *scale, real *dif, real *work, integer *lwork, integer * iwork, integer *info) { /* -- LAPACK routine (version 3.0) -- Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., Courant Institute, Argonne National Lab, and Rice University June 30, 1999 Purpose ======= STGSYL solves the generalized Sylvester equation: A * R - L * B = scale * C (1) D * R - L * E = scale * F where R and L are unknown m-by-n matrices, (A, D), (B, E) and (C, F) are given matrix pairs of size m-by-m, n-by-n and m-by-n, respectively, with real entries. (A, D) and (B, E) must be in generalized (real) Schur canonical form, i.e. A, B are upper quasi triangular and D, E are upper triangular. The solution (R, L) overwrites (C, F). 0 <= SCALE <= 1 is an output scaling factor chosen to avoid overflow. In matrix notation (1) is equivalent to solve Zx = scale b, where Z is defined as Z = [ kron(In, A) -kron(B', Im) ] (2) [ kron(In, D) -kron(E', Im) ]. Here Ik is the identity matrix of size k and X' is the transpose of X. kron(X, Y) is the Kronecker product between the matrices X and Y. If TRANS = 'T', STGSYL solves the transposed system Z'*y = scale*b, which is equivalent to solve for R and L in A' * R + D' * L = scale * C (3) R * B' + L * E' = scale * (-F) This case (TRANS = 'T') is used to compute an one-norm-based estimate of Dif[(A,D), (B,E)], the separation between the matrix pairs (A,D) and (B,E), using SLACON. If IJOB >= 1, STGSYL computes a Frobenius norm-based estimate of Dif[(A,D),(B,E)]. That is, the reciprocal of a lower bound on the reciprocal of the smallest singular value of Z. See [1-2] for more information. This is a level 3 BLAS algorithm. 
Arguments ========= TRANS (input) CHARACTER*1 = 'N', solve the generalized Sylvester equation (1). = 'T', solve the 'transposed' system (3). IJOB (input) INTEGER Specifies what kind of functionality to be performed. =0: solve (1) only. =1: The functionality of 0 and 3. =2: The functionality of 0 and 4. =3: Only an estimate of Dif[(A,D), (B,E)] is computed. (look ahead strategy IJOB = 1 is used). =4: Only an estimate of Dif[(A,D), (B,E)] is computed. ( SGECON on sub-systems is used ). Not referenced if TRANS = 'T'. M (input) INTEGER The order of the matrices A and D, and the row dimension of the matrices C, F, R and L. N (input) INTEGER The order of the matrices B and E, and the column dimension of the matrices C, F, R and L. A (input) REAL array, dimension (LDA, M) The upper quasi triangular matrix A. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1, M). B (input) REAL array, dimension (LDB, N) The upper quasi triangular matrix B. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1, N). C (input/output) REAL array, dimension (LDC, N) On entry, C contains the right-hand-side of the first matrix equation in (1) or (3). On exit, if IJOB = 0, 1 or 2, C has been overwritten by the solution R. If IJOB = 3 or 4 and TRANS = 'N', C holds R, the solution achieved during the computation of the Dif-estimate. LDC (input) INTEGER The leading dimension of the array C. LDC >= max(1, M). D (input) REAL array, dimension (LDD, M) The upper triangular matrix D. LDD (input) INTEGER The leading dimension of the array D. LDD >= max(1, M). E (input) REAL array, dimension (LDE, N) The upper triangular matrix E. LDE (input) INTEGER The leading dimension of the array E. LDE >= max(1, N). F (input/output) REAL array, dimension (LDF, N) On entry, F contains the right-hand-side of the second matrix equation in (1) or (3). On exit, if IJOB = 0, 1 or 2, F has been overwritten by the solution L. 
If IJOB = 3 or 4 and TRANS = 'N', F holds L, the solution achieved during the computation of the Dif-estimate. LDF (input) INTEGER The leading dimension of the array F. LDF >= max(1, M). DIF (output) REAL On exit DIF is the reciprocal of a lower bound of the reciprocal of the Dif-function, i.e. DIF is an upper bound of Dif[(A,D), (B,E)] = sigma_min(Z), where Z as in (2). IF IJOB = 0 or TRANS = 'T', DIF is not touched. SCALE (output) REAL On exit SCALE is the scaling factor in (1) or (3). If 0 < SCALE < 1, C and F hold the solutions R and L, resp., to a slightly perturbed system but the input matrices A, B, D and E have not been changed. If SCALE = 0, C and F hold the solutions R and L, respectively, to the homogeneous system with C = F = 0. Normally, SCALE = 1. WORK (workspace/output) REAL array, dimension (LWORK) If IJOB = 0, WORK is not referenced. Otherwise, on exit, if INFO = 0, WORK(1) returns the optimal LWORK. LWORK (input) INTEGER The dimension of the array WORK. LWORK > = 1. If IJOB = 1 or 2 and TRANS = 'N', LWORK >= 2*M*N. If LWORK = -1, then a workspace query is assumed; the routine only calculates the optimal size of the WORK array, returns this value as the first entry of the WORK array, and no error message related to LWORK is issued by XERBLA. IWORK (workspace) INTEGER array, dimension (M+N+6) INFO (output) INTEGER =0: successful exit <0: If INFO = -i, the i-th argument had an illegal value. >0: (A, D) and (B, E) have common or close eigenvalues. Further Details =============== Based on contributions by Bo Kagstrom and Peter Poromaa, Department of Computing Science, Umea University, S-901 87 Umea, Sweden. [1] B. Kagstrom and P. Poromaa, LAPACK-Style Algorithms and Software for Solving the Generalized Sylvester Equation and Estimating the Separation between Regular Matrix Pairs, Report UMINF - 93.23, Department of Computing Science, Umea University, S-901 87 Umea, Sweden, December 1993, Revised April 1994, Also as LAPACK Working Note 75. 
To appear in ACM Trans. on Math. Software, Vol 22, No 1, 1996. [2] B. Kagstrom, A Perturbation Analysis of the Generalized Sylvester Equation (AR - LB, DR - LE ) = (C, F), SIAM J. Matrix Anal. Appl., 15(4):1045-1060, 1994 [3] B. Kagstrom and L. Westin, Generalized Schur Methods with Condition Estimators for Solving the Generalized Sylvester Equation, IEEE Transactions on Automatic Control, Vol. 34, No. 7, July 1989, pp 745-751. ===================================================================== Decode and test input parameters Parameter adjustments */ /* Table of constant values */ static integer c__2 = 2; static integer c_n1 = -1; static integer c__5 = 5; static real c_b14 = 0.f; static integer c__0 = 0; static integer c__1 = 1; static real c_b53 = -1.f; static real c_b54 = 1.f; /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, d_dim1, d_offset, e_dim1, e_offset, f_dim1, f_offset, i__1, i__2, i__3, i__4; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ static real dsum; static integer ppqq, i__, j, k, p, q; extern logical lsame_(char *, char *); static integer ifunc; extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *); static integer linfo; extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *, integer *, real *, real *, integer *, real *, integer *, real *, real *, integer *); static integer lwmin; extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *, integer *); static real scale2; static integer ie, je, mb, nb; static real dscale; static integer is, js; extern /* Subroutine */ int stgsy2_(char *, integer *, integer *, integer *, real *, integer *, real *, integer *, real *, integer *, real * , integer *, real *, integer *, real *, integer *, real *, real *, real *, integer *, integer *, integer *); static integer pq; static real scaloc; extern /* Subroutine */ int xerbla_(char *, integer *); extern integer ilaenv_(integer *, char *, char 
*, integer *, integer *, integer *, integer *, ftnlen, ftnlen); extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *, integer *, real *, integer *); static integer iround; static logical notran; static integer isolve; static logical lquery; #define a_ref(a_1,a_2) a[(a_2)*a_dim1 + a_1] #define b_ref(a_1,a_2) b[(a_2)*b_dim1 + a_1] #define c___ref(a_1,a_2) c__[(a_2)*c_dim1 + a_1] #define d___ref(a_1,a_2) d__[(a_2)*d_dim1 + a_1] #define e_ref(a_1,a_2) e[(a_2)*e_dim1 + a_1] #define f_ref(a_1,a_2) f[(a_2)*f_dim1 + a_1] a_dim1 = *lda; a_offset = 1 + a_dim1 * 1; a -= a_offset; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; c_dim1 = *ldc; c_offset = 1 + c_dim1 * 1; c__ -= c_offset; d_dim1 = *ldd; d_offset = 1 + d_dim1 * 1; d__ -= d_offset; e_dim1 = *lde; e_offset = 1 + e_dim1 * 1; e -= e_offset; f_dim1 = *ldf; f_offset = 1 + f_dim1 * 1; f -= f_offset; --work; --iwork; /* Function Body */ *info = 0; notran = lsame_(trans, "N"); lquery = *lwork == -1; if ((*ijob == 1 || *ijob == 2) && notran) { /* Computing MAX */ i__1 = 1, i__2 = (*m << 1) * *n; lwmin = max(i__1,i__2); } else { lwmin = 1; } if (! notran && ! lsame_(trans, "T")) { *info = -1; } else if (*ijob < 0 || *ijob > 4) { *info = -2; } else if (*m <= 0) { *info = -3; } else if (*n <= 0) { *info = -4; } else if (*lda < max(1,*m)) { *info = -6; } else if (*ldb < max(1,*n)) { *info = -8; } else if (*ldc < max(1,*m)) { *info = -10; } else if (*ldd < max(1,*m)) { *info = -12; } else if (*lde < max(1,*n)) { *info = -14; } else if (*ldf < max(1,*m)) { *info = -16; } else if (*lwork < lwmin && ! 
lquery) { *info = -20; } if (*info == 0) { work[1] = (real) lwmin; } if (*info != 0) { i__1 = -(*info); xerbla_("STGSYL", &i__1); return 0; } else if (lquery) { return 0; } /* Determine optimal block sizes MB and NB */ mb = ilaenv_(&c__2, "STGSYL", trans, m, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); nb = ilaenv_(&c__5, "STGSYL", trans, m, n, &c_n1, &c_n1, (ftnlen)6, ( ftnlen)1); isolve = 1; ifunc = 0; if (*ijob >= 3 && notran) { ifunc = *ijob - 2; i__1 = *n; for (j = 1; j <= i__1; ++j) { scopy_(m, &c_b14, &c__0, &c___ref(1, j), &c__1); scopy_(m, &c_b14, &c__0, &f_ref(1, j), &c__1); /* L10: */ } } else if (*ijob >= 1 && notran) { isolve = 2; } if (mb <= 1 && nb <= 1 || mb >= *m && nb >= *n) { i__1 = isolve; for (iround = 1; iround <= i__1; ++iround) { /* Use unblocked Level 2 solver */ dscale = 0.f; dsum = 1.f; pq = 0; stgsy2_(trans, &ifunc, m, n, &a[a_offset], lda, &b[b_offset], ldb, &c__[c_offset], ldc, &d__[d_offset], ldd, &e[e_offset], lde, &f[f_offset], ldf, scale, &dsum, &dscale, &iwork[1], &pq, info); if (dscale != 0.f) { if (*ijob == 1 || *ijob == 3) { *dif = sqrt((real) ((*m << 1) * *n)) / (dscale * sqrt( dsum)); } else { *dif = sqrt((real) pq) / (dscale * sqrt(dsum)); } } if (isolve == 2 && iround == 1) { ifunc = *ijob; scale2 = *scale; slacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m); slacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m); i__2 = *n; for (j = 1; j <= i__2; ++j) { scopy_(m, &c_b14, &c__0, &c___ref(1, j), &c__1); scopy_(m, &c_b14, &c__0, &f_ref(1, j), &c__1); /* L20: */ } } else if (isolve == 2 && iround == 2) { slacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc); slacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf); *scale = scale2; } /* L30: */ } return 0; } /* Determine block structure of A */ p = 0; i__ = 1; L40: if (i__ > *m) { goto L50; } ++p; iwork[p] = i__; i__ += mb; if (i__ >= *m) { goto L50; } if (a_ref(i__, i__ - 1) != 0.f) { ++i__; } goto L40; L50: iwork[p + 1] = *m + 1; if (iwork[p] == iwork[p + 1]) { --p; } 
/* Determine block structure of B */ q = p + 1; j = 1; L60: if (j > *n) { goto L70; } ++q; iwork[q] = j; j += nb; if (j >= *n) { goto L70; } if (b_ref(j, j - 1) != 0.f) { ++j; } goto L60; L70: iwork[q + 1] = *n + 1; if (iwork[q] == iwork[q + 1]) { --q; } if (notran) { i__1 = isolve; for (iround = 1; iround <= i__1; ++iround) { /* Solve (I, J)-subsystem A(I, I) * R(I, J) - L(I, J) * B(J, J) = C(I, J) D(I, I) * R(I, J) - L(I, J) * E(J, J) = F(I, J) for I = P, P - 1,..., 1; J = 1, 2,..., Q */ dscale = 0.f; dsum = 1.f; pq = 0; *scale = 1.f; i__2 = q; for (j = p + 2; j <= i__2; ++j) { js = iwork[j]; je = iwork[j + 1] - 1; nb = je - js + 1; for (i__ = p; i__ >= 1; --i__) { is = iwork[i__]; ie = iwork[i__ + 1] - 1; mb = ie - is + 1; ppqq = 0; stgsy2_(trans, &ifunc, &mb, &nb, &a_ref(is, is), lda, & b_ref(js, js), ldb, &c___ref(is, js), ldc, & d___ref(is, is), ldd, &e_ref(js, js), lde, &f_ref( is, js), ldf, &scaloc, &dsum, &dscale, &iwork[q + 2], &ppqq, &linfo); if (linfo > 0) { *info = linfo; } pq += ppqq; if (scaloc != 1.f) { i__3 = js - 1; for (k = 1; k <= i__3; ++k) { sscal_(m, &scaloc, &c___ref(1, k), &c__1); sscal_(m, &scaloc, &f_ref(1, k), &c__1); /* L80: */ } i__3 = je; for (k = js; k <= i__3; ++k) { i__4 = is - 1; sscal_(&i__4, &scaloc, &c___ref(1, k), &c__1); i__4 = is - 1; sscal_(&i__4, &scaloc, &f_ref(1, k), &c__1); /* L90: */ } i__3 = je; for (k = js; k <= i__3; ++k) { i__4 = *m - ie; sscal_(&i__4, &scaloc, &c___ref(ie + 1, k), &c__1) ; i__4 = *m - ie; sscal_(&i__4, &scaloc, &f_ref(ie + 1, k), &c__1); /* L100: */ } i__3 = *n; for (k = je + 1; k <= i__3; ++k) { sscal_(m, &scaloc, &c___ref(1, k), &c__1); sscal_(m, &scaloc, &f_ref(1, k), &c__1); /* L110: */ } *scale *= scaloc; } /* Substitute R(I, J) and L(I, J) into remaining equation. 
*/ if (i__ > 1) { i__3 = is - 1; sgemm_("N", "N", &i__3, &nb, &mb, &c_b53, &a_ref(1, is), lda, &c___ref(is, js), ldc, &c_b54, & c___ref(1, js), ldc); i__3 = is - 1; sgemm_("N", "N", &i__3, &nb, &mb, &c_b53, &d___ref(1, is), ldd, &c___ref(is, js), ldc, &c_b54, & f_ref(1, js), ldf); } if (j < q) { i__3 = *n - je; sgemm_("N", "N", &mb, &i__3, &nb, &c_b54, &f_ref(is, js), ldf, &b_ref(js, je + 1), ldb, &c_b54, & c___ref(is, je + 1), ldc); i__3 = *n - je; sgemm_("N", "N", &mb, &i__3, &nb, &c_b54, &f_ref(is, js), ldf, &e_ref(js, je + 1), lde, &c_b54, & f_ref(is, je + 1), ldf); } /* L120: */ } /* L130: */ } if (dscale != 0.f) { if (*ijob == 1 || *ijob == 3) { *dif = sqrt((real) ((*m << 1) * *n)) / (dscale * sqrt( dsum)); } else { *dif = sqrt((real) pq) / (dscale * sqrt(dsum)); } } if (isolve == 2 && iround == 1) { ifunc = *ijob; scale2 = *scale; slacpy_("F", m, n, &c__[c_offset], ldc, &work[1], m); slacpy_("F", m, n, &f[f_offset], ldf, &work[*m * *n + 1], m); i__2 = *n; for (j = 1; j <= i__2; ++j) { scopy_(m, &c_b14, &c__0, &c___ref(1, j), &c__1); scopy_(m, &c_b14, &c__0, &f_ref(1, j), &c__1); /* L140: */ } } else if (isolve == 2 && iround == 2) { slacpy_("F", m, n, &work[1], m, &c__[c_offset], ldc); slacpy_("F", m, n, &work[*m * *n + 1], m, &f[f_offset], ldf); *scale = scale2; } /* L150: */ } } else { /* Solve transposed (I, J)-subsystem A(I, I)' * R(I, J) + D(I, I)' * L(I, J) = C(I, J) R(I, J) * B(J, J)' + L(I, J) * E(J, J)' = -F(I, J) for I = 1,2,..., P; J = Q, Q-1,..., 1 */ *scale = 1.f; i__1 = p; for (i__ = 1; i__ <= i__1; ++i__) { is = iwork[i__]; ie = iwork[i__ + 1] - 1; mb = ie - is + 1; i__2 = p + 2; for (j = q; j >= i__2; --j) { js = iwork[j]; je = iwork[j + 1] - 1; nb = je - js + 1; stgsy2_(trans, &ifunc, &mb, &nb, &a_ref(is, is), lda, &b_ref( js, js), ldb, &c___ref(is, js), ldc, &d___ref(is, is), ldd, &e_ref(js, js), lde, &f_ref(is, js), ldf, & scaloc, &dsum, &dscale, &iwork[q + 2], &ppqq, &linfo); if (linfo > 0) { *info = linfo; } if (scaloc != 1.f) { i__3 = 
js - 1; for (k = 1; k <= i__3; ++k) { sscal_(m, &scaloc, &c___ref(1, k), &c__1); sscal_(m, &scaloc, &f_ref(1, k), &c__1); /* L160: */ } i__3 = je; for (k = js; k <= i__3; ++k) { i__4 = is - 1; sscal_(&i__4, &scaloc, &c___ref(1, k), &c__1); i__4 = is - 1; sscal_(&i__4, &scaloc, &f_ref(1, k), &c__1); /* L170: */ } i__3 = je; for (k = js; k <= i__3; ++k) { i__4 = *m - ie; sscal_(&i__4, &scaloc, &c___ref(ie + 1, k), &c__1); i__4 = *m - ie; sscal_(&i__4, &scaloc, &f_ref(ie + 1, k), &c__1); /* L180: */ } i__3 = *n; for (k = je + 1; k <= i__3; ++k) { sscal_(m, &scaloc, &c___ref(1, k), &c__1); sscal_(m, &scaloc, &f_ref(1, k), &c__1); /* L190: */ } *scale *= scaloc; } /* Substitute R(I, J) and L(I, J) into remaining equation. */ if (j > p + 2) { i__3 = js - 1; sgemm_("N", "T", &mb, &i__3, &nb, &c_b54, &c___ref(is, js) , ldc, &b_ref(1, js), ldb, &c_b54, &f_ref(is, 1), ldf); i__3 = js - 1; sgemm_("N", "T", &mb, &i__3, &nb, &c_b54, &f_ref(is, js), ldf, &e_ref(1, js), lde, &c_b54, &f_ref(is, 1), ldf); } if (i__ < p) { i__3 = *m - ie; sgemm_("T", "N", &i__3, &nb, &mb, &c_b53, &a_ref(is, ie + 1), lda, &c___ref(is, js), ldc, &c_b54, &c___ref( ie + 1, js), ldc); i__3 = *m - ie; sgemm_("T", "N", &i__3, &nb, &mb, &c_b53, &d___ref(is, ie + 1), ldd, &f_ref(is, js), ldf, &c_b54, &c___ref( ie + 1, js), ldc); } /* L200: */ } /* L210: */ } } work[1] = (real) lwmin; return 0; /* End of STGSYL */ } /* stgsyl_ */ #undef f_ref #undef e_ref #undef d___ref #undef c___ref #undef b_ref #undef a_ref
maxywb/vsipl
sourceryvsipl++-x86-3.1/src/vsipl++/vendor/clapack/SRC/stgsyl.c
C
gpl-2.0
20,190
<?php /** * Zend Framework * * LICENSE * * This source file is subject to the new BSD license that is bundled * with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://framework.zend.com/license/new-bsd * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@zend.com so we can send you a copy immediately. * * @category Zend * @package Zend_Tool * @subpackage Framework * @copyright Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License * @version $Id$ */ /** * @see Zend_Tool_Framework_Metadata_Interface */ #require_once 'Zend/Tool/Framework/Metadata/Interface.php'; /** * @see Zend_Tool_Framework_Metadata_Attributable */ #require_once 'Zend/Tool/Framework/Metadata/Attributable.php'; /** * @category Zend * @package Zend_Tool * @copyright Copyright (c) 2005-2014 Zend Technologies USA Inc. (http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License */ class Zend_Tool_Framework_Metadata_Dynamic implements Zend_Tool_Framework_Metadata_Interface, Zend_Tool_Framework_Metadata_Attributable { /** * @var string */ protected $_type = 'Dynamic'; /** * @var string */ protected $_name = null; /** * @var string */ protected $_value = null; /** * @var array */ protected $_dynamicAttributes = array(); public function __construct($options = array()) { if ($options) { $this->setOptions($options); } } public function setOptions(Array $options = array()) { foreach ($options as $optName => $optValue) { $methodName = 'set' . 
$optName; $this->{$methodName}($optValue); } } /** * setType() * * @param string $type * @return Zend_Tool_Framework_Metadata_Dynamic */ public function setType($type) { $this->_type = $type; return $this; } /** * getType() * * The type of metadata this describes * * @return string */ public function getType() { return $this->_type; } /** * setName() * * @param string $name * @return Zend_Tool_Framework_Metadata_Dynamic */ public function setName($name) { $this->_name = $name; return $this; } /** * getName() * * Metadata name * * @return string */ public function getName() { return $this->_name; } /** * setValue() * * @param mixed $value * @return Zend_Tool_Framework_Metadata_Dynamic */ public function setValue($value) { $this->_value = $value; return $this; } /** * getValue() * * Metadata Value * * @return string */ public function getValue() { return $this->_value; } public function getAttributes() { return $this->_dynamicAttributes; } /** * __isset() * * Check if an attrbute is set * * @param string $name * @return bool */ public function __isset($name) { return isset($this->_dynamicAttributes[$name]); } /** * __unset() * * @param string $name * @return null */ public function __unset($name) { unset($this->_dynamicAttributes[$name]); return; } /** * __get() - Get a property via property call $metadata->foo * * @param string $name * @return mixed */ public function __get($name) { if (method_exists($this, 'get' . $name)) { return $this->{'get' . $name}(); } elseif (array_key_exists($name, $this->_dynamicAttributes)) { return $this->_dynamicAttributes[$name]; } else { #require_once 'Zend/Tool/Framework/Registry/Exception.php'; throw new Zend_Tool_Framework_Registry_Exception('Property ' . $name . ' was not located in this metadata.'); } } /** * __set() - Set a property via the magic set $metadata->foo = 'foo' * * @param string $name * @param mixed $value */ public function __set($name, $value) { if (method_exists($this, 'set' . $name)) { $this->{'set' . 
$name}($value); return $this; } else { $this->_dynamicAttributes[$name] = $value; return $this; } // { // #require_once 'Zend/Tool/Framework/Registry/Exception.php'; // throw new Zend_Tool_Framework_Registry_Exception('Property ' . $name . ' was not located in this registry.'); // } } }
T0MM0R/magento
web/lib/Zend/Tool/Framework/Metadata/Dynamic.php
PHP
gpl-2.0
4,945
'use strict'; /*! * Module dependencies. */ var Schema = require('./schema') , SchemaType = require('./schematype') , VirtualType = require('./virtualtype') , SchemaDefaults = require('./schemadefault') , STATES = require('./connectionstate') , Types = require('./types') , Query = require('./query') , Promise = require('./promise') , Model = require('./model') , Document = require('./document') , utils = require('./utils') , format = utils.toCollectionName , mongodb = require('mongodb') , pkg = require('../package.json') /*! * Warn users if they are running an unstable release. * * Disable the warning by setting the MONGOOSE_DISABLE_STABILITY_WARNING * environment variable. */ if (pkg.publishConfig && 'unstable' == pkg.publishConfig.tag) { if (!process.env.MONGOOSE_DISABLE_STABILITY_WARNING) { console.log('\u001b[33m'); console.log('##############################################################'); console.log('#'); console.log('# !!! MONGOOSE WARNING !!!'); console.log('#'); console.log('# This is an UNSTABLE release of Mongoose.'); console.log('# Unstable releases are available for preview/testing only.'); console.log('# DO NOT run this in production.'); console.log('#'); console.log('##############################################################'); console.log('\u001b[0m'); } } /** * Mongoose constructor. * * The exports object of the `mongoose` module is an instance of this class. * Most apps will only use this one instance. 
* * @api public */ function Mongoose () { this.connections = []; this.plugins = []; this.models = {}; this.modelSchemas = {}; // default global options this.options = { pluralization: true }; var conn = this.createConnection(); // default connection conn.models = this.models; }; /** * Expose connection states for user-land * */ Mongoose.prototype.STATES = STATES; /** * Sets mongoose options * * ####Example: * * mongoose.set('test', value) // sets the 'test' option to `value` * * mongoose.set('debug', true) // enable logging collection methods + arguments to the console * * @param {String} key * @param {String} value * @api public */ Mongoose.prototype.set = function (key, value) { if (arguments.length == 1) { return this.options[key]; } this.options[key] = value; return this; }; /** * Gets mongoose options * * ####Example: * * mongoose.get('test') // returns the 'test' value * * @param {String} key * @method get * @api public */ Mongoose.prototype.get = Mongoose.prototype.set; /*! * ReplSet connection string check. */ var rgxReplSet = /^.+,.+$/; /** * Creates a Connection instance. * * Each `connection` instance maps to a single database. This method is helpful when mangaging multiple db connections. * * If arguments are passed, they are proxied to either [Connection#open](#connection_Connection-open) or [Connection#openSet](#connection_Connection-openSet) appropriately. This means we can pass `db`, `server`, and `replset` options to the driver. _Note that the `safe` option specified in your schema will overwrite the `safe` db option specified here unless you set your schemas `safe` option to `undefined`. 
See [this](/docs/guide.html#safe) for more information._ * * _Options passed take precedence over options included in connection strings._ * * ####Example: * * // with mongodb:// URI * db = mongoose.createConnection('mongodb://user:pass@localhost:port/database'); * * // and options * var opts = { db: { native_parser: true }} * db = mongoose.createConnection('mongodb://user:pass@localhost:port/database', opts); * * // replica sets * db = mongoose.createConnection('mongodb://user:pass@localhost:port/database,mongodb://anotherhost:port,mongodb://yetanother:port'); * * // and options * var opts = { replset: { strategy: 'ping', rs_name: 'testSet' }} * db = mongoose.createConnection('mongodb://user:pass@localhost:port/database,mongodb://anotherhost:port,mongodb://yetanother:port', opts); * * // with [host, database_name[, port] signature * db = mongoose.createConnection('localhost', 'database', port) * * // and options * var opts = { server: { auto_reconnect: false }, user: 'username', pass: 'mypassword' } * db = mongoose.createConnection('localhost', 'database', port, opts) * * // initialize now, connect later * db = mongoose.createConnection(); * db.open('localhost', 'database', port, [opts]); * * @param {String} [uri] a mongodb:// URI * @param {Object} [options] options to pass to the driver * @see Connection#open #connection_Connection-open * @see Connection#openSet #connection_Connection-openSet * @return {Connection} the created Connection object * @api public */ Mongoose.prototype.createConnection = function () { var conn = new Connection(this); this.connections.push(conn); if (arguments.length) { if (rgxReplSet.test(arguments[0])) { conn.openSet.apply(conn, arguments); } else { conn.open.apply(conn, arguments); } } return conn; }; /** * Opens the default mongoose connection. * * If arguments are passed, they are proxied to either [Connection#open](#connection_Connection-open) or [Connection#openSet](#connection_Connection-openSet) appropriately. 
* * _Options passed take precedence over options included in connection strings._ * * ####Example: * * mongoose.connect('mongodb://user:pass@localhost:port/database'); * * // replica sets * var uri = 'mongodb://user:pass@localhost:port/database,mongodb://anotherhost:port,mongodb://yetanother:port'; * mongoose.connect(uri); * * // with options * mongoose.connect(uri, options); * * // connecting to multiple mongos * var uri = 'mongodb://hostA:27501,hostB:27501'; * var opts = { mongos: true }; * mongoose.connect(uri, opts); * * @param {String} uri(s) * @param {Object} [options] * @param {Function} [callback] * @see Mongoose#createConnection #index_Mongoose-createConnection * @api public * @return {Mongoose} this */ Mongoose.prototype.connect = function () { var conn = this.connection; if (rgxReplSet.test(arguments[0])) { conn.openSet.apply(conn, arguments); } else { conn.open.apply(conn, arguments); } return this; }; /** * Disconnects all connections. * * @param {Function} [fn] called after all connection close. * @return {Mongoose} this * @api public */ Mongoose.prototype.disconnect = function (fn) { var count = this.connections.length , error this.connections.forEach(function(conn){ conn.close(function(err){ if (error) return; if (err) { error = err; if (fn) return fn(err); throw err; } if (fn) --count || fn(); }); }); return this; }; /** * Defines a model or retrieves it. * * Models defined on the `mongoose` instance are available to all connection created by the same `mongoose` instance. 
* * ####Example: * * var mongoose = require('mongoose'); * * // define an Actor model with this mongoose instance * mongoose.model('Actor', new Schema({ name: String })); * * // create a new connection * var conn = mongoose.createConnection(..); * * // retrieve the Actor model * var Actor = conn.model('Actor'); * * _When no `collection` argument is passed, Mongoose produces a collection name by passing the model `name` to the [utils.toCollectionName](#utils_exports.toCollectionName) method. This method pluralizes the name. If you don't like this behavior, either pass a collection name or set your schemas collection name option._ * * ####Example: * * var schema = new Schema({ name: String }, { collection: 'actor' }); * * // or * * schema.set('collection', 'actor'); * * // or * * var collectionName = 'actor' * var M = mongoose.model('Actor', schema, collectionName) * * @param {String} name model name * @param {Schema} [schema] * @param {String} [collection] name (optional, induced from model name) * @param {Boolean} [skipInit] whether to skip initialization (defaults to false) * @api public */ Mongoose.prototype.model = function (name, schema, collection, skipInit) { if ('string' == typeof schema) { collection = schema; schema = false; } if (utils.isObject(schema) && !(schema instanceof Schema)) { schema = new Schema(schema); } if ('boolean' === typeof collection) { skipInit = collection; collection = null; } // handle internal options from connection.model() var options; if (skipInit && utils.isObject(skipInit)) { options = skipInit; skipInit = true; } else { options = {}; } // look up schema for the collection. this might be a // default schema like system.indexes stored in SchemaDefaults. 
if (!this.modelSchemas[name]) { if (!schema && name in SchemaDefaults) { schema = SchemaDefaults[name]; } if (schema) { // cache it so we only apply plugins once this.modelSchemas[name] = schema; this._applyPlugins(schema); } else { throw new mongoose.Error.MissingSchemaError(name); } } var model; var sub; // connection.model() may be passing a different schema for // an existing model name. in this case don't read from cache. if (this.models[name] && false !== options.cache) { if (schema instanceof Schema && schema != this.models[name].schema) { throw new mongoose.Error.OverwriteModelError(name); } if (collection) { // subclass current model with alternate collection model = this.models[name]; schema = model.prototype.schema; sub = model.__subclass(this.connection, schema, collection); // do not cache the sub model return sub; } return this.models[name]; } // ensure a schema exists if (!schema) { schema = this.modelSchemas[name]; if (!schema) { throw new mongoose.Error.MissingSchemaError(name); } } // Apply relevant "global" options to the schema if (!('pluralization' in schema.options)) schema.options.pluralization = this.options.pluralization; if (!collection) { collection = schema.get('collection') || format(name, schema.options); } var connection = options.connection || this.connection; model = Model.compile(name, schema, collection, connection, this); if (!skipInit) { model.init(); } if (false === options.cache) { return model; } return this.models[name] = model; } /** * Returns an array of model names created on this instance of Mongoose. * * ####Note: * * _Does not include names of models created using `connection.model()`._ * * @api public * @return {Array} */ Mongoose.prototype.modelNames = function () { var names = Object.keys(this.models); return names; } /** * Applies global plugins to `schema`. 
* * @param {Schema} schema * @api private */ Mongoose.prototype._applyPlugins = function (schema) { for (var i = 0, l = this.plugins.length; i < l; i++) { schema.plugin(this.plugins[i][0], this.plugins[i][1]); } } /** * Declares a global plugin executed on all Schemas. * * Equivalent to calling `.plugin(fn)` on each Schema you create. * * @param {Function} fn plugin callback * @param {Object} [opts] optional options * @return {Mongoose} this * @see plugins ./plugins.html * @api public */ Mongoose.prototype.plugin = function (fn, opts) { this.plugins.push([fn, opts]); return this; }; /** * The default connection of the mongoose module. * * ####Example: * * var mongoose = require('mongoose'); * mongoose.connect(...); * mongoose.connection.on('error', cb); * * This is the connection used by default for every model created using [mongoose.model](#index_Mongoose-model). * * @property connection * @return {Connection} * @api public */ Mongoose.prototype.__defineGetter__('connection', function(){ return this.connections[0]; }); /*! * Driver depentend APIs */ var driver = global.MONGOOSE_DRIVER_PATH || './drivers/node-mongodb-native'; /*! * Connection */ var Connection = require(driver + '/connection'); /*! * Collection */ var Collection = require(driver + '/collection'); /** * The Mongoose Collection constructor * * @method Collection * @api public */ Mongoose.prototype.Collection = Collection; /** * The Mongoose [Connection](#connection_Connection) constructor * * @method Connection * @api public */ Mongoose.prototype.Connection = Connection; /** * The Mongoose version * * @property version * @api public */ Mongoose.prototype.version = pkg.version; /** * The Mongoose constructor * * The exports of the mongoose module is an instance of this class. 
* * ####Example: * * var mongoose = require('mongoose'); * var mongoose2 = new mongoose.Mongoose(); * * @method Mongoose * @api public */ Mongoose.prototype.Mongoose = Mongoose; /** * The Mongoose [Schema](#schema_Schema) constructor * * ####Example: * * var mongoose = require('mongoose'); * var Schema = mongoose.Schema; * var CatSchema = new Schema(..); * * @method Schema * @api public */ Mongoose.prototype.Schema = Schema; /** * The Mongoose [SchemaType](#schematype_SchemaType) constructor * * @method SchemaType * @api public */ Mongoose.prototype.SchemaType = SchemaType; /** * The various Mongoose SchemaTypes. * * ####Note: * * _Alias of mongoose.Schema.Types for backwards compatibility._ * * @property SchemaTypes * @see Schema.SchemaTypes #schema_Schema.Types * @api public */ Mongoose.prototype.SchemaTypes = Schema.Types; /** * The Mongoose [VirtualType](#virtualtype_VirtualType) constructor * * @method VirtualType * @api public */ Mongoose.prototype.VirtualType = VirtualType; /** * The various Mongoose Types. * * ####Example: * * var mongoose = require('mongoose'); * var array = mongoose.Types.Array; * * ####Types: * * - [ObjectId](#types-objectid-js) * - [Buffer](#types-buffer-js) * - [SubDocument](#types-embedded-js) * - [Array](#types-array-js) * - [DocumentArray](#types-documentarray-js) * * Using this exposed access to the `ObjectId` type, we can construct ids on demand. * * var ObjectId = mongoose.Types.ObjectId; * var id1 = new ObjectId; * * @property Types * @api public */ Mongoose.prototype.Types = Types; /** * The Mongoose [Query](#query_Query) constructor. * * @method Query * @api public */ Mongoose.prototype.Query = Query; /** * The Mongoose [Promise](#promise_Promise) constructor. * * @method Promise * @api public */ Mongoose.prototype.Promise = Promise; /** * The Mongoose [Model](#model_Model) constructor. * * @method Model * @api public */ Mongoose.prototype.Model = Model; /** * The Mongoose [Document](#document-js) constructor. 
* * @method Document * @api public */ Mongoose.prototype.Document = Document; /** * The [MongooseError](#error_MongooseError) constructor. * * @method Error * @api public */ Mongoose.prototype.Error = require('./error'); /** * The [node-mongodb-native](https://github.com/mongodb/node-mongodb-native) driver Mongoose uses. * * @property mongo * @api public */ Mongoose.prototype.mongo = require('mongodb'); /** * The [mquery](https://github.com/aheckmann/mquery) query builder Mongoose uses. * * @property mquery * @api public */ Mongoose.prototype.mquery = require('mquery'); /*! * The exports object is an instance of Mongoose. * * @api public */ var mongoose = module.exports = exports = new Mongoose;
TeskeVirtualSystem/alivecontrol
Server/node_modules/mongoose/lib/index.js
JavaScript
gpl-2.0
15,744
<?php use tests\codeception\_pages\ContactPage; /* @var $scenario Codeception\Scenario */ $I = new AcceptanceTester($scenario); $I->wantTo('ensure that contact works'); $contactPage = ContactPage::openBy($I); $I->see('Contact', 'h1'); $I->amGoingTo('submit contact form with no data'); $contactPage->submit([]); if (method_exists($I, 'wait')) { $I->wait(3); // only for selenium } $I->expectTo('see validations errors'); $I->see('Contact', 'h1'); $I->see('Name cannot be blank'); $I->see('Email cannot be blank'); $I->see('Subject cannot be blank'); $I->see('Body cannot be blank'); $I->see('The verification code is incorrect'); $I->amGoingTo('submit contact form with not correct email'); $contactPage->submit([ 'name' => 'tester', 'email' => 'tester.email', 'subject' => 'test subject', 'body' => 'test content', 'verifyCode' => 'testme', ]); if (method_exists($I, 'wait')) { $I->wait(3); // only for selenium } $I->expectTo('see that email address is wrong'); $I->dontSee('Name cannot be blank', '.help-inline'); $I->see('Email is not a valid email address.'); $I->dontSee('Subject cannot be blank', '.help-inline'); $I->dontSee('Body cannot be blank', '.help-inline'); $I->dontSee('The verification code is incorrect', '.help-inline'); $I->amGoingTo('submit contact form with correct data'); $contactPage->submit([ 'name' => 'tester', 'email' => 'tester@example.com', 'subject' => 'test subject', 'body' => 'test content', 'verifyCode' => 'testme', ]); if (method_exists($I, 'wait')) { $I->wait(3); // only for selenium } $I->dontSeeElement('#contact-form'); $I->see('Thank you for contacting us. We will respond to you as soon as possible.');
wangqw920/startPHP
basic/tests/codeception/acceptance/ContactCept.php
PHP
gpl-2.0
1,709
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Copyright (c) by Takashi Iwai <tiwai@suse.de> * * EMU10K1 memory page allocation (PTB area) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/pci.h> #include <linux/gfp.h> #include <linux/time.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/emu10k1.h> /* page arguments of these two macros are Emu page (4096 bytes), not like * aligned pages in others */ #define __set_ptb_entry(emu,page,addr) \ (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page))) #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE) #define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES) /* get aligned page from offset address */ #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT) /* get offset address from aligned page */ #define aligned_page_offset(page) ((page) << PAGE_SHIFT) #if PAGE_SIZE == 4096 /* page size == EMUPAGESIZE */ /* fill PTB entrie(s) corresponding to page with addr */ #define set_ptb_entry(emu,page,addr) __set_ptb_entry(emu,page,addr) /* fill PTB entrie(s) corresponding to page with silence pointer */ #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr) #else /* fill PTB entries -- we need to fill UNIT_PAGES entries */ static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr) { 
int i; page *= UNIT_PAGES; for (i = 0; i < UNIT_PAGES; i++, page++) { __set_ptb_entry(emu, page, addr); addr += EMUPAGESIZE; } } static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page) { int i; page *= UNIT_PAGES; for (i = 0; i < UNIT_PAGES; i++, page++) /* do not increment ptr */ __set_ptb_entry(emu, page, emu->silent_page.addr); } #endif /* PAGE_SIZE */ /* */ static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk); static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk); #define get_emu10k1_memblk(l,member) list_entry(l, struct snd_emu10k1_memblk, member) /* initialize emu10k1 part */ static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk) { blk->mapped_page = -1; INIT_LIST_HEAD(&blk->mapped_link); INIT_LIST_HEAD(&blk->mapped_order_link); blk->map_locked = 0; blk->first_page = get_aligned_page(blk->mem.offset); blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1); blk->pages = blk->last_page - blk->first_page + 1; } /* * search empty region on PTB with the given size * * if an empty region is found, return the page and store the next mapped block * in nextp * if not found, return a negative error code. 
*/ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp) { int page = 0, found_page = -ENOMEM; int max_size = npages; int size; struct list_head *candidate = &emu->mapped_link_head; struct list_head *pos; list_for_each (pos, &emu->mapped_link_head) { struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link); if (blk->mapped_page < 0) continue; size = blk->mapped_page - page; if (size == npages) { *nextp = pos; return page; } else if (size > max_size) { /* we look for the maximum empty hole */ max_size = size; candidate = pos; found_page = page; } page = blk->mapped_page + blk->pages; } size = MAX_ALIGN_PAGES - page; if (size >= max_size) { *nextp = pos; return page; } *nextp = candidate; return found_page; } /* * map a memory block onto emu10k1's PTB * * call with memblk_lock held */ static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) { int page, pg; struct list_head *next; page = search_empty_map_area(emu, blk->pages, &next); if (page < 0) /* not found */ return page; /* insert this block in the proper position of mapped list */ list_add_tail(&blk->mapped_link, next); /* append this as a newest block in order list */ list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head); blk->mapped_page = page; /* fill PTB */ for (pg = blk->first_page; pg <= blk->last_page; pg++) { set_ptb_entry(emu, page, emu->page_addr_table[pg]); page++; } return 0; } /* * unmap the block * return the size of resultant empty pages * * call with memblk_lock held */ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) { int start_page, end_page, mpage, pg; struct list_head *p; struct snd_emu10k1_memblk *q; /* calculate the expected size of empty region */ if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) { q = get_emu10k1_memblk(p, mapped_link); start_page = q->mapped_page + q->pages; } else start_page = 0; if ((p = blk->mapped_link.next) != &emu->mapped_link_head) 
{ q = get_emu10k1_memblk(p, mapped_link); end_page = q->mapped_page; } else end_page = MAX_ALIGN_PAGES; /* remove links */ list_del(&blk->mapped_link); list_del(&blk->mapped_order_link); /* clear PTB */ mpage = blk->mapped_page; for (pg = blk->first_page; pg <= blk->last_page; pg++) { set_silent_ptb(emu, mpage); mpage++; } blk->mapped_page = -1; return end_page - start_page; /* return the new empty size */ } /* * search empty pages with the given size, and create a memory block * * unlike synth_alloc the memory block is aligned to the page start */ static struct snd_emu10k1_memblk * search_empty(struct snd_emu10k1 *emu, int size) { struct list_head *p; struct snd_emu10k1_memblk *blk; int page, psize; psize = get_aligned_page(size + PAGE_SIZE -1); page = 0; list_for_each(p, &emu->memhdr->block) { blk = get_emu10k1_memblk(p, mem.list); if (page + psize <= blk->first_page) goto __found_pages; page = blk->last_page + 1; } if (page + psize > emu->max_cache_pages) return NULL; __found_pages: /* create a new memory block */ blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev); if (blk == NULL) return NULL; blk->mem.offset = aligned_page_offset(page); /* set aligned offset */ emu10k1_memblk_init(blk); return blk; } /* * check if the given pointer is valid for pages */ static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr) { if (addr & ~emu->dma_mask) { snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr); return 0; } if (addr & (EMUPAGESIZE-1)) { snd_printk(KERN_ERR "page is not aligned\n"); return 0; } return 1; } /* * map the given memory block on PTB. * if the block is already mapped, update the link order. * if no empty pages are found, tries to release unsed memory blocks * and retry the mapping. 
*/ int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) { int err; int size; struct list_head *p, *nextp; struct snd_emu10k1_memblk *deleted; unsigned long flags; spin_lock_irqsave(&emu->memblk_lock, flags); if (blk->mapped_page >= 0) { /* update order link */ list_del(&blk->mapped_order_link); list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head); spin_unlock_irqrestore(&emu->memblk_lock, flags); return 0; } if ((err = map_memblk(emu, blk)) < 0) { /* no enough page - try to unmap some blocks */ /* starting from the oldest block */ p = emu->mapped_order_link_head.next; for (; p != &emu->mapped_order_link_head; p = nextp) { nextp = p->next; deleted = get_emu10k1_memblk(p, mapped_order_link); if (deleted->map_locked) continue; size = unmap_memblk(emu, deleted); if (size >= blk->pages) { /* ok the empty region is enough large */ err = map_memblk(emu, blk); break; } } } spin_unlock_irqrestore(&emu->memblk_lock, flags); return err; } EXPORT_SYMBOL(snd_emu10k1_memblk_map); /* * page allocation for DMA */ struct snd_util_memblk * snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_util_memhdr *hdr; struct snd_emu10k1_memblk *blk; int page, err, idx; if (snd_BUG_ON(!emu)) return NULL; if (snd_BUG_ON(runtime->dma_bytes <= 0 || runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE)) return NULL; hdr = emu->memhdr; if (snd_BUG_ON(!hdr)) return NULL; idx = runtime->period_size >= runtime->buffer_size ? 
(emu->delay_pcm_irq * 2) : 0; mutex_lock(&hdr->block_mutex); blk = search_empty(emu, runtime->dma_bytes + idx); if (blk == NULL) { mutex_unlock(&hdr->block_mutex); return NULL; } /* fill buffer addresses but pointers are not stored so that * snd_free_pci_page() is not called in in synth_free() */ idx = 0; for (page = blk->first_page; page <= blk->last_page; page++, idx++) { unsigned long ofs = idx << PAGE_SHIFT; dma_addr_t addr; addr = snd_pcm_sgbuf_get_addr(substream, ofs); if (! is_valid_page(emu, addr)) { printk(KERN_ERR "emu: failure page = %d\n", idx); mutex_unlock(&hdr->block_mutex); return NULL; } emu->page_addr_table[page] = addr; emu->page_ptr_table[page] = NULL; } /* set PTB entries */ blk->map_locked = 1; /* do not unmap this block! */ err = snd_emu10k1_memblk_map(emu, blk); if (err < 0) { __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk); mutex_unlock(&hdr->block_mutex); return NULL; } mutex_unlock(&hdr->block_mutex); return (struct snd_util_memblk *)blk; } /* * release DMA buffer from page table */ int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk) { if (snd_BUG_ON(!emu || !blk)) return -EINVAL; return snd_emu10k1_synth_free(emu, blk); } /* * memory allocation using multiple pages (for synth) * Unlike the DMA allocation above, non-contiguous pages are assined. 
*/ /* * allocate a synth sample area */ struct snd_util_memblk * snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size) { struct snd_emu10k1_memblk *blk; struct snd_util_memhdr *hdr = hw->memhdr; mutex_lock(&hdr->block_mutex); blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size); if (blk == NULL) { mutex_unlock(&hdr->block_mutex); return NULL; } if (synth_alloc_pages(hw, blk)) { __snd_util_mem_free(hdr, (struct snd_util_memblk *)blk); mutex_unlock(&hdr->block_mutex); return NULL; } snd_emu10k1_memblk_map(hw, blk); mutex_unlock(&hdr->block_mutex); return (struct snd_util_memblk *)blk; } EXPORT_SYMBOL(snd_emu10k1_synth_alloc); /* * free a synth sample area */ int snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk) { struct snd_util_memhdr *hdr = emu->memhdr; struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk; unsigned long flags; mutex_lock(&hdr->block_mutex); spin_lock_irqsave(&emu->memblk_lock, flags); if (blk->mapped_page >= 0) unmap_memblk(emu, blk); spin_unlock_irqrestore(&emu->memblk_lock, flags); synth_free_pages(emu, blk); __snd_util_mem_free(hdr, memblk); mutex_unlock(&hdr->block_mutex); return 0; } EXPORT_SYMBOL(snd_emu10k1_synth_free); /* check new allocation range */ static void get_single_page_range(struct snd_util_memhdr *hdr, struct snd_emu10k1_memblk *blk, int *first_page_ret, int *last_page_ret) { struct list_head *p; struct snd_emu10k1_memblk *q; int first_page, last_page; first_page = blk->first_page; if ((p = blk->mem.list.prev) != &hdr->block) { q = get_emu10k1_memblk(p, mem.list); if (q->last_page == first_page) first_page++; /* first page was already allocated */ } last_page = blk->last_page; if ((p = blk->mem.list.next) != &hdr->block) { q = get_emu10k1_memblk(p, mem.list); if (q->first_page == last_page) last_page--; /* last page was already allocated */ } *first_page_ret = first_page; *last_page_ret = last_page; } /* release allocated pages */ static void 
__synth_free_pages(struct snd_emu10k1 *emu, int first_page, int last_page) { int page; for (page = first_page; page <= last_page; page++) { free_page((unsigned long)emu->page_ptr_table[page]); emu->page_addr_table[page] = 0; emu->page_ptr_table[page] = NULL; } } /* * allocate kernel pages */ static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) { int page, first_page, last_page; emu10k1_memblk_init(blk); get_single_page_range(emu->memhdr, blk, &first_page, &last_page); /* allocate kernel pages */ for (page = first_page; page <= last_page; page++) { /* first try to allocate from <4GB zone */ struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_NOWARN); if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT))) { if (p) __free_page(p); /* try to allocate from <16MB zone */ p = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_NORETRY | /* no OOM-killer */ __GFP_NOWARN); } if (!p) { __synth_free_pages(emu, first_page, page - 1); return -ENOMEM; } emu->page_addr_table[page] = page_to_phys(p); emu->page_ptr_table[page] = page_address(p); } return 0; } /* * free pages */ static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) { int first_page, last_page; get_single_page_range(emu->memhdr, blk, &first_page, &last_page); __synth_free_pages(emu, first_page, last_page); return 0; } /* calculate buffer pointer from offset address */ static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset) { char *ptr; if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages)) return NULL; ptr = emu->page_ptr_table[page]; if (! 
ptr) { printk(KERN_ERR "emu10k1: access to NULL ptr: page = %d\n", page); return NULL; } ptr += offset & (PAGE_SIZE - 1); return (void*)ptr; } /* * bzero(blk + offset, size) */ int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, int size) { int page, nextofs, end_offset, temp, temp1; void *ptr; struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk; offset += blk->offset & (PAGE_SIZE - 1); end_offset = offset + size; page = get_aligned_page(offset); do { nextofs = aligned_page_offset(page + 1); temp = nextofs - offset; temp1 = end_offset - offset; if (temp1 < temp) temp = temp1; ptr = offset_ptr(emu, page + p->first_page, offset); if (ptr) memset(ptr, 0, temp); offset = nextofs; page++; } while (offset < end_offset); return 0; } EXPORT_SYMBOL(snd_emu10k1_synth_bzero); /* * copy_from_user(blk + offset, data, size) */ int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, const char __user *data, int size) { int page, nextofs, end_offset, temp, temp1; void *ptr; struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk; offset += blk->offset & (PAGE_SIZE - 1); end_offset = offset + size; page = get_aligned_page(offset); do { nextofs = aligned_page_offset(page + 1); temp = nextofs - offset; temp1 = end_offset - offset; if (temp1 < temp) temp = temp1; ptr = offset_ptr(emu, page + p->first_page, offset); if (ptr && copy_from_user(ptr, data, temp)) return -EFAULT; offset = nextofs; data += temp; page++; } while (offset < end_offset); return 0; } EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);
CunningLogic/asteroid_smart_kernel
sound/pci/emu10k1/memory.c
C
gpl-2.0
15,870
/******************************************************************************* * This file contains the iSCSI Target specific utility functions. * * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. * * Licensed to the Linux Foundation under the General Public License (GPL) version 2. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ******************************************************************************/ #include <linux/list.h> #include <scsi/scsi_tcq.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_datain_values.h" #include "iscsi_target_erl0.h" #include "iscsi_target_erl1.h" #include "iscsi_target_erl2.h" #include "iscsi_target_tpg.h" #include "iscsi_target_tq.h" #include "iscsi_target_util.h" #include "iscsi_target.h" #define PRINT_BUFF(buff, len) \ { \ int zzz; \ \ pr_debug("%d:\n", __LINE__); \ for (zzz = 0; zzz < len; zzz++) { \ if (zzz % 16 == 0) { \ if (zzz) \ pr_debug("\n"); \ pr_debug("%4i: ", zzz); \ } \ pr_debug("%02x ", (unsigned char) (buff)[zzz]); \ } \ if ((len + 1) % 16) \ pr_debug("\n"); \ } extern struct list_head g_tiqn_list; extern spinlock_t tiqn_lock; /* * Called with cmd->r2t_lock held. 
*/ int iscsit_add_r2t_to_list( struct iscsi_cmd *cmd, u32 offset, u32 xfer_len, int recovery, u32 r2t_sn) { struct iscsi_r2t *r2t; r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC); if (!r2t) { pr_err("Unable to allocate memory for struct iscsi_r2t.\n"); return -1; } INIT_LIST_HEAD(&r2t->r2t_list); r2t->recovery_r2t = recovery; r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn; r2t->offset = offset; r2t->xfer_len = xfer_len; list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list); spin_unlock_bh(&cmd->r2t_lock); iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T); spin_lock_bh(&cmd->r2t_lock); return 0; } struct iscsi_r2t *iscsit_get_r2t_for_eos( struct iscsi_cmd *cmd, u32 offset, u32 length) { struct iscsi_r2t *r2t; spin_lock_bh(&cmd->r2t_lock); list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { if ((r2t->offset <= offset) && (r2t->offset + r2t->xfer_len) >= (offset + length)) { spin_unlock_bh(&cmd->r2t_lock); return r2t; } } spin_unlock_bh(&cmd->r2t_lock); pr_err("Unable to locate R2T for Offset: %u, Length:" " %u\n", offset, length); return NULL; } struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd) { struct iscsi_r2t *r2t; spin_lock_bh(&cmd->r2t_lock); list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { if (!r2t->sent_r2t) { spin_unlock_bh(&cmd->r2t_lock); return r2t; } } spin_unlock_bh(&cmd->r2t_lock); pr_err("Unable to locate next R2T to send for ITT:" " 0x%08x.\n", cmd->init_task_tag); return NULL; } /* * Called with cmd->r2t_lock held. */ void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd) { list_del(&r2t->r2t_list); kmem_cache_free(lio_r2t_cache, r2t); } void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd) { struct iscsi_r2t *r2t, *r2t_tmp; spin_lock_bh(&cmd->r2t_lock); list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) iscsit_free_r2t(r2t, cmd); spin_unlock_bh(&cmd->r2t_lock); } /* * May be called from software interrupt (timer) context for allocating * iSCSI NopINs. 
*/ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) { struct iscsi_cmd *cmd; cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask); if (!cmd) { pr_err("Unable to allocate memory for struct iscsi_cmd.\n"); return NULL; } cmd->conn = conn; INIT_LIST_HEAD(&cmd->i_list); INIT_LIST_HEAD(&cmd->datain_list); INIT_LIST_HEAD(&cmd->cmd_r2t_list); init_completion(&cmd->reject_comp); spin_lock_init(&cmd->datain_lock); spin_lock_init(&cmd->dataout_timeout_lock); spin_lock_init(&cmd->istate_lock); spin_lock_init(&cmd->error_lock); spin_lock_init(&cmd->r2t_lock); return cmd; } /* * Called from iscsi_handle_scsi_cmd() */ struct iscsi_cmd *iscsit_allocate_se_cmd( struct iscsi_conn *conn, u32 data_length, int data_direction, int iscsi_task_attr) { struct iscsi_cmd *cmd; struct se_cmd *se_cmd; int sam_task_attr; cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); if (!cmd) return NULL; cmd->data_direction = data_direction; cmd->data_length = data_length; /* * Figure out the SAM Task Attribute for the incoming SCSI CDB */ if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) sam_task_attr = MSG_SIMPLE_TAG; else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) sam_task_attr = MSG_ORDERED_TAG; else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) sam_task_attr = MSG_HEAD_TAG; else if (iscsi_task_attr == ISCSI_ATTR_ACA) sam_task_attr = MSG_ACA_TAG; else { pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" " MSG_SIMPLE_TAG\n", iscsi_task_attr); sam_task_attr = MSG_SIMPLE_TAG; } se_cmd = &cmd->se_cmd; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops, conn->sess->se_sess, data_length, data_direction, sam_task_attr, &cmd->sense_buffer[0]); return cmd; } struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( struct iscsi_conn *conn, u8 function) { struct iscsi_cmd *cmd; struct se_cmd *se_cmd; int rc; u8 tcm_function; cmd = iscsit_allocate_cmd(conn, 
GFP_KERNEL); if (!cmd) return NULL; cmd->data_direction = DMA_NONE; cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); if (!cmd->tmr_req) { pr_err("Unable to allocate memory for" " Task Management command!\n"); goto out; } /* * TASK_REASSIGN for ERL=2 / connection stays inside of * LIO-Target $FABRIC_MOD */ if (function == ISCSI_TM_FUNC_TASK_REASSIGN) return cmd; se_cmd = &cmd->se_cmd; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops, conn->sess->se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG, &cmd->sense_buffer[0]); switch (function) { case ISCSI_TM_FUNC_ABORT_TASK: tcm_function = TMR_ABORT_TASK; break; case ISCSI_TM_FUNC_ABORT_TASK_SET: tcm_function = TMR_ABORT_TASK_SET; break; case ISCSI_TM_FUNC_CLEAR_ACA: tcm_function = TMR_CLEAR_ACA; break; case ISCSI_TM_FUNC_CLEAR_TASK_SET: tcm_function = TMR_CLEAR_TASK_SET; break; case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: tcm_function = TMR_LUN_RESET; break; case ISCSI_TM_FUNC_TARGET_WARM_RESET: tcm_function = TMR_TARGET_WARM_RESET; break; case ISCSI_TM_FUNC_TARGET_COLD_RESET: tcm_function = TMR_TARGET_COLD_RESET; break; default: pr_err("Unknown iSCSI TMR Function:" " 0x%02x\n", function); goto out; } rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL); if (rc < 0) goto out; cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req; return cmd; out: iscsit_release_cmd(cmd); return NULL; } int iscsit_decide_list_to_build( struct iscsi_cmd *cmd, u32 immediate_data_length) { struct iscsi_build_list bl; struct iscsi_conn *conn = cmd->conn; struct iscsi_session *sess = conn->sess; struct iscsi_node_attrib *na; if (sess->sess_ops->DataSequenceInOrder && sess->sess_ops->DataPDUInOrder) return 0; if (cmd->data_direction == DMA_NONE) return 0; na = iscsit_tpg_get_node_attrib(sess); memset(&bl, 0, sizeof(struct iscsi_build_list)); if (cmd->data_direction == DMA_FROM_DEVICE) { bl.data_direction = ISCSI_PDU_READ; bl.type = 
PDULIST_NORMAL; if (na->random_datain_pdu_offsets) bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS; if (na->random_datain_seq_offsets) bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS; } else { bl.data_direction = ISCSI_PDU_WRITE; bl.immediate_data_length = immediate_data_length; if (na->random_r2t_offsets) bl.randomize |= RANDOM_R2T_OFFSETS; if (!cmd->immediate_data && !cmd->unsolicited_data) bl.type = PDULIST_NORMAL; else if (cmd->immediate_data && !cmd->unsolicited_data) bl.type = PDULIST_IMMEDIATE; else if (!cmd->immediate_data && cmd->unsolicited_data) bl.type = PDULIST_UNSOLICITED; else if (cmd->immediate_data && cmd->unsolicited_data) bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED; } return iscsit_do_build_list(cmd, &bl); } struct iscsi_seq *iscsit_get_seq_holder_for_datain( struct iscsi_cmd *cmd, u32 seq_send_order) { u32 i; for (i = 0; i < cmd->seq_count; i++) if (cmd->seq_list[i].seq_send_order == seq_send_order) return &cmd->seq_list[i]; return NULL; } struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd) { u32 i; if (!cmd->seq_list) { pr_err("struct iscsi_cmd->seq_list is NULL!\n"); return NULL; } for (i = 0; i < cmd->seq_count; i++) { if (cmd->seq_list[i].type != SEQTYPE_NORMAL) continue; if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) { cmd->seq_send_order++; return &cmd->seq_list[i]; } } return NULL; } struct iscsi_r2t *iscsit_get_holder_for_r2tsn( struct iscsi_cmd *cmd, u32 r2t_sn) { struct iscsi_r2t *r2t; spin_lock_bh(&cmd->r2t_lock); list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { if (r2t->r2t_sn == r2t_sn) { spin_unlock_bh(&cmd->r2t_lock); return r2t; } } spin_unlock_bh(&cmd->r2t_lock); return NULL; } static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn) { int ret; /* * This is the proper method of checking received CmdSN against * ExpCmdSN and MaxCmdSN values, as well as accounting for out * or order CmdSNs due to multiple connection sessions and/or * CRC failures. 
*/ if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) { pr_err("Received CmdSN: 0x%08x is greater than" " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn, sess->max_cmd_sn); ret = CMDSN_ERROR_CANNOT_RECOVER; } else if (cmdsn == sess->exp_cmd_sn) { sess->exp_cmd_sn++; pr_debug("Received CmdSN matches ExpCmdSN," " incremented ExpCmdSN to: 0x%08x\n", sess->exp_cmd_sn); ret = CMDSN_NORMAL_OPERATION; } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) { pr_debug("Received CmdSN: 0x%08x is greater" " than ExpCmdSN: 0x%08x, not acknowledging.\n", cmdsn, sess->exp_cmd_sn); ret = CMDSN_HIGHER_THAN_EXP; } else { pr_err("Received CmdSN: 0x%08x is less than" " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn, sess->exp_cmd_sn); ret = CMDSN_LOWER_THAN_EXP; } return ret; } /* * Commands may be received out of order if MC/S is in use. * Ensure they are executed in CmdSN order. */ int iscsit_sequence_cmd( struct iscsi_conn *conn, struct iscsi_cmd *cmd, u32 cmdsn) { int ret; int cmdsn_ret; mutex_lock(&conn->sess->cmdsn_mutex); cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn); switch (cmdsn_ret) { case CMDSN_NORMAL_OPERATION: ret = iscsit_execute_cmd(cmd, 0); if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list)) iscsit_execute_ooo_cmdsns(conn->sess); break; case CMDSN_HIGHER_THAN_EXP: ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn); break; case CMDSN_LOWER_THAN_EXP: cmd->i_state = ISTATE_REMOVE; iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); ret = cmdsn_ret; break; default: ret = cmdsn_ret; break; } mutex_unlock(&conn->sess->cmdsn_mutex); return ret; } int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf) { struct iscsi_conn *conn = cmd->conn; struct se_cmd *se_cmd = &cmd->se_cmd; struct iscsi_data *hdr = (struct iscsi_data *) buf; u32 payload_length = ntoh24(hdr->dlength); if (conn->sess->sess_ops->InitialR2T) { pr_err("Received unexpected unsolicited data" " while InitialR2T=Yes, protocol error.\n"); 
transport_send_check_condition_and_sense(se_cmd, TCM_UNEXPECTED_UNSOLICITED_DATA, 0); return -1; } if ((cmd->first_burst_len + payload_length) > conn->sess->sess_ops->FirstBurstLength) { pr_err("Total %u bytes exceeds FirstBurstLength: %u" " for this Unsolicited DataOut Burst.\n", (cmd->first_burst_len + payload_length), conn->sess->sess_ops->FirstBurstLength); transport_send_check_condition_and_sense(se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return -1; } if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) return 0; if (((cmd->first_burst_len + payload_length) != cmd->data_length) && ((cmd->first_burst_len + payload_length) != conn->sess->sess_ops->FirstBurstLength)) { pr_err("Unsolicited non-immediate data received %u" " does not equal FirstBurstLength: %u, and does" " not equal ExpXferLen %u.\n", (cmd->first_burst_len + payload_length), conn->sess->sess_ops->FirstBurstLength, cmd->data_length); transport_send_check_condition_and_sense(se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return -1; } return 0; } struct iscsi_cmd *iscsit_find_cmd_from_itt( struct iscsi_conn *conn, u32 init_task_tag) { struct iscsi_cmd *cmd; spin_lock_bh(&conn->cmd_lock); list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { if (cmd->init_task_tag == init_task_tag) { spin_unlock_bh(&conn->cmd_lock); return cmd; } } spin_unlock_bh(&conn->cmd_lock); pr_err("Unable to locate ITT: 0x%08x on CID: %hu", init_task_tag, conn->cid); return NULL; } struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( struct iscsi_conn *conn, u32 init_task_tag, u32 length) { struct iscsi_cmd *cmd; spin_lock_bh(&conn->cmd_lock); list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { if (cmd->init_task_tag == init_task_tag) { spin_unlock_bh(&conn->cmd_lock); return cmd; } } spin_unlock_bh(&conn->cmd_lock); pr_err("Unable to locate ITT: 0x%08x on CID: %hu," " dumping payload\n", init_task_tag, conn->cid); if (length) iscsit_dump_data_payload(conn, length, 1); return NULL; } struct iscsi_cmd *iscsit_find_cmd_from_ttt( struct 
iscsi_conn *conn,
	u32 targ_xfer_tag)
{
	struct iscsi_cmd *cmd = NULL;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
		if (cmd->targ_xfer_tag == targ_xfer_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
			targ_xfer_tag, conn->cid);
	return NULL;
}

/*
 * Locate a command by ITT across connection-recovery state for
 * task reassignment.  Returns -2 if found on an inactive recovery
 * entry, 0 if found on an active one (ready to reassign), -1 if
 * not found at all.  On success *cmd_ptr/*cr_ptr are filled in.
 */
int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	u32 init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * A match here means the command is still parked on an inactive
	 * recovery entry and is not yet eligible for reassignment.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches the command is ready to be reassigned.
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}

/*
 * Queue @cmd on the connection's immediate queue in @state and wake
 * the TX thread.  Allocation failure is logged and silently dropped
 * (GFP_ATOMIC context).
 */
void iscsit_add_cmd_to_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
				" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->immed_queue_lock);
	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
	atomic_inc(&cmd->immed_queue_count);
	atomic_set(&conn->check_immediate_queue, 1);
	spin_unlock_bh(&conn->immed_queue_lock);

	wake_up(&conn->queues_wq);
}

/*
 * Pop the oldest entry from the immediate queue, or NULL if empty.
 * Caller owns (and must free) the returned iscsi_queue_req.
 */
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->immed_queue_lock);
	if (list_empty(&conn->immed_queue_list)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return NULL;
	}
	/* list_for_each_entry + break: grab the first entry only. */
	list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
		break;

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->immed_queue_count);
	spin_unlock_bh(&conn->immed_queue_lock);

	return qr;
}

/*
 * Remove and free every immediate-queue entry referencing @cmd.
 * Logs if the per-command count is still non-zero afterwards.
 */
static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	if
(atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}

/*
 * Queue @cmd on the connection's response queue in @state and wake
 * the TX thread.  Allocation failure is logged and silently dropped
 * (GFP_ATOMIC context).
 */
void iscsit_add_cmd_to_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->response_queue_lock);
	list_add_tail(&qr->qr_list, &conn->response_queue_list);
	atomic_inc(&cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	wake_up(&conn->queues_wq);
}

/*
 * Pop the oldest entry from the response queue, or NULL if empty.
 * Caller owns (and must free) the returned iscsi_queue_req.
 */
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->response_queue_lock);
	if (list_empty(&conn->response_queue_list)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return NULL;
	}

	/* list_for_each_entry + break: grab the first entry only. */
	list_for_each_entry(qr, &conn->response_queue_list, qr_list)
		break;

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	return qr;
}

/*
 * Remove and free every response-queue entry referencing @cmd.
 * Logs if the per-command count is still non-zero afterwards.
 */
static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}

/*
 * Return true only when both the immediate and response queues are
 * empty.  Each queue is checked under its own lock; the combined
 * result is therefore only a snapshot, not an atomic guarantee.
 */
bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
{
	bool empty;

	spin_lock_bh(&conn->immed_queue_lock);
	empty = list_empty(&conn->immed_queue_list);
	spin_unlock_bh(&conn->immed_queue_lock);

	if (!empty)
		return empty;

	spin_lock_bh(&conn->response_queue_lock);
	empty = list_empty(&conn->response_queue_list);
	spin_unlock_bh(&conn->response_queue_lock);

	return empty;
}

/*
 * Drain and free every queued request on both per-connection queues,
 * dropping the matching per-command queue counts.
 */
void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->immed_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	spin_lock_bh(&conn->response_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
			qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->response_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);
}

/*
 * Final teardown of an iscsi_cmd: free R2Ts, DataIN requests, every
 * auxiliary allocation, its memory scatterlist pages, and any queue
 * entries still referencing it, then return the cmd to its cache.
 */
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = cmd->conn;
	int i;

	iscsit_free_r2ts_from_list(cmd);
	iscsit_free_all_datain_reqs(cmd);

	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);

	for (i = 0; i < cmd->t_mem_sg_nents; i++)
		__free_page(sg_page(&cmd->t_mem_sg[i]));

	kfree(cmd->t_mem_sg);

	/* conn may already be NULL during login/teardown paths. */
	if (conn) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}
	kmem_cache_free(lio_cmd_cache, cmd);
}

void iscsit_free_cmd(struct iscsi_cmd *cmd)
{
	/*
	 * Determine if a struct se_cmd is associated with
	 * this struct iscsi_cmd.
	 */
	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
	case ISCSI_OP_SCSI_TMFUNC:
		transport_generic_free_cmd(&cmd->se_cmd, 1);
		break;
	case ISCSI_OP_REJECT:
		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			transport_generic_free_cmd(&cmd->se_cmd, 1);
			break;
		}
		/* Fall-through */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

/*
 * Wait until the session usage count drops to zero.  Returns 2 when
 * called from interrupt context (cannot sleep, caller must retry),
 * 1 after an actual wait, 0 when the count was already zero.
 */
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}

/* Drop a session usage reference; wake a waiter when it hits zero. */
void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count--;

	if (!sess->session_usage_count && sess->session_waiting_on_uc)
		complete(&sess->session_waiting_on_uc_comp);

	spin_unlock_bh(&sess->session_usage_lock);
}

/* Take a session usage reference. */
void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}

/*
 * Setup conn->if_marker and conn->of_marker values based upon
 * the initial marker-less interval. (see iSCSI v19 A.2)
 */
int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
{
	int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
	/*
	 * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
	 */
	u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
	u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);

	if (conn->conn_ops->OFMarker) {
		/*
		 * Account for the first Login Command received not
		 * via iscsi_recv_msg().
		 */
		conn->of_marker += ISCSI_HDR_LEN;
		if (conn->of_marker <= OFMarkInt) {
			conn->of_marker = (OFMarkInt - conn->of_marker);
		} else {
			/* Bytes already consumed past whole marker intervals. */
			login_ofmarker_count = (conn->of_marker / OFMarkInt);
			next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
					(login_ofmarker_count * MARKER_SIZE);
			conn->of_marker = (next_marker - conn->of_marker);
		}
		conn->of_marker_offset = 0;
		pr_debug("Setting OFMarker value to %u based on Initial"
			" Markerless Interval.\n", conn->of_marker);
	}

	if (conn->conn_ops->IFMarker) {
		if (conn->if_marker <= IFMarkInt) {
			conn->if_marker = (IFMarkInt - conn->if_marker);
		} else {
			login_ifmarker_count = (conn->if_marker / IFMarkInt);
			next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
					(login_ifmarker_count * MARKER_SIZE);
			conn->if_marker = (next_marker - conn->if_marker);
		}
		pr_debug("Setting IFMarker value to %u based on Initial"
			" Markerless Interval.\n", conn->if_marker);
	}

	return 0;
}

/*
 * Find a logged-in connection in @sess by CID, taking a usage
 * reference on success.  Returns NULL when no match.
 */
struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if ((conn->cid == cid) &&
		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
			iscsit_inc_conn_usage_count(conn);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

/*
 * As iscsit_get_conn_from_cid(), but matches any connection state and
 * flags the connection as awaiting reinstatement (connection_wait_rcfr).
 */
struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if (conn->cid == cid) {
			iscsit_inc_conn_usage_count(conn);
			spin_lock(&conn->state_lock);
			atomic_set(&conn->connection_wait_rcfr, 1);
			spin_unlock(&conn->state_lock);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

/* Block until the connection usage count reaches zero. */
void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);
		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}

/* Drop a connection usage reference; wake a waiter when it hits zero. */
void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count--;

	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
		complete(&conn->conn_waiting_on_uc_comp);

	spin_unlock_bh(&conn->conn_usage_lock);
}

/* Take a connection usage reference. */
void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}

/*
 * Allocate and queue a NOP-In PDU on @conn.  When @want_response is
 * set, a real TTT is assigned (skipping the 0xFFFFFFFF reserved value)
 * and the NOP-In response timer is started.  Returns 0 or -1 on
 * allocation failure.
 */
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
				ISTATE_SEND_NOPIN_NO_RESPONSE;
	cmd->init_task_tag = 0xFFFFFFFF;
	spin_lock_bh(&conn->sess->ttt_lock);
	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
			0xFFFFFFFF;
	/* 0xFFFFFFFF is reserved; take the next counter value instead. */
	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
	spin_unlock_bh(&conn->sess->ttt_lock);

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}

/*
 * Timer callback: the initiator never answered our NOP-In.  Record
 * the error in the tiqn statistics and force connection
 * reinstatement.
 */
static void iscsit_handle_nopin_response_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
		/* Raced with iscsit_stop_nopin_response_timer(). */
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}

	pr_debug("Did not receive response to NOPIN on CID: %hu on"
		" SID: %u, failing connection.\n", conn->cid,
			conn->sess->sid);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	{
	struct iscsi_portal_group *tpg = conn->sess->tpg;
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock_bh(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
				conn->sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		conn->sess->conn_timeout_errors++;
		spin_unlock_bh(&tiqn->sess_err_stats.lock);
	}
	}

	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}

/* Push the NOP-In response deadline out if the timer is running. */
void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	mod_timer(&conn->nopin_response_timer,
		(get_jiffies_64() + na->nopin_response_timeout * HZ));
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Arm the NOP-In response timer if not already running.
 * NOTE(review): takes conn->nopin_timer_lock itself; callers must
 * NOT hold it (the historical "called with lock held" comment here
 * contradicted the code below).
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_response_timer);
	conn->nopin_response_timer.expires =
		(get_jiffies_64() + na->nopin_response_timeout * HZ);
	conn->nopin_response_timer.data = (unsigned long)conn;
	conn->nopin_response_timer.function =
		iscsit_handle_nopin_response_timeout;
	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_response_timer);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/* Stop the NOP-In response timer, synchronizing with its callback. */
void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags
	    & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	/* Set STOP first so a concurrent callback bails out cleanly. */
	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_response_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Timer callback: the keepalive interval elapsed, so emit a NOP-In
 * that requests a response from the initiator.
 */
static void iscsit_handle_nopin_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		/* Raced with iscsit_stop_nopin_timer(). */
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}

/* Locked variant of __iscsit_start_nopin_timer(). */
void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled..
	 */
	if (!na->nopin_timeout)
		return;

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
			" interval\n", conn->cid, na->nopin_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/* Stop the NOP-In keepalive timer, synchronizing with its callback. */
void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Transmit cmd->iov_data (or cmd->iov_misc when @use_misc is set) in
 * full, retrying on -EAGAIN.  Returns 0 on success, -1 on hard error.
 */
int iscsit_send_tx_data(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int use_misc)
{
	int tx_sent, tx_size;
	u32 iov_count;
	struct kvec *iov;

send_data:
	tx_size = cmd->tx_size;

	if (!use_misc) {
		iov = &cmd->iov_data[0];
		iov_count = cmd->iov_data_count;
	} else {
		iov = &cmd->iov_misc[0];
		iov_count = cmd->iov_misc_count;
	}

	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
	if (tx_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_data;
		} else
			return -1;
	}
	cmd->tx_size = 0;

	return 0;
}

/*
 * Send the PDU header via kvec I/O, then the payload pages directly
 * with sendpage(), followed by any padding and data digest.
 */
int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent, iov_off;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len =
tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
					sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned"
						" -EAGAIN\n");
				goto send_pg;
			}

			pr_err("tcp_sendpage() failure: %d\n",
					tx_sent);
			return -1;
		}

		data_len -= sub_len;
		offset = 0;
		sg = sg_next(sg);
	}

send_padding:
	if (cmd->padding) {
		struct kvec *iov_p = &cmd->iov_data[iov_off++];

		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_padding;
			}
			return -1;
		}
	}

send_datacrc:
	if (conn->conn_ops->DataDigest) {
		struct kvec *iov_d = &cmd->iov_data[iov_off];

		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_datacrc;
			}
			return -1;
		}
	}

	return 0;
}

/*
 * This function is used for mainly sending a ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs with the
 * errors set in status_class and status_detail.
 *
 * Parameters:	iSCSI Connection, Status Class, Status Detail.
 * Returns:	0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
	u8 iscsi_hdr[ISCSI_HDR_LEN];
	int err;
	struct kvec iov;
	struct iscsi_login_rsp *hdr;

	/* Record the failure before replying to the initiator. */
	iscsit_collect_login_stats(conn, status_class, status_detail);

	memset(&iov, 0, sizeof(struct kvec));
	memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);

	hdr	= (struct iscsi_login_rsp *)&iscsi_hdr;
	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
	hdr->status_class	= status_class;
	hdr->status_detail	= status_detail;
	hdr->itt		= cpu_to_be32(conn->login_itt);

	iov.iov_base		= &iscsi_hdr;
	iov.iov_len		= ISCSI_HDR_LEN;

	PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);

	err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
	if (err != ISCSI_HDR_LEN) {
		pr_err("tx_data returned less than expected\n");
		return -1;
	}

	return 0;
}

/* Dump every per-connection and per-session parameter for debugging. */
void iscsit_print_session_params(struct iscsi_session *sess)
{
	struct iscsi_conn *conn;

	pr_debug("-----------------------------[Session Params for"
		" SID: %u]-----------------------------\n", sess->sid);
	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
		iscsi_dump_conn_ops(conn->conn_ops);
	spin_unlock_bh(&sess->conn_lock);

	iscsi_dump_sess_ops(sess->sess_ops);
}

/*
 * Receive exactly count->data_length bytes into count->iov, looping
 * on short reads.  Returns total bytes received, or <= 0 on error.
 */
static int iscsit_do_rx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
	struct kvec *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&msg, 0, sizeof(struct msghdr));

	iov_p = count->iov;
	iov_len	= count->iov_count;

	while (total_rx < data) {
		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
					(data - total_rx), MSG_WAITALL);
		if (rx_loop <= 0) {
			pr_debug("rx_loop: %d total_rx: %d\n",
				rx_loop, total_rx);
			return rx_loop;
		}
		total_rx += rx_loop;
		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
				rx_loop, total_rx, data);
	}

	return total_rx;
}

/*
 * Send exactly count->data_length bytes from count->iov, looping
 * on short writes.  Returns total bytes sent, or <= 0 on error.
 */
static int iscsit_do_tx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
	struct kvec *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	if (data <= 0) {
		pr_err("Data length is: %d\n", data);
		return -1;
	}

	memset(&msg, 0, sizeof(struct msghdr));

	iov_p = count->iov;
	iov_len = count->iov_count;

	while (total_tx < data) {
		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
					(data - total_tx));
		if (tx_loop <= 0) {
			pr_debug("tx_loop: %d total_tx %d\n",
				tx_loop, total_tx);
			return tx_loop;
		}
		total_tx += tx_loop;
		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
					tx_loop, total_tx, data);
	}

	return total_tx;
}

/* Thin wrapper: receive @data bytes into @iov on @conn. */
int rx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_RX_DATA;

	return iscsit_do_rx_data(conn, &c);
}

/* Thin wrapper: send @data bytes from @iov on @conn. */
int tx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_TX_DATA;

	return iscsit_do_tx_data(conn, &c);
}

/*
 * Update the per-tiqn login statistics for a login attempt with the
 * given status class/detail, recording initiator name/address and
 * time on failure.  Duplicate failures from the same address within
 * a short window are coalesced.
 */
void iscsit_collect_login_stats(
	struct iscsi_conn *conn,
	u8 status_class,
	u8 status_detail)
{
	struct iscsi_param *intrname = NULL;
	struct iscsi_tiqn *tiqn;
	struct iscsi_login_stats *ls;

	tiqn = iscsit_snmp_get_tiqn(conn);
	if (!tiqn)
		return;

	ls = &tiqn->login_stats;

	spin_lock(&ls->lock);
	/*
	 * NOTE(review): the "< 10" window is compared against a jiffies
	 * delta, i.e. 10 ticks, not 10 seconds — confirm intent.
	 */
	if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
		/* We already have the failure info for this login */
		spin_unlock(&ls->lock);
		return;
	}

	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
		ls->accepts++;
	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
		ls->redirects++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
		ls->authenticate_fails++;
		ls->last_fail_type =  ISCSI_LOGIN_FAIL_AUTHENTICATE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
		ls->authorize_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
		ls->negotiate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
	} else {
		ls->other_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
	}

	/* Save initiator name, ip address and time, if it is a failed login */
	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
		if (conn->param_list)
			intrname = iscsi_find_param_from_key(INITIATORNAME,
							     conn->param_list);
		strcpy(ls->last_intr_fail_name,
		       (intrname ? intrname->value : "Unknown"));

		ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
		snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
				"%s", conn->login_ip);
		ls->last_fail_time = get_jiffies_64();
	}

	spin_unlock(&ls->lock);
}

/*
 * Resolve the tiqn used for SNMP-style statistics from a connection,
 * or NULL when the session/tpg linkage is not (yet) established.
 */
struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
	struct iscsi_portal_group *tpg;

	if (!conn || !conn->sess)
		return NULL;

	tpg = conn->sess->tpg;
	if (!tpg)
		return NULL;

	if (!tpg->tpg_tiqn)
		return NULL;

	return tpg->tpg_tiqn;
}
CyanogenMod/android_kernel_motorola_titan
drivers/target/iscsi/iscsi_target_util.c
C
gpl-2.0
41,702
/* src/p80211/p80211wep.c * * WEP encode/decode for P80211. * * Copyright (C) 2002 AbsoluteValue Systems, Inc. All Rights Reserved. * -------------------------------------------------------------------- * * linux-wlan * * The contents of this file are subject to the Mozilla Public * License Version 1.1 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU Public License version 2 (the "GPL"), in which * case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use * your version of this file under the MPL, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete * the provisions above, a recipient may use your version of this * file under either the MPL or the GPL. * * -------------------------------------------------------------------- * * Inquiries regarding the linux-wlan Open Source project can be * made directly to: * * AbsoluteValue Systems Inc. * info@linux-wlan.com * http://www.linux-wlan.com * * -------------------------------------------------------------------- * * Portions of the development of this software were funded by * Intersil Corporation as part of PRISM(R) chipset product development. 
* * -------------------------------------------------------------------- */ /*================================================================*/ /* System Includes */ #include <linux/netdevice.h> #include <linux/wireless.h> #include <linux/random.h> #include <linux/kernel.h> /* #define WEP_DEBUG */ #include "p80211hdr.h" #include "p80211types.h" #include "p80211msg.h" #include "p80211conv.h" #include "p80211netdev.h" #define WEP_KEY(x) (((x) & 0xC0) >> 6) static const u32 wep_crc32_table[256] = { 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, 
0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, 0x2d02ef8dL }; /* keylen in bytes! 
*/ int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen) { if (keylen < 0) return -1; if (keylen >= MAX_KEYLEN) return -1; if (key == NULL) return -1; if (keynum < 0) return -1; if (keynum >= NUM_WEPKEYS) return -1; #ifdef WEP_DEBUG pr_debug("WEP key %d len %d = %*phC\n", keynum, keylen, 8, key); #endif wlandev->wep_keylens[keynum] = keylen; memcpy(wlandev->wep_keys[keynum], key, keylen); return 0; } /* 4-byte IV at start of buffer, 4-byte ICV at end of buffer. if successful, buf start is payload begin, length -= 8; */ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override, u8 *iv, u8 *icv) { u32 i, j, k, crc, keylen; u8 s[256], key[64], c_crc[4]; u8 keyidx; /* Needs to be at least 8 bytes of payload */ if (len <= 0) return -1; /* initialize the first bytes of the key from the IV */ key[0] = iv[0]; key[1] = iv[1]; key[2] = iv[2]; keyidx = WEP_KEY(iv[3]); if (key_override >= 0) keyidx = key_override; if (keyidx >= NUM_WEPKEYS) return -2; keylen = wlandev->wep_keylens[keyidx]; if (keylen == 0) return -3; /* copy the rest of the key over from the designated key */ memcpy(key + 3, wlandev->wep_keys[keyidx], keylen); keylen += 3; /* add in IV bytes */ #ifdef WEP_DEBUG pr_debug("D %d: %*ph (%d %d) %*phC\n", len, 3, key, keyidx, keylen, 5, key + 3); #endif /* set up the RC4 state */ for (i = 0; i < 256; i++) s[i] = i; j = 0; for (i = 0; i < 256; i++) { j = (j + s[i] + key[i % keylen]) & 0xff; swap(i, j); } /* Apply the RC4 to the data, update the CRC32 */ crc = ~0; i = j = 0; for (k = 0; k < len; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); buf[k] ^= s[(s[i] + s[j]) & 0xff]; crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8); } crc = ~crc; /* now let's check the crc */ c_crc[0] = crc; c_crc[1] = crc >> 8; c_crc[2] = crc >> 16; c_crc[3] = crc >> 24; for (k = 0; k < 4; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); if ((c_crc[k] ^ s[(s[i] + s[j]) & 0xff]) != icv[k]) return -(4 | (k << 4)); /* ICV 
mismatch */ } return 0; } /* encrypts in-place. */ int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv) { u32 i, j, k, crc, keylen; u8 s[256], key[64]; /* no point in WEPping an empty frame */ if (len <= 0) return -1; /* we need to have a real key.. */ if (keynum >= NUM_WEPKEYS) return -2; keylen = wlandev->wep_keylens[keynum]; if (keylen <= 0) return -3; /* use a random IV. And skip known weak ones. */ get_random_bytes(iv, 3); while ((iv[1] == 0xff) && (iv[0] >= 3) && (iv[0] < keylen)) get_random_bytes(iv, 3); iv[3] = (keynum & 0x03) << 6; key[0] = iv[0]; key[1] = iv[1]; key[2] = iv[2]; /* copy the rest of the key over from the designated key */ memcpy(key + 3, wlandev->wep_keys[keynum], keylen); keylen += 3; /* add in IV bytes */ #ifdef WEP_DEBUG pr_debug("E %d (%d/%d %d) %*ph %*phC\n", len, iv[3], keynum, keylen, 3, key, 5, key + 3); #endif /* set up the RC4 state */ for (i = 0; i < 256; i++) s[i] = i; j = 0; for (i = 0; i < 256; i++) { j = (j + s[i] + key[i % keylen]) & 0xff; swap(i, j); } /* Update CRC32 then apply RC4 to the data */ crc = ~0; i = j = 0; for (k = 0; k < len; k++) { crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8); i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); dst[k] = buf[k] ^ s[(s[i] + s[j]) & 0xff]; } crc = ~crc; /* now let's encrypt the crc */ icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; for (k = 0; k < 4; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; swap(i, j); icv[k] ^= s[(s[i] + s[j]) & 0xff]; } return 0; }
thanhphat11/android_kernel_xiaomi_msm8996
drivers/staging/wlan-ng/p80211wep.c
C
gpl-2.0
9,504
/*
 * fs/f2fs/namei.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/ctype.h>

#include "f2fs.h"
#include "node.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/f2fs.h>

/*
 * Allocate and minimally initialize a new in-core inode for @dir's
 * filesystem: reserve a node id (nid) under the op mutex, set owner,
 * mode and timestamps, and insert the inode into the inode hash.
 * Returns the locked new inode or an ERR_PTR; on failure the reserved
 * nid (if any) is returned to the allocator.
 */
static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	nid_t ino;
	struct inode *inode;
	bool nid_free = false;
	int err, ilock;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* nid allocation must be serialized against checkpoint */
	ilock = mutex_lock_op(sbi);
	if (!alloc_nid(sbi, &ino)) {
		mutex_unlock_op(sbi, ilock);
		err = -ENOSPC;
		goto fail;
	}
	mutex_unlock_op(sbi, ilock);

	inode->i_uid = current_fsuid();

	/* honor setgid directories: inherit gid, and sgid bit for subdirs */
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}

	inode->i_ino = ino;
	inode->i_mode = mode;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->i_generation = sbi->s_next_generation++;

	err = insert_inode_locked(inode);
	if (err) {
		err = -EINVAL;
		nid_free = true;	/* nid was reserved; give it back */
		goto out;
	}
	trace_f2fs_new_inode(inode, 0);
	mark_inode_dirty(inode);
	return inode;

out:
	clear_nlink(inode);
	unlock_new_inode(inode);
fail:
	trace_f2fs_new_inode(inode, err);
	make_bad_inode(inode);
	iput(inode);
	if (nid_free)
		alloc_nid_failed(sbi, ino);
	return ERR_PTR(err);
}

/*
 * Return nonzero when filename @s ends with extension @sub, comparing
 * case-sensitively first and then against the upper-cased extension.
 * Note: only the first 8 characters of @sub are upper-cased (extension
 * table entries are 8 bytes wide).
 */
static int is_multimedia_file(const unsigned char *s, const char *sub)
{
	size_t slen = strlen(s);
	size_t sublen = strlen(sub);
	int ret;

	if (sublen > slen)
		return 0;

	ret = memcmp(s + slen - sublen, sub, sublen);
	if (ret) {	/* compare upper case */
		int i;
		char upper_sub[8];
		for (i = 0; i < sublen && i < sizeof(upper_sub); i++)
			upper_sub[i] = toupper(sub[i]);
		return !memcmp(s + slen - sublen, upper_sub, sublen);
	}

	return !ret;
}

/*
 * Set multimedia files as cold files for hot/cold data separation
 */
static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode,
		const unsigned char *name)
{
	int i;
	__u8 (*extlist)[8] = sbi->raw_super->extension_list;

	int count = le32_to_cpu(sbi->raw_super->extension_count);
	for (i = 0; i < count; i++) {
		if (is_multimedia_file(name, extlist[i])) {
			set_cold_file(inode);
			break;
		}
	}
}

/*
 * VFS .create: allocate a regular-file inode, tag it cold when its
 * extension matches the superblock list, and link it into @dir.
 */
static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
						bool excl)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	nid_t ino = 0;
	int err, ilock;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
		set_cold_files(sbi, inode, dentry->d_name.name);

	inode->i_op = &f2fs_file_inode_operations;
	inode->i_fop = &f2fs_file_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	ino = inode->i_ino;

	ilock = mutex_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto out;

	alloc_nid_done(sbi, ino);

	/* during recovery the dentry is instantiated by the recovery code */
	if (!sbi->por_doing)
		d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
out:
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, ino);
	return err;
}

/*
 * VFS .link: create a new hard link @dentry to the inode behind
 * @old_dentry inside directory @dir.
 */
static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err, ilock;

	f2fs_balance_fs(sbi);

	inode->i_ctime = CURRENT_TIME;
	/* take an extra inode reference for the new link */
	atomic_inc(&inode->i_count);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	ilock = mutex_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto out;

	/*
	 * This file should be checkpointed during fsync.
	 * We lost i_pino from now on.
	 */
	set_cp_file(inode);

	d_instantiate(dentry, inode);
	return 0;
out:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	make_bad_inode(inode);
	iput(inode);
	return err;
}

/* Export-ops helper: resolve ".." of @child to a parent dentry. */
struct dentry *f2fs_get_parent(struct dentry *child)
{
	struct qstr dotdot = QSTR_INIT("..", 2);
	unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);
	if (!ino)
		return ERR_PTR(-ENOENT);
	return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
}

/*
 * VFS .lookup: find @dentry's name in @dir; on a hit read the inode and
 * splice it into the dcache, otherwise attach a negative dentry.
 */
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
		unsigned int flags)
{
	struct inode *inode = NULL;
	struct f2fs_dir_entry *de;
	struct page *page;

	if (dentry->d_name.len > F2FS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (de) {
		nid_t ino = le32_to_cpu(de->ino);
		/* f2fs_find_entry returned a kmapped, referenced page */
		kunmap(page);
		f2fs_put_page(page, 0);

		inode = f2fs_iget(dir->i_sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}

	return d_splice_alias(inode, dentry);
}

/*
 * VFS .unlink: remove @dentry's directory entry from @dir after making
 * sure there is orphan space for the (possibly) soon-to-be-freed inode.
 */
static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode = dentry->d_inode;
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;
	int ilock;

	trace_f2fs_unlink_enter(dir, dentry);
	f2fs_balance_fs(sbi);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de)
		goto fail;

	err = check_orphan_space(sbi);
	if (err) {
		kunmap(page);
		f2fs_put_page(page, 0);
		goto fail;
	}

	ilock = mutex_lock_op(sbi);
	f2fs_delete_entry(de, page, inode);
	mutex_unlock_op(sbi, ilock);

	/* In order to evict this inode,  we set it dirty */
	mark_inode_dirty(inode);
fail:
	trace_f2fs_unlink_exit(inode, err);
	return err;
}

/*
 * VFS .symlink: create a symlink inode in @dir and write @symname
 * (including its trailing NUL) as the link target.
 */
static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
					const char *symname)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	size_t symlen = strlen(symname) + 1;
	int err, ilock;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_symlink_inode_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	ilock = mutex_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto out;

	err = page_symlink(inode, symname, symlen);
	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return err;
out:
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

/* VFS .mkdir: create a directory inode in @dir and link it. */
static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct inode *inode;
	int err, ilock;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);

	/* FI_INC_LINK tells f2fs_add_link to bump the link count for ".." */
	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	ilock = mutex_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto out_fail;

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

/* VFS .rmdir: only empty directories may be unlinked. */
static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	if (f2fs_empty_dir(inode))
		return f2fs_unlink(dir, dentry);
	return -ENOTEMPTY;
}

/* VFS .mknod: create a device/special inode in @dir. */
static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
				umode_t mode, dev_t rdev)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int err = 0;
	int ilock;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_op = &f2fs_special_inode_operations;

	ilock = mutex_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto out;

	alloc_nid_done(sbi, inode->i_ino);
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
out:
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

/*
 * VFS .rename: move @old_dentry from @old_dir to @new_dentry in
 * @new_dir.  Handles overwrite of an existing target (orphaning it when
 * its link count drops to zero) and the ".." update when a directory
 * moves between parents.  The whole directory manipulation runs under
 * the op mutex.
 */
static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct super_block *sb = old_dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT, ilock = -1;

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		/* ".." entry of the directory being moved */
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	ilock = mutex_lock_op(sbi);

	if (new_inode) {
		struct page *new_page;

		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page);
		if (!new_entry)
			goto out_dir;

		/* retarget the existing entry instead of add+delete */
		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		if (old_dir_entry)
			drop_nlink(new_inode);	/* it loses its ".." link */
		drop_nlink(new_inode);
		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		update_inode_page(new_inode);
	} else {
		err = f2fs_add_link(new_dentry, old_inode);
		if (err)
			goto out_dir;

		if (old_dir_entry) {
			inc_nlink(new_dir);	/* new parent gains ".." */
			update_inode_page(new_dir);
		}
	}

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, NULL);

	if (old_dir_entry) {
		/* point the moved directory's ".." at its new parent */
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
		} else {
			kunmap(old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		update_inode_page(old_dir);
	}

	mutex_unlock_op(sbi, ilock);
	return 0;

out_dir:
	if (old_dir_entry) {
		kunmap(old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
	mutex_unlock_op(sbi, ilock);
out_old:
	kunmap(old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink       = generic_readlink,
	.follow_link    = page_follow_link_light,
	.put_link       = page_put_link,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_special_inode_operations = {
	.setattr        = f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr       = generic_setxattr,
	.getxattr       = generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr    = generic_removexattr,
#endif
};
NicholasPace/android_kernel_asus_Z00A
fs/f2fs/namei.c
C
gpl-2.0
12,004
/******************************************************************************
 *
 * Copyright(c) 2009-2012  Realtek Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
 * Hsinchu 300, Taiwan.
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "../wifi.h"
#include "../usb.h"
#include "../ps.h"
#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "mac.h"
#include "trx.h"

/*
 * Read the test-chip SIE configuration and derive how many bulk-OUT
 * endpoints exist and which TX queues they serve (out_queue_sel).
 * Returns 0 when the derived endpoint count matches what USB
 * enumeration found, -EINVAL otherwise.
 */
static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
{
	u8 ep_cfg, txqsele;
	u8 ep_nums = 0;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);

	rtlusb->out_queue_sel = 0;
	ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL);
	ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT;
	switch (ep_cfg) {
	case 0:		/* 2 bulk OUT, 1 bulk IN */
	case 3:
		rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_LQ;
		ep_nums = 2;
		break;
	case 1:	/* 1 bulk IN/OUT => map all endpoint to Low queue */
	case 2:	/* 1 bulk IN, 1 bulk OUT => map all endpoint to High queue */
		txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS);
		if (txqsele & 0x0F)	/* /map all endpoint to High queue */
			rtlusb->out_queue_sel = TX_SELE_HQ;
		else if (txqsele & 0xF0)	/* map all endpoint to Low queue */
			rtlusb->out_queue_sel = TX_SELE_LQ;
		ep_nums = 1;
		break;
	default:
		break;
	}
	return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
}

/*
 * Same as _ConfigVerTOutEP but for normal (production) chips: each of
 * the High/Normal/Low queues may have its own bulk-OUT endpoint.
 */
static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
{
	u8 ep_cfg;
	u8 ep_nums = 0;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);

	rtlusb->out_queue_sel = 0;
	/* Normal and High queue */
	ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1));
	if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
		rtlusb->out_queue_sel |= TX_SELE_HQ;
		ep_nums++;
	}
	if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
		rtlusb->out_queue_sel |= TX_SELE_NQ;
		ep_nums++;
	}
	/* Low queue */
	ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2));
	if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
		rtlusb->out_queue_sel |= TX_SELE_LQ;
		ep_nums++;
	}
	return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
}

/*
 * Fill the software-queue -> endpoint-number map for a 2-OUT-endpoint
 * layout.  @bIsChipB is currently unused by the body; the split is on
 * @bwificfg (WMM vs typical).
 */
static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
			     bool bwificfg, struct rtl_ep_map *ep_map)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (bwificfg) { /* for WMM */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "USB Chip-B & WMM Setting.....\n");
		ep_map->ep_mapping[RTL_TXQ_BE]	= 2;
		ep_map->ep_mapping[RTL_TXQ_BK]	= 3;
		ep_map->ep_mapping[RTL_TXQ_VI]	= 3;
		ep_map->ep_mapping[RTL_TXQ_VO]	= 2;
		ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
		ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
		ep_map->ep_mapping[RTL_TXQ_HI]	= 2;
	} else { /* typical setting */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "USB typical Setting.....\n");
		ep_map->ep_mapping[RTL_TXQ_BE]	= 3;
		ep_map->ep_mapping[RTL_TXQ_BK]	= 3;
		ep_map->ep_mapping[RTL_TXQ_VI]	= 2;
		ep_map->ep_mapping[RTL_TXQ_VO]	= 2;
		ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
		ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
		ep_map->ep_mapping[RTL_TXQ_HI]	= 2;
	}
}

/* Queue -> endpoint map for a 3-OUT-endpoint layout. */
static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
			       struct rtl_ep_map *ep_map)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (bwificfg) { /* for WMM */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "USB 3EP Setting for WMM.....\n");
		ep_map->ep_mapping[RTL_TXQ_BE]	= 5;
		ep_map->ep_mapping[RTL_TXQ_BK]	= 3;
		ep_map->ep_mapping[RTL_TXQ_VI]	= 3;
		ep_map->ep_mapping[RTL_TXQ_VO]	= 2;
		ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
		ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
		ep_map->ep_mapping[RTL_TXQ_HI]	= 2;
	} else { /* typical setting */
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "USB 3EP Setting for typical.....\n");
		ep_map->ep_mapping[RTL_TXQ_BE]	= 5;
		ep_map->ep_mapping[RTL_TXQ_BK]	= 5;
		ep_map->ep_mapping[RTL_TXQ_VI]	= 3;
		ep_map->ep_mapping[RTL_TXQ_VO]	= 2;
		ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
		ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
		ep_map->ep_mapping[RTL_TXQ_HI]	= 2;
	}
}

/* Queue -> endpoint map when a single OUT endpoint serves everything. */
static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
{
	ep_map->ep_mapping[RTL_TXQ_BE]	= 2;
	ep_map->ep_mapping[RTL_TXQ_BK]	= 2;
	ep_map->ep_mapping[RTL_TXQ_VI]	= 2;
	ep_map->ep_mapping[RTL_TXQ_VO]	= 2;
	ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
	ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
	ep_map->ep_mapping[RTL_TXQ_HI]	= 2;
}

/*
 * Pick the queue->endpoint map matching out_ep_nums.
 * NOTE(review): for the 3-EP case bIsChipN is passed as the bwificfg
 * argument of _ThreeOutEpMapping (and it is true on that path, so the
 * WMM table is always used) — looks intentional upstream, but confirm.
 */
static int _out_ep_mapping(struct ieee80211_hw *hw)
{
	int err = 0;
	bool bIsChipN, bwificfg = false;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
	struct rtl_ep_map *ep_map = &(rtlusb->ep_map);

	bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
	switch (rtlusb->out_ep_nums) {
	case 2:
		_TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
		break;
	case 3:
		/* Test chip doesn't support three out EPs. */
		if (!bIsChipN) {
			err  =  -EINVAL;
			goto err_out;
		}
		_ThreeOutEpMapping(hw, bIsChipN, ep_map);
		break;
	case 1:
		_OneOutEpMapping(hw, ep_map);
		break;
	default:
		err = -EINVAL;
		break;
	}
err_out:
	return err;
}

/* endpoint mapping */
int  rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
{
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	int error = 0;

	/* Validate the endpoint count, then build the queue map. */
	if (likely(IS_NORMAL_CHIP(rtlhal->version)))
		error = _ConfigVerNOutEP(hw);
	else
		error = _ConfigVerTOutEP(hw);
	if (error)
		goto err_out;
	error = _out_ep_mapping(hw);
	if (error)
		goto err_out;
err_out:
	return error;
}

/* Map a mac80211 queue index (plus frame type) to the driver TX queue. */
u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
{
	u16 hw_queue_index;

	if (unlikely(ieee80211_is_beacon(fc))) {
		hw_queue_index = RTL_TXQ_BCN;
		goto out;
	}
	if (ieee80211_is_mgmt(fc)) {
		hw_queue_index = RTL_TXQ_MGT;
		goto out;
	}
	switch (mac80211_queue_index) {
	case 0:
		hw_queue_index = RTL_TXQ_VO;
		break;
	case 1:
		hw_queue_index = RTL_TXQ_VI;
		break;
	case 2:
		hw_queue_index = RTL_TXQ_BE;
		break;
	case 3:
		hw_queue_index = RTL_TXQ_BK;
		break;
	default:
		hw_queue_index = RTL_TXQ_BE;
		RT_ASSERT(false, "QSLT_BE queue, skb_queue:%d\n",
			  mac80211_queue_index);
		break;
	}
out:
	return hw_queue_index;
}

/* Map a mac80211 queue index (plus frame type) to a firmware QSLT value. */
static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
					 __le16 fc, u16 mac80211_queue_index)
{
	enum rtl_desc_qsel qsel;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (unlikely(ieee80211_is_beacon(fc))) {
		qsel = QSLT_BEACON;
		goto out;
	}
	if (ieee80211_is_mgmt(fc)) {
		qsel = QSLT_MGNT;
		goto out;
	}
	switch (mac80211_queue_index) {
	case 0:	/* VO */
		qsel = QSLT_VO;
		RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
			 "VO queue, set qsel = 0x%x\n", QSLT_VO);
		break;
	case 1:	/* VI */
		qsel = QSLT_VI;
		RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
			 "VI queue, set qsel = 0x%x\n", QSLT_VI);
		break;
	case 3:	/* BK */
		qsel = QSLT_BK;
		RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
			 "BK queue, set qsel = 0x%x\n", QSLT_BK);
		break;
	case 2:	/* BE */
	default:
		qsel = QSLT_BE;
		RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
			 "BE queue, set qsel = 0x%x\n", QSLT_BE);
		break;
	}
out:
	return qsel;
}

/* =============================================================== */

/*----------------------------------------------------------------------
 *
 *	Rx handler
 *
 *---------------------------------------------------------------------- */

/*
 * Parse an RX descriptor into driver stats and mac80211 rx_status.
 * NOTE(review): stats->isampdu is assigned twice; the first store
 * (PAGGR only) is dead, the PAGGR && FAGGR value wins.
 */
bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
			   struct rtl_stats *stats,
			   struct ieee80211_rx_status *rx_status,
			   u8 *pdesc, struct sk_buff *skb)
{
	struct rx_fwinfo_92c *p_drvinfo;
	struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc;
	u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);

	stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
	stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) *
				 RX_DRV_INFO_SIZE_UNIT;
	stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
	stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
	stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
	stats->hwerror = (stats->crc | stats->icv);
	stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
	stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc);
	stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
	stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
	stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) &&
				 (GET_RX_DESC_FAGGR(pdesc) == 1));
	stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
	stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->band = hw->conf.chandef.chan->band;
	if (GET_RX_DESC_CRC32(pdesc))
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (!GET_RX_DESC_SWDEC(pdesc))
		rx_status->flag |= RX_FLAG_DECRYPTED;
	if (GET_RX_DESC_BW(pdesc))
		rx_status->flag |= RX_FLAG_40MHZ;
	if (GET_RX_DESC_RX_HT(pdesc))
		rx_status->flag |= RX_FLAG_HT;
	rx_status->flag |= RX_FLAG_MACTIME_START;
	if (stats->decrypted)
		rx_status->flag |= RX_FLAG_DECRYPTED;
	rx_status->rate_idx = rtlwifi_rate_mapping(hw,
					(bool)GET_RX_DESC_RX_HT(pdesc),
					(u8)GET_RX_DESC_RX_MCS(pdesc),
					(bool)GET_RX_DESC_PAGGR(pdesc));
	rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
	if (phystatus) {
		/* drvinfo (PHY status) follows the shifted packet data */
		p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
						     stats->rx_bufshift);
		rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc,
						 p_drvinfo);
	}
	/*rx_status->qual = stats->signal; */
	rx_status->signal = stats->recvsignalpower + 10;
	/*rx_status->noise = -stats->noise; */
	return true;
}

#define RTL_RX_DRV_INFO_UNIT		8

/*
 * Process a received USB buffer: parse the RX descriptor, strip the
 * descriptor and drvinfo headers, fill rx_status and hand the frame to
 * mac80211 (ieee80211_rx).
 */
static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_rx_status *rx_status =
		 (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb);
	u32 skb_len, pkt_len, drvinfo_len;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 *rxdesc;
	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};
	struct rx_fwinfo_92c *p_drvinfo;
	bool bv;
	__le16 fc;
	struct ieee80211_hdr *hdr;

	memset(rx_status, 0, sizeof(*rx_status));
	rxdesc	= skb->data;
	skb_len	= skb->len;
	drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT);
	pkt_len		= GET_RX_DESC_PKT_LEN(rxdesc);
	/* TODO: Error recovery. drop this skb or something. */
	WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len));
	stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc);
	stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) *
				RX_DRV_INFO_SIZE_UNIT;
	stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03);
	stats.icv = (u16) GET_RX_DESC_ICV(rxdesc);
	stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc);
	stats.hwerror = (stats.crc | stats.icv);
	stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc);
	stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc);
	stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc);
	stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1) &&
				(GET_RX_DESC_FAGGR(rxdesc) == 1));
	stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
	stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
	/* TODO: is center_freq changed when doing scan? */
	/* TODO: Shall we add protection or just skip those two step? */
	rx_status->freq = hw->conf.chandef.chan->center_freq;
	rx_status->band = hw->conf.chandef.chan->band;
	if (GET_RX_DESC_CRC32(rxdesc))
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (!GET_RX_DESC_SWDEC(rxdesc))
		rx_status->flag |= RX_FLAG_DECRYPTED;
	if (GET_RX_DESC_BW(rxdesc))
		rx_status->flag |= RX_FLAG_40MHZ;
	if (GET_RX_DESC_RX_HT(rxdesc))
		rx_status->flag |= RX_FLAG_HT;
	/* Data rate */
	rx_status->rate_idx = rtlwifi_rate_mapping(hw,
					(bool)GET_RX_DESC_RX_HT(rxdesc),
					(u8)GET_RX_DESC_RX_MCS(rxdesc),
					(bool)GET_RX_DESC_PAGGR(rxdesc));
	/*  There is a phy status after this rx descriptor. */
	if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
		p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
		rtl92c_translate_rx_signal_stuff(hw, skb, &stats,
				 (struct rx_desc_92c *)rxdesc, p_drvinfo);
	}
	skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE));
	hdr = (struct ieee80211_hdr *)(skb->data);
	fc = hdr->frame_control;
	bv = ieee80211_is_probe_resp(fc);
	if (bv)
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "Got probe response frame\n");
	if (ieee80211_is_beacon(fc))
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got beacon frame\n");
	if (ieee80211_is_data(fc))
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got data frame\n");
	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:0x%02X\n",
		 fc,
		 (u32)hdr->addr1[0], (u32)hdr->addr1[1],
		 (u32)hdr->addr1[2], (u32)hdr->addr1[3],
		 (u32)hdr->addr1[4], (u32)hdr->addr1[5]);
	memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
	ieee80211_rx(hw, skb);
}

/* USB RX completion entry point: one skb per URB. */
void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb)
{
	_rtl_rx_process(hw, skb);
}

/* Aggregated-RX segmentation is not used on this chip: intentionally empty. */
void rtl8192c_rx_segregate_hdl(
	struct ieee80211_hw *hw,
	struct sk_buff *skb,
	struct sk_buff_head *skb_list)
{
}

/*----------------------------------------------------------------------
 *
 *	Tx handler
 *
 *---------------------------------------------------------------------- */

/* No per-skb TX cleanup needed for USB: intentionally empty. */
void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff  *skb)
{
}

/* TX-completion post handler: nothing to do, always succeeds. */
int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
			 struct sk_buff *skb)
{
	return 0;
}

/* TX aggregation handler: no USB TX aggregation, just dequeue one skb. */
struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw,
					   struct sk_buff_head *list)
{
	return skb_dequeue(list);
}

/*======================================== trx ===============================*/

/* Mark a TX descriptor as a complete, HW-owned single segment. */
static void _rtl_fill_usb_tx_desc(u8 *txdesc)
{
	SET_TX_DESC_OWN(txdesc, 1);
	SET_TX_DESC_LAST_SEG(txdesc, 1);
	SET_TX_DESC_FIRST_SEG(txdesc, 1);
}

/**
 *	For HW recovery information
 *	XOR-fold the first 16 u16 words of the descriptor into its
 *	checksum field (cleared first so it doesn't affect the result).
 */
static void _rtl_tx_desc_checksum(u8 *txdesc)
{
	u16 *ptr = (u16 *)txdesc;
	u16	checksum = 0;
	u32 index;

	/* Clear first */
	SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
	for (index = 0; index < 16; index++)
		checksum = checksum ^ (*(ptr + index));
	SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
}

/*
 * Build the 92C USB TX descriptor for a data/mgmt frame: rate, RTS/CTS,
 * bandwidth, AMPDU, cipher, queue selection and power-save bits, then
 * seal it with the HW checksum.  The descriptor is pushed onto the
 * front of @skb (RTL_TX_HEADER_SIZE bytes).
 */
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
			  struct ieee80211_hdr *hdr, u8 *pdesc_tx,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff *skb,
			  u8 queue_index,
			  struct rtl_tcb_desc *tcb_desc)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	bool defaultadapter = true;
	u8 *qc = ieee80211_get_qos_ctl(hdr);
	u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
	u16 seq_number;
	__le16 fc = hdr->frame_control;
	u8 rate_flag = info->control.rates[0].flags;
	u16 pktlen = skb->len;
	enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc,
						skb_get_queue_mapping(skb));
	u8 *txdesc;

	seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
	rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
	txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
	memset(txdesc, 0, RTL_TX_HEADER_SIZE);
	SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
	SET_TX_DESC_LINIP(txdesc, 0);
	SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET);
	SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE);
	SET_TX_DESC_TX_RATE(txdesc, tcb_desc->hw_rate);
	if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
		SET_TX_DESC_DATA_SHORTGI(txdesc, 1);
	/* enable HW aggregation only when the BA session is up */
	if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
	    info->flags & IEEE80211_TX_CTL_AMPDU) {
		SET_TX_DESC_AGG_ENABLE(txdesc, 1);
		SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14);
	} else {
		SET_TX_DESC_AGG_BREAK(txdesc, 1);
	}
	SET_TX_DESC_SEQ(txdesc, seq_number);
	SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc->rts_enable &&
			       !tcb_desc->cts_enable) ? 1 : 0));
	SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc->rts_enable ||
				  tcb_desc->cts_enable) ? 1 : 0));
	SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc->cts_enable) ? 1 : 0));
	SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc->rts_stbc) ? 1 : 0));
	SET_TX_DESC_RTS_RATE(txdesc, tcb_desc->rts_rate);
	SET_TX_DESC_RTS_BW(txdesc, 0);
	SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc);
	SET_TX_DESC_RTS_SHORT(txdesc,
			      ((tcb_desc->rts_rate <= DESC92_RATE54M) ?
			       (tcb_desc->rts_use_shortpreamble ? 1 : 0) :
			       (tcb_desc->rts_use_shortgi ? 1 : 0)));
	if (mac->bw_40) {
		if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
			SET_TX_DESC_DATA_BW(txdesc, 1);
			SET_TX_DESC_DATA_SC(txdesc, 3);
		} else if (rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			SET_TX_DESC_DATA_BW(txdesc, 1);
			SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
		} else {
			SET_TX_DESC_DATA_BW(txdesc, 0);
			SET_TX_DESC_DATA_SC(txdesc, 0);
		}
	} else {
		SET_TX_DESC_DATA_BW(txdesc, 0);
		SET_TX_DESC_DATA_SC(txdesc, 0);
	}
	/* AMPDU density comes from the peer's HT capabilities */
	rcu_read_lock();
	sta = ieee80211_find_sta(mac->vif, mac->bssid);
	if (sta) {
		u8 ampdu_density = sta->ht_cap.ampdu_density;
		SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
	}
	rcu_read_unlock();
	if (info->control.hw_key) {
		struct ieee80211_key_conf *keyconf = info->control.hw_key;
		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			SET_TX_DESC_SEC_TYPE(txdesc, 0x1);
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			SET_TX_DESC_SEC_TYPE(txdesc, 0x3);
			break;
		default:
			SET_TX_DESC_SEC_TYPE(txdesc, 0x0);
			break;
		}
	}
	SET_TX_DESC_PKT_ID(txdesc, 0);
	SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel);
	SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F);
	SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF);
	SET_TX_DESC_DISABLE_FB(txdesc, 0);
	SET_TX_DESC_USE_RATE(txdesc, tcb_desc->use_driver_rate ? 1 : 0);
	if (ieee80211_is_data_qos(fc)) {
		if (mac->rdg_en) {
			RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
				 "Enable RDG function\n");
			SET_TX_DESC_RDG_ENABLE(txdesc, 1);
			SET_TX_DESC_HTC(txdesc, 1);
		}
	}
	if (rtlpriv->dm.useramask) {
		SET_TX_DESC_RATE_ID(txdesc, tcb_desc->ratr_index);
		SET_TX_DESC_MACID(txdesc, tcb_desc->mac_id);
	} else {
		SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc->ratr_index);
		SET_TX_DESC_MACID(txdesc, tcb_desc->ratr_index);
	}
	/* firmware-controlled LPS: let HW sequence non-QoS frames */
	if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
	    ppsc->fwctrl_lps) {
		SET_TX_DESC_HWSEQ_EN(txdesc, 1);
		SET_TX_DESC_PKT_ID(txdesc, 8);
		if (!defaultadapter)
			SET_TX_DESC_QOS(txdesc, 1);
	}
	if (ieee80211_has_morefrags(fc))
		SET_TX_DESC_MORE_FRAG(txdesc, 1);
	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
	    is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
		SET_TX_DESC_BMC(txdesc, 1);
	_rtl_fill_usb_tx_desc(txdesc);
	_rtl_tx_desc_checksum(txdesc);
	RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
}

/*
 * Build a minimal TX descriptor for firmware-generated frames
 * (PS-Poll / null data) sent from the management queue.
 */
void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
			      u32 buffer_len, bool bIsPsPoll)
{
	/* Clear all status	*/
	memset(pDesc, 0, RTL_TX_HEADER_SIZE);
	SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */
	SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */
	SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
	SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */
	SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
	/* Set NAVUSEHDR to prevent Ps-poll AId field from being changed to an
	 * erroneous value by Hw. */
	if (bIsPsPoll) {
		SET_TX_DESC_NAV_USE_HDR(pDesc, 1);
	} else {
		SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */
		SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. */
	}
	SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
	SET_TX_DESC_OWN(pDesc, 1);
	SET_TX_DESC_TX_RATE(pDesc, DESC92_RATE1M);
	_rtl_tx_desc_checksum(pDesc);
}

/* Build the TX descriptor for a driver command (H2C) frame. */
void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
			     u8 *pdesc, bool firstseg,
			     bool lastseg, struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 fw_queue = QSLT_BEACON;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
	__le16 fc = hdr->frame_control;

	memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
	if (firstseg)
		SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
	SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
	SET_TX_DESC_SEQ(pdesc, 0);
	SET_TX_DESC_LINIP(pdesc, 0);
	SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
	SET_TX_DESC_FIRST_SEG(pdesc, 1);
	SET_TX_DESC_LAST_SEG(pdesc, 1);
	SET_TX_DESC_RATE_ID(pdesc, 7);
	SET_TX_DESC_MACID(pdesc, 0);
	SET_TX_DESC_OWN(pdesc, 1);
	SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len);
	SET_TX_DESC_FIRST_SEG(pdesc, 1);
	SET_TX_DESC_LAST_SEG(pdesc, 1);
	SET_TX_DESC_OFFSET(pdesc, 0x20);
	SET_TX_DESC_USE_RATE(pdesc, 1);
	if (!ieee80211_is_data_qos(fc)) {
		SET_TX_DESC_HWSEQ_EN(pdesc, 1);
		SET_TX_DESC_PKT_ID(pdesc, 8);
	}
	RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content",
		      pdesc, RTL_TX_DESC_SIZE);
}

/* Command frames need no extra gating on USB: always allow sending. */
bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	return true;
}
Silentlys/android_kernel_lenovo_msm8916
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
C
gpl-2.0
21,493
/* * Connection tracking support for PPTP (Point to Point Tunneling Protocol). * PPTP is a a protocol for creating virtual private networks. * It is a specification defined by Microsoft and some vendors * working with Microsoft. PPTP is built on top of a modified * version of the Internet Generic Routing Encapsulation Protocol. * GRE is defined in RFC 1701 and RFC 1702. Documentation of * PPTP can be found in RFC 2637 * * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> * * Development of this code funded by Astaro AG (http://www.astaro.com/) * * (C) 2006-2012 Patrick McHardy <kaber@trash.net> * * Limitations: * - We blindly assume that control connections are always * established in PNS->PAC direction. This is a violation * of RFC 2637 * - We can only support one single call within each session * TODO: * - testing of incoming PPTP calls */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/tcp.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> #include <linux/netfilter/nf_conntrack_pptp.h> #define NF_CT_PPTP_VERSION "3.1" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP"); MODULE_ALIAS("ip_conntrack_pptp"); MODULE_ALIAS_NFCT_HELPER("pptp"); static DEFINE_SPINLOCK(nf_pptp_lock); int (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound); int (*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq) __read_mostly; 
EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound); void (*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *expect_orig, struct nf_conntrack_expect *expect_reply) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_exp_gre); void (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) /* PptpControlMessageType names */ const char *const pptp_msg_name[] = { "UNKNOWN_MESSAGE", "START_SESSION_REQUEST", "START_SESSION_REPLY", "STOP_SESSION_REQUEST", "STOP_SESSION_REPLY", "ECHO_REQUEST", "ECHO_REPLY", "OUT_CALL_REQUEST", "OUT_CALL_REPLY", "IN_CALL_REQUEST", "IN_CALL_REPLY", "IN_CALL_CONNECT", "CALL_CLEAR_REQUEST", "CALL_DISCONNECT_NOTIFY", "WAN_ERROR_NOTIFY", "SET_LINK_INFO" }; EXPORT_SYMBOL(pptp_msg_name); #endif #define SECS *HZ #define MINS * 60 SECS #define HOURS * 60 MINS #define PPTP_GRE_TIMEOUT (10 MINS) #define PPTP_GRE_STREAM_TIMEOUT (5 HOURS) static void pptp_expectfn(struct nf_conn *ct, struct nf_conntrack_expect *exp) { struct net *net = nf_ct_net(ct); typeof(nf_nat_pptp_hook_expectfn) nf_nat_pptp_expectfn; pr_debug("increasing timeouts\n"); /* increase timeout of GRE data channel conntrack entry */ ct->proto.gre.timeout = PPTP_GRE_TIMEOUT; ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT; /* Can you see how rusty this code is, compared with the pre-2.6.11 * one? 
That's what happened to my shiny newnat of 2002 ;( -HW */ rcu_read_lock(); nf_nat_pptp_expectfn = rcu_dereference(nf_nat_pptp_hook_expectfn); if (nf_nat_pptp_expectfn && ct->master->status & IPS_NAT_MASK) nf_nat_pptp_expectfn(ct, exp); else { struct nf_conntrack_tuple inv_t; struct nf_conntrack_expect *exp_other; /* obviously this tuple inversion only works until you do NAT */ nf_ct_invert_tuplepr(&inv_t, &exp->tuple); pr_debug("trying to unexpect other dir: "); nf_ct_dump_tuple(&inv_t); exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t); if (exp_other) { /* delete other expectation. */ pr_debug("found\n"); nf_ct_unexpect_related(exp_other); nf_ct_expect_put(exp_other); } else { pr_debug("not found\n"); } } rcu_read_unlock(); } static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct, const struct nf_conntrack_tuple *t) { const struct nf_conntrack_tuple_hash *h; struct nf_conntrack_expect *exp; struct nf_conn *sibling; u16 zone = nf_ct_zone(ct); pr_debug("trying to timeout ct or exp for tuple "); nf_ct_dump_tuple(t); h = nf_conntrack_find_get(net, zone, t); if (h) { sibling = nf_ct_tuplehash_to_ctrack(h); pr_debug("setting timeout of conntrack %p to 0\n", sibling); sibling->proto.gre.timeout = 0; sibling->proto.gre.stream_timeout = 0; if (del_timer(&sibling->timeout)) sibling->timeout.function((unsigned long)sibling); nf_ct_put(sibling); return 1; } else { exp = nf_ct_expect_find_get(net, zone, t); if (exp) { pr_debug("unexpect_related of expect %p\n", exp); nf_ct_unexpect_related(exp); nf_ct_expect_put(exp); return 1; } } return 0; } /* timeout GRE data connections */ static void pptp_destroy_siblings(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); const struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); struct nf_conntrack_tuple t; nf_ct_gre_keymap_destroy(ct); /* try original (pns->pac) tuple */ memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t)); t.dst.protonum = IPPROTO_GRE; t.src.u.gre.key = 
ct_pptp_info->pns_call_id; t.dst.u.gre.key = ct_pptp_info->pac_call_id; if (!destroy_sibling_or_exp(net, ct, &t)) pr_debug("failed to timeout original pns->pac ct/exp\n"); /* try reply (pac->pns) tuple */ memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t)); t.dst.protonum = IPPROTO_GRE; t.src.u.gre.key = ct_pptp_info->pac_call_id; t.dst.u.gre.key = ct_pptp_info->pns_call_id; if (!destroy_sibling_or_exp(net, ct, &t)) pr_debug("failed to timeout reply pac->pns ct/exp\n"); } /* expect GRE connections (PNS->PAC and PAC->PNS direction) */ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid) { struct nf_conntrack_expect *exp_orig, *exp_reply; enum ip_conntrack_dir dir; int ret = 1; typeof(nf_nat_pptp_hook_exp_gre) nf_nat_pptp_exp_gre; exp_orig = nf_ct_expect_alloc(ct); if (exp_orig == NULL) goto out; exp_reply = nf_ct_expect_alloc(ct); if (exp_reply == NULL) goto out_put_orig; /* original direction, PNS->PAC */ dir = IP_CT_DIR_ORIGINAL; nf_ct_expect_init(exp_orig, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[dir].tuple.dst.u3, IPPROTO_GRE, &peer_callid, &callid); exp_orig->expectfn = pptp_expectfn; /* reply direction, PAC->PNS */ dir = IP_CT_DIR_REPLY; nf_ct_expect_init(exp_reply, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[dir].tuple.dst.u3, IPPROTO_GRE, &callid, &peer_callid); exp_reply->expectfn = pptp_expectfn; nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre); if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) nf_nat_pptp_exp_gre(exp_orig, exp_reply); if (nf_ct_expect_related(exp_orig) != 0) goto out_put_both; if (nf_ct_expect_related(exp_reply) != 0) goto out_unexpect_orig; /* Add GRE keymap entries */ if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_ORIGINAL, &exp_orig->tuple) != 0) goto out_unexpect_both; if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_REPLY, &exp_reply->tuple) != 0) { nf_ct_gre_keymap_destroy(ct); goto out_unexpect_both; 
} ret = 0; out_put_both: nf_ct_expect_put(exp_reply); out_put_orig: nf_ct_expect_put(exp_orig); out: return ret; out_unexpect_both: nf_ct_unexpect_related(exp_reply); out_unexpect_orig: nf_ct_unexpect_related(exp_orig); goto out_put_both; } static inline int pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq, unsigned int reqlen, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { struct nf_ct_pptp_master *info = nfct_help_data(ct); u_int16_t msg; __be16 cid = 0, pcid = 0; typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; msg = ntohs(ctlh->messageType); pr_debug("inbound control message %s\n", pptp_msg_name[msg]); switch (msg) { case PPTP_START_SESSION_REPLY: /* server confirms new control session */ if (info->sstate < PPTP_SESSION_REQUESTED) goto invalid; if (pptpReq->srep.resultCode == PPTP_START_OK) info->sstate = PPTP_SESSION_CONFIRMED; else info->sstate = PPTP_SESSION_ERROR; break; case PPTP_STOP_SESSION_REPLY: /* server confirms end of control session */ if (info->sstate > PPTP_SESSION_STOPREQ) goto invalid; if (pptpReq->strep.resultCode == PPTP_STOP_OK) info->sstate = PPTP_SESSION_NONE; else info->sstate = PPTP_SESSION_ERROR; break; case PPTP_OUT_CALL_REPLY: /* server accepted call, we now expect GRE frames */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; if (info->cstate != PPTP_CALL_OUT_REQ && info->cstate != PPTP_CALL_OUT_CONF) goto invalid; cid = pptpReq->ocack.callID; pcid = pptpReq->ocack.peersCallID; if (info->pns_call_id != pcid) goto invalid; pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], ntohs(cid), ntohs(pcid)); if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { info->cstate = PPTP_CALL_OUT_CONF; info->pac_call_id = cid; exp_gre(ct, cid, pcid); } else info->cstate = PPTP_CALL_NONE; break; case PPTP_IN_CALL_REQUEST: /* server tells us about incoming call request */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; cid = 
pptpReq->icreq.callID; pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); info->cstate = PPTP_CALL_IN_REQ; info->pac_call_id = cid; break; case PPTP_IN_CALL_CONNECT: /* server tells us about incoming call established */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; if (info->cstate != PPTP_CALL_IN_REP && info->cstate != PPTP_CALL_IN_CONF) goto invalid; pcid = pptpReq->iccon.peersCallID; cid = info->pac_call_id; if (info->pns_call_id != pcid) goto invalid; pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); info->cstate = PPTP_CALL_IN_CONF; /* we expect a GRE connection from PAC to PNS */ exp_gre(ct, cid, pcid); break; case PPTP_CALL_DISCONNECT_NOTIFY: /* server confirms disconnect */ cid = pptpReq->disc.callID; pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); info->cstate = PPTP_CALL_NONE; /* untrack this call id, unexpect GRE packets */ pptp_destroy_siblings(ct); break; case PPTP_WAN_ERROR_NOTIFY: case PPTP_SET_LINK_INFO: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* I don't have to explain these ;) */ break; default: goto invalid; } nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound); if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK) return nf_nat_pptp_inbound(skb, ct, ctinfo, protoff, ctlh, pptpReq); return NF_ACCEPT; invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; } static inline int pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq, unsigned int reqlen, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { struct nf_ct_pptp_master *info = nfct_help_data(ct); u_int16_t msg; __be16 cid = 0, pcid = 0; typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; msg = ntohs(ctlh->messageType); pr_debug("outbound control message %s\n", pptp_msg_name[msg]); switch (msg) { case PPTP_START_SESSION_REQUEST: /* client requests for new control session */ if (info->sstate != PPTP_SESSION_NONE) goto invalid; info->sstate = PPTP_SESSION_REQUESTED; break; case PPTP_STOP_SESSION_REQUEST: /* client requests end of control session */ info->sstate = PPTP_SESSION_STOPREQ; break; case PPTP_OUT_CALL_REQUEST: /* client initiating connection to server */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; info->cstate = PPTP_CALL_OUT_REQ; /* track PNS call id */ cid = pptpReq->ocreq.callID; pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); info->pns_call_id = cid; break; case PPTP_IN_CALL_REPLY: /* client answers incoming call */ if (info->cstate != PPTP_CALL_IN_REQ && info->cstate != PPTP_CALL_IN_REP) goto invalid; cid = pptpReq->icack.callID; pcid = pptpReq->icack.peersCallID; if (info->pac_call_id != pcid) goto invalid; pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], ntohs(cid), ntohs(pcid)); if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { /* part two of the three-way handshake */ info->cstate = PPTP_CALL_IN_REP; info->pns_call_id = cid; } else info->cstate = PPTP_CALL_NONE; break; case PPTP_CALL_CLEAR_REQUEST: /* client requests hangup of call */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; /* FUTURE: iterate over all calls and check if * call ID is valid. 
We don't do this without newnat, * because we only know about last call */ info->cstate = PPTP_CALL_CLEAR_REQ; break; case PPTP_SET_LINK_INFO: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* I don't have to explain these ;) */ break; default: goto invalid; } nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound); if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK) return nf_nat_pptp_outbound(skb, ct, ctinfo, protoff, ctlh, pptpReq); return NF_ACCEPT; invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; } static const unsigned int pptp_msg_size[] = { [PPTP_START_SESSION_REQUEST] = sizeof(struct PptpStartSessionRequest), [PPTP_START_SESSION_REPLY] = sizeof(struct PptpStartSessionReply), [PPTP_STOP_SESSION_REQUEST] = sizeof(struct PptpStopSessionRequest), [PPTP_STOP_SESSION_REPLY] = sizeof(struct PptpStopSessionReply), [PPTP_OUT_CALL_REQUEST] = sizeof(struct PptpOutCallRequest), [PPTP_OUT_CALL_REPLY] = sizeof(struct PptpOutCallReply), [PPTP_IN_CALL_REQUEST] = sizeof(struct PptpInCallRequest), [PPTP_IN_CALL_REPLY] = sizeof(struct PptpInCallReply), [PPTP_IN_CALL_CONNECT] = sizeof(struct PptpInCallConnected), [PPTP_CALL_CLEAR_REQUEST] = sizeof(struct PptpClearCallRequest), [PPTP_CALL_DISCONNECT_NOTIFY] = sizeof(struct PptpCallDisconnectNotify), [PPTP_WAN_ERROR_NOTIFY] = sizeof(struct PptpWanErrorNotify), [PPTP_SET_LINK_INFO] = sizeof(struct PptpSetLinkInfo), }; /* track caller id inside control connection, call expect_related */ static int conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { int dir = CTINFO2DIR(ctinfo); const struct nf_ct_pptp_master *info = nfct_help_data(ct); const struct tcphdr *tcph; struct tcphdr _tcph; const struct pptp_pkt_hdr *pptph; 
struct pptp_pkt_hdr _pptph; struct PptpControlHeader _ctlh, *ctlh; union pptp_ctrl_union _pptpReq, *pptpReq; unsigned int tcplen = skb->len - protoff; unsigned int datalen, reqlen, nexthdr_off; int oldsstate, oldcstate; int ret; u_int16_t msg; /* don't do any tracking before tcp handshake complete */ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; nexthdr_off = protoff; tcph = skb_header_pointer(skb, nexthdr_off, sizeof(_tcph), &_tcph); BUG_ON(!tcph); nexthdr_off += tcph->doff * 4; datalen = tcplen - tcph->doff * 4; pptph = skb_header_pointer(skb, nexthdr_off, sizeof(_pptph), &_pptph); if (!pptph) { pr_debug("no full PPTP header, can't track\n"); return NF_ACCEPT; } nexthdr_off += sizeof(_pptph); datalen -= sizeof(_pptph); /* if it's not a control message we can't do anything with it */ if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL || ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) { pr_debug("not a control packet\n"); return NF_ACCEPT; } ctlh = skb_header_pointer(skb, nexthdr_off, sizeof(_ctlh), &_ctlh); if (!ctlh) return NF_ACCEPT; nexthdr_off += sizeof(_ctlh); datalen -= sizeof(_ctlh); reqlen = datalen; msg = ntohs(ctlh->messageType); if (msg > 0 && msg <= PPTP_MSG_MAX && reqlen < pptp_msg_size[msg]) return NF_ACCEPT; if (reqlen > sizeof(*pptpReq)) reqlen = sizeof(*pptpReq); pptpReq = skb_header_pointer(skb, nexthdr_off, reqlen, &_pptpReq); if (!pptpReq) return NF_ACCEPT; oldsstate = info->sstate; oldcstate = info->cstate; spin_lock_bh(&nf_pptp_lock); /* FIXME: We just blindly assume that the control connection is always * established from PNS->PAC. 
However, RFC makes no guarantee */ if (dir == IP_CT_DIR_ORIGINAL) /* client -> server (PNS -> PAC) */ ret = pptp_outbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct, ctinfo); else /* server -> client (PAC -> PNS) */ ret = pptp_inbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct, ctinfo); pr_debug("sstate: %d->%d, cstate: %d->%d\n", oldsstate, info->sstate, oldcstate, info->cstate); spin_unlock_bh(&nf_pptp_lock); return ret; } static const struct nf_conntrack_expect_policy pptp_exp_policy = { .max_expected = 2, .timeout = 5 * 60, }; /* control protocol helper */ static struct nf_conntrack_helper pptp __read_mostly = { .name = "pptp", .me = THIS_MODULE, .data_len = sizeof(struct nf_ct_pptp_master), .tuple.src.l3num = AF_INET, .tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT), .tuple.dst.protonum = IPPROTO_TCP, .help = conntrack_pptp_help, .destroy = pptp_destroy_siblings, .expect_policy = &pptp_exp_policy, }; static void nf_conntrack_pptp_net_exit(struct net *net) { nf_ct_gre_keymap_flush(net); } static struct pernet_operations nf_conntrack_pptp_net_ops = { .exit = nf_conntrack_pptp_net_exit, }; static int __init nf_conntrack_pptp_init(void) { int rv; rv = nf_conntrack_helper_register(&pptp); if (rv < 0) return rv; rv = register_pernet_subsys(&nf_conntrack_pptp_net_ops); if (rv < 0) nf_conntrack_helper_unregister(&pptp); return rv; } static void __exit nf_conntrack_pptp_fini(void) { nf_conntrack_helper_unregister(&pptp); unregister_pernet_subsys(&nf_conntrack_pptp_net_ops); } module_init(nf_conntrack_pptp_init); module_exit(nf_conntrack_pptp_fini);
gchild320/shamu
net/netfilter/nf_conntrack_pptp.c
C
gpl-2.0
19,028
/*
 * MPC86xx HPCN board specific routines
 *
 * Recode: ZHANG WEI <wei.zhang@freescale.com>
 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
 *
 * Copyright 2006 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>

#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/swiotlb.h>

#include <asm/mpic.h>

#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>

#include "mpc86xx.h"

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_ERR fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif

#ifdef CONFIG_PCI
extern int uli_exclude_device(struct pci_controller *hose,
				u_char bus, u_char devfn);

/*
 * PCI config-access filter: devices behind the primary (ULI) bridge are
 * screened by uli_exclude_device(); everything else is accessible.
 */
static int mpc86xx_exclude_device(struct pci_controller *hose,
				   u_char bus, u_char devfn)
{
	if (hose->dn == fsl_pci_primary)
		return uli_exclude_device(hose, bus, devfn);

	return PCIBIOS_SUCCESSFUL;
}
#endif	/* CONFIG_PCI */

/*
 * Board setup: install the PCI exclude hook, bring up the secondary CPU
 * (SMP), pick the primary PCI bus and probe for memory above 4GB needing
 * swiotlb bounce buffers.
 */
static void __init
mpc86xx_hpcn_setup_arch(void)
{
	if (ppc_md.progress)
		ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0);

#ifdef CONFIG_PCI
	ppc_md.pci_exclude_device = mpc86xx_exclude_device;
#endif

	printk("MPC86xx HPCN board from Freescale Semiconductor\n");

#ifdef CONFIG_SMP
	mpc86xx_smp_init();
#endif

	fsl_pci_assign_primary();

	swiotlb_detect_4g();
}

/* /proc/cpuinfo hook: report the vendor and the System Version Register. */
static void
mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
{
	uint svid = mfspr(SPRN_SVR);

	seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");

	seq_printf(m, "SVR\t\t: 0x%x\n", svid);
}


/*
 * Called very early, device-tree isn't unflattened
 */
static int __init mpc86xx_hpcn_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (of_flat_dt_is_compatible(root, "fsl,mpc8641hpcn"))
		return 1;	/* Looks good */

	/* Be nice and don't give silent boot death.  Delete this in 2.6.27 */
	if (of_flat_dt_is_compatible(root, "mpc86xx")) {
		pr_warning("WARNING: your dts/dtb is old. You must update before the next kernel release\n");
		return 1;
	}

	return 0;
}

/* Zero the time base and enable it via HID0 so the decrementer can run. */
static long __init
mpc86xx_time_init(void)
{
	unsigned int temp;

	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	temp = mfspr(SPRN_HID0);
	temp |= HID0_TBEN;
	mtspr(SPRN_HID0, temp);
	/* barrier so the HID0 write completes before timekeeping starts */
	asm volatile("isync");

	return 0;
}

/* Bus types whose children should be probed as platform devices. */
static __initdata struct of_device_id of_bus_ids[] = {
	{ .compatible = "simple-bus", },
	{ .compatible = "fsl,srio", },
	{ .compatible = "gianfar", },
	{ .compatible = "fsl,mpc8641-pcie", },
	{},
};

static int __init declare_of_platform_devices(void)
{
	of_platform_bus_probe(NULL, of_bus_ids, NULL);

	return 0;
}
machine_arch_initcall(mpc86xx_hpcn, declare_of_platform_devices);
machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier);

/* Machine description: callbacks the powerpc core uses for this board. */
define_machine(mpc86xx_hpcn) {
	.name			= "MPC86xx HPCN",
	.probe			= mpc86xx_hpcn_probe,
	.setup_arch		= mpc86xx_hpcn_setup_arch,
	.init_IRQ		= mpc86xx_init_irq,
	.show_cpuinfo		= mpc86xx_hpcn_show_cpuinfo,
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.time_init		= mpc86xx_time_init,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
#endif
};
invisiblek/android_kernel_lge_dory
arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
C
gpl-2.0
3,599
/*
 * linux/drivers/leds-pwm.c
 *
 * simple PWM based LED control
 *
 * Copyright 2009 Luotao Fu @ Pengutronix (l.fu@pengutronix.de)
 *
 * based on leds-gpio.c by Raphael Assenat <raph@8d.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
#include <linux/leds_pwm.h>
#include <linux/slab.h>

/* Per-LED state: embeds the classdev so container_of() can recover it. */
struct led_pwm_data {
	struct led_classdev	cdev;	/* must be first for container_of */
	struct pwm_device	*pwm;
	unsigned int 		active_low;	/* from platform data; NOTE(review): stored but not consulted here */
	unsigned int		period;	/* PWM period in nanoseconds */
};

/*
 * Brightness callback: map brightness linearly onto the PWM duty cycle;
 * brightness 0 turns the PWM off entirely.
 */
static void led_pwm_set(struct led_classdev *led_cdev,
	enum led_brightness brightness)
{
	struct led_pwm_data *led_dat =
		container_of(led_cdev, struct led_pwm_data, cdev);
	unsigned int max = led_dat->cdev.max_brightness;
	unsigned int period =  led_dat->period;

	if (brightness == 0) {
		pwm_config(led_dat->pwm, 0, period);
		pwm_disable(led_dat->pwm);
	} else {
		pwm_config(led_dat->pwm, brightness * period / max, period);
		pwm_enable(led_dat->pwm);
	}
}

/*
 * Probe: for each LED in the platform data, claim its PWM channel and
 * register a LED class device.  On any failure, unwind everything
 * registered so far (classdev unregister + pwm_free, newest first).
 */
static int led_pwm_probe(struct platform_device *pdev)
{
	struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
	struct led_pwm *cur_led;
	struct led_pwm_data *leds_data, *led_dat;
	int i, ret = 0;

	if (!pdata)
		return -EBUSY;

	leds_data = kzalloc(sizeof(struct led_pwm_data) * pdata->num_leds,
				GFP_KERNEL);
	if (!leds_data)
		return -ENOMEM;

	for (i = 0; i < pdata->num_leds; i++) {
		cur_led = &pdata->leds[i];
		led_dat = &leds_data[i];

		led_dat->pwm = pwm_request(cur_led->pwm_id,
				cur_led->name);
		if (IS_ERR(led_dat->pwm)) {
			ret = PTR_ERR(led_dat->pwm);
			dev_err(&pdev->dev, "unable to request PWM %d\n",
					cur_led->pwm_id);
			goto err;
		}

		led_dat->cdev.name = cur_led->name;
		led_dat->cdev.default_trigger = cur_led->default_trigger;
		led_dat->active_low = cur_led->active_low;
		led_dat->period = cur_led->pwm_period_ns;
		led_dat->cdev.brightness_set = led_pwm_set;
		led_dat->cdev.brightness = LED_OFF;
		led_dat->cdev.max_brightness = cur_led->max_brightness;
		led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;

		ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
		if (ret < 0) {
			/* current LED's pwm is claimed but cdev isn't
			 * registered yet, so free the pwm here and let the
			 * err path handle the fully-initialized entries */
			pwm_free(led_dat->pwm);
			goto err;
		}
	}

	platform_set_drvdata(pdev, leds_data);

	return 0;

err:
	if (i > 0) {
		for (i = i - 1; i >= 0; i--) {
			led_classdev_unregister(&leds_data[i].cdev);
			pwm_free(leds_data[i].pwm);
		}
	}

	kfree(leds_data);

	return ret;
}

/* Remove: tear down every LED registered in probe, then free the array. */
static int __devexit led_pwm_remove(struct platform_device *pdev)
{
	int i;
	struct led_pwm_platform_data *pdata = pdev->dev.platform_data;
	struct led_pwm_data *leds_data;

	leds_data = platform_get_drvdata(pdev);

	for (i = 0; i < pdata->num_leds; i++) {
		led_classdev_unregister(&leds_data[i].cdev);
		pwm_free(leds_data[i].pwm);
	}

	kfree(leds_data);

	return 0;
}

static struct platform_driver led_pwm_driver = {
	.probe		= led_pwm_probe,
	.remove		= __devexit_p(led_pwm_remove),
	.driver		= {
		.name	= "leds_pwm",
		.owner	= THIS_MODULE,
	},
};

static int __init led_pwm_init(void)
{
	return platform_driver_register(&led_pwm_driver);
}

static void __exit led_pwm_exit(void)
{
	platform_driver_unregister(&led_pwm_driver);
}

module_init(led_pwm_init);
module_exit(led_pwm_exit);

MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("PWM LED driver for PXA");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:leds-pwm");
opensgn/N8000
drivers/leds/leds-pwm.c
C
gpl-2.0
3,597
/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 * (C) Copyright 2002 Hewlett-Packard Company
 *
 * SA1111 Bus Glue
 *
 * Written by Christopher Hoover <ch@hpl.hp.com>
 * Based on fragments of previous driver by Russell King et al.
 *
 * This file is licenced under the GPL.
 */

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <mach/assabet.h>
#include <mach/badge4.h>
#include <asm/hardware/sa1111.h>

#ifndef CONFIG_SA1111
#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined."
#endif

extern int usb_disabled(void);

/*-------------------------------------------------------------------------*/

/*
 * Power up (Badge4), configure the power sense/control polarity for known
 * boards, then release the controller from reset with its clock enabled.
 * Ordering matters: reset is asserted before the clock is enabled, and a
 * short delay separates clock enable from reset release.
 */
static void sa1111_start_hc(struct sa1111_dev *dev)
{
	unsigned int usb_rst = 0;

	printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
	       __FILE__);

#ifdef CONFIG_SA1100_BADGE4
	if (machine_is_badge4()) {
		badge4_set_5V(BADGE4_5V_USB, 1);
	}
#endif

	if (machine_is_xp860() ||
	    machine_has_neponset() ||
	    machine_is_pfs168() ||
	    machine_is_badge4())
		usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW;

	/*
	 * Configure the power sense and control lines.  Place the USB
	 * host controller in reset.
	 */
	sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
		      dev->mapbase + SA1111_USB_RESET);

	/*
	 * Now, carefully enable the USB clock, and take
	 * the USB host controller out of reset.
	 */
	sa1111_enable_device(dev);
	udelay(11);
	sa1111_writel(usb_rst, dev->mapbase + SA1111_USB_RESET);
}

/* Reverse of sa1111_start_hc(): hold in reset, gate the clock, drop power. */
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
	unsigned int usb_rst;
	printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
	       __FILE__);

	/*
	 * Put the USB host controller into reset.
	 */
	usb_rst = sa1111_readl(dev->mapbase + SA1111_USB_RESET);
	sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
		      dev->mapbase + SA1111_USB_RESET);

	/*
	 * Stop the USB clock.
	 */
	sa1111_disable_device(dev);

#ifdef CONFIG_SA1100_BADGE4
	if (machine_is_badge4()) {
		/* Disable power to the USB bus */
		badge4_set_5V(BADGE4_5V_USB, 0);
	}
#endif
}


/*-------------------------------------------------------------------------*/

#if 0
static void dump_hci_status(struct usb_hcd *hcd, const char *label)
{
	unsigned long status = sa1111_readl(hcd->regs + SA1111_USB_STATUS);

	dbg ("%s USB_STATUS = { %s%s%s%s%s}", label,
	     ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
	     ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
	     ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
	     ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
	     ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
}
#endif

/*-------------------------------------------------------------------------*/

/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */


/**
 * usb_hcd_sa1111_probe - initialize SA-1111-based HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 * Store this function in the HCD's struct pci_driver as probe().
 */
int usb_hcd_sa1111_probe (const struct hc_driver *driver,
			  struct sa1111_dev *dev)
{
	struct usb_hcd *hcd;
	int retval;

	hcd = usb_create_hcd (driver, &dev->dev, "sa1111");
	if (!hcd)
		return -ENOMEM;
	hcd->rsrc_start = dev->res.start;
	hcd->rsrc_len = dev->res.end - dev->res.start + 1;

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		dbg("request_mem_region failed");
		retval = -EBUSY;
		goto err1;
	}
	hcd->regs = dev->mapbase;

	sa1111_start_hc(dev);
	ohci_hcd_init(hcd_to_ohci(hcd));

	retval = usb_add_hcd(hcd, dev->irq[1], IRQF_DISABLED);
	if (retval == 0)
		return retval;

	/* usb_add_hcd() failed: undo the hardware start and region claim */
	sa1111_stop_hc(dev);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 err1:
	usb_put_hcd(hcd);
	return retval;
}


/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */

/**
 * usb_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
 * @dev: USB Host Controller being removed
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_sa1111_probe(), first invoking
 * the HCD's stop() method.  It is always called from a thread
 * context, normally "rmmod", "apmd", or something similar.
 *
 */
void usb_hcd_sa1111_remove (struct usb_hcd *hcd, struct sa1111_dev *dev)
{
	usb_remove_hcd(hcd);
	sa1111_stop_hc(dev);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
}

/*-------------------------------------------------------------------------*/

/* hc_driver .start callback: init the OHCI core, then kick it running. */
static int __devinit
ohci_sa1111_start (struct usb_hcd *hcd)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	int		ret;

	if ((ret = ohci_init(ohci)) < 0)
		return ret;

	if ((ret = ohci_run (ohci)) < 0) {
		err ("can't start %s", hcd->self.bus_name);
		ohci_stop (hcd);
		return ret;
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

static const struct hc_driver ohci_sa1111_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"SA-1111 OHCI",
	.hcd_priv_size =	sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.start =		ohci_sa1111_start,
	.stop =			ohci_stop,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
#ifdef	CONFIG_PM
	.bus_suspend =		ohci_bus_suspend,
	.bus_resume =		ohci_bus_resume,
#endif
	.start_port_reset =	ohci_start_port_reset,
};

/*-------------------------------------------------------------------------*/

/* sa1111 bus probe: thin wrapper delegating to usb_hcd_sa1111_probe(). */
static int ohci_hcd_sa1111_drv_probe(struct sa1111_dev *dev)
{
	int ret;

	if (usb_disabled())
		return -ENODEV;

	ret = usb_hcd_sa1111_probe(&ohci_sa1111_hc_driver, dev);
	return ret;
}

static int ohci_hcd_sa1111_drv_remove(struct sa1111_dev *dev)
{
	struct usb_hcd *hcd = sa1111_get_drvdata(dev);

	usb_hcd_sa1111_remove(hcd, dev);
	return 0;
}

static struct sa1111_driver ohci_hcd_sa1111_driver = {
	.drv = {
		.name	= "sa1111-ohci",
	},
	.devid		= SA1111_DEVID_USB,
	.probe		= ohci_hcd_sa1111_drv_probe,
	.remove		= ohci_hcd_sa1111_drv_remove,
};
amitnarkhede/MadKernel_cooper
drivers/usb/host/ohci-sa1111.c
C
gpl-2.0
6,699
/* * ALSA driver for the Aureal Vortex family of soundprocessors. * Author: Manuel Jander (mjander@embedded.cl) * * This driver is the result of the OpenVortex Project from Savannah * (savannah.nongnu.org/projects/openvortex). I would like to thank * the developers of OpenVortex, Jeff Muizelaar and Kester Maddock, from * whom i got plenty of help, and their codebase was invaluable. * Thanks to the ALSA developers, they helped a lot working out * the ALSA part. * Thanks also to Sourceforge for maintaining the old binary drivers, * and the forum, where developers could comunicate. * * Now at least i can play Legacy DOOM with MIDI music :-) */ #include "au88x0.h" #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <sound/initval.h> // module parameters (see "Module Parameters") static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static int pcifix[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = 255 }; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard."); module_param_array(pcifix, int, NULL, 0444); MODULE_PARM_DESC(pcifix, "Enable VIA-workaround for " CARD_NAME " soundcard."); MODULE_DESCRIPTION("Aureal vortex"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Aureal Semiconductor Inc., Aureal Vortex Sound Processor}}"); MODULE_DEVICE_TABLE(pci, snd_vortex_ids); static void vortex_fix_latency(struct pci_dev *vortex) { int rc; if (!(rc = pci_write_config_byte(vortex, 0x40, 0xff))) { printk(KERN_INFO CARD_NAME ": vortex latency is 0xff\n"); } else { printk(KERN_WARNING CARD_NAME ": could not set vortex latency: pci error 0x%x\n", rc); } } static void vortex_fix_agp_bridge(struct pci_dev *via) { int rc; u8 value; /* * only set the bit (Extend PCI#2 Internal Master for * Efficient Handling of Dummy Requests) if the can * read the config and it is not already set */ if (!(rc = pci_read_config_byte(via, 0x42, &value)) && ((value & 0x10) || !(rc = pci_write_config_byte(via, 0x42, value | 0x10)))) { printk(KERN_INFO CARD_NAME ": bridge config is 0x%x\n", value | 0x10); } else { printk(KERN_WARNING CARD_NAME ": could not set vortex latency: pci error 0x%x\n", rc); } } static void __devinit snd_vortex_workaround(struct pci_dev *vortex, int fix) { struct pci_dev *via = NULL; /* autodetect if workarounds are required */ if (fix == 255) { /* VIA KT133 */ via = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8365_1, NULL); /* VIA Apollo */ if (via == NULL) { via = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C598_1, NULL); /* AMD Irongate */ if (via == NULL) via = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_7007, NULL); } if (via) { printk(KERN_INFO 
CARD_NAME ": Activating latency workaround...\n"); vortex_fix_latency(vortex); vortex_fix_agp_bridge(via); } } else { if (fix & 0x1) vortex_fix_latency(vortex); if ((fix & 0x2) && (via = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8365_1, NULL))) vortex_fix_agp_bridge(via); if ((fix & 0x4) && (via = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C598_1, NULL))) vortex_fix_agp_bridge(via); if ((fix & 0x8) && (via = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_7007, NULL))) vortex_fix_agp_bridge(via); } pci_dev_put(via); } // component-destructor // (see "Management of Cards and Components") static int snd_vortex_dev_free(struct snd_device *device) { vortex_t *vortex = device->device_data; vortex_gameport_unregister(vortex); vortex_core_shutdown(vortex); // Take down PCI interface. free_irq(vortex->irq, vortex); iounmap(vortex->mmio); pci_release_regions(vortex->pci_dev); pci_disable_device(vortex->pci_dev); kfree(vortex); return 0; } // chip-specific constructor // (see "Management of Cards and Components") static int __devinit snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip) { vortex_t *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_vortex_dev_free, }; *rchip = NULL; // check PCI availability (DMA). 
if ((err = pci_enable_device(pci)) < 0) return err; if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) { printk(KERN_ERR "error to set DMA mask\n"); pci_disable_device(pci); return -ENXIO; } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->card = card; // initialize the stuff chip->pci_dev = pci; chip->io = pci_resource_start(pci, 0); chip->vendor = pci->vendor; chip->device = pci->device; chip->card = card; chip->irq = -1; // (1) PCI resource allocation // Get MMIO area // if ((err = pci_request_regions(pci, CARD_NAME_SHORT)) != 0) goto regions_out; chip->mmio = pci_ioremap_bar(pci, 0); if (!chip->mmio) { printk(KERN_ERR "MMIO area remap failed.\n"); err = -ENOMEM; goto ioremap_out; } /* Init audio core. * This must be done before we do request_irq otherwise we can get spurious * interrupts that we do not handle properly and make a mess of things */ if ((err = vortex_core_init(chip)) != 0) { printk(KERN_ERR "hw core init failed\n"); goto core_out; } if ((err = request_irq(pci->irq, vortex_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) != 0) { printk(KERN_ERR "cannot grab irq\n"); goto irq_out; } chip->irq = pci->irq; pci_set_master(pci); // End of PCI setup. // Register alsa root device. 
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { goto alloc_out; } snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; alloc_out: free_irq(chip->irq, chip); irq_out: vortex_core_shutdown(chip); core_out: iounmap(chip->mmio); ioremap_out: pci_release_regions(chip->pci_dev); regions_out: pci_disable_device(chip->pci_dev); //FIXME: this not the right place to unregister the gameport vortex_gameport_unregister(chip); kfree(chip); return err; } // constructor -- see "Constructor" sub-section static int __devinit snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; vortex_t *chip; int err; // (1) if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } // (2) err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; // (3) if ((err = snd_vortex_create(card, pci, &chip)) < 0) { snd_card_free(card); return err; } snd_vortex_workaround(pci, pcifix[dev]); // Card details needed in snd_vortex_midi strcpy(card->driver, CARD_NAME_SHORT); sprintf(card->shortname, "Aureal Vortex %s", CARD_NAME_SHORT); sprintf(card->longname, "%s at 0x%lx irq %i", card->shortname, chip->io, chip->irq); // (4) Alloc components. err = snd_vortex_mixer(chip); if (err < 0) { snd_card_free(card); return err; } // ADB pcm. err = snd_vortex_new_pcm(chip, VORTEX_PCM_ADB, NR_PCM); if (err < 0) { snd_card_free(card); return err; } #ifndef CHIP_AU8820 // ADB SPDIF if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_SPDIF, 1)) < 0) { snd_card_free(card); return err; } // A3D if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_A3D, NR_A3D)) < 0) { snd_card_free(card); return err; } #endif /* // ADB I2S if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_I2S, 1)) < 0) { snd_card_free(card); return err; } */ #ifndef CHIP_AU8810 // WT pcm. 
if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_WT, NR_WT)) < 0) { snd_card_free(card); return err; } #endif if ((err = snd_vortex_midi(chip)) < 0) { snd_card_free(card); return err; } vortex_gameport_register(chip); #if 0 if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_VORTEX_SYNTH, sizeof(snd_vortex_synth_arg_t), &wave) < 0 || wave == NULL) { snd_printk(KERN_ERR "Can't initialize Aureal wavetable synth\n"); } else { snd_vortex_synth_arg_t *arg; arg = SNDRV_SEQ_DEVICE_ARGPTR(wave); strcpy(wave->name, "Aureal Synth"); arg->hwptr = vortex; arg->index = 1; arg->seq_ports = seq_ports[dev]; arg->max_voices = max_synth_voices[dev]; } #endif // (5) if ((err = pci_read_config_word(pci, PCI_DEVICE_ID, &(chip->device))) < 0) { snd_card_free(card); return err; } if ((err = pci_read_config_word(pci, PCI_VENDOR_ID, &(chip->vendor))) < 0) { snd_card_free(card); return err; } chip->rev = pci->revision; #ifdef CHIP_AU8830 if ((chip->rev) != 0xfe && (chip->rev) != 0xfa) { printk(KERN_ALERT "vortex: The revision (%x) of your card has not been seen before.\n", chip->rev); printk(KERN_ALERT "vortex: Please email the results of 'lspci -vv' to openvortex-dev@nongnu.org.\n"); snd_card_free(card); err = -ENODEV; return err; } #endif // (6) if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } // (7) pci_set_drvdata(pci, card); dev++; vortex_connect_default(chip, 1); vortex_enable_int(chip); return 0; } // destructor -- see "Destructor" sub-section static void __devexit snd_vortex_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } // pci_driver definition static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_vortex_ids, .probe = snd_vortex_probe, .remove = __devexit_p(snd_vortex_remove), }; // initialization of the module static int __init alsa_card_vortex_init(void) { return pci_register_driver(&driver); } // clean up the module static void __exit alsa_card_vortex_exit(void) { 
pci_unregister_driver(&driver); } module_init(alsa_card_vortex_init) module_exit(alsa_card_vortex_exit)
akw28888/kernel_zte_msm8x25q
sound/pci/au88x0/au88x0.c
C
gpl-2.0
10,267
/* * acm_ms.c -- Composite driver, with ACM and mass storage support * * Copyright (C) 2008 David Brownell * Copyright (C) 2008 Nokia Corporation * Author: David Brownell * Modified: Klaus Schwarzkopf <schwarzkopf@sensortherm.de> * * Heavily based on multi.c and cdc2.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/utsname.h> #include "u_serial.h" #define DRIVER_DESC "Composite Gadget (ACM + MS)" #define DRIVER_VERSION "2011/10/10" /*-------------------------------------------------------------------------*/ /* * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ #define ACM_MS_VENDOR_NUM 0x1d6b /* Linux Foundation */ #define ACM_MS_PRODUCT_NUM 0x0106 /* Composite Gadget: ACM + MS*/ /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
*/ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "u_serial.c" #include "f_acm.c" #include "f_mass_storage.c" /*-------------------------------------------------------------------------*/ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_MISC /* 0xEF */, .bDeviceSubClass = 2, .bDeviceProtocol = 1, /* .bMaxPacketSize0 = f(hardware) */ /* Vendor and product id can be overridden by module parameters. */ .idVendor = cpu_to_le16(ACM_MS_VENDOR_NUM), .idProduct = cpu_to_le16(ACM_MS_PRODUCT_NUM), /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ /* NO SERIAL NUMBER */ /*.bNumConfigurations = DYNAMIC*/ }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* * REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 static char manufacturer[50]; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = manufacturer, [STRING_PRODUCT_IDX].s = DRIVER_DESC, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; /****************************** Configurations ******************************/ static struct fsg_module_parameters fsg_mod_data = { .stall = 1 }; FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data); static struct fsg_common fsg_common; /*-------------------------------------------------------------------------*/ /* * We _always_ have both ACM and mass storage functions. 
*/ static int __init acm_ms_do_config(struct usb_configuration *c) { int status; if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } status = acm_bind_config(c, 0); if (status < 0) return status; status = fsg_bind_config(c->cdev, c, &fsg_common); if (status < 0) return status; return 0; } static struct usb_configuration acm_ms_config_driver = { .label = DRIVER_DESC, .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; /*-------------------------------------------------------------------------*/ static int __init acm_ms_bind(struct usb_composite_dev *cdev) { int gcnum; struct usb_gadget *gadget = cdev->gadget; int status; void *retp; /* set up serial link layer */ status = gserial_setup(cdev->gadget, 1); if (status < 0) return status; /* set up mass storage function */ retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data); if (IS_ERR(retp)) { status = PTR_ERR(retp); goto fail0; } /* set bcdDevice */ gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) { device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum); } else { WARNING(cdev, "controller '%s' not recognized; trying %s\n", gadget->name, acm_ms_config_driver.label); device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099); } /* * Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. 
*/ /* device descriptor strings: manufacturer, product */ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); status = usb_string_id(cdev); if (status < 0) goto fail1; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) goto fail1; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* register our configuration */ status = usb_add_config(cdev, &acm_ms_config_driver, acm_ms_do_config); if (status < 0) goto fail1; dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n", DRIVER_DESC); fsg_common_put(&fsg_common); return 0; /* error recovery */ fail1: fsg_common_put(&fsg_common); fail0: gserial_cleanup(); return status; } static int __exit acm_ms_unbind(struct usb_composite_dev *cdev) { gserial_cleanup(); return 0; } static struct usb_composite_driver acm_ms_driver = { .name = "g_acm_ms", .dev = &device_desc, .strings = dev_strings, .unbind = __exit_p(acm_ms_unbind), }; MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Klaus Schwarzkopf <schwarzkopf@sensortherm.de>"); MODULE_LICENSE("GPL v2"); static int __init init(void) { return usb_composite_probe(&acm_ms_driver, acm_ms_bind); } module_init(init); static void __exit cleanup(void) { usb_composite_unregister(&acm_ms_driver); } module_exit(cleanup);
figue/chroma_kernel_mako
drivers/usb/gadget/acm_ms.c
C
gpl-2.0
6,550
/* * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * Copyright (C) 2010 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include <linux/platform_device.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/mxc_ehci.h> #define USBCTRL_OTGBASE_OFFSET 0x600 #define MX27_OTG_SIC_SHIFT 29 #define MX27_OTG_SIC_MASK (0x3 << MX27_OTG_SIC_SHIFT) #define MX27_OTG_PM_BIT (1 << 24) #define MX27_H2_SIC_SHIFT 21 #define MX27_H2_SIC_MASK (0x3 << MX27_H2_SIC_SHIFT) #define MX27_H2_PM_BIT (1 << 16) #define MX27_H2_DT_BIT (1 << 5) #define MX27_H1_SIC_SHIFT 13 #define MX27_H1_SIC_MASK (0x3 << MX27_H1_SIC_SHIFT) #define MX27_H1_PM_BIT (1 << 8) #define MX27_H1_DT_BIT (1 << 4) int mx27_initialize_usb_hw(int port, unsigned int flags) { unsigned int v; v = readl(MX27_IO_ADDRESS(MX27_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET)); switch (port) { case 0: /* OTG port */ v &= ~(MX27_OTG_SIC_MASK | MX27_OTG_PM_BIT); v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_OTG_SIC_SHIFT; if (!(flags & MXC_EHCI_POWER_PINS_ENABLED)) v |= MX27_OTG_PM_BIT; break; case 1: /* H1 port */ v &= ~(MX27_H1_SIC_MASK | MX27_H1_PM_BIT | MX27_H1_DT_BIT); v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_H1_SIC_SHIFT; if (!(flags & MXC_EHCI_POWER_PINS_ENABLED)) v |= MX27_H1_PM_BIT; if (!(flags & MXC_EHCI_TTL_ENABLED)) v |= MX27_H1_DT_BIT; break; case 2: /* H2 port */ v &= ~(MX27_H2_SIC_MASK | MX27_H2_PM_BIT | MX27_H2_DT_BIT); v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX27_H2_SIC_SHIFT; if (!(flags & MXC_EHCI_POWER_PINS_ENABLED)) v |= 
MX27_H2_PM_BIT; if (!(flags & MXC_EHCI_TTL_ENABLED)) v |= MX27_H2_DT_BIT; break; default: return -EINVAL; } writel(v, MX27_IO_ADDRESS(MX27_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET)); return 0; }
NoelMacwan/Kernel-NanHu-11.3.A.1.39
arch/arm/mach-imx/ehci-imx27.c
C
gpl-2.0
2,276
(function ($) { Drupal.toolbar = Drupal.toolbar || {}; /** * Attach toggling behavior and notify the overlay of the toolbar. */ Drupal.behaviors.toolbar = { attach: function(context) { // Set the initial state of the toolbar. $('#toolbar', context).once('toolbar', Drupal.toolbar.init); // Toggling toolbar drawer. $('#toolbar a.toggle', context).once('toolbar-toggle').click(function(e) { Drupal.toolbar.toggle(); // Allow resize event handlers to recalculate sizes/positions. $(window).triggerHandler('resize'); return false; }); } }; /** * Retrieve last saved cookie settings and set up the initial toolbar state. */ Drupal.toolbar.init = function() { // Retrieve the collapsed status from a stored cookie. var collapsed = $.cookie('Drupal.toolbar.collapsed'); // Expand or collapse the toolbar based on the cookie value. if (collapsed == 1) { Drupal.toolbar.collapse(); } else { Drupal.toolbar.expand(); } }; /** * Collapse the toolbar. */ Drupal.toolbar.collapse = function() { var toggle_text = Drupal.t('Show shortcuts'); $('#toolbar div.toolbar-drawer').addClass('collapsed'); $('#toolbar a.toggle') .removeClass('toggle-active') .attr('title', toggle_text) .html(toggle_text); $('body').removeClass('toolbar-drawer').css('paddingTop', Drupal.toolbar.height()); $.cookie( 'Drupal.toolbar.collapsed', 1, { path: Drupal.settings.basePath, // The cookie should "never" expire. expires: 36500 } ); }; /** * Expand the toolbar. */ Drupal.toolbar.expand = function() { var toggle_text = Drupal.t('Hide shortcuts'); $('#toolbar div.toolbar-drawer').removeClass('collapsed'); $('#toolbar a.toggle') .addClass('toggle-active') .attr('title', toggle_text) .html(toggle_text); $('body').addClass('toolbar-drawer').css('paddingTop', Drupal.toolbar.height()); $.cookie( 'Drupal.toolbar.collapsed', 0, { path: Drupal.settings.basePath, // The cookie should "never" expire. expires: 36500 } ); }; /** * Toggle the toolbar. 
*/ Drupal.toolbar.toggle = function() { if ($('#toolbar div.toolbar-drawer').hasClass('collapsed')) { Drupal.toolbar.expand(); } else { Drupal.toolbar.collapse(); } }; Drupal.toolbar.height = function() { var $toolbar = $('#toolbar'); var height = $toolbar.outerHeight(); // In modern browsers (including IE9), when box-shadow is defined, use the // normal height. var cssBoxShadowValue = $toolbar.css('box-shadow'); var boxShadow = (typeof cssBoxShadowValue !== 'undefined' && cssBoxShadowValue !== 'none'); // In IE8 and below, we use the shadow filter to apply box-shadow styles to // the toolbar. It adds some extra height that we need to remove. if (!boxShadow && /DXImageTransform\.Microsoft\.Shadow/.test($toolbar.css('filter'))) { height -= $toolbar[0].filters.item("DXImageTransform.Microsoft.Shadow").strength; } return height; }; })(jQuery);
thinkdrop/drupal
modules/toolbar/toolbar.js
JavaScript
gpl-2.0
3,020
var bindCallback = require('../internal/bindCallback'); /* Native method references for those with the same name as other `lodash` methods. */ var nativeFloor = Math.floor, nativeIsFinite = global.isFinite, nativeMin = Math.min; /** Used as references for the maximum length and index of an array. */ var MAX_ARRAY_LENGTH = 4294967295; /** * Invokes the iteratee function `n` times, returning an array of the results * of each invocation. The `iteratee` is bound to `thisArg` and invoked with * one argument; (index). * * @static * @memberOf _ * @category Utility * @param {number} n The number of times to invoke `iteratee`. * @param {Function} [iteratee=_.identity] The function invoked per iteration. * @param {*} [thisArg] The `this` binding of `iteratee`. * @returns {Array} Returns the array of results. * @example * * var diceRolls = _.times(3, _.partial(_.random, 1, 6, false)); * // => [3, 6, 4] * * _.times(3, function(n) { * mage.castSpell(n); * }); * // => invokes `mage.castSpell(n)` three times with `n` of `0`, `1`, and `2` * * _.times(3, function(n) { * this.cast(n); * }, mage); * // => also invokes `mage.castSpell(n)` three times */ function times(n, iteratee, thisArg) { n = nativeFloor(n); // Exit early to avoid a JSC JIT bug in Safari 8 // where `Array(0)` is treated as `Array(1)`. if (n < 1 || !nativeIsFinite(n)) { return []; } var index = -1, result = Array(nativeMin(n, MAX_ARRAY_LENGTH)); iteratee = bindCallback(iteratee, thisArg, 1); while (++index < n) { if (index < MAX_ARRAY_LENGTH) { result[index] = iteratee(index); } else { iteratee(index); } } return result; } module.exports = times;
cosmopod/angularblog
node_modules/gulp-jshint/node_modules/lodash/utility/times.js
JavaScript
gpl-2.0
1,721
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include "nodelist.h" int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen) { if (!jffs2_is_writebuffered(c)) { if (jffs2_sum_active()) { int res; res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to); if (res) { return res; } } } return mtd_writev(c->mtd, vecs, count, to, retlen); } int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf) { int ret; ret = mtd_write(c->mtd, ofs, len, retlen, buf); if (jffs2_sum_active()) { struct kvec vecs[1]; int res; vecs[0].iov_base = (unsigned char *) buf; vecs[0].iov_len = len; res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs); if (res) { return res; } } return ret; }
alvinhochun/sony-xperia-m-kernel
fs/jffs2/writev.c
C
gpl-2.0
1,103
/* * cx18 gpio functions * * Derived from ivtv-gpio.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-cards.h" #include "cx18-gpio.h" #include "tuner-xc2028.h" /********************* GPIO stuffs *********************/ /* GPIO registers */ #define CX18_REG_GPIO_IN 0xc72010 #define CX18_REG_GPIO_OUT1 0xc78100 #define CX18_REG_GPIO_DIR1 0xc78108 #define CX18_REG_GPIO_OUT2 0xc78104 #define CX18_REG_GPIO_DIR2 0xc7810c /* * HVR-1600 GPIO pins, courtesy of Hauppauge: * * gpio0: zilog ir process reset pin * gpio1: zilog programming pin (you should never use this) * gpio12: cx24227 reset pin * gpio13: cs5345 reset pin */ /* * File scope utility functions */ static void gpio_write(struct cx18 *cx) { u32 dir_lo = cx->gpio_dir & 0xffff; u32 val_lo = cx->gpio_val & 0xffff; u32 dir_hi = cx->gpio_dir >> 16; u32 val_hi = cx->gpio_val >> 16; cx18_write_reg_expect(cx, dir_lo << 16, CX18_REG_GPIO_DIR1, ~dir_lo, dir_lo); cx18_write_reg_expect(cx, (dir_lo << 16) | val_lo, CX18_REG_GPIO_OUT1, val_lo, dir_lo); cx18_write_reg_expect(cx, dir_hi << 16, CX18_REG_GPIO_DIR2, ~dir_hi, dir_hi); cx18_write_reg_expect(cx, (dir_hi << 16) | val_hi, CX18_REG_GPIO_OUT2, val_hi, 
dir_hi); } static void gpio_update(struct cx18 *cx, u32 mask, u32 data) { if (mask == 0) return; mutex_lock(&cx->gpio_lock); cx->gpio_val = (cx->gpio_val & ~mask) | (data & mask); gpio_write(cx); mutex_unlock(&cx->gpio_lock); } static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi, unsigned int assert_msecs, unsigned int recovery_msecs) { u32 mask; mask = active_lo | active_hi; if (mask == 0) return; /* * Assuming that active_hi and active_lo are a subsets of the bits in * gpio_dir. Also assumes that active_lo and active_hi don't overlap * in any bit position */ /* Assert */ gpio_update(cx, mask, ~active_lo); schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs)); /* Deassert */ gpio_update(cx, mask, ~active_hi); schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs)); } /* * GPIO Multiplexer - logical device */ static int gpiomux_log_status(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); mutex_lock(&cx->gpio_lock); CX18_INFO_DEV(sd, "GPIO: direction 0x%08x, value 0x%08x\n", cx->gpio_dir, cx->gpio_val); mutex_unlock(&cx->gpio_lock); return 0; } static int gpiomux_s_radio(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); /* * FIXME - work out the cx->active/audio_input mess - this is * intended to handle the switch to radio mode and set the * audio routing, but we need to update the state in cx */ gpio_update(cx, cx->card->gpio_audio_input.mask, cx->card->gpio_audio_input.radio); return 0; } static int gpiomux_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct cx18 *cx = v4l2_get_subdevdata(sd); u32 data; switch (cx->card->audio_inputs[cx->audio_input].muxer_input) { case 1: data = cx->card->gpio_audio_input.linein; break; case 0: data = cx->card->gpio_audio_input.tuner; break; default: /* * FIXME - work out the cx->active/audio_input mess - this is * intended to handle the switch from radio mode and set the * audio routing, but we need to update the state in cx */ data = 
cx->card->gpio_audio_input.tuner; break; } gpio_update(cx, cx->card->gpio_audio_input.mask, data); return 0; } static int gpiomux_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct cx18 *cx = v4l2_get_subdevdata(sd); u32 data; switch (input) { case 0: data = cx->card->gpio_audio_input.tuner; break; case 1: data = cx->card->gpio_audio_input.linein; break; case 2: data = cx->card->gpio_audio_input.radio; break; default: return -EINVAL; } gpio_update(cx, cx->card->gpio_audio_input.mask, data); return 0; } static const struct v4l2_subdev_core_ops gpiomux_core_ops = { .log_status = gpiomux_log_status, .s_std = gpiomux_s_std, }; static const struct v4l2_subdev_tuner_ops gpiomux_tuner_ops = { .s_radio = gpiomux_s_radio, }; static const struct v4l2_subdev_audio_ops gpiomux_audio_ops = { .s_routing = gpiomux_s_audio_routing, }; static const struct v4l2_subdev_ops gpiomux_ops = { .core = &gpiomux_core_ops, .tuner = &gpiomux_tuner_ops, .audio = &gpiomux_audio_ops, }; /* * GPIO Reset Controller - logical device */ static int resetctrl_log_status(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); mutex_lock(&cx->gpio_lock); CX18_INFO_DEV(sd, "GPIO: direction 0x%08x, value 0x%08x\n", cx->gpio_dir, cx->gpio_val); mutex_unlock(&cx->gpio_lock); return 0; } static int resetctrl_reset(struct v4l2_subdev *sd, u32 val) { struct cx18 *cx = v4l2_get_subdevdata(sd); const struct cx18_gpio_i2c_slave_reset *p; p = &cx->card->gpio_i2c_slave_reset; switch (val) { case CX18_GPIO_RESET_I2C: gpio_reset_seq(cx, p->active_lo_mask, p->active_hi_mask, p->msecs_asserted, p->msecs_recovery); break; case CX18_GPIO_RESET_Z8F0811: /* * Assert timing for the Z8F0811 on HVR-1600 boards: * 1. Assert RESET for min of 4 clock cycles at 18.432 MHz to * initiate * 2. Reset then takes 66 WDT cycles at 10 kHz + 16 xtal clock * cycles (6,601,085 nanoseconds ~= 7 milliseconds) * 3. DBG pin must be high before chip exits reset for normal * operation. 
DBG is open drain and hopefully pulled high * since we don't normally drive it (GPIO 1?) for the * HVR-1600 * 4. Z8F0811 won't exit reset until RESET is deasserted * 5. Zilog comes out of reset, loads reset vector address and * executes from there. Required recovery delay unknown. */ gpio_reset_seq(cx, p->ir_reset_mask, 0, p->msecs_asserted, p->msecs_recovery); break; case CX18_GPIO_RESET_XC2028: if (cx->card->tuners[0].tuner == TUNER_XC2028) gpio_reset_seq(cx, (1 << cx->card->xceive_pin), 0, 1, 1); break; } return 0; } static const struct v4l2_subdev_core_ops resetctrl_core_ops = { .log_status = resetctrl_log_status, .reset = resetctrl_reset, }; static const struct v4l2_subdev_ops resetctrl_ops = { .core = &resetctrl_core_ops, }; /* * External entry points */ void cx18_gpio_init(struct cx18 *cx) { mutex_lock(&cx->gpio_lock); cx->gpio_dir = cx->card->gpio_init.direction; cx->gpio_val = cx->card->gpio_init.initial_value; if (cx->card->tuners[0].tuner == TUNER_XC2028) { cx->gpio_dir |= 1 << cx->card->xceive_pin; cx->gpio_val |= 1 << cx->card->xceive_pin; } if (cx->gpio_dir == 0) { mutex_unlock(&cx->gpio_lock); return; } CX18_DEBUG_INFO("GPIO initial dir: %08x/%08x out: %08x/%08x\n", cx18_read_reg(cx, CX18_REG_GPIO_DIR1), cx18_read_reg(cx, CX18_REG_GPIO_DIR2), cx18_read_reg(cx, CX18_REG_GPIO_OUT1), cx18_read_reg(cx, CX18_REG_GPIO_OUT2)); gpio_write(cx); mutex_unlock(&cx->gpio_lock); } int cx18_gpio_register(struct cx18 *cx, u32 hw) { struct v4l2_subdev *sd; const struct v4l2_subdev_ops *ops; char *str; switch (hw) { case CX18_HW_GPIO_MUX: sd = &cx->sd_gpiomux; ops = &gpiomux_ops; str = "gpio-mux"; break; case CX18_HW_GPIO_RESET_CTRL: sd = &cx->sd_resetctrl; ops = &resetctrl_ops; str = "gpio-reset-ctrl"; break; default: return -EINVAL; } v4l2_subdev_init(sd, ops); v4l2_set_subdevdata(sd, cx); snprintf(sd->name, sizeof(sd->name), "%s %s", cx->v4l2_dev.name, str); sd->grp_id = hw; return v4l2_device_register_subdev(&cx->v4l2_dev, sd); } void cx18_reset_ir_gpio(void 
*data) { struct cx18 *cx = to_cx18((struct v4l2_device *)data); if (cx->card->gpio_i2c_slave_reset.ir_reset_mask == 0) return; CX18_DEBUG_INFO("Resetting IR microcontroller\n"); v4l2_subdev_call(&cx->sd_resetctrl, core, reset, CX18_GPIO_RESET_Z8F0811); } EXPORT_SYMBOL(cx18_reset_ir_gpio); /* This symbol is exported for use by lirc_pvr150 for the IR-blaster */ /* Xceive tuner reset function */ int cx18_reset_tuner_gpio(void *dev, int component, int cmd, int value) { struct i2c_algo_bit_data *algo = dev; struct cx18_i2c_algo_callback_data *cb_data = algo->data; struct cx18 *cx = cb_data->cx; if (cmd != XC2028_TUNER_RESET || cx->card->tuners[0].tuner != TUNER_XC2028) return 0; CX18_DEBUG_INFO("Resetting XCeive tuner\n"); return v4l2_subdev_call(&cx->sd_resetctrl, core, reset, CX18_GPIO_RESET_XC2028); }
AnguisCaptor/PwnKernel_Shamu_M
drivers/media/pci/cx18/cx18-gpio.c
C
gpl-2.0
9,275
/* linux/drivers/mtd/maps/scx200_docflash.c Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> National Semiconductor SCx200 flash mapped with DOCCS */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/pci.h> #include <linux/scx200.h> #define NAME "scx200_docflash" MODULE_AUTHOR("Christer Weinigel <wingel@hack.org>"); MODULE_DESCRIPTION("NatSemi SCx200 DOCCS Flash Driver"); MODULE_LICENSE("GPL"); static int probe = 0; /* Don't autoprobe */ static unsigned size = 0x1000000; /* 16 MiB the whole ISA address space */ static unsigned width = 8; /* Default to 8 bits wide */ static char *flashtype = "cfi_probe"; module_param(probe, int, 0); MODULE_PARM_DESC(probe, "Probe for a BIOS mapping"); module_param(size, int, 0); MODULE_PARM_DESC(size, "Size of the flash mapping"); module_param(width, int, 0); MODULE_PARM_DESC(width, "Data width of the flash mapping (8/16)"); module_param(flashtype, charp, 0); MODULE_PARM_DESC(flashtype, "Type of MTD probe to do"); static struct resource docmem = { .flags = IORESOURCE_MEM, .name = "NatSemi SCx200 DOCCS Flash", }; static struct mtd_info *mymtd; static struct mtd_partition partition_info[] = { { .name = "DOCCS Boot kernel", .offset = 0, .size = 0xc0000 }, { .name = "DOCCS Low BIOS", .offset = 0xc0000, .size = 0x40000 }, { .name = "DOCCS File system", .offset = 0x100000, .size = ~0 /* calculate from flash size */ }, { .name = "DOCCS High BIOS", .offset = ~0, /* calculate from flash size */ .size = 0x80000 }, }; #define NUM_PARTITIONS ARRAY_SIZE(partition_info) static struct map_info scx200_docflash_map = { .name = "NatSemi SCx200 DOCCS Flash", }; static int __init init_scx200_docflash(void) { unsigned u; unsigned base; unsigned ctrl; unsigned pmr; struct pci_dev *bridge; printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n"); if ((bridge 
= pci_get_device(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE, NULL)) == NULL) return -ENODEV; /* check that we have found the configuration block */ if (!scx200_cb_present()) { pci_dev_put(bridge); return -ENODEV; } if (probe) { /* Try to use the present flash mapping if any */ pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base); pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl); pci_dev_put(bridge); pmr = inl(scx200_cb_base + SCx200_PMR); if (base == 0 || (ctrl & 0x07000000) != 0x07000000 || (ctrl & 0x0007ffff) == 0) return -ENODEV; size = ((ctrl&0x1fff)<<13) + (1<<13); for (u = size; u > 1; u >>= 1) ; if (u != 1) return -ENODEV; if (pmr & (1<<6)) width = 16; else width = 8; docmem.start = base; docmem.end = base + size; if (request_resource(&iomem_resource, &docmem)) { printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n"); return -ENOMEM; } } else { pci_dev_put(bridge); for (u = size; u > 1; u >>= 1) ; if (u != 1) { printk(KERN_ERR NAME ": invalid size for flash mapping\n"); return -EINVAL; } if (width != 8 && width != 16) { printk(KERN_ERR NAME ": invalid bus width for flash mapping\n"); return -EINVAL; } if (allocate_resource(&iomem_resource, &docmem, size, 0xc0000000, 0xffffffff, size, NULL, NULL)) { printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n"); return -ENOMEM; } ctrl = 0x07000000 | ((size-1) >> 13); printk(KERN_INFO "DOCCS BASE=0x%08lx, CTRL=0x%08lx\n", (long)docmem.start, (long)ctrl); pci_write_config_dword(bridge, SCx200_DOCCS_BASE, docmem.start); pci_write_config_dword(bridge, SCx200_DOCCS_CTRL, ctrl); pmr = inl(scx200_cb_base + SCx200_PMR); if (width == 8) { pmr &= ~(1<<6); } else { pmr |= (1<<6); } outl(pmr, scx200_cb_base + SCx200_PMR); } printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n", &docmem, width); scx200_docflash_map.size = size; if (width == 8) scx200_docflash_map.bankwidth = 1; else scx200_docflash_map.bankwidth = 2; simple_map_init(&scx200_docflash_map); 
scx200_docflash_map.phys = docmem.start; scx200_docflash_map.virt = ioremap(docmem.start, scx200_docflash_map.size); if (!scx200_docflash_map.virt) { printk(KERN_ERR NAME ": failed to ioremap the flash\n"); release_resource(&docmem); return -EIO; } mymtd = do_map_probe(flashtype, &scx200_docflash_map); if (!mymtd) { printk(KERN_ERR NAME ": unable to detect flash\n"); iounmap(scx200_docflash_map.virt); release_resource(&docmem); return -ENXIO; } if (size < mymtd->size) printk(KERN_WARNING NAME ": warning, flash mapping is smaller than flash size\n"); mymtd->owner = THIS_MODULE; partition_info[3].offset = mymtd->size-partition_info[3].size; partition_info[2].size = partition_info[3].offset-partition_info[2].offset; mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); return 0; } static void __exit cleanup_scx200_docflash(void) { if (mymtd) { mtd_device_unregister(mymtd); map_destroy(mymtd); } if (scx200_docflash_map.virt) { iounmap(scx200_docflash_map.virt); release_resource(&docmem); } } module_init(init_scx200_docflash); module_exit(cleanup_scx200_docflash); /* Local variables: compile-command: "make -k -C ../../.. SUBDIRS=drivers/mtd/maps modules" c-basic-offset: 8 End: */
whoi-acomms/linux
drivers/mtd/maps/scx200_docflash.c
C
gpl-2.0
5,559
/* * The platform specific code for virtex devices since a boot loader is not * always used. * * (C) Copyright 2008 Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "io.h" #include "stdio.h" #define UART_DLL 0 /* Out: Divisor Latch Low */ #define UART_DLM 1 /* Out: Divisor Latch High */ #define UART_FCR 2 /* Out: FIFO Control Register */ #define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */ #define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */ #define UART_LCR 3 /* Out: Line Control Register */ #define UART_MCR 4 /* Out: Modem Control Register */ #define UART_MCR_RTS 0x02 /* RTS complement */ #define UART_MCR_DTR 0x01 /* DTR complement */ #define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ #define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */ static int virtex_ns16550_console_init(void *devp) { unsigned char *reg_base; u32 reg_shift, reg_offset, clk, spd; u16 divisor; int n; if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) return -1; n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset)); if (n == sizeof(reg_offset)) reg_base += reg_offset; n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift)); if (n != sizeof(reg_shift)) reg_shift = 0; n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd)); if (n != sizeof(spd)) spd = 9600; /* should there be a default clock rate?*/ n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk)); if (n != sizeof(clk)) return -1; divisor = clk / (16 * spd); /* Access baud rate */ out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_DLAB); /* Baud rate based on input clock */ out_8(reg_base + (UART_DLL << reg_shift), divisor & 0xFF); out_8(reg_base + (UART_DLM << reg_shift), divisor >> 8); /* 8 data, 1 stop, no parity */ out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_WLEN8); /* RTS/DTR */ out_8(reg_base + 
(UART_MCR << reg_shift), UART_MCR_RTS | UART_MCR_DTR); /* Clear transmitter and receiver */ out_8(reg_base + (UART_FCR << reg_shift), UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR); return 0; } /* For virtex, the kernel may be loaded without using a bootloader and if so some UARTs need more setup than is provided in the normal console init */ int platform_specific_init(void) { void *devp; char devtype[MAX_PROP_LEN]; char path[MAX_PATH_LEN]; devp = finddevice("/chosen"); if (devp == NULL) return -1; if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) { devp = finddevice(path); if (devp == NULL) return -1; if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0) && !strcmp(devtype, "serial") && (dt_is_compatible(devp, "ns16550"))) virtex_ns16550_console_init(devp); } return 0; }
sultanxda/android_kernel_lge_v400
arch/powerpc/boot/virtex.c
C
gpl-2.0
2,919
// license:BSD-3-Clause // copyright-holders:Nicola Salmoria /*************************************************************************** Bagman memory map driver by Nicola Salmoria protection and speech emulation by Jarek Burczynski protection info Andrew Deschenes memory map: 0000-5fff ROM 6000-67ff RAM 9000-93ff Video RAM 9800-9bff Color RAM 9800-981f Sprites (hidden portion of color RAM) 9c00-9fff ? (filled with 3f, not used otherwise) c000-ffff ROM (Super Bagman only) memory mapped ports: read: a000 PAL16r6 output. (RD4 line) a800 ? (read only in one place, not used) (RD5 line) b000 DSW (RD6 line) b800 watchdog reset (RD7 line) write: a000 interrupt enable a001 horizontal flip a002 vertical flip a003 video enable, not available on earlier hardware revision(s) a004 coin counter a007 ? /SCS line in the schems connected to AY8910 pin A4 or AA (schems are unreadable) a800-a805 these lines control the state machine driving TMS5110 (only bit 0 matters) a800,a801,a802 - speech roms BIT select (000 bit 7, 001 bit 4, 010 bit 2) a803 - 0 keeps the state machine in reset state; 1 starts speech a804 - connected to speech rom 11 (QS) chip enable a805 - connected to speech rom 12 (QT) chip enable b000 ? b800 ? PAL16r6 This chip is custom logic used for guards controlling. Inputs are connected to buffered address(!!!) lines AB0,AB1,AB2,AB3,AB4,AB5,AB6 We simulate this writing a800 to a805 there (which is wrong but works) I/O ports: I/O 8 ;AY-3-8910 Control Reg. I/O 9 ;AY-3-8910 Data Write Reg. I/O C ;AY-3-8910 Data Read Reg. 
Port A of the 8910 is connected to IN0 Port B of the 8910 is connected to IN1 DIP locations verified for: - bagman (manual) - squaitsa (manual) ***************************************************************************/ #include "emu.h" #include "cpu/z80/z80.h" #include "sound/ay8910.h" #include "includes/bagman.h" void bagman_state::machine_start() { save_item(NAME(m_irq_mask)); save_item(NAME(m_columnvalue)); } MACHINE_START_MEMBER(bagman_state, bagman) { bagman_state::machine_start(); save_item(NAME(m_ls259_buf)); } MACHINE_START_MEMBER(bagman_state, squaitsa) { bagman_state::machine_start(); save_item(NAME(m_p1_res)); save_item(NAME(m_p1_old_val)); save_item(NAME(m_p2_res)); save_item(NAME(m_p2_old_val)); } WRITE8_MEMBER(bagman_state::ls259_w) { pal16r6_w(space, offset,data); /*this is just a simulation*/ if (m_ls259_buf[offset] != (data&1) ) { m_ls259_buf[offset] = data&1; switch (offset) { case 0: case 1: case 2: m_tmsprom->bit_w(space, 0, 7 - ((m_ls259_buf[0]<<2) | (m_ls259_buf[1]<<1) | (m_ls259_buf[2]<<0))); break; case 3: m_tmsprom->enable_w(m_ls259_buf[offset]); break; case 4: m_tmsprom->rom_csq_w(space, 0, m_ls259_buf[offset]); break; case 5: m_tmsprom->rom_csq_w(space, 1, m_ls259_buf[offset]); break; } } } WRITE8_MEMBER(bagman_state::coincounter_w) { machine().bookkeeping().coin_counter_w(offset,data); } WRITE8_MEMBER(bagman_state::irq_mask_w) { m_irq_mask = data & 1; } static ADDRESS_MAP_START( main_map, AS_PROGRAM, 8, bagman_state ) AM_RANGE(0x0000, 0x5fff) AM_ROM AM_RANGE(0x6000, 0x67ff) AM_RAM AM_RANGE(0x9000, 0x93ff) AM_RAM_WRITE(videoram_w) AM_SHARE("videoram") AM_RANGE(0x9800, 0x9bff) AM_RAM_WRITE(colorram_w) AM_SHARE("colorram") AM_RANGE(0x9c00, 0x9fff) AM_WRITENOP /* written to, but unused */ AM_RANGE(0xa000, 0xa000) AM_READ(pal16r6_r) //AM_RANGE(0xa800, 0xa805) AM_READ(bagman_ls259_r) /*just for debugging purposes*/ AM_RANGE(0xa000, 0xa000) AM_WRITE(irq_mask_w) AM_RANGE(0xa001, 0xa002) AM_WRITE(flipscreen_w) AM_RANGE(0xa003, 0xa003) 
AM_WRITEONLY AM_SHARE("video_enable") AM_RANGE(0xc000, 0xffff) AM_ROM /* Super Bagman only */ AM_RANGE(0x9800, 0x981f) AM_WRITEONLY AM_SHARE("spriteram") /* hidden portion of color RAM */ /* here only to initialize the pointer, */ /* writes are handled by colorram_w */ AM_RANGE(0xa800, 0xa805) AM_WRITE(ls259_w) /* TMS5110 driving state machine */ AM_RANGE(0xa004, 0xa004) AM_WRITE(coincounter_w) AM_RANGE(0xb000, 0xb000) AM_READ_PORT("DSW") AM_RANGE(0xb800, 0xb800) AM_READNOP /* looks like watchdog from schematics */ #if 0 AM_RANGE(0xa007, 0xa007) AM_WRITENOP /* ???? */ AM_RANGE(0xb000, 0xb000) AM_WRITENOP /* ???? */ AM_RANGE(0xb800, 0xb800) AM_WRITENOP /* ???? */ #endif ADDRESS_MAP_END static ADDRESS_MAP_START( pickin_map, AS_PROGRAM, 8, bagman_state ) AM_RANGE(0x0000, 0x5fff) AM_ROM AM_RANGE(0x7000, 0x77ff) AM_RAM AM_RANGE(0x8800, 0x8bff) AM_RAM_WRITE(videoram_w) AM_SHARE("videoram") AM_RANGE(0x9800, 0x9bff) AM_RAM_WRITE(colorram_w) AM_SHARE("colorram") AM_RANGE(0x9800, 0x981f) AM_WRITEONLY AM_SHARE("spriteram") /* hidden portion of color RAM */ /* here only to initialize the pointer, */ /* writes are handled by colorram_w */ AM_RANGE(0x9c00, 0x9fff) AM_WRITENOP /* written to, but unused */ AM_RANGE(0xa000, 0xa000) AM_WRITE(irq_mask_w) AM_RANGE(0xa001, 0xa002) AM_WRITE(flipscreen_w) AM_RANGE(0xa003, 0xa003) AM_WRITEONLY AM_SHARE("video_enable") AM_RANGE(0xa004, 0xa004) AM_WRITE(coincounter_w) AM_RANGE(0xa800, 0xa800) AM_READ_PORT("DSW") AM_RANGE(0xa005, 0xa005) AM_WRITENOP /* ???? */ AM_RANGE(0xa006, 0xa006) AM_WRITENOP /* ???? */ AM_RANGE(0xa007, 0xa007) AM_WRITENOP /* ???? 
*/ /* guess */ AM_RANGE(0xb000, 0xb000) AM_DEVWRITE("ay2", ay8910_device, address_w) AM_RANGE(0xb800, 0xb800) AM_DEVREADWRITE("ay2", ay8910_device, data_r, data_w) ADDRESS_MAP_END static ADDRESS_MAP_START( main_portmap, AS_IO, 8, bagman_state ) ADDRESS_MAP_GLOBAL_MASK(0xff) AM_RANGE(0x08, 0x09) AM_DEVWRITE("aysnd", ay8910_device, address_data_w) AM_RANGE(0x0c, 0x0c) AM_DEVREAD("aysnd", ay8910_device, data_r) //AM_RANGE(0x56, 0x56) AM_WRITENOP ADDRESS_MAP_END static INPUT_PORTS_START( bagman ) PORT_START("P1") PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_COIN1 ) PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_COIN2 ) PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_START1 ) PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT ) PORT_8WAY PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT ) PORT_8WAY PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_JOYSTICK_UP ) PORT_8WAY PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_JOYSTICK_DOWN ) PORT_8WAY PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_START("P2") PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_COIN3 ) PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_COIN4 ) PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_START2 ) PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT ) PORT_8WAY PORT_COCKTAIL PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT ) PORT_8WAY PORT_COCKTAIL PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_JOYSTICK_UP ) PORT_8WAY PORT_COCKTAIL PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_JOYSTICK_DOWN ) PORT_8WAY PORT_COCKTAIL PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_COCKTAIL PORT_START("DSW") PORT_DIPNAME( 0x03, 0x02, DEF_STR( Lives ) ) PORT_DIPLOCATION("SW1:1,2") PORT_DIPSETTING( 0x03, "2" ) PORT_DIPSETTING( 0x02, "3" ) PORT_DIPSETTING( 0x01, "4" ) PORT_DIPSETTING( 0x00, "5" ) PORT_DIPNAME( 0x04, 0x04, DEF_STR( Coinage ) ) PORT_DIPLOCATION("SW1:3") PORT_DIPSETTING( 0x00, "2C/1C 1C/1C 1C/3C 1C/7C" ) PORT_DIPSETTING( 0x04, "1C/1C 1C/2C 1C/6C 1C/14C" ) PORT_DIPNAME( 0x18, 0x18, DEF_STR( Difficulty ) ) PORT_DIPLOCATION("SW1:4,5") PORT_DIPSETTING( 0x18, DEF_STR( Easy ) ) PORT_DIPSETTING( 0x10, DEF_STR( Medium ) ) PORT_DIPSETTING( 
0x08, DEF_STR( Hard ) ) PORT_DIPSETTING( 0x00, DEF_STR( Hardest ) ) PORT_DIPNAME( 0x20, 0x20, DEF_STR( Language ) ) PORT_DIPLOCATION("SW1:6") PORT_DIPSETTING( 0x20, DEF_STR( English ) ) PORT_DIPSETTING( 0x00, DEF_STR( French ) ) PORT_DIPNAME( 0x40, 0x40, DEF_STR( Bonus_Life ) ) PORT_DIPLOCATION("SW1:7") PORT_DIPSETTING( 0x40, "30000" ) PORT_DIPSETTING( 0x00, "40000" ) PORT_DIPNAME( 0x80, 0x80, DEF_STR( Cabinet ) ) /* Cabinet type set through edge connector, not dip switch (verified on real pcb) */ PORT_DIPSETTING( 0x80, DEF_STR( Upright ) ) PORT_DIPSETTING( 0x00, DEF_STR( Cocktail ) ) INPUT_PORTS_END static INPUT_PORTS_START( bagmans ) PORT_INCLUDE( bagman ) PORT_MODIFY("DSW") PORT_DIPNAME( 0x20, 0x20, DEF_STR ( Demo_Sounds ) ) PORT_DIPSETTING( 0x00, DEF_STR ( Off ) ) PORT_DIPSETTING( 0x20, DEF_STR ( On ) ) INPUT_PORTS_END static INPUT_PORTS_START( sbagman ) PORT_INCLUDE( bagman ) PORT_MODIFY("P1") PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_BUTTON2 ) /* double-function button, start and shoot */ PORT_MODIFY("P2") PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_BUTTON2 ) PORT_COCKTAIL /* double-function button, start and shoot */ INPUT_PORTS_END static INPUT_PORTS_START( pickin ) PORT_INCLUDE( bagman ) PORT_MODIFY("DSW") PORT_DIPNAME( 0x01, 0x01, DEF_STR( Coinage ) ) PORT_DIPLOCATION("SW1:1") PORT_DIPSETTING( 0x00, "2C/1C 1C/1C 1C/3C 1C/7C" ) PORT_DIPSETTING( 0x01, "1C/1C 1C/2C 1C/6C 1C/14C" ) PORT_DIPNAME( 0x06, 0x04, DEF_STR( Lives ) ) PORT_DIPLOCATION("SW1:2,3") PORT_DIPSETTING( 0x06, "2" ) PORT_DIPSETTING( 0x04, "3" ) PORT_DIPSETTING( 0x02, "4" ) PORT_DIPSETTING( 0x00, "5" ) PORT_DIPNAME( 0x08, 0x08, DEF_STR( Free_Play ) ) PORT_DIPLOCATION("SW1:4") PORT_DIPSETTING( 0x08, DEF_STR( Off ) ) PORT_DIPSETTING( 0x00, DEF_STR( On ) ) PORT_DIPUNKNOWN_DIPLOC( 0x10, 0x10, "SW1:5" ) PORT_DIPUNKNOWN_DIPLOC( 0x20, 0x20, "SW1:6" ) PORT_DIPNAME( 0x40, 0x40, DEF_STR( Language ) ) PORT_DIPLOCATION("SW1:7") PORT_DIPSETTING( 0x40, DEF_STR( English ) ) PORT_DIPSETTING( 0x00, DEF_STR( French ) ) 
INPUT_PORTS_END static INPUT_PORTS_START( botanicf ) PORT_INCLUDE( bagman ) PORT_MODIFY("DSW") PORT_DIPNAME( 0x03, 0x02, DEF_STR( Lives ) ) PORT_DIPLOCATION("SW1:1,2") PORT_DIPSETTING( 0x00, "1" ) PORT_DIPSETTING( 0x03, "2" ) PORT_DIPSETTING( 0x02, "3" ) PORT_DIPSETTING( 0x01, "4" ) PORT_DIPNAME( 0x04, 0x00, DEF_STR( Coinage ) ) PORT_DIPLOCATION("SW1:3") PORT_DIPSETTING( 0x00, "1C/1C 1C/2C 1C/6C 1C/14C" ) PORT_DIPSETTING( 0x04, "2C/1C 1C/2C 1C/3C 1C/7C" ) PORT_DIPNAME( 0x08, 0x08, "Invulnerability Fruits" ) PORT_DIPLOCATION("SW1:4") PORT_DIPSETTING( 0x08, "3" ) PORT_DIPSETTING( 0x00, DEF_STR( None ) ) PORT_DIPUNKNOWN_DIPLOC( 0x10, 0x10, "SW1:5" ) PORT_DIPUNKNOWN_DIPLOC( 0x20, 0x20, "SW1:6" ) PORT_DIPUNKNOWN_DIPLOC( 0x40, 0x40, "SW1:7" ) INPUT_PORTS_END static INPUT_PORTS_START( botanici ) PORT_INCLUDE( botanicf ) PORT_MODIFY("P2") // only seems to have 2 coin slots PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_UNKNOWN ) PORT_BIT( 0x02, IP_ACTIVE_HIGH, IPT_UNKNOWN ) // this must be ACTIVE_HIGH or the game fails after you complete a level, protection? PORT_MODIFY("DSW") // dipswitches are a bit messy on this set PORT_DIPNAME( 0x04, 0x00, DEF_STR( Coinage ) ) PORT_DIPLOCATION("SW1:3") PORT_DIPSETTING( 0x00, "1C/1C 1C/2C" ) PORT_DIPSETTING( 0x04, "2C/1C 1C/2C" ) PORT_DIPNAME( 0x18, 0x18, "Invulnerability Fruits" ) PORT_DIPLOCATION("SW1:4,5") PORT_DIPSETTING( 0x00, "2" ) PORT_DIPSETTING( 0x08, "3" ) PORT_DIPSETTING( 0x10, "3 (duplicate 1)" ) PORT_DIPSETTING( 0x18, "3 (duplicate 2)" ) PORT_DIPNAME( 0x20, 0x20, "Language / Disable Invlunerability Fruits" ) PORT_DIPLOCATION("SW1:6") // changing this off, even in game, seems to remove all fruits you have? 
PORT_DIPSETTING( 0x20, "Fruits On, English" ) PORT_DIPSETTING( 0x00, "Fruits Off, Spanish" ) INPUT_PORTS_END static INPUT_PORTS_START( squaitsa ) PORT_START("P1") PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_COIN1 ) PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_COIN2 ) PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_START1 ) PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT ) PORT_8WAY PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT ) PORT_8WAY PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_SPECIAL ) // special handling for the p1 dial PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_SPECIAL ) // ^ PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_START("P2") PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_COIN3 ) PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_COIN4 ) PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_START2 ) PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT ) PORT_8WAY PORT_COCKTAIL PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT ) PORT_8WAY PORT_COCKTAIL PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_SPECIAL ) // special handling for the p2 dial PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_SPECIAL ) // ^ PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_COCKTAIL PORT_START("DSW") PORT_DIPNAME( 0x01, 0x01, DEF_STR( Coinage ) ) PORT_DIPLOCATION("SW:1") PORT_DIPSETTING( 0x00, DEF_STR( 2C_1C ) ) PORT_DIPSETTING( 0x01, DEF_STR( 1C_1C ) ) PORT_DIPNAME( 0x06, 0x06, "Max Points" ) PORT_DIPLOCATION("SW:2,3") PORT_DIPSETTING( 0x06, "7" ) PORT_DIPSETTING( 0x04, "11" ) PORT_DIPSETTING( 0x02, "15" ) PORT_DIPSETTING( 0x00, "21" ) PORT_DIPNAME( 0x18, 0x18, DEF_STR( Difficulty ) ) PORT_DIPLOCATION("SW:4,5") PORT_DIPSETTING( 0x00, "Level 1" ) PORT_DIPSETTING( 0x08, "Level 2" ) PORT_DIPSETTING( 0x10, "Level 3" ) PORT_DIPSETTING( 0x18, "Level 4" ) PORT_DIPNAME( 0x20, 0x20, DEF_STR( Language ) ) PORT_DIPLOCATION("SW:6") PORT_DIPSETTING( 0x20, DEF_STR( Spanish ) ) PORT_DIPSETTING( 0x00, DEF_STR( English ) ) PORT_DIPNAME( 0x40, 0x40, "Body Fault" ) PORT_DIPLOCATION("SW:7") PORT_DIPSETTING( 0x40, DEF_STR( Off ) ) PORT_DIPSETTING( 0x00, DEF_STR( On ) ) PORT_DIPNAME( 0x80, 0x00, 
"Protection?" ) /* Left empty in the dips scan */ PORT_DIPSETTING( 0x80, DEF_STR( Off ) ) PORT_DIPSETTING( 0x00, DEF_STR( On ) ) PORT_START("DIAL_P1") PORT_BIT( 0xff, 0, IPT_DIAL ) PORT_SENSITIVITY(25) PORT_KEYDELTA(5) PORT_START("DIAL_P2") PORT_BIT( 0xff, 0, IPT_DIAL ) PORT_SENSITIVITY(25) PORT_KEYDELTA(5) PORT_COCKTAIL INPUT_PORTS_END static const gfx_layout charlayout = { 8,8, /* 8*8 characters */ 512, /* 512 characters */ 2, /* 2 bits per pixel */ { 0, 512*8*8 }, /* the two bitplanes are separated */ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* pretty straightforward layout */ { 0*8, 1*8, 2*8, 3*8, 4*8, 5*8, 6*8, 7*8 }, 8*8 /* every char takes 8 consecutive bytes */ }; static const gfx_layout spritelayout = { 16,16, /* 16*16 sprites */ 128, /* 128 sprites */ 2, /* 2 bits per pixel */ { 0, 128*16*16 }, /* the two bitplanes are separated */ { 0, 1, 2, 3, 4, 5, 6, 7, /* pretty straightforward layout */ 8*8+0, 8*8+1, 8*8+2, 8*8+3, 8*8+4, 8*8+5, 8*8+6, 8*8+7 }, { 0*8, 1*8, 2*8, 3*8, 4*8, 5*8, 6*8, 7*8, 16*8, 17*8, 18*8, 19*8, 20*8, 21*8, 22*8, 23*8 }, 32*8 /* every sprite takes 32 consecutive bytes */ }; static GFXDECODE_START( bagman ) GFXDECODE_ENTRY( "gfx1", 0, charlayout, 0, 16 ) /* char set #1 */ GFXDECODE_ENTRY( "gfx1", 0, spritelayout, 0, 16 ) /* sprites */ GFXDECODE_ENTRY( "gfx2", 0, charlayout, 0, 16 ) /* char set #2 */ GFXDECODE_END static GFXDECODE_START( pickin ) GFXDECODE_ENTRY( "gfx1", 0, charlayout, 0, 16 ) /* char set #1 */ GFXDECODE_ENTRY( "gfx1", 0, spritelayout, 0, 16 ) /* sprites */ /* no gfx2 */ GFXDECODE_END /* squaitsa doesn't map the dial directly, instead it polls the results of the dial through an external circuitry. I don't know if the following is correct, there can possibly be multiple solutions for the same problem. 
*/ READ8_MEMBER(bagman_state::dial_input_p1_r) { UINT8 dial_val; dial_val = ioport("DIAL_P1")->read(); if(m_p1_res != 0x60) m_p1_res = 0x60; else if(dial_val > m_p1_old_val) m_p1_res = 0x40; else if(dial_val < m_p1_old_val) m_p1_res = 0x20; else m_p1_res = 0x60; m_p1_old_val = dial_val; return (ioport("P1")->read() & 0x9f) | (m_p1_res); } READ8_MEMBER(bagman_state::dial_input_p2_r) { UINT8 dial_val; dial_val = ioport("DIAL_P2")->read(); if(m_p2_res != 0x60) m_p2_res = 0x60; else if(dial_val > m_p2_old_val) m_p2_res = 0x40; else if(dial_val < m_p2_old_val) m_p2_res = 0x20; else m_p2_res = 0x60; m_p2_old_val = dial_val; return (ioport("P2")->read() & 0x9f) | (m_p2_res); } INTERRUPT_GEN_MEMBER(bagman_state::vblank_irq) { if(m_irq_mask) device.execute().set_input_line(0, HOLD_LINE); } static MACHINE_CONFIG_START( bagman, bagman_state ) /* basic machine hardware */ MCFG_CPU_ADD("maincpu", Z80, BAGMAN_H0) MCFG_CPU_PROGRAM_MAP(main_map) MCFG_CPU_IO_MAP(main_portmap) MCFG_CPU_VBLANK_INT_DRIVER("screen", bagman_state, vblank_irq) MCFG_MACHINE_START_OVERRIDE(bagman_state, bagman) /* video hardware */ MCFG_SCREEN_ADD("screen", RASTER) MCFG_SCREEN_RAW_PARAMS(BAGMAN_HCLK, HTOTAL, HBEND, HBSTART, VTOTAL, VBEND, VBSTART) MCFG_SCREEN_UPDATE_DRIVER(bagman_state, screen_update) MCFG_SCREEN_PALETTE("palette") MCFG_GFXDECODE_ADD("gfxdecode", "palette", bagman) MCFG_PALETTE_ADD("palette", 64) MCFG_PALETTE_INIT_OWNER(bagman_state,bagman) MCFG_DEVICE_ADD("tmsprom", TMSPROM, 640000 / 2) /* rom clock */ MCFG_TMSPROM_REGION("5110ctrl") /* prom memory region - sound region is automatically assigned */ MCFG_TMSPROM_ROM_SIZE(0x1000) /* individual rom_size */ MCFG_TMSPROM_PDC_BIT(1) /* bit # of pdc line */ /* virtual bit 8: constant 0, virtual bit 9:constant 1 */ MCFG_TMSPROM_CTL1_BIT(8) /* bit # of ctl1 line */ MCFG_TMSPROM_CTL2_BIT(2) /* bit # of ctl2 line */ MCFG_TMSPROM_CTL4_BIT(8) /* bit # of ctl4 line */ MCFG_TMSPROM_CTL8_BIT(2) /* bit # of ctl8 line */ MCFG_TMSPROM_RESET_BIT(6) /* bit # 
of rom reset */ MCFG_TMSPROM_STOP_BIT(7) /* bit # of stop */ MCFG_TMSPROM_PDC_CB(DEVWRITELINE("tms", tms5110_device, pdc_w)) /* tms pdc func */ MCFG_TMSPROM_CTL_CB(DEVWRITE8("tms", tms5110_device, ctl_w)) /* tms ctl func */ /* sound hardware */ MCFG_SPEAKER_STANDARD_MONO("mono") MCFG_SOUND_ADD("aysnd", AY8910, BAGMAN_H0 / 2) MCFG_AY8910_PORT_A_READ_CB(IOPORT("P1")) MCFG_AY8910_PORT_B_READ_CB(IOPORT("P2")) MCFG_SOUND_ROUTE(ALL_OUTPUTS, "mono", 0.40) MCFG_SOUND_ADD("tms", TMS5110A, 640000) MCFG_TMS5110_M0_CB(DEVWRITELINE("tmsprom", tmsprom_device, m0_w)) MCFG_TMS5110_DATA_CB(DEVREADLINE("tmsprom", tmsprom_device, data_r)) MCFG_SOUND_ROUTE(ALL_OUTPUTS, "mono", 1.0) MACHINE_CONFIG_END static MACHINE_CONFIG_START( pickin, bagman_state ) /* basic machine hardware */ MCFG_CPU_ADD("maincpu", Z80, BAGMAN_H0) MCFG_CPU_PROGRAM_MAP(pickin_map) MCFG_CPU_IO_MAP(main_portmap) MCFG_CPU_VBLANK_INT_DRIVER("screen", bagman_state, vblank_irq) /* video hardware */ MCFG_SCREEN_ADD("screen", RASTER) MCFG_SCREEN_RAW_PARAMS(BAGMAN_HCLK, HTOTAL, HBEND, HBSTART, VTOTAL, VBEND, VBSTART) MCFG_SCREEN_UPDATE_DRIVER(bagman_state, screen_update) MCFG_SCREEN_PALETTE("palette") MCFG_GFXDECODE_ADD("gfxdecode", "palette", pickin) MCFG_PALETTE_ADD("palette", 64) MCFG_PALETTE_INIT_OWNER(bagman_state,bagman) /* sound hardware */ MCFG_SPEAKER_STANDARD_MONO("mono") MCFG_SOUND_ADD("aysnd", AY8910, 1500000) MCFG_AY8910_PORT_A_READ_CB(IOPORT("P1")) MCFG_AY8910_PORT_B_READ_CB(IOPORT("P2")) MCFG_SOUND_ROUTE(ALL_OUTPUTS, "mono", 0.40) /* maybe */ MCFG_SOUND_ADD("ay2", AY8910, 1500000) MCFG_SOUND_ROUTE(ALL_OUTPUTS, "mono", 0.40) MACHINE_CONFIG_END /* Botanic Valadon Automation 1983 z80 6116 - work ram 2x 2114 - screen ram 2x 2114 6x 27ls00 - sprite buffer ram 2x ay8910 18.432mhz crystal */ static MACHINE_CONFIG_START( botanic, bagman_state ) /* basic machine hardware */ MCFG_CPU_ADD("maincpu", Z80, BAGMAN_H0) MCFG_CPU_PROGRAM_MAP(pickin_map) MCFG_CPU_IO_MAP(main_portmap) MCFG_CPU_VBLANK_INT_DRIVER("screen", 
bagman_state, vblank_irq) /* video hardware */ MCFG_SCREEN_ADD("screen", RASTER) MCFG_SCREEN_RAW_PARAMS(BAGMAN_HCLK, HTOTAL, HBEND, HBSTART, VTOTAL, VBEND, VBSTART) MCFG_SCREEN_UPDATE_DRIVER(bagman_state, screen_update) MCFG_SCREEN_PALETTE("palette") MCFG_GFXDECODE_ADD("gfxdecode", "palette", bagman) MCFG_PALETTE_ADD("palette", 64) MCFG_PALETTE_INIT_OWNER(bagman_state,bagman) /* sound hardware */ MCFG_SPEAKER_STANDARD_MONO("mono") MCFG_SOUND_ADD("aysnd", AY8910, 1500000) MCFG_AY8910_PORT_A_READ_CB(IOPORT("P1")) MCFG_AY8910_PORT_B_READ_CB(IOPORT("P2")) MCFG_SOUND_ROUTE(ALL_OUTPUTS, "mono", 0.40) MCFG_SOUND_ADD("ay2", AY8910, 1500000) MCFG_SOUND_ROUTE(ALL_OUTPUTS, "mono", 0.40) MACHINE_CONFIG_END static MACHINE_CONFIG_DERIVED( squaitsa, botanic ) MCFG_MACHINE_START_OVERRIDE(bagman_state, squaitsa) MCFG_SOUND_MODIFY("aysnd") MCFG_AY8910_PORT_A_READ_CB(READ8(bagman_state, dial_input_p1_r)) MCFG_AY8910_PORT_B_READ_CB(READ8(bagman_state, dial_input_p2_r)) MACHINE_CONFIG_END /*************************************************************************** Game driver(s) ***************************************************************************/ ROM_START( bagman ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "e9_b05.bin", 0x0000, 0x1000, CRC(e0156191) SHA1(bb5f16d49fbe48f3bac118acd1fea51ec4bc5355) ) ROM_LOAD( "f9_b06.bin", 0x1000, 0x1000, CRC(7b758982) SHA1(c8460023b43fed4aca9c6b987faea334832c5e30) ) ROM_LOAD( "f9_b07.bin", 0x2000, 0x1000, CRC(302a077b) SHA1(916c4a6ea1e631cc72bdb91ff9d263dcbaf08bb2) ) ROM_LOAD( "k9_b08.bin", 0x3000, 0x1000, CRC(f04293cb) SHA1(ce6b0ae4088ce28c75d414f506fad2cf2b6920c2) ) ROM_LOAD( "m9_b09s.bin", 0x4000, 0x1000, CRC(68e83e4f) SHA1(9454564885a1003cee7107db18bedb387b85e9ab) ) ROM_LOAD( "n9_b10.bin", 0x5000, 0x1000, CRC(1d6579f7) SHA1(3ab54329f516156b1c9f68efbe59c95d5240bc8c) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "e1_b02.bin", 0x0000, 0x1000, CRC(4a0a6b55) SHA1(955f8bd4bd9b0fc3c6c359c25ba543ba26c04cbd) ) ROM_LOAD( "j1_b04.bin", 0x1000, 
0x1000, CRC(c680ef04) SHA1(79406bc786374abfcd9f548268c445b5c8d8858d) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "c1_b01.bin", 0x0000, 0x1000, CRC(705193b2) SHA1(ca9cfd05f9195c2a38e8854012de51b6ee6bb403) ) ROM_LOAD( "f1_b03s.bin", 0x1000, 0x1000, CRC(dba1eda7) SHA1(26d877028b3a31dd671f9e667316c8a14780ca73) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "p3.bin", 0x0000, 0x0020, CRC(2a855523) SHA1(91e032233fee397c90b7c1662934aca9e0671482) ) ROM_LOAD( "r3.bin", 0x0020, 0x0020, CRC(ae6f1019) SHA1(fd711882b670380cb4bd909c840ba06277b8fbe3) ) ROM_REGION( 0x0020, "5110ctrl", 0) ROM_LOAD( "r6.bin", 0x0000, 0x0020, CRC(c58a4f6a) SHA1(35ef244b3e94032df2610aa594ea5670b91e1449) ) /*state machine driving TMS5110*/ ROM_REGION( 0x2000, "tmsprom", 0 ) /* data for the TMS5110 speech chip */ ROM_LOAD( "r9_b11.bin", 0x0000, 0x1000, CRC(2e0057ff) SHA1(33e3ffa6418f86864eb81e5e9bda4bf540c143a6) ) ROM_LOAD( "t9_b12.bin", 0x1000, 0x1000, CRC(b2120edd) SHA1(52b89dbcc749b084331fa82b13d0876e911fce52) ) ROM_END ROM_START( bagnard ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "e9_b05.bin", 0x0000, 0x1000, CRC(e0156191) SHA1(bb5f16d49fbe48f3bac118acd1fea51ec4bc5355) ) ROM_LOAD( "f9_b06.bin", 0x1000, 0x1000, CRC(7b758982) SHA1(c8460023b43fed4aca9c6b987faea334832c5e30) ) ROM_LOAD( "f9_b07.bin", 0x2000, 0x1000, CRC(302a077b) SHA1(916c4a6ea1e631cc72bdb91ff9d263dcbaf08bb2) ) ROM_LOAD( "k9_b08.bin", 0x3000, 0x1000, CRC(f04293cb) SHA1(ce6b0ae4088ce28c75d414f506fad2cf2b6920c2) ) ROM_LOAD( "bagnard.009", 0x4000, 0x1000, CRC(4f0088ab) SHA1(a8009f5b8517ba4d84fbc483b199f2514f24eae8) ) ROM_LOAD( "bagnard.010", 0x5000, 0x1000, CRC(cd2cac01) SHA1(76749161feb9af2b3e928408a21b93d143915b57) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "e1_b02.bin", 0x0000, 0x1000, CRC(4a0a6b55) SHA1(955f8bd4bd9b0fc3c6c359c25ba543ba26c04cbd) ) ROM_LOAD( "j1_b04.bin", 0x1000, 0x1000, CRC(c680ef04) SHA1(79406bc786374abfcd9f548268c445b5c8d8858d) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "bagnard.001", 0x0000, 0x1000, CRC(060b044c) 
SHA1(3121f07adb661663a2303085eea1b662968f8f98) ) ROM_LOAD( "bagnard.003", 0x1000, 0x1000, CRC(8043bc1a) SHA1(bd2f3dfe26cf8d987d9ecaa41eac4bdc4e16a692) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "p3.bin", 0x0000, 0x0020, CRC(2a855523) SHA1(91e032233fee397c90b7c1662934aca9e0671482) ) ROM_LOAD( "r3.bin", 0x0020, 0x0020, CRC(ae6f1019) SHA1(fd711882b670380cb4bd909c840ba06277b8fbe3) ) ROM_REGION( 0x0020, "5110ctrl", 0) ROM_LOAD( "r6.bin", 0x0000, 0x0020, CRC(c58a4f6a) SHA1(35ef244b3e94032df2610aa594ea5670b91e1449) ) /*state machine driving TMS5110*/ ROM_REGION( 0x2000, "tmsprom", 0 ) /* data for the TMS5110 speech chip */ ROM_LOAD( "r9_b11.bin", 0x0000, 0x1000, CRC(2e0057ff) SHA1(33e3ffa6418f86864eb81e5e9bda4bf540c143a6) ) ROM_LOAD( "t9_b12.bin", 0x1000, 0x1000, CRC(b2120edd) SHA1(52b89dbcc749b084331fa82b13d0876e911fce52) ) ROM_END ROM_START( bagnarda ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "bagman.005", 0x0000, 0x1000, CRC(98fca49c) SHA1(60bf15d700cf4174ac531c11febf21d69ec02db5) ) ROM_LOAD( "bagman.006", 0x1000, 0x1000, CRC(8f447432) SHA1(71fee4feb92cdd35dcd3ad9e95ea9f186cb25e25) ) ROM_LOAD( "bagman.007", 0x2000, 0x1000, CRC(236203a6) SHA1(3d661c135a5036adeaf5fed2be38c97bbc72cd0a) ) ROM_LOAD( "bagman.008", 0x3000, 0x1000, CRC(8bd8c6cb) SHA1(3d34333b20d8ef189425334985285e0634c5ee23) ) ROM_LOAD( "bagman.009", 0x4000, 0x1000, CRC(6211ba82) SHA1(6d43e16cc99159b188f93bed7f9afef81c1b7fb3) ) ROM_LOAD( "bagman.010", 0x5000, 0x1000, CRC(08ed1247) SHA1(172fb0d1b919fb80f5603ebb52779664122f8e94) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "bagman.002", 0x0000, 0x1000, CRC(7dc57abc) SHA1(73ae325ac1077936741833d33095ad6375353c31) ) ROM_LOAD( "bagman.004", 0x1000, 0x1000, CRC(1e21577e) SHA1(fc849c2fbaf7353a44a9f2743ccf6ac1adb8dc62) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "bagman.001", 0x0000, 0x1000, CRC(1eb56acd) SHA1(f75f6709006e78417999d423d2078ed80eae73a2) ) ROM_LOAD( "bagman.003", 0x1000, 0x1000, CRC(0ad82a39) SHA1(30ac0ff5bc63934c3eb572c7c13df324757e5e44) ) 
ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "p3.bin", 0x0000, 0x0020, CRC(2a855523) SHA1(91e032233fee397c90b7c1662934aca9e0671482) ) ROM_LOAD( "r3.bin", 0x0020, 0x0020, CRC(ae6f1019) SHA1(fd711882b670380cb4bd909c840ba06277b8fbe3) ) ROM_REGION( 0x0020, "5110ctrl", 0) ROM_LOAD( "r6.bin", 0x0000, 0x0020, CRC(c58a4f6a) SHA1(35ef244b3e94032df2610aa594ea5670b91e1449) ) /*state machine driving TMS5110*/ ROM_REGION( 0x2000, "tmsprom", 0 ) /* data for the TMS5110 speech chip */ ROM_LOAD( "r9_b11.bin", 0x0000, 0x1000, CRC(2e0057ff) SHA1(33e3ffa6418f86864eb81e5e9bda4bf540c143a6) ) ROM_LOAD( "t9_b12.bin", 0x1000, 0x1000, CRC(b2120edd) SHA1(52b89dbcc749b084331fa82b13d0876e911fce52) ) ROM_END ROM_START( bagnardi ) // based on bagnard set with mods for license text ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "bagnardi_05.e9", 0x0000, 0x1000, CRC(e0156191) SHA1(bb5f16d49fbe48f3bac118acd1fea51ec4bc5355) ) // == e9_b05.bin ROM_LOAD( "bagnardi_06.f9", 0x1000, 0x1000, CRC(2e98c072) SHA1(d1f2341fc0c04f48615cb21a44736c83b7ded3ee) ) ROM_LOAD( "bagnardi_07.j9", 0x2000, 0x1000, CRC(698f17b3) SHA1(619498e9e06fcde0a1db67f4347e06c4fc669e6c) ) ROM_LOAD( "bagnardi_08.k9", 0x3000, 0x1000, CRC(f212e287) SHA1(8ed4b8e555239862eec2a2e7496054a9eda341ad) ) ROM_LOAD( "bagnardi_09.m9", 0x4000, 0x1000, CRC(4f0088ab) SHA1(a8009f5b8517ba4d84fbc483b199f2514f24eae8) ) // == bagnard.009 ROM_LOAD( "bagnardi_10.n9", 0x5000, 0x1000, CRC(423c54be) SHA1(f3ad41142441eb73bd17ea7cbdb7070f02c18cb8) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "bagnardi_02.e1", 0x0000, 0x1000, CRC(4a0a6b55) SHA1(955f8bd4bd9b0fc3c6c359c25ba543ba26c04cbd) ) // == e1_b02.bin ROM_LOAD( "bagnardi_04.j1", 0x1000, 0x1000, CRC(c680ef04) SHA1(79406bc786374abfcd9f548268c445b5c8d8858d) ) // == j1_b04.bin ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "bagnardi_01.c1", 0x0000, 0x1000, CRC(060b044c) SHA1(3121f07adb661663a2303085eea1b662968f8f98) ) // == bagnard.001 ROM_LOAD( "bagnardi_03.f1", 0x1000, 0x1000, CRC(8043bc1a) 
SHA1(bd2f3dfe26cf8d987d9ecaa41eac4bdc4e16a692) ) // == bagnard.003 ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "p3.bin", 0x0000, 0x0020, CRC(2a855523) SHA1(91e032233fee397c90b7c1662934aca9e0671482) ) ROM_LOAD( "r3.bin", 0x0020, 0x0020, CRC(ae6f1019) SHA1(fd711882b670380cb4bd909c840ba06277b8fbe3) ) ROM_REGION( 0x0020, "5110ctrl", 0) ROM_LOAD( "r6.bin", 0x0000, 0x0020, CRC(c58a4f6a) SHA1(35ef244b3e94032df2610aa594ea5670b91e1449) ) /*state machine driving TMS5110*/ ROM_REGION( 0x2000, "tmsprom", 0 ) /* data for the TMS5110 speech chip */ ROM_LOAD( "bagnardi_11.r9", 0x0000, 0x1000, CRC(2e0057ff) SHA1(33e3ffa6418f86864eb81e5e9bda4bf540c143a6) ) // == r9_b11.bin ROM_LOAD( "bagnardi_12.t9", 0x1000, 0x1000, CRC(b2120edd) SHA1(52b89dbcc749b084331fa82b13d0876e911fce52) ) // == t9_b12.bin ROM_END ROM_START( bagmans ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "a4_9e.bin", 0x0000, 0x1000, CRC(5fb0a1a3) SHA1(849cd60b58de9585a78a1c4c1747f666a4a4fcc3) ) ROM_LOAD( "a5-9f", 0x1000, 0x1000, CRC(2ddf6bb9) SHA1(151068dddc55163bb6f925f68e5d04e347ded6a5) ) ROM_LOAD( "a4_9j.bin", 0x2000, 0x1000, CRC(b2da8b77) SHA1(ea36cd6be42c5548a9a91054aeebb4b985ba15c9) ) ROM_LOAD( "a5-9k", 0x3000, 0x1000, CRC(f91d617b) SHA1(a3323b51277e08747701cc4e2d3a9c466e96d4c1) ) ROM_LOAD( "a4_9m.bin", 0x4000, 0x1000, CRC(b8e75eb6) SHA1(433fd736512f10bc0879b15821eb55cc41d58d33) ) ROM_LOAD( "a5-9n", 0x5000, 0x1000, CRC(68e4b64d) SHA1(55950d7c07c621cafa001d5d3bfec6bbc02712e2) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "a2_1e.bin", 0x0000, 0x1000, CRC(f217ac09) SHA1(a9716674401dff27344a01df8121b6b648688680) ) ROM_LOAD( "j1_b04.bin", 0x1000, 0x1000, CRC(c680ef04) SHA1(79406bc786374abfcd9f548268c445b5c8d8858d) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "a2_1c.bin", 0x0000, 0x1000, CRC(f3e11bd7) SHA1(43ee00ff777008c89f619eb183e7c5e63f6c7694) ) ROM_LOAD( "a2_1f.bin", 0x1000, 0x1000, CRC(d0f7105b) SHA1(fb382703850a4ded567706e02ebb7f3e22531b7c) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "p3.bin", 0x0000, 0x0020, 
CRC(2a855523) SHA1(91e032233fee397c90b7c1662934aca9e0671482) ) ROM_LOAD( "r3.bin", 0x0020, 0x0020, CRC(ae6f1019) SHA1(fd711882b670380cb4bd909c840ba06277b8fbe3) ) ROM_REGION( 0x0020, "5110ctrl", 0) ROM_LOAD( "r6.bin", 0x0000, 0x0020, CRC(c58a4f6a) SHA1(35ef244b3e94032df2610aa594ea5670b91e1449) ) /*state machine driving TMS5110*/ ROM_REGION( 0x2000, "tmsprom", 0 ) /* data for the TMS5110 speech chip */ ROM_LOAD( "r9_b11.bin", 0x0000, 0x1000, CRC(2e0057ff) SHA1(33e3ffa6418f86864eb81e5e9bda4bf540c143a6) ) ROM_LOAD( "t9_b12.bin", 0x1000, 0x1000, CRC(b2120edd) SHA1(52b89dbcc749b084331fa82b13d0876e911fce52) ) ROM_END ROM_START( bagmans2 ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "a4_9e.bin", 0x0000, 0x1000, CRC(5fb0a1a3) SHA1(849cd60b58de9585a78a1c4c1747f666a4a4fcc3) ) ROM_LOAD( "a4_9f.bin", 0x1000, 0x1000, CRC(7871206e) SHA1(14d9b7a0779d59a870e0d4b911797dff5435a16c) ) ROM_LOAD( "a4_9j.bin", 0x2000, 0x1000, CRC(b2da8b77) SHA1(ea36cd6be42c5548a9a91054aeebb4b985ba15c9) ) ROM_LOAD( "a4_9k.bin", 0x3000, 0x1000, CRC(36b6a944) SHA1(270dd2566b36129366adcbdd5a8db396bec7631f) ) ROM_LOAD( "a4_9m.bin", 0x4000, 0x1000, CRC(b8e75eb6) SHA1(433fd736512f10bc0879b15821eb55cc41d58d33) ) ROM_LOAD( "a4_9n.bin", 0x5000, 0x1000, CRC(83fccb1c) SHA1(7225d738b64a2cdaaec8860017de4229f2852ed2) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "a2_1e.bin", 0x0000, 0x1000, CRC(f217ac09) SHA1(a9716674401dff27344a01df8121b6b648688680) ) ROM_LOAD( "j1_b04.bin", 0x1000, 0x1000, CRC(c680ef04) SHA1(79406bc786374abfcd9f548268c445b5c8d8858d) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "a2_1c.bin", 0x0000, 0x1000, CRC(f3e11bd7) SHA1(43ee00ff777008c89f619eb183e7c5e63f6c7694) ) ROM_LOAD( "a2_1f.bin", 0x1000, 0x1000, CRC(d0f7105b) SHA1(fb382703850a4ded567706e02ebb7f3e22531b7c) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "p3.bin", 0x0000, 0x0020, CRC(2a855523) SHA1(91e032233fee397c90b7c1662934aca9e0671482) ) ROM_LOAD( "r3.bin", 0x0020, 0x0020, CRC(ae6f1019) SHA1(fd711882b670380cb4bd909c840ba06277b8fbe3) ) 
ROM_REGION( 0x0020, "5110ctrl", 0) ROM_LOAD( "r6.bin", 0x0000, 0x0020, CRC(c58a4f6a) SHA1(35ef244b3e94032df2610aa594ea5670b91e1449) ) /*state machine driving TMS5110*/ ROM_REGION( 0x2000, "tmsprom", 0 ) /* data for the TMS5110 speech chip */ ROM_LOAD( "r9_b11.bin", 0x0000, 0x1000, CRC(2e0057ff) SHA1(33e3ffa6418f86864eb81e5e9bda4bf540c143a6) ) ROM_LOAD( "t9_b12.bin", 0x1000, 0x1000, CRC(b2120edd) SHA1(52b89dbcc749b084331fa82b13d0876e911fce52) ) ROM_END ROM_START( sbagman ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "5.9e", 0x0000, 0x1000, CRC(1b1d6b0a) SHA1(549161f6adc88fa16339815e05af33ca57815660) ) ROM_LOAD( "6.9f", 0x1000, 0x1000, CRC(ac49cb82) SHA1(5affa0c03bedf2c9d5368c7f075818e1760c12ae) ) ROM_LOAD( "7.9j", 0x2000, 0x1000, CRC(9a1c778d) SHA1(a655e25dc9efdf60cc5b34e42c93c4acaa4a7922) ) ROM_LOAD( "8.9k", 0x3000, 0x1000, CRC(b94fbb73) SHA1(5d676c5d1d864d70d98f0137c4072062a781b3a0) ) ROM_LOAD( "9.9m", 0x4000, 0x1000, CRC(601f34ba) SHA1(1b7ee61a341b9a87abe4fe10b0c647a9b0b97d38) ) ROM_LOAD( "10.9n", 0x5000, 0x1000, CRC(5f750918) SHA1(3dc44f259e88999dbb95b4d4376281cc81c1ab87) ) ROM_LOAD( "13.8d", 0xc000, 0x0e00, CRC(944a4453) SHA1(cd64d9267d2c5cea39464ba9308752c690e7fd24) ) ROM_CONTINUE( 0xfe00, 0x0200 ) ROM_LOAD( "14.8f", 0xd000, 0x0400, CRC(83b10139) SHA1(8a1880c6ab8a345676fe30465351d69cc1b416b2) ) ROM_CONTINUE( 0xe400, 0x0200 ) ROM_CONTINUE( 0xd600, 0x0a00 ) ROM_LOAD( "15.8j", 0xe000, 0x0400, CRC(fe924879) SHA1(b80cbf9cba91e553f7685aef348854c02f0619c7) ) ROM_CONTINUE( 0xd400, 0x0200 ) ROM_CONTINUE( 0xe600, 0x0a00 ) ROM_LOAD( "16.8k", 0xf000, 0x0e00, CRC(b77eb1f5) SHA1(ef94c1b449e3fa230491052fc3bd4db3f1239263) ) ROM_CONTINUE( 0xce00, 0x0200 ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "2.1e", 0x0000, 0x1000, CRC(f4d3d4e6) SHA1(167ad0259578966fe86384df844e69cf2cc77443) ) ROM_LOAD( "4.1j", 0x1000, 0x1000, CRC(2c6a510d) SHA1(304064f11e80f4ec471174823b8aaf59844061ac) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "1.1c", 0x0000, 0x1000, CRC(a046ff44) 
SHA1(af319cfb74e5efe435c26e971de13bd390f4b378) ) ROM_LOAD( "3.1f", 0x1000, 0x1000, CRC(a4422da4) SHA1(3aa55ca8c99566c1c9eb097b6d645c4216e09dfb) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "p3.bin", 0x0000, 0x0020, CRC(2a855523) SHA1(91e032233fee397c90b7c1662934aca9e0671482) ) ROM_LOAD( "r3.bin", 0x0020, 0x0020, CRC(ae6f1019) SHA1(fd711882b670380cb4bd909c840ba06277b8fbe3) ) ROM_REGION( 0x0020, "5110ctrl", 0) ROM_LOAD( "r6.bin", 0x0000, 0x0020, CRC(c58a4f6a) SHA1(35ef244b3e94032df2610aa594ea5670b91e1449) ) /*state machine driving TMS5110*/ ROM_REGION( 0x2000, "tmsprom", 0 ) /* data for the TMS5110 speech chip */ ROM_LOAD( "11.9r", 0x0000, 0x1000, CRC(2e0057ff) SHA1(33e3ffa6418f86864eb81e5e9bda4bf540c143a6) ) ROM_LOAD( "12.9t", 0x1000, 0x1000, CRC(b2120edd) SHA1(52b89dbcc749b084331fa82b13d0876e911fce52) ) ROM_END ROM_START( sbagmans ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "sbag_9e.bin", 0x0000, 0x1000, CRC(c19696f2) SHA1(3a40202a97201a123033358f7afcb06f8ac15063) ) ROM_LOAD( "6.9f", 0x1000, 0x1000, CRC(ac49cb82) SHA1(5affa0c03bedf2c9d5368c7f075818e1760c12ae) ) ROM_LOAD( "7.9j", 0x2000, 0x1000, CRC(9a1c778d) SHA1(a655e25dc9efdf60cc5b34e42c93c4acaa4a7922) ) ROM_LOAD( "8.9k", 0x3000, 0x1000, CRC(b94fbb73) SHA1(5d676c5d1d864d70d98f0137c4072062a781b3a0) ) ROM_LOAD( "sbag_9m.bin", 0x4000, 0x1000, CRC(b21e246e) SHA1(39d2e93ac5240bb45e76c30c535d12e302690dde) ) ROM_LOAD( "10.9n", 0x5000, 0x1000, CRC(5f750918) SHA1(3dc44f259e88999dbb95b4d4376281cc81c1ab87) ) ROM_LOAD( "13.8d", 0xc000, 0x0e00, CRC(944a4453) SHA1(cd64d9267d2c5cea39464ba9308752c690e7fd24) ) ROM_CONTINUE( 0xfe00, 0x0200 ) ROM_LOAD( "sbag_f8.bin", 0xd000, 0x0400, CRC(0f3e6de4) SHA1(a7e50d210630b500e534d626d76110dee4aeb18d) ) ROM_CONTINUE( 0xe400, 0x0200 ) ROM_CONTINUE( 0xd600, 0x0a00 ) ROM_LOAD( "15.8j", 0xe000, 0x0400, CRC(fe924879) SHA1(b80cbf9cba91e553f7685aef348854c02f0619c7) ) ROM_CONTINUE( 0xd400, 0x0200 ) ROM_CONTINUE( 0xe600, 0x0a00 ) ROM_LOAD( "16.8k", 0xf000, 0x0e00, CRC(b77eb1f5) 
SHA1(ef94c1b449e3fa230491052fc3bd4db3f1239263) ) ROM_CONTINUE( 0xce00, 0x0200 ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "2.1e", 0x0000, 0x1000, CRC(f4d3d4e6) SHA1(167ad0259578966fe86384df844e69cf2cc77443) ) ROM_LOAD( "4.1j", 0x1000, 0x1000, CRC(2c6a510d) SHA1(304064f11e80f4ec471174823b8aaf59844061ac) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "sbag_1c.bin", 0x0000, 0x1000, CRC(262f870a) SHA1(90877b869a7e927cfa4f9729ec3d6eac3a95dc8f) ) ROM_LOAD( "sbag_1f.bin", 0x1000, 0x1000, CRC(350ed0fb) SHA1(c7804e9618ebc88a1e3684a92a98d9a181441a1f) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "p3.bin", 0x0000, 0x0020, CRC(2a855523) SHA1(91e032233fee397c90b7c1662934aca9e0671482) ) ROM_LOAD( "r3.bin", 0x0020, 0x0020, CRC(ae6f1019) SHA1(fd711882b670380cb4bd909c840ba06277b8fbe3) ) ROM_REGION( 0x0020, "5110ctrl", 0) ROM_LOAD( "r6.bin", 0x0000, 0x0020, CRC(c58a4f6a) SHA1(35ef244b3e94032df2610aa594ea5670b91e1449) ) /*state machine driving TMS5110*/ ROM_REGION( 0x2000, "tmsprom", 0 ) /* data for the TMS5110 speech chip */ ROM_LOAD( "11.9r", 0x0000, 0x1000, CRC(2e0057ff) SHA1(33e3ffa6418f86864eb81e5e9bda4bf540c143a6) ) ROM_LOAD( "12.9t", 0x1000, 0x1000, CRC(b2120edd) SHA1(52b89dbcc749b084331fa82b13d0876e911fce52) ) ROM_END ROM_START( pickin ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "9e", 0x0000, 0x1000, CRC(efd0bd43) SHA1(b70a471a809c08286a82934046357fb46556f641) ) ROM_LOAD( "9f", 0x1000, 0x1000, CRC(b5785a23) SHA1(9eddda5695981cb0470dfea68d5e2e8e220382b1) ) ROM_LOAD( "9j", 0x2000, 0x1000, CRC(65ee9fd4) SHA1(2efa40c19a7b0644ef4f4b2ce6a025b2b880239d) ) ROM_LOAD( "9k", 0x3000, 0x1000, CRC(7b23350e) SHA1(dff19602a0e46ca0bcdbdf2a1d61fd2c80ac70e7) ) ROM_LOAD( "9m", 0x4000, 0x1000, CRC(935a7248) SHA1(d9af4405d51ce1ff6c4b84709dc85c0db88b1d54) ) ROM_LOAD( "9n", 0x5000, 0x1000, CRC(52485d1d) SHA1(c309eec506f978388463f20d56d958e6639c31e8) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "1f", 0x0000, 0x1000, CRC(c5e96ac6) SHA1(b2d740b6d07c765e8eb2dce31fe285a15a9fe597) ) ROM_LOAD( "1j", 0x1000, 
0x1000, CRC(41c4ac1c) SHA1(aac58a9d675a9b70140d82341231bcf6c77c7b41) ) /* no gfx2 */ ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "6331-1.3p", 0x0000, 0x0020, CRC(fac81668) SHA1(5fa369a5c0ad3a2fc068305336e24772b8e84b62) ) ROM_LOAD( "6331-1.3r", 0x0020, 0x0020, CRC(14ee1603) SHA1(f3c071399606b727ae7dd0bfc21e1c6ca2d43c7c) ) ROM_END ROM_START( botanic ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "5.9e", 0x0000, 0x1000, CRC(907f01c7) SHA1(156b6b6bbc2176998fb0c18ad453fc42185ae490) ) ROM_LOAD( "06.9f", 0x1000, 0x1000, CRC(ff2533fb) SHA1(808a1555c16470b87fca0aea73e0291dbe0b9355) ) ROM_LOAD( "07.9j", 0x2000, 0x1000, CRC(b7c544ef) SHA1(75b5224c313e97c2c02ca7e9bc3f682278cb7a5c) ) ROM_LOAD( "08.9k", 0x3000, 0x1000, CRC(2df22793) SHA1(d1f27c915e7563abba4d14ec3fd6757a4d6137be) ) ROM_LOAD( "09.9m", 0x4000, 0x1000, CRC(f7d908ec) SHA1(ee5827f84505c1f37bebf48181d3e7759421fada) ) ROM_LOAD( "10.9n", 0x5000, 0x1000, CRC(7ce9fbc8) SHA1(cd2ba01470964640fad9ccf6ff23cbd76c0c2aeb) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "2.1e", 0x0000, 0x1000, CRC(bea449a6) SHA1(fe06208996d15a4d50753fb62a3020063a0a6620) ) ROM_LOAD( "4.1j", 0x1000, 0x1000, CRC(a5deb8ed) SHA1(b6b38daffdda263a366656168a6d094ad2b1458f) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "1.1c", 0x0000, 0x1000, CRC(a1148d89) SHA1(b1424693cebc410749216457d07bae54b903bc07) ) ROM_LOAD( "3.1f", 0x1000, 0x1000, CRC(70be5565) SHA1(a7eab667a82d3e7321f393073f29c6e5e865ec6b) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "prom.3p", 0x0000, 0x0020, CRC(a8a2ddd2) SHA1(fc2da863d13e92f7682f393a08bc9357841ae7ea) ) ROM_LOAD( "prom.3r", 0x0020, 0x0020, CRC(edf88f34) SHA1(b9c342d51303d552f87df2543a34e38c30acd07c) ) ROM_END ROM_START( botanicf ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "bota_05.9e", 0x0000, 0x1000, CRC(cc66e6f8) SHA1(251481b16f8925a11f02f49e5a79f6524460aa6c) ) ROM_LOAD( "bota_06.9f", 0x1000, 0x1000, CRC(59892f41) SHA1(eb01601a9163679560b878366aaf7cc0fb54a3e9) ) ROM_LOAD( "bota_07.9j", 0x2000, 0x1000, CRC(b7c544ef) 
SHA1(75b5224c313e97c2c02ca7e9bc3f682278cb7a5c) ) ROM_LOAD( "bota_08.9k", 0x3000, 0x1000, CRC(0afea479) SHA1(d69b2263b4ed09d8f4e40f379aa4a64187a75a52) ) ROM_LOAD( "bota_09.9m", 0x4000, 0x1000, CRC(2da36120) SHA1(359d7747d8b7c7b4ce876fed722f19dc20e58b89) ) ROM_LOAD( "bota_10.9n", 0x5000, 0x1000, CRC(7ce9fbc8) SHA1(cd2ba01470964640fad9ccf6ff23cbd76c0c2aeb) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "bota_02.1e", 0x0000, 0x1000, CRC(bea449a6) SHA1(fe06208996d15a4d50753fb62a3020063a0a6620) ) ROM_LOAD( "bota_04.1j", 0x1000, 0x1000, CRC(a5deb8ed) SHA1(b6b38daffdda263a366656168a6d094ad2b1458f) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "bota_01.1c", 0x0000, 0x1000, CRC(a1148d89) SHA1(b1424693cebc410749216457d07bae54b903bc07) ) ROM_LOAD( "bota_03.1f", 0x1000, 0x1000, CRC(70be5565) SHA1(a7eab667a82d3e7321f393073f29c6e5e865ec6b) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "bota_3p.3p", 0x0000, 0x0020, CRC(a8a2ddd2) SHA1(fc2da863d13e92f7682f393a08bc9357841ae7ea) ) ROM_LOAD( "bota_3a.3a", 0x0020, 0x0020, CRC(edf88f34) SHA1(b9c342d51303d552f87df2543a34e38c30acd07c) ) ROM_END /* Squash (Itisa) Anno 1984 Produttore Itisa-Valadon-gecas CPU 1x SGS Z8400AB1-Z80ACPU (main) 2x AY-3-8910 (sound) 1x LM380 (sound) 1x oscillator 18432 ROMs 7x 2732 2x MMI6331 Note 1x 22x2 edge connector 1x trimmer (volume) 1x 8 switches dip This is a strange thing: the PCB is marked "Valadon Automation (C) 1983" and "Fabrique sous license par GECAS/MILANO" (manufactured under license from GECAS/MILANO) But if you look in rom 7 with an hex editor you can see the following: "(C) 1984 ITISA" and "UN BONJOUR A JACQUES DE PEPE PETIT ET HENK" (a good morning to Jacques from Pepe Petit and Henk). These are the programmers in ITISA, Henk Spits, Josep M. Petit, Josep Morillas, the very same 3 persons working on BOTANIC (1984)(ITISA). Game writings in the eprom are in English and Spanish. So we have an English/Spanish game with a French easter egg on a French PCB manufactured under license from an Italian company! 
Let's call it melting pot! */ ROM_START( squaitsa ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD( "sq5.3.9e", 0x0000, 0x1000,CRC(04128d92) SHA1(ca7b7c4be5f40bcefc92b231ce3bba859c9967ee) ) ROM_LOAD( "sq6.4.9f", 0x1000, 0x1000,CRC(4ff7dd56) SHA1(1955675a9ee3ad7b9185cd027bc42284e15c7451) ) ROM_LOAD( "sq7.5.9j", 0x2000, 0x1000,CRC(e46ecda6) SHA1(25cd94b6c9602cc00fe3459b524639fd3beb72be) ) ROM_REGION( 0x2000, "gfx1", 0 ) ROM_LOAD( "sq2.1.1e", 0x0000, 0x1000,CRC(0eb6ecad) SHA1(da2facbfa5f2fe233ea09777e9880b4f1d3c1079) ) ROM_LOAD( "sq4.2.1j", 0x1000, 0x1000,CRC(8d875b0e) SHA1(f949da71167aa81c1cfaefc6f3d88b57792b6191) ) ROM_REGION( 0x2000, "gfx2", 0 ) ROM_LOAD( "sq1.1c", 0x0000, 0x1000,CRC(b6d563e5) SHA1(90a89fd8e892a612c74bd2c7e38acb08c22c6046) ) ROM_LOAD( "sq3.1f", 0x1000, 0x1000,CRC(0d9d87e6) SHA1(881039d3b8805bb1a546e28abda3273e79714033) ) ROM_REGION( 0x0040, "proms", 0 ) ROM_LOAD( "mmi6331.3p", 0x0000, 0x0020,CRC(06eab7ce) SHA1(d0bafedb340bf12d81446cc672307bb01e5d3026) ) ROM_LOAD( "mmi6331.3r", 0x0020, 0x0020,CRC(86c1e7db) SHA1(5c974b51d770a555ddab5c23f03a666c6f286cbf) ) ROM_END DRIVER_INIT_MEMBER(bagman_state,bagman) { /* Unmap video enable register, not available on earlier hardware revision(s) Bagman is supposed to have glitches during screen transitions */ m_maincpu->space(AS_PROGRAM).unmap_write(0xa003, 0xa003); *m_video_enable = 1; } GAME( 1982, bagman, 0, bagman, bagman, bagman_state, bagman, ROT270, "Valadon Automation", "Bagman", MACHINE_SUPPORTS_SAVE ) GAME( 1982, bagnard, bagman, bagman, bagman, bagman_state, bagman, ROT270, "Valadon Automation", "Le Bagnard (set 1)", MACHINE_SUPPORTS_SAVE ) GAME( 1982, bagnarda, bagman, bagman, bagman, bagman_state, bagman, ROT270, "Valadon Automation", "Le Bagnard (set 2)", MACHINE_SUPPORTS_SAVE ) GAME( 1982, bagnardi, bagman, bagman, bagman, bagman_state, bagman, ROT90, "Valadon Automation (Itisa license)", "Le Bagnard (Itisa, Spain)", MACHINE_SUPPORTS_SAVE ) GAME( 1982, bagmans, bagman, bagman, bagmans, bagman_state, 
bagman, ROT270, "Valadon Automation (Stern Electronics license)", "Bagman (Stern Electronics, set 1)", MACHINE_SUPPORTS_SAVE ) GAME( 1982, bagmans2, bagman, bagman, bagman, bagman_state, bagman, ROT270, "Valadon Automation (Stern Electronics license)", "Bagman (Stern Electronics, set 2)", MACHINE_SUPPORTS_SAVE ) GAME( 1984, sbagman, 0, bagman, sbagman, driver_device, 0, ROT270, "Valadon Automation", "Super Bagman", MACHINE_SUPPORTS_SAVE ) GAME( 1984, sbagmans, sbagman, bagman, sbagman, driver_device, 0, ROT270, "Valadon Automation (Stern Electronics license)", "Super Bagman (Stern Electronics)", MACHINE_SUPPORTS_SAVE ) GAME( 1983, pickin, 0, pickin, pickin, driver_device, 0, ROT270, "Valadon Automation", "Pickin'", MACHINE_SUPPORTS_SAVE ) GAME( 1983, botanic, 0, botanic, botanici,driver_device, 0, ROT90, "Itisa", "Botanic (English / Spanish)", MACHINE_SUPPORTS_SAVE ) GAME( 1984, botanicf, botanic, botanic, botanicf,driver_device, 0, ROT270, "Itisa (Valadon Automation license)", "Botanic (French)", MACHINE_SUPPORTS_SAVE ) GAME( 1984, squaitsa, 0, squaitsa,squaitsa, driver_device,0, ROT0, "Itisa", "Squash (Itisa)", MACHINE_SUPPORTS_SAVE )
sum2012/mame
src/mame/drivers/bagman.cpp
C++
gpl-2.0
46,009
/* */ #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/export.h> static const char *skip_arg(const char *cp) { while (*cp && !isspace(*cp)) cp++; return cp; } static int count_argc(const char *str) { int count = 0; while (*str) { str = skip_spaces(str); if (*str) { count++; str = skip_arg(str); } } return count; } /* */ void argv_free(char **argv) { char **p; for (p = argv; *p; p++) kfree(*p); kfree(argv); } EXPORT_SYMBOL(argv_free); /* */ char **argv_split(gfp_t gfp, const char *str, int *argcp) { int argc = count_argc(str); char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp); char **argvp; if (argv == NULL) goto out; if (argcp) *argcp = argc; argvp = argv; while (*str) { str = skip_spaces(str); if (*str) { const char *p = str; char *t; str = skip_arg(str); t = kstrndup(p, str-p, gfp); if (t == NULL) goto fail; *argvp++ = t; } } *argvp = NULL; out: return argv; fail: argv_free(argv); return NULL; } EXPORT_SYMBOL(argv_split);
toastcfh/android_kernel_lge_d851
lib/argv_split.c
C
gpl-2.0
1,826
/* * ISA Plug & Play support * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Changelog: * 2000-01-01 Added quirks handling for buggy hardware * Peter Denison <peterd@pnd-pc.demon.co.uk> * 2000-06-14 Added isapnp_probe_devs() and isapnp_activate_dev() * Christoph Hellwig <hch@infradead.org> * 2001-06-03 Added release_region calls to correspond with * request_region calls when a failure occurs. Also * added KERN_* constants to printk() calls. 
* 2001-11-07 Added isapnp_{,un}register_driver calls along the lines * of the pci driver interface * Kai Germaschewski <kai.germaschewski@gmx.de> * 2002-06-06 Made the use of dma channel 0 configurable * Gerald Teschl <gerald.teschl@univie.ac.at> * 2002-10-06 Ported to PnP Layer - Adam Belay <ambx1@neo.rr.com> * 2003-08-11 Resource Management Updates - Adam Belay <ambx1@neo.rr.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/isapnp.h> #include <linux/mutex.h> #include <asm/io.h> #include "../base.h" #if 0 #define ISAPNP_REGION_OK #endif int isapnp_disable; /* */ static int isapnp_rdp; /* */ static int isapnp_reset = 1; /* */ static int isapnp_verbose = 1; /* */ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Generic ISA Plug & Play support"); module_param(isapnp_disable, int, 0); MODULE_PARM_DESC(isapnp_disable, "ISA Plug & Play disable"); module_param(isapnp_rdp, int, 0); MODULE_PARM_DESC(isapnp_rdp, "ISA Plug & Play read data port"); module_param(isapnp_reset, int, 0); MODULE_PARM_DESC(isapnp_reset, "ISA Plug & Play reset all cards"); module_param(isapnp_verbose, int, 0); MODULE_PARM_DESC(isapnp_verbose, "ISA Plug & Play verbose mode"); MODULE_LICENSE("GPL"); #define _PIDXR 0x279 #define _PNPWRP 0xa79 /* */ #define _STAG_PNPVERNO 0x01 #define _STAG_LOGDEVID 0x02 #define _STAG_COMPATDEVID 0x03 #define _STAG_IRQ 0x04 #define _STAG_DMA 0x05 #define _STAG_STARTDEP 0x06 #define _STAG_ENDDEP 0x07 #define _STAG_IOPORT 0x08 #define _STAG_FIXEDIO 0x09 #define _STAG_VENDOR 0x0e #define _STAG_END 0x0f /* */ #define _LTAG_MEMRANGE 0x81 #define _LTAG_ANSISTR 0x82 #define _LTAG_UNICODESTR 0x83 #define _LTAG_VENDOR 0x84 #define _LTAG_MEM32RANGE 0x85 #define _LTAG_FIXEDMEM32RANGE 0x86 /* */ #define ISAPNP_CFG_ACTIVATE 0x30 /* */ #define ISAPNP_CFG_MEM 0x40 /* */ #define ISAPNP_CFG_PORT 0x60 /* */ #define ISAPNP_CFG_IRQ 0x70 /* */ #define ISAPNP_CFG_DMA 0x74 
/* */ /* */ #define ISAPNP_MAX_MEM 4 #define ISAPNP_MAX_PORT 8 #define ISAPNP_MAX_IRQ 2 #define ISAPNP_MAX_DMA 2 static unsigned char isapnp_checksum_value; static DEFINE_MUTEX(isapnp_cfg_mutex); static int isapnp_csn_count; /* */ static inline void write_data(unsigned char x) { outb(x, _PNPWRP); } static inline void write_address(unsigned char x) { outb(x, _PIDXR); udelay(20); } static inline unsigned char read_data(void) { unsigned char val = inb(isapnp_rdp); return val; } unsigned char isapnp_read_byte(unsigned char idx) { write_address(idx); return read_data(); } static unsigned short isapnp_read_word(unsigned char idx) { unsigned short val; val = isapnp_read_byte(idx); val = (val << 8) + isapnp_read_byte(idx + 1); return val; } void isapnp_write_byte(unsigned char idx, unsigned char val) { write_address(idx); write_data(val); } static void isapnp_write_word(unsigned char idx, unsigned short val) { isapnp_write_byte(idx, val >> 8); isapnp_write_byte(idx + 1, val); } static void isapnp_key(void) { unsigned char code = 0x6a, msb; int i; mdelay(1); write_address(0x00); write_address(0x00); write_address(code); for (i = 1; i < 32; i++) { msb = ((code & 0x01) ^ ((code & 0x02) >> 1)) << 7; code = (code >> 1) | msb; write_address(code); } } /* */ static void isapnp_wait(void) { isapnp_write_byte(0x02, 0x02); } static void isapnp_wake(unsigned char csn) { isapnp_write_byte(0x03, csn); } static void isapnp_device(unsigned char logdev) { isapnp_write_byte(0x07, logdev); } static void isapnp_activate(unsigned char logdev) { isapnp_device(logdev); isapnp_write_byte(ISAPNP_CFG_ACTIVATE, 1); udelay(250); } static void isapnp_deactivate(unsigned char logdev) { isapnp_device(logdev); isapnp_write_byte(ISAPNP_CFG_ACTIVATE, 0); udelay(500); } static void __init isapnp_peek(unsigned char *data, int bytes) { int i, j; unsigned char d = 0; for (i = 1; i <= bytes; i++) { for (j = 0; j < 20; j++) { d = isapnp_read_byte(0x05); if (d & 1) break; udelay(100); } if (!(d & 1)) { if (data 
!= NULL) *data++ = 0xff; continue; } d = isapnp_read_byte(0x04); /* */ isapnp_checksum_value += d; if (data != NULL) *data++ = d; } } #define RDP_STEP 32 /* */ static int isapnp_next_rdp(void) { int rdp = isapnp_rdp; static int old_rdp = 0; if (old_rdp) { release_region(old_rdp, 1); old_rdp = 0; } while (rdp <= 0x3ff) { /* */ if ((rdp < 0x280 || rdp > 0x380) && request_region(rdp, 1, "ISAPnP")) { isapnp_rdp = rdp; old_rdp = rdp; return 0; } rdp += RDP_STEP; } return -1; } /* */ static inline void isapnp_set_rdp(void) { isapnp_write_byte(0x00, isapnp_rdp >> 2); udelay(100); } /* */ static int __init isapnp_isolate_rdp_select(void) { isapnp_wait(); isapnp_key(); /* */ isapnp_write_byte(0x02, isapnp_reset ? 0x05 : 0x04); mdelay(2); isapnp_wait(); isapnp_key(); isapnp_wake(0x00); if (isapnp_next_rdp() < 0) { isapnp_wait(); return -1; } isapnp_set_rdp(); udelay(1000); write_address(0x01); udelay(1000); return 0; } /* */ static int __init isapnp_isolate(void) { unsigned char checksum = 0x6a; unsigned char chksum = 0x00; unsigned char bit = 0x00; int data; int csn = 0; int i; int iteration = 1; isapnp_rdp = 0x213; if (isapnp_isolate_rdp_select() < 0) return -1; while (1) { for (i = 1; i <= 64; i++) { data = read_data() << 8; udelay(250); data = data | read_data(); udelay(250); if (data == 0x55aa) bit = 0x01; checksum = ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7) | (checksum >> 1); bit = 0x00; } for (i = 65; i <= 72; i++) { data = read_data() << 8; udelay(250); data = data | read_data(); udelay(250); if (data == 0x55aa) chksum |= (1 << (i - 65)); } if (checksum != 0x00 && checksum == chksum) { csn++; isapnp_write_byte(0x06, csn); udelay(250); iteration++; isapnp_wake(0x00); isapnp_set_rdp(); udelay(1000); write_address(0x01); udelay(1000); goto __next; } if (iteration == 1) { isapnp_rdp += RDP_STEP; if (isapnp_isolate_rdp_select() < 0) return -1; } else if (iteration > 1) { break; } __next: if (csn == 255) break; checksum = 0x6a; chksum = 0x00; bit = 0x00; } 
isapnp_wait(); isapnp_csn_count = csn; return csn; } /* */ static int __init isapnp_read_tag(unsigned char *type, unsigned short *size) { unsigned char tag, tmp[2]; isapnp_peek(&tag, 1); if (tag == 0) /* */ return -1; if (tag & 0x80) { /* */ *type = tag; isapnp_peek(tmp, 2); *size = (tmp[1] << 8) | tmp[0]; } else { *type = (tag >> 3) & 0x0f; *size = tag & 0x07; } #if 0 printk(KERN_DEBUG "tag = 0x%x, type = 0x%x, size = %i\n", tag, *type, *size); #endif if (*type == 0xff && *size == 0xffff) /* */ return -1; return 0; } /* */ static void __init isapnp_skip_bytes(int count) { isapnp_peek(NULL, count); } /* */ static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card, int size, int number) { unsigned char tmp[6]; struct pnp_dev *dev; u32 eisa_id; char id[8]; isapnp_peek(tmp, size); eisa_id = tmp[0] | tmp[1] << 8 | tmp[2] << 16 | tmp[3] << 24; pnp_eisa_id_to_string(eisa_id, id); dev = pnp_alloc_dev(&isapnp_protocol, number, id); if (!dev) return NULL; dev->card = card; dev->capabilities |= PNP_CONFIGURABLE; dev->capabilities |= PNP_READ; dev->capabilities |= PNP_WRITE; dev->capabilities |= PNP_DISABLE; pnp_init_resources(dev); return dev; } /* */ static void __init isapnp_parse_irq_resource(struct pnp_dev *dev, unsigned int option_flags, int size) { unsigned char tmp[3]; unsigned long bits; pnp_irq_mask_t map; unsigned char flags = IORESOURCE_IRQ_HIGHEDGE; isapnp_peek(tmp, size); bits = (tmp[1] << 8) | tmp[0]; bitmap_zero(map.bits, PNP_IRQ_NR); bitmap_copy(map.bits, &bits, 16); if (size > 2) flags = tmp[2]; pnp_register_irq_resource(dev, option_flags, &map, flags); } /* */ static void __init isapnp_parse_dma_resource(struct pnp_dev *dev, unsigned int option_flags, int size) { unsigned char tmp[2]; isapnp_peek(tmp, size); pnp_register_dma_resource(dev, option_flags, tmp[0], tmp[1]); } /* */ static void __init isapnp_parse_port_resource(struct pnp_dev *dev, unsigned int option_flags, int size) { unsigned char tmp[7]; resource_size_t min, max, align, len; 
unsigned char flags; isapnp_peek(tmp, size); min = (tmp[2] << 8) | tmp[1]; max = (tmp[4] << 8) | tmp[3]; align = tmp[5]; len = tmp[6]; flags = tmp[0] ? IORESOURCE_IO_16BIT_ADDR : 0; pnp_register_port_resource(dev, option_flags, min, max, align, len, flags); } /* */ static void __init isapnp_parse_fixed_port_resource(struct pnp_dev *dev, unsigned int option_flags, int size) { unsigned char tmp[3]; resource_size_t base, len; isapnp_peek(tmp, size); base = (tmp[1] << 8) | tmp[0]; len = tmp[2]; pnp_register_port_resource(dev, option_flags, base, base, 0, len, IORESOURCE_IO_FIXED); } /* */ static void __init isapnp_parse_mem_resource(struct pnp_dev *dev, unsigned int option_flags, int size) { unsigned char tmp[9]; resource_size_t min, max, align, len; unsigned char flags; isapnp_peek(tmp, size); min = ((tmp[2] << 8) | tmp[1]) << 8; max = ((tmp[4] << 8) | tmp[3]) << 8; align = (tmp[6] << 8) | tmp[5]; len = ((tmp[8] << 8) | tmp[7]) << 8; flags = tmp[0]; pnp_register_mem_resource(dev, option_flags, min, max, align, len, flags); } /* */ static void __init isapnp_parse_mem32_resource(struct pnp_dev *dev, unsigned int option_flags, int size) { unsigned char tmp[17]; resource_size_t min, max, align, len; unsigned char flags; isapnp_peek(tmp, size); min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; align = (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9]; len = (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13]; flags = tmp[0]; pnp_register_mem_resource(dev, option_flags, min, max, align, len, flags); } /* */ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_dev *dev, unsigned int option_flags, int size) { unsigned char tmp[9]; resource_size_t base, len; unsigned char flags; isapnp_peek(tmp, size); base = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; len = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5]; flags = tmp[0]; 
pnp_register_mem_resource(dev, option_flags, base, base, 0, len, flags); } /* */ static void __init isapnp_parse_name(char *name, unsigned int name_max, unsigned short *size) { if (name[0] == '\0') { unsigned short size1 = *size >= name_max ? (name_max - 1) : *size; isapnp_peek(name, size1); name[size1] = '\0'; *size -= size1; /* */ while (size1 > 0 && name[--size1] == ' ') name[size1] = '\0'; } } /* */ static int __init isapnp_create_device(struct pnp_card *card, unsigned short size) { int number = 0, skip = 0, priority, compat = 0; unsigned char type, tmp[17]; unsigned int option_flags; struct pnp_dev *dev; u32 eisa_id; char id[8]; if ((dev = isapnp_parse_device(card, size, number++)) == NULL) return 1; option_flags = 0; pnp_add_card_device(card, dev); while (1) { if (isapnp_read_tag(&type, &size) < 0) return 1; if (skip && type != _STAG_LOGDEVID && type != _STAG_END) goto __skip; switch (type) { case _STAG_LOGDEVID: if (size >= 5 && size <= 6) { if ((dev = isapnp_parse_device(card, size, number++)) == NULL) return 1; size = 0; skip = 0; option_flags = 0; pnp_add_card_device(card, dev); } else { skip = 1; } compat = 0; break; case _STAG_COMPATDEVID: if (size == 4 && compat < DEVICE_COUNT_COMPATIBLE) { isapnp_peek(tmp, 4); eisa_id = tmp[0] | tmp[1] << 8 | tmp[2] << 16 | tmp[3] << 24; pnp_eisa_id_to_string(eisa_id, id); pnp_add_id(dev, id); compat++; size = 0; } break; case _STAG_IRQ: if (size < 2 || size > 3) goto __skip; isapnp_parse_irq_resource(dev, option_flags, size); size = 0; break; case _STAG_DMA: if (size != 2) goto __skip; isapnp_parse_dma_resource(dev, option_flags, size); size = 0; break; case _STAG_STARTDEP: if (size > 1) goto __skip; priority = PNP_RES_PRIORITY_ACCEPTABLE; if (size > 0) { isapnp_peek(tmp, size); priority = tmp[0]; size = 0; } option_flags = pnp_new_dependent_set(dev, priority); break; case _STAG_ENDDEP: if (size != 0) goto __skip; option_flags = 0; break; case _STAG_IOPORT: if (size != 7) goto __skip; isapnp_parse_port_resource(dev, 
option_flags, size); size = 0; break; case _STAG_FIXEDIO: if (size != 3) goto __skip; isapnp_parse_fixed_port_resource(dev, option_flags, size); size = 0; break; case _STAG_VENDOR: break; case _LTAG_MEMRANGE: if (size != 9) goto __skip; isapnp_parse_mem_resource(dev, option_flags, size); size = 0; break; case _LTAG_ANSISTR: isapnp_parse_name(dev->name, sizeof(dev->name), &size); break; case _LTAG_UNICODESTR: /* */ /* */ break; case _LTAG_VENDOR: break; case _LTAG_MEM32RANGE: if (size != 17) goto __skip; isapnp_parse_mem32_resource(dev, option_flags, size); size = 0; break; case _LTAG_FIXEDMEM32RANGE: if (size != 9) goto __skip; isapnp_parse_fixed_mem32_resource(dev, option_flags, size); size = 0; break; case _STAG_END: if (size > 0) isapnp_skip_bytes(size); return 1; default: dev_err(&dev->dev, "unknown tag %#x (card %i), " "ignored\n", type, card->number); } __skip: if (size > 0) isapnp_skip_bytes(size); } return 0; } /* */ static void __init isapnp_parse_resource_map(struct pnp_card *card) { unsigned char type, tmp[17]; unsigned short size; while (1) { if (isapnp_read_tag(&type, &size) < 0) return; switch (type) { case _STAG_PNPVERNO: if (size != 2) goto __skip; isapnp_peek(tmp, 2); card->pnpver = tmp[0]; card->productver = tmp[1]; size = 0; break; case _STAG_LOGDEVID: if (size >= 5 && size <= 6) { if (isapnp_create_device(card, size) == 1) return; size = 0; } break; case _STAG_VENDOR: break; case _LTAG_ANSISTR: isapnp_parse_name(card->name, sizeof(card->name), &size); break; case _LTAG_UNICODESTR: /* */ /* */ break; case _LTAG_VENDOR: break; case _STAG_END: if (size > 0) isapnp_skip_bytes(size); return; default: dev_err(&card->dev, "unknown tag %#x, ignored\n", type); } __skip: if (size > 0) isapnp_skip_bytes(size); } } /* */ static unsigned char __init isapnp_checksum(unsigned char *data) { int i, j; unsigned char checksum = 0x6a, bit, b; for (i = 0; i < 8; i++) { b = data[i]; for (j = 0; j < 8; j++) { bit = 0; if (b & (1 << j)) bit = 1; checksum = ((((checksum 
^ (checksum >> 1)) & 0x01) ^ bit) << 7) | (checksum >> 1); } } return checksum; } /* */ static int __init isapnp_build_device_list(void) { int csn; unsigned char header[9], checksum; struct pnp_card *card; u32 eisa_id; char id[8]; isapnp_wait(); isapnp_key(); for (csn = 1; csn <= isapnp_csn_count; csn++) { isapnp_wake(csn); isapnp_peek(header, 9); checksum = isapnp_checksum(header); eisa_id = header[0] | header[1] << 8 | header[2] << 16 | header[3] << 24; pnp_eisa_id_to_string(eisa_id, id); card = pnp_alloc_card(&isapnp_protocol, csn, id); if (!card) continue; #if 0 dev_info(&card->dev, "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", header[0], header[1], header[2], header[3], header[4], header[5], header[6], header[7], header[8]); dev_info(&card->dev, "checksum = %#x\n", checksum); #endif INIT_LIST_HEAD(&card->devices); card->serial = (header[7] << 24) | (header[6] << 16) | (header[5] << 8) | header[4]; isapnp_checksum_value = 0x00; isapnp_parse_resource_map(card); if (isapnp_checksum_value != 0x00) dev_err(&card->dev, "invalid checksum %#x\n", isapnp_checksum_value); card->checksum = isapnp_checksum_value; pnp_add_card(card); } isapnp_wait(); return 0; } /* */ int isapnp_present(void) { struct pnp_card *card; pnp_for_each_card(card) { if (card->protocol == &isapnp_protocol) return 1; } return 0; } int isapnp_cfg_begin(int csn, int logdev) { if (csn < 1 || csn > isapnp_csn_count || logdev > 10) return -EINVAL; mutex_lock(&isapnp_cfg_mutex); isapnp_wait(); isapnp_key(); isapnp_wake(csn); #if 0 /* */ /* */ /* */ /* */ isapnp_write_byte(0x02, 0x04); /* */ mdelay(2); /* */ isapnp_wake(csn); /* */ isapnp_wake(0); /* */ isapnp_set_rdp(); /* */ udelay(1000); /* */ isapnp_write_byte(0x06, csn); /* */ udelay(250); /* */ #endif if (logdev >= 0) isapnp_device(logdev); return 0; } int isapnp_cfg_end(void) { isapnp_wait(); mutex_unlock(&isapnp_cfg_mutex); return 0; } /* */ EXPORT_SYMBOL(isapnp_protocol); EXPORT_SYMBOL(isapnp_present); EXPORT_SYMBOL(isapnp_cfg_begin); 
EXPORT_SYMBOL(isapnp_cfg_end); EXPORT_SYMBOL(isapnp_write_byte); static int isapnp_get_resources(struct pnp_dev *dev) { int i, ret; pnp_dbg(&dev->dev, "get resources\n"); pnp_init_resources(dev); isapnp_cfg_begin(dev->card->number, dev->number); dev->active = isapnp_read_byte(ISAPNP_CFG_ACTIVATE); if (!dev->active) goto __end; for (i = 0; i < ISAPNP_MAX_PORT; i++) { ret = isapnp_read_word(ISAPNP_CFG_PORT + (i << 1)); pnp_add_io_resource(dev, ret, ret, ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_MEM; i++) { ret = isapnp_read_word(ISAPNP_CFG_MEM + (i << 3)) << 8; pnp_add_mem_resource(dev, ret, ret, ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_IRQ; i++) { ret = isapnp_read_word(ISAPNP_CFG_IRQ + (i << 1)) >> 8; pnp_add_irq_resource(dev, ret, ret == 0 ? IORESOURCE_DISABLED : 0); } for (i = 0; i < ISAPNP_MAX_DMA; i++) { ret = isapnp_read_byte(ISAPNP_CFG_DMA + i); pnp_add_dma_resource(dev, ret, ret == 4 ? IORESOURCE_DISABLED : 0); } __end: isapnp_cfg_end(); return 0; } static int isapnp_set_resources(struct pnp_dev *dev) { struct resource *res; int tmp; pnp_dbg(&dev->dev, "set resources\n"); isapnp_cfg_begin(dev->card->number, dev->number); dev->active = 1; for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) { res = pnp_get_resource(dev, IORESOURCE_IO, tmp); if (pnp_resource_enabled(res)) { pnp_dbg(&dev->dev, " set io %d to %#llx\n", tmp, (unsigned long long) res->start); isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1), res->start); } } for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) { res = pnp_get_resource(dev, IORESOURCE_IRQ, tmp); if (pnp_resource_enabled(res)) { int irq = res->start; if (irq == 2) irq = 9; pnp_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq); isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq); } } for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) { res = pnp_get_resource(dev, IORESOURCE_DMA, tmp); if (pnp_resource_enabled(res)) { pnp_dbg(&dev->dev, " set dma %d to %lld\n", tmp, (unsigned long long) res->start); 
isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start); } } for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) { res = pnp_get_resource(dev, IORESOURCE_MEM, tmp); if (pnp_resource_enabled(res)) { pnp_dbg(&dev->dev, " set mem %d to %#llx\n", tmp, (unsigned long long) res->start); isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3), (res->start >> 8) & 0xffff); } } /* */ isapnp_activate(dev->number); isapnp_cfg_end(); return 0; } static int isapnp_disable_resources(struct pnp_dev *dev) { if (!dev->active) return -EINVAL; isapnp_cfg_begin(dev->card->number, dev->number); isapnp_deactivate(dev->number); dev->active = 0; isapnp_cfg_end(); return 0; } struct pnp_protocol isapnp_protocol = { .name = "ISA Plug and Play", .get = isapnp_get_resources, .set = isapnp_set_resources, .disable = isapnp_disable_resources, }; static int __init isapnp_init(void) { int cards; struct pnp_card *card; struct pnp_dev *dev; if (isapnp_disable) { printk(KERN_INFO "isapnp: ISA Plug & Play support disabled\n"); return 0; } #ifdef CONFIG_PPC if (check_legacy_ioport(_PIDXR) || check_legacy_ioport(_PNPWRP)) return -EINVAL; #endif #ifdef ISAPNP_REGION_OK if (!request_region(_PIDXR, 1, "isapnp index")) { printk(KERN_ERR "isapnp: Index Register 0x%x already used\n", _PIDXR); return -EBUSY; } #endif if (!request_region(_PNPWRP, 1, "isapnp write")) { printk(KERN_ERR "isapnp: Write Data Register 0x%x already used\n", _PNPWRP); #ifdef ISAPNP_REGION_OK release_region(_PIDXR, 1); #endif return -EBUSY; } if (pnp_register_protocol(&isapnp_protocol) < 0) return -EBUSY; /* */ printk(KERN_INFO "isapnp: Scanning for PnP cards...\n"); if (isapnp_rdp >= 0x203 && isapnp_rdp <= 0x3ff) { isapnp_rdp |= 3; if (!request_region(isapnp_rdp, 1, "isapnp read")) { printk(KERN_ERR "isapnp: Read Data Register 0x%x already used\n", isapnp_rdp); #ifdef ISAPNP_REGION_OK release_region(_PIDXR, 1); #endif release_region(_PNPWRP, 1); return -EBUSY; } isapnp_set_rdp(); } if (isapnp_rdp < 0x203 || isapnp_rdp > 0x3ff) { cards = isapnp_isolate(); if 
(cards < 0 || (isapnp_rdp < 0x203 || isapnp_rdp > 0x3ff)) { #ifdef ISAPNP_REGION_OK release_region(_PIDXR, 1); #endif release_region(_PNPWRP, 1); printk(KERN_INFO "isapnp: No Plug & Play device found\n"); return 0; } request_region(isapnp_rdp, 1, "isapnp read"); } isapnp_build_device_list(); cards = 0; protocol_for_each_card(&isapnp_protocol, card) { cards++; if (isapnp_verbose) { dev_info(&card->dev, "card '%s'\n", card->name[0] ? card->name : "unknown"); if (isapnp_verbose < 2) continue; card_for_each_dev(card, dev) { dev_info(&card->dev, "device '%s'\n", dev->name[0] ? dev->name : "unknown"); } } } if (cards) printk(KERN_INFO "isapnp: %i Plug & Play card%s detected total\n", cards, cards > 1 ? "s" : ""); else printk(KERN_INFO "isapnp: No Plug & Play card found\n"); isapnp_proc_init(); return 0; } device_initcall(isapnp_init); /* */ static int __init isapnp_setup_disable(char *str) { isapnp_disable = 1; return 1; } __setup("noisapnp", isapnp_setup_disable); /* */ static int __init isapnp_setup_isapnp(char *str) { (void)((get_option(&str, &isapnp_rdp) == 2) && (get_option(&str, &isapnp_reset) == 2) && (get_option(&str, &isapnp_verbose) == 2)); return 1; } __setup("isapnp=", isapnp_setup_isapnp);
toastcfh/android_kernel_lge_d851
drivers/pnp/isapnp/core.c
C
gpl-2.0
26,504
<?php /** * @file * Definition of ModuleBuider\Generator\Readme. */ namespace ModuleBuider\Generator; /** * Generator base class for module README file. * * (You were going to write a README file, right?) */ class Readme extends File { /** * Collect the code files. */ function collectFiles(&$files) { $files['readme'] = array( 'path' => '', // Means the base folder. // The extension is in lowercase for good reasons which I don't remember // right now, but probably to do with Windows being rubbish. 'filename' => 'README.txt', 'body' => $this->lines(), // We are returning single lines, so they need to be joined with line // breaks. 'join_string' => "\n", ); } /** * Return an array of lines. * * @return * An array of lines of text. */ function lines() { return array( $this->base_component->component_data['module_readable_name'], str_repeat('=', strlen($this->base_component->component_data['module_readable_name'])), '', 'TODO: write some documentation.', '', ); } }
dirckdigler/drupal7
sites/all/modules/module_builder/Generator/Readme.php
PHP
gpl-2.0
1,116
<?php namespace TYPO3\CMS\Core\Tests\Unit\Core; /** * This file is part of the TYPO3 CMS project. * * It is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License, either version 2 * of the License, or any later version. * * For the full copyright and license information, please read the * LICENSE.txt file that was distributed with this source code. * * The TYPO3 project - inspiring people to share! */ use org\bovigo\vfs\vfsStream; /** * Testcase for TYPO3\CMS\Core\Core\ClassLoader * * @author Andreas Wolf <andreas.wolf@ikt-werk.de> */ class ClassLoaderTest extends \TYPO3\CMS\Core\Tests\UnitTestCase { /** * @var array Backup of typo3CacheManager */ protected $typo3CacheManager = NULL; /** * @var array Register of temporary extensions in typo3temp */ protected $fakedExtensions = array(); /** * @var \TYPO3\CMS\Core\Core\ClassLoader */ protected $classLoader; /** * @var \TYPO3\CMS\Core\Core\ClassAliasMap */ protected $orinalClassAliasMap; /** * Test flag used in in this test case * * @var boolean */ public static $testClassWasLoaded = FALSE; /** * Fix a race condition that GeneralUtility is not available * during tearDown if fiddling with the autoloader where * backupGlobals is not set up again yet */ public function setUp() { vfsStream::setup('Test'); mkdir('vfs://Test/Packages/Application/Acme.MyApp/Classes/', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyApp/composer.json', '{"name": "acme/myapp", "type": "flow-test"}'); $package1 = new \TYPO3\Flow\Package\Package($this->getMock('TYPO3\Flow\Package\PackageManager'), 'Acme.MyApp', 'vfs://Test/Packages/Application/Acme.MyApp/', 'Classes'); mkdir('vfs://Test/Packages/Application/Acme.MyAppAddon/Classes/', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyAppAddon/composer.json', '{"name": "acme/myappaddon", "type": "flow-test"}'); $package2 = new 
\TYPO3\Flow\Package\Package($this->getMock('TYPO3\Flow\Package\PackageManager'), 'Acme.MyAppAddon', 'vfs://Test/Packages/Application/Acme.MyAppAddon/', 'Classes'); $mockClassAliasMap = $this->getMock('TYPO3\\CMS\\Core\\Core\\ClassAliasMap', array('setPackagesButDontBuildMappingFilesReturnClassNameToAliasMappingInstead', 'buildMappingFiles'), array(), '', FALSE); $mockClassAliasMap->expects($this->any())->method('setPackagesButDontBuildMappingFilesReturnClassNameToAliasMappingInstead')->will($this->returnValue(array())); $this->orinalClassAliasMap = \TYPO3\CMS\Core\Core\Bootstrap::getInstance()->getEarlyInstance('TYPO3\\CMS\\Core\\Core\\ClassAliasMap'); $this->classLoader = new \TYPO3\CMS\Core\Core\ClassLoader(\TYPO3\CMS\Core\Core\Bootstrap::getInstance()->getApplicationContext()); $this->classLoader->injectClassAliasMap($mockClassAliasMap); $this->classLoader->setPackages(array('Acme.MyApp' => $package1, 'Acme.MyAppAddon' => $package2)); } /** * The class alias map is kept static in the class loader for legacy reasons * and has to be reset after mocking. */ public function tearDown() { $this->classLoader->injectClassAliasMap($this->orinalClassAliasMap); parent::tearDown(); } /** * Creates a fake extension inside typo3temp/. No configuration is created, * just the folder, plus the extension is registered in $TYPO3_LOADED_EXT * * @return string The extension key */ protected function createFakeExtension() { $extKey = strtolower(uniqid('testing')); $absExtPath = PATH_site . 'typo3temp/' . $extKey . '/'; $relPath = 'typo3temp/' . $extKey . '/'; \TYPO3\CMS\Core\Utility\GeneralUtility::mkdir($absExtPath); $GLOBALS['TYPO3_LOADED_EXT'][$extKey] = array( 'siteRelPath' => $relPath ); $this->fakedExtensions[] = $extKey; \TYPO3\CMS\Core\Utility\ExtensionManagementUtility::clearExtensionKeyMap(); return $extKey; } /** * Checks if the package autoloader loads classes from subdirectories. 
* * @test */ public function classesFromSubDirectoriesAreLoaded() { mkdir('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/SubDirectory', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/SubDirectory/ClassInSubDirectory.php', '<?php ' . __CLASS__ . '::$testClassWasLoaded = TRUE; ?>'); self::$testClassWasLoaded = FALSE; $this->classLoader->loadClass('Acme\MyApp\SubDirectory\ClassInSubDirectory'); $this->assertTrue(self::$testClassWasLoaded); } /** * @test */ public function classesFromDeeplyNestedSubDirectoriesAreLoaded() { mkdir('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/SubDirectory/A/B/C/D', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/SubDirectory/A/B/C/D/E.php', '<?php ' . __CLASS__ . '::$testClassWasLoaded = TRUE; ?>'); self::$testClassWasLoaded = FALSE; $this->classLoader->loadClass('Acme\MyApp\SubDirectory\A\B\C\D\E'); $this->assertTrue(self::$testClassWasLoaded); } /** * Checks if the package autoloader loads classes from packages that match a * substring of another package (e.g. TYPO3CR vs TYPO3). * * @test */ public function classesFromSubMatchingPackagesAreLoaded() { mkdir('vfs://Test/Packages/Application/Acme.MyAppAddon/Classes/Acme/MyAppAddon', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyAppAddon/Classes/Acme/MyAppAddon/Class.php', '<?php ' . __CLASS__ . '::$testClassWasLoaded = TRUE; ?>'); self::$testClassWasLoaded = FALSE; $this->classLoader->loadClass('Acme\MyAppAddon\Class'); $this->assertTrue(self::$testClassWasLoaded); } /** * Checks if the package autoloader loads classes from subdirectories. * * @test */ public function classesWithUnderscoresAreLoaded() { mkdir('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/Foo.php', '<?php ' . __CLASS__ . 
'::$testClassWasLoaded = TRUE; ?>'); self::$testClassWasLoaded = FALSE; $this->classLoader->loadClass('Acme\MyApp_Foo'); $this->assertTrue(self::$testClassWasLoaded); } /** * Checks if the package autoloader loads classes from subdirectories with underscores. * * @test */ public function namespaceWithUnderscoresAreLoaded() { mkdir('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/My_Underscore', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/My_Underscore/Foo.php', '<?php ' . __CLASS__ . '::$testClassWasLoaded = TRUE; ?>'); self::$testClassWasLoaded = FALSE; $this->classLoader->loadClass('Acme\MyApp\My_Underscore\Foo'); $this->assertTrue(self::$testClassWasLoaded); } /** * Checks if the package autoloader loads classes from subdirectories. * * @test */ public function classesWithOnlyUnderscoresAreLoaded() { mkdir('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/UnderscoredOnly.php', '<?php ' . __CLASS__ . '::$testClassWasLoaded = TRUE; ?>'); self::$testClassWasLoaded = FALSE; $this->classLoader->loadClass('Acme_MyApp_UnderscoredOnly'); $this->assertTrue(self::$testClassWasLoaded); } /** * @test */ public function classesWithLeadingBackslashAreLoaded() { mkdir('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp', 0770, TRUE); file_put_contents('vfs://Test/Packages/Application/Acme.MyApp/Classes/Acme/MyApp/WithLeadingBackslash.php', '<?php ' . __CLASS__ . '::$testClassWasLoaded = TRUE; ?>'); self::$testClassWasLoaded = FALSE; $this->classLoader->loadClass('\Acme\MyApp\WithLeadingBackslash'); $this->assertTrue(self::$testClassWasLoaded); } }
famelo/TYPO3-Site
typo3/sysext/core/Tests/Unit/Core/ClassLoaderTest.php
PHP
gpl-2.0
7,837
<?php /** * * Description * * @package VirtueMart * @subpackage * @author RolandD, Max Milbers * @link http://www.virtuemart.net * @copyright Copyright (c) 2004 - 2010 VirtueMart Team. All rights reserved. * @license http://www.gnu.org/copyleft/gpl.html GNU/GPL, see LICENSE.php * VirtueMart is free software. This version may have been modified pursuant * to the GNU General Public License, and as distributed it includes or * is derivative of works licensed under the GNU General Public License or * other free or open source software licenses. * @version $Id: ratings.php 8130 2014-07-14 16:47:09Z Milbo $ */ // Check to ensure this file is included in Joomla! defined('_JEXEC') or die('Restricted access'); if (!class_exists ('VmModel')){ require(JPATH_VM_ADMINISTRATOR . DS . 'helpers' . DS . 'vmmodel.php'); } /** * Model for VirtueMart Products * * @package VirtueMart * @author RolandD */ class VirtueMartModelRatings extends VmModel { var $_productBought = array(); /** * constructs a VmModel * setMainTable defines the maintable of the model * @author Max Milbers */ function __construct() { parent::__construct(); $this->setMainTable('ratings'); $layout = vRequest::getString('layout','default'); $task = vRequest::getCmd('task','default'); if($layout == 'list_reviews' or $task == 'listreviews'){ vmdebug('in review list'); $myarray = array('pr.created_on','virtuemart_rating_review_id','vote'); $this->removevalidOrderingFieldName('created_on'); $this->removevalidOrderingFieldName('product_name'); $this->removevalidOrderingFieldName('virtuemart_rating_id'); $this->removevalidOrderingFieldName('rating'); $this->_selectedOrdering = 'pr.created_on'; } else { $myarray = array('created_on','product_name','virtuemart_rating_id'); $this->removevalidOrderingFieldName('pr.created_on'); $this->removevalidOrderingFieldName('virtuemart_rating_review_id'); $this->removevalidOrderingFieldName('vote'); $this->_selectedOrdering = 'created_on'; } $this->addvalidOrderingFieldName($myarray); } 
/** * Select the products to list on the product list page */ public function getRatings() { $tables = ' FROM `#__virtuemart_ratings` AS `r` JOIN `#__virtuemart_products_'.VmConfig::$vmlang.'` AS `p` USING (`virtuemart_product_id`) '; $whereString = ''; $this->_data = $this->exeSortSearchListQuery(0,' r.*,p.`product_name` ',$tables,$whereString,'',$this->_getOrdering()); return $this->_data; } /** * Load a single rating * @author RolandD */ public function getRating($cids) { if (empty($cids)) { return; } /* First copy the product in the product table */ $ratings_data = $this->getTable('ratings'); /* Load the rating */ $joinValue = array('product_name' =>'#__virtuemart_products'); if ($cids) { $ratings_data->load ($cids[0], $joinValue, 'virtuemart_product_id'); } /* Add some variables for a new rating */ if (vRequest::getCmd('task') == 'add') { $virtuemart_product_id = vRequest::getInt('virtuemart_product_id'); if(is_array($virtuemart_product_id) && count($virtuemart_product_id) > 0){ $virtuemart_product_id = (int)$virtuemart_product_id[0]; } else { $virtuemart_product_id = (int)$virtuemart_product_id; } $ratings_data->virtuemart_product_id = $virtuemart_product_id; /* User ID */ $user = JFactory::getUser(); $ratings_data->virtuemart_user_id = $user->id; } return $ratings_data; } /** * @author Max Milbers * @param $virtuemart_product_id * @return null */ function getReviews($virtuemart_product_id){ if (empty($virtuemart_product_id)) { return NULL; } $select = '`u`.*,`pr`.*,`p`.`product_name`,`rv`.`vote`, `u`.`name` AS customer, `pr`.`published`'; $tables = ' FROM `#__virtuemart_rating_reviews` AS `pr` LEFT JOIN `#__users` AS `u` ON `pr`.`created_by` = `u`.`id` LEFT JOIN `#__virtuemart_products_'.VmConfig::$vmlang.'` AS `p` ON `p`.`virtuemart_product_id` = `pr`.`virtuemart_product_id` LEFT JOIN `#__virtuemart_rating_votes` AS `rv` on `rv`.`virtuemart_product_id`=`pr`.`virtuemart_product_id` and `rv`.`created_by`=`u`.`id`'; $whereString = ' WHERE 
`p`.`virtuemart_product_id` = "'.$virtuemart_product_id.'"'; $result = $this->exeSortSearchListQuery(0,$select,$tables,$whereString,'',$this->_getOrdering()); return $result; } /** * @author Max Milbers * @param $cids * @return mixed@ */ function getReview($cids){ $q = 'SELECT `u`.*,`pr`.*,`p`.`product_name`,`rv`.`vote`,CONCAT_WS(" ",`u`.`title`,u.`last_name`,`u`.`first_name`) as customer FROM `#__virtuemart_rating_reviews` AS `pr` LEFT JOIN `#__virtuemart_userinfos` AS `u` ON `pr`.`created_by` = `u`.`virtuemart_user_id` LEFT JOIN `#__virtuemart_products_'.VmConfig::$vmlang.'` AS `p` ON `p`.`virtuemart_product_id` = `pr`.`virtuemart_product_id` LEFT JOIN `#__virtuemart_rating_votes` as `rv` on `rv`.`virtuemart_product_id`=`pr`.`virtuemart_product_id` and `rv`.`created_by`=`pr`.`created_by` WHERE virtuemart_rating_review_id="'.(int)$cids[0].'" ' ; $db = JFactory::getDBO(); $db->setQuery($q); vmdebug('getReview',$db->getQuery()); return $db->loadObject(); } /** * gets a rating by a product id * * @author Max Milbers * @param int $product_id */ function getRatingByProduct($product_id){ $q = 'SELECT * FROM `#__virtuemart_ratings` WHERE `virtuemart_product_id` = "'.(int)$product_id.'" '; $db = JFactory::getDBO(); $db->setQuery($q); return $db->loadObject(); } /** * gets a review by a product id * * @author Max Milbers * @param int $product_id */ function getReviewByProduct($product_id,$userId=0){ if(empty($userId)){ $user = JFactory::getUser(); $userId = $user->id; } $q = 'SELECT * FROM `#__virtuemart_rating_reviews` WHERE `virtuemart_product_id` = "'.(int)$product_id.'" AND `created_by` = "'.(int)$userId.'" '; $db = JFactory::getDBO(); $db->setQuery($q); return $db->loadObject(); } /** * gets a reviews by a product id * * @author Max Milbers * @param int $product_id */ function getReviewsByProduct($product_id){ if(empty($userId)){ $user = JFactory::getUser(); $userId = $user->id; } $q = 'SELECT * FROM `#__virtuemart_rating_reviews` WHERE `virtuemart_product_id` = 
"'.(int)$product_id.'" '; $db = JFactory::getDBO(); $db->setQuery($q); return $db->loadObjectList(); } /** * gets a vote by a product id and userId * * @author Max Milbers * @param int $product_id */ function getVoteByProduct($product_id,$userId=0){ if(empty($userId)){ $user = JFactory::getUser(); $userId = $user->id; } $q = 'SELECT * FROM `#__virtuemart_rating_votes` WHERE `virtuemart_product_id` = "'.(int)$product_id.'" AND `created_by` = "'.(int)$userId.'" '; $db = JFactory::getDBO(); $db->setQuery($q); return $db->loadObject(); } /** * Save a rating * @author Max Milbers */ public function saveRating($data=0) { //Check user_rating $maxrating = VmConfig::get('vm_maximum_rating_scale',5); $virtuemart_product_id = vRequest::getInt('virtuemart_product_id',0); $app = JFactory::getApplication(); if( $app->isSite() ){ $user = JFactory::getUser(); $userId = $user->id; $allowReview = $this->allowReview($virtuemart_product_id); $allowRating = $this->allowRating($virtuemart_product_id); } else { $userId = $data['created_by']; $allowReview = true; $allowRating = true; } if(!empty($virtuemart_product_id)){ //if ( !empty($data['virtuemart_product_id']) && !empty($userId)){ if(empty($data)) $data = vRequest::getPost(); if($allowRating){ //normalize the rating if ($data['vote'] < 0) { $data['vote'] = 0; } if ($data['vote'] > ($maxrating + 1)) { $data['vote'] = $maxrating; } $data['lastip'] = $_SERVER['REMOTE_ADDR']; $data['vote'] = (int) $data['vote']; $rating = $this->getRatingByProduct($data['virtuemart_product_id']); vmdebug('$rating',$rating); $vote = $this->getVoteByProduct($data['virtuemart_product_id'],$userId); vmdebug('$vote',$vote); $data['virtuemart_rating_vote_id'] = empty($vote->virtuemart_rating_vote_id)? 
0: $vote->virtuemart_rating_vote_id; if(isset($data['vote'])){ $votesTable = $this->getTable('rating_votes'); $votesTable->bindChecknStore($data,TRUE); $errors = $votesTable->getErrors(); foreach($errors as $error){ vmError(get_class( $this ).'::Error store votes '.$error); } } if(!empty($rating->rates) && empty($vote) ){ $data['rates'] = $rating->rates + $data['vote']; $data['ratingcount'] = $rating->ratingcount+1; } else { if (!empty($rating->rates) && !empty($vote->vote)) { $data['rates'] = $rating->rates - $vote->vote + $data['vote']; $data['ratingcount'] = $rating->ratingcount; } else { $data['rates'] = $data['vote']; $data['ratingcount'] = 1; } } if(empty($data['rates']) || empty($data['ratingcount']) ){ $data['rating'] = 0; } else { $data['rating'] = $data['rates']/$data['ratingcount']; } $data['virtuemart_rating_id'] = empty($rating->virtuemart_rating_id)? 0: $rating->virtuemart_rating_id; vmdebug('saveRating $data',$data); $rating = $this->getTable('ratings'); $rating->bindChecknStore($data,TRUE); $errors = $rating->getErrors(); foreach($errors as $error){ vmError(get_class( $this ).'::Error store rating '.$error); } } if($allowReview and !empty($data['comment'])){ //if(!empty($data['comment'])){ $data['comment'] = substr($data['comment'], 0, VmConfig::get('vm_reviews_maximum_comment_length', 2000)) ; // no HTML TAGS but permit all alphabet $value = preg_replace('@<[\/\!]*?[^<>]*?>@si','',$data['comment']);//remove all html tags $value = (string)preg_replace('#on[a-z](.+?)\)#si','',$value);//replace start of script onclick() onload()... 
$value = trim(str_replace('"', ' ', $value),"'") ; $data['comment'] = (string)preg_replace('#^\'#si','',$value);//replace ' at start $data['comment'] = nl2br($data['comment']); // keep returns //set to defaut value not used (prevent hack) $data['review_ok'] = 0; $data['review_rating'] = 0; $data['review_editable'] = 0; // Check if ratings are auto-published (set to 0 prevent injected by user) // $app = JFactory::getApplication(); if( $app->isSite() ){ if (VmConfig::get ('reviews_autopublish', 1)) { $data['published'] = 1; } else { $model = new VmModel(); $product = $model->getTable('products'); $product->load($data['virtuemart_product_id']); $vendorId = VmConfig::isSuperVendor(); if(!$user->authorise('core.admin','com_virtuemart') and !$user->authorise('core.manage','com_virtuemart') or $vendorId!=$product->virtuemart_vendor_id){ $data['published'] = 0; } } } $review = $this->getReviewByProduct($data['virtuemart_product_id'],$userId); if(!empty($review->review_rates)){ $data['review_rates'] = $review->review_rates + $data['vote']; } else { $data['review_rates'] = $data['vote']; } if(!empty($review->review_ratingcount)){ $data['review_ratingcount'] = $review->review_ratingcount+1; } else { $data['review_ratingcount'] = 1; } $data['review_rating'] = $data['review_rates']/$data['review_ratingcount']; $data['virtuemart_rating_review_id'] = empty($review->virtuemart_rating_review_id)? 
0: $review->virtuemart_rating_review_id; $reviewTable = $this->getTable('rating_reviews'); $reviewTable->bindChecknStore($data,TRUE); $errors = $reviewTable->getErrors(); foreach($errors as $error){ vmError(get_class( $this ).'::Error store review '.$error); } } return $data['virtuemart_rating_review_id']; } else{ vmError('Cant save rating/review/vote without vote/product_id'); return FALSE; } } /** * removes a product and related table entries * * @author Max Milberes */ public function remove($ids) { $rating = $this->getTable($this->_maintablename); $review = $this->getTable('rating_reviews'); $votes = $this->getTable('rating_votes'); $ok = TRUE; foreach($ids as $id) { $rating->load($id); $prod_id = $rating->virtuemart_product_id; if (!$rating->delete($id)) { vmError(get_class( $this ).'::Error deleting ratings '.$rating->getError()); $ok = FALSE; } if (!$review->delete($prod_id,'virtuemart_product_id')) { vmError(get_class( $this ).'::Error deleting review '.$review->getError()); $ok = FALSE; } if (!$votes->delete($prod_id,'virtuemart_product_id')) { vmError(get_class( $this ).'::Error deleting votes '.$votes->getError()); $ok = FALSE; } } return $ok; } /** * Returns the number of reviews assigned to a product * * @author RolandD * @param int $pid Product ID * @return int */ public function countReviewsForProduct($pid) { $db = JFactory::getDBO(); $q = "SELECT COUNT(*) AS total FROM #__virtuemart_rating_reviews WHERE virtuemart_product_id=".(int)$pid; $db->setQuery($q); $reviews = $db->loadResult(); return $reviews; } public function showReview($product_id){ return $this->show($product_id, VmConfig::get('showReviewFor','all')); } public function showRating($product_id = 0){ return $this->show($product_id, VmConfig::get('showRatingFor','all')); } public function allowReview($product_id){ return $this->show($product_id, VmConfig::get('reviewMode','bought')); } public function allowRating($product_id){ return $this->show($product_id, 
VmConfig::get('ratingMode','bought')); } /** * Decides if the rating/review should be shown on the FE * @author Max Milbers */ private function show($product_id, $show){ //dont show if($show == 'none'){ return false; } //show all else { if ($show == 'all') { return true; } //show only registered else { if ($show == 'registered') { $user = JFactory::getUser (); return !empty($user->id); } //show only registered && who bought the product else { if ($show == 'bought') { if (empty($product_id)) { return false; } if (isset($this->_productBought[$product_id])) { return $this->_productBought[$product_id]; } $user = JFactory::getUser (); $rr_os=VmConfig::get('rr_os',array('C')); if(!is_array($rr_os)) $rr_os = array($rr_os); $db = JFactory::getDBO (); $q = 'SELECT COUNT(*) as total FROM `#__virtuemart_orders` AS o LEFT JOIN `#__virtuemart_order_items` AS oi '; $q .= 'ON `o`.`virtuemart_order_id` = `oi`.`virtuemart_order_id` '; $q .= 'WHERE o.virtuemart_user_id = "' . $user->id . '" AND oi.virtuemart_product_id = "' . $product_id . '" '; $q .= 'AND o.order_status IN (\'' . implode("','",$rr_os). '\') '; $db->setQuery ($q); $count = $db->loadResult (); if ($count) { $this->_productBought[$product_id] = true; return true; } else { $this->_productBought[$product_id] = false; return false; } } } } } } } // pure php no closing tag
arlesonsilva/fortalpet
tmp/install_5488d6f0c9a33/administrator/components/com_virtuemart/models/ratings.php
PHP
gpl-2.0
15,606
/* * IdeaVim - Vim emulator for IDEs based on the IntelliJ platform * Copyright (C) 2003-2014 The IdeaVim authors * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.maddyhome.idea.vim.action.change.insert; import com.intellij.openapi.actionSystem.DataContext; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.editor.actionSystem.EditorAction; import com.intellij.openapi.editor.actionSystem.EditorActionHandler; import com.intellij.psi.impl.source.tree.injected.InjectedLanguageUtil; import com.maddyhome.idea.vim.VimPlugin; import org.jetbrains.annotations.NotNull; /** */ public class InsertEnterAction extends EditorAction { public InsertEnterAction() { super(new Handler()); } private static class Handler extends EditorActionHandler { public void execute(@NotNull Editor editor, @NotNull DataContext context) { VimPlugin.getChange().processEnter(InjectedLanguageUtil.getTopLevelEditor(editor), context); } } }
flyhawk007/ideavim
src/com/maddyhome/idea/vim/action/change/insert/InsertEnterAction.java
Java
gpl-2.0
1,582
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // GNU/Linux library calls. package syscall import "unsafe" //sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) //__go_openat(dirfd _C_int, path *byte, flags _C_int, mode Mode_t) _C_int //sys futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) //futimesat(dirfd _C_int, path *byte, times *[2]Timeval) _C_int func Futimesat(dirfd int, path string, tv []Timeval) (err error) { if len(tv) != 2 { return EINVAL } return futimesat(dirfd, StringBytePtr(path), (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } func Futimes(fd int, tv []Timeval) (err error) { // Believe it or not, this is the best we can do on GNU/Linux // (and is what glibc does). return Utimes("/proc/self/fd/"+itoa(fd), tv) } //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) //ptrace(request _C_int, pid Pid_t, addr *byte, data *byte) _C_long //sysnb raw_ptrace(request int, pid int, addr *byte, data *byte) (err Errno) //ptrace(request _C_int, pid Pid_t, addr *byte, data *byte) _C_long func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { // The peek requests are machine-size oriented, so we wrap it // to retrieve arbitrary-length data. // The ptrace syscall differs from glibc's ptrace. // Peeks returns the word in *data, not as the return value. var buf [sizeofPtr]byte // Leading edge. PEEKTEXT/PEEKDATA don't require aligned // access (PEEKUSER warns that it might), but if we don't // align our reads, we might straddle an unmapped page // boundary and not get the bytes leading up to the page // boundary. n := 0 if addr%sizeofPtr != 0 { err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return 0, err } n += copy(out, buf[addr%sizeofPtr:]) out = out[n:] } // Remainder. 
for len(out) > 0 { // We use an internal buffer to gaurantee alignment. // It's not documented if this is necessary, but we're paranoid. err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return n, err } copied := copy(out, buf[0:]) n += copied out = out[copied:] } return n, nil } func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) { return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { return ptracePeek(PTRACE_PEEKDATA, pid, addr, out) } func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) { // As for ptracePeek, we need to align our accesses to deal // with the possibility of straddling an invalid page. // Leading edge. n := 0 if addr%sizeofPtr != 0 { var buf [sizeofPtr]byte err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return 0, err } n += copy(buf[addr%sizeofPtr:], data) word := *((*uintptr)(unsafe.Pointer(&buf[0]))) err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word) if err != nil { return 0, err } data = data[n:] } // Interior. for len(data) > int(sizeofPtr) { word := *((*uintptr)(unsafe.Pointer(&data[0]))) err = ptrace(pokeReq, pid, addr+uintptr(n), word) if err != nil { return n, err } n += int(sizeofPtr) data = data[sizeofPtr:] } // Trailing edge. 
if len(data) > 0 { var buf [sizeofPtr]byte err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return n, err } copy(buf[0:], data) word := *((*uintptr)(unsafe.Pointer(&buf[0]))) err = ptrace(pokeReq, pid, addr+uintptr(n), word) if err != nil { return n, err } n += len(data) } return n, nil } func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data) } func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) { return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data) } func PtraceSetOptions(pid int, options int) (err error) { return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options)) } func PtraceGetEventMsg(pid int) (msg uint, err error) { var data _C_long err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) msg = uint(data) return } func PtraceCont(pid int, signal int) (err error) { return ptrace(PTRACE_CONT, pid, 0, uintptr(signal)) } func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) } func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) } //sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) //reboot(magic1 _C_uint, magic2 _C_uint, cmd _C_int, arg *byte) _C_int func Reboot(cmd int) (err error) { return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "") } //sys accept4(fd int, sa *RawSockaddrAny, len *Socklen_t, flags int) (nfd int, err error) //accept4(fd _C_int, sa *RawSockaddrAny, len *Socklen_t, flags _C_int) _C_int func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny var len Socklen_t = SizeofSockaddrAny nfd, err = accept4(fd, &rsa, &len, flags) if err != nil { return -1, nil, err } sa, err = anyToSockaddr(&rsa) if err != nil { Close(nfd) return -1, nil, err } return 
nfd, sa, nil } //sys Acct(path string) (err error) //acct(path *byte) _C_int //sys Adjtimex(buf *Timex) (state int, err error) //adjtimex(buf *Timex) _C_int //sysnb Dup3(oldfd int, newfd int, flags int) (err error) //dup3(oldfd _C_int, newfd _C_int, flags _C_int) _C_int //sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //faccessat(dirfd _C_int, pathname *byte, mode _C_int, flags _C_int) _C_int //sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) //fallocate(fd _C_int, mode _C_int, offset Offset_t, len Offset_t) _C_int //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //fchmodat(dirfd _C_int, pathname *byte, mode Mode_t, flags _C_int) _C_int //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //fchownat(dirfd _C_int, path *byte, owner Uid_t, group Gid_t, flags _C_int) _C_int //sys Flock(fd int, how int) (err error) //flock(fd _C_int, how _C_int) _C_int //sys Fstatfs(fd int, buf *Statfs_t) (err error) //fstatfs(fd _C_int, buf *Statfs_t) _C_int func Gettid() (tid int) { r1, _, _ := Syscall(SYS_GETTID, 0, 0, 0) return int(r1) } func Getdents(fd int, buf []byte) (n int, err error) { var p *byte if len(buf) > 0 { p = &buf[0] } else { p = (*byte)(unsafe.Pointer(&_zero)) } s := SYS_GETDENTS64 if s == 0 { s = SYS_GETDENTS } r1, _, errno := Syscall(uintptr(s), uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(len(buf))) n = int(r1) if n < 0 { err = errno } return } func clen(n []byte) int { for i := 0; i < len(n); i++ { if n[i] == 0 { return i } } return len(n) } func ReadDirent(fd int, buf []byte) (n int, err error) { return Getdents(fd, buf) } func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { origlen := len(buf) count = 0 for max != 0 && len(buf) > 0 { dirent := (*Dirent)(unsafe.Pointer(&buf[0])) buf = buf[dirent.Reclen:] if dirent.Ino == 0 { // File absent in directory. 
continue } bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) var name = string(bytes[0:clen(bytes[:])]) if name == "." || name == ".." { // Useless names continue } max-- count++ names = append(names, name) } return origlen - len(buf), count, names } //sys Getxattr(path string, attr string, dest []byte) (sz int, err error) //getxattr(path *byte, attr *byte, buf *byte, count Size_t) Ssize_t //sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) //inotify_add_watch(fd _C_int, pathname *byte, mask uint32) _C_int //sysnb InotifyInit() (fd int, err error) //inotify_init() _C_int //sysnb InotifyInit1(flags int) (fd int, err error) //inotify_init1(flags _C_int) _C_int //sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) //inotify_rm_watch(fd _C_int, wd uint32) _C_int //sys Klogctl(typ int, buf []byte) (n int, err error) //klogctl(typ _C_int, bufp *byte, len _C_int) _C_int //sys Listxattr(path string, dest []byte) (sz int, err error) //listxattr(path *byte, list *byte, size Size_t) Ssize_t //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //mkdirat(dirfd _C_int, path *byte, mode Mode_t) _C_int //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //mknodat(dirfd _C_int, path *byte, mode Mode_t, dev _dev_t) _C_int //sysnb pipe2(p *[2]_C_int, flags int) (err error) //pipe2(p *[2]_C_int, flags _C_int) _C_int func Pipe2(p []int, flags int) (err error) { if len(p) != 2 { return EINVAL } var pp [2]_C_int err = pipe2(&pp, flags) p[0] = int(pp[0]) p[1] = int(pp[1]) return } //sys PivotRoot(newroot string, putold string) (err error) //pivot_root(newroot *byte, putold *byte) _C_int //sys Removexattr(path string, attr string) (err error) //removexattr(path *byte, name *byte) _C_int //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //renameat(olddirfd _C_int, oldpath *byte, newdirfd _C_int, newpath *byte) _C_int //sys sendfile(outfd int, infd int, offset 
*Offset_t, count int) (written int, err error) //sendfile64(outfd _C_int, infd _C_int, offset *Offset_t, count Size_t) Ssize_t func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } var soff Offset_t var psoff *Offset_t if offset != nil { soff = Offset_t(*offset) psoff = &soff } written, err = sendfile(outfd, infd, psoff, count) if offset != nil { *offset = int64(soff) } return } //sys Setfsgid(gid int) (err error) //setfsgid(gid Gid_t) _C_int //sys Setfsuid(uid int) (err error) //setfsuid(uid Uid_t) _C_int //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //setresgid(rgid Gid_t, egid Gid_t, sgid Gid_t) _C_int //sysnb Setresuid(ruid int, eguid int, suid int) (err error) //setresuid(ruid Uid_t, euid Uid_t, suid Uid_t) _C_int //sys Setxattr(path string, attr string, data []byte, flags int) (err error) //setxattr(path *byte, name *byte, value *byte, size Size_t, flags _C_int) _C_int //sys splice(rfd int, roff *_loff_t, wfd int, woff *_loff_t, len int, flags int) (n int64, err error) //splice(rfd _C_int, roff *_loff_t, wfd _C_int, woff *_loff_t, len Size_t, flags _C_uint) Ssize_t func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { var lroff _loff_t var plroff *_loff_t if roff != nil { lroff = _loff_t(*roff) plroff = &lroff } var lwoff _loff_t var plwoff *_loff_t if woff != nil { lwoff = _loff_t(*woff) plwoff = &lwoff } n, err = splice(rfd, plroff, wfd, plwoff, len, flags) if roff != nil { *roff = int64(lroff) } if woff != nil { *woff = int64(lwoff) } return } //sys Statfs(path string, buf *Statfs_t) (err error) //statfs(path *byte, buf *Statfs_t) _C_int //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sync_file_range(fd _C_int, off Offset_t, n Offset_t, flags _C_uint) _C_int //sysnb Sysinfo(info *Sysinfo_t) (err error) //sysinfo(info *Sysinfo_t) _C_int //sys Tee(rfd int, wfd int, len int, flags 
int) (n int64, err error) //tee(rfd _C_int, wfd _C_int, len Size_t, flags _C_uint) Ssize_t func Tgkill(tgid int, tid int, sig Signal) error { r1, _, errno := Syscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) if r1 < 0 { return errno } return nil } //sys unlinkat(dirfd int, path string, flags int) (err error) //unlinkat(dirfd _C_int, path *byte, flags _C_int) _C_int func Unlinkat(dirfd int, path string) (err error) { return unlinkat(dirfd, path, 0) } //sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2 //umount2(target *byte, flags _C_int) _C_int //sys Unshare(flags int) (err error) //unshare(flags _C_int) _C_int //sys Ustat(dev int, ubuf *Ustat_t) (err error) //ustat(dev _dev_t, ubuf *Ustat_t) _C_int
iains/darwin-gcc-5
libgo/go/syscall/libcall_linux.go
GO
gpl-2.0
12,396
<?xml version="1.0" encoding="UTF-8" standalone="no"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><head><title>Chapter 12.  Numerics</title><meta name="generator" content="DocBook XSL-NS Stylesheets V1.76.1"/><meta name="keywords" content="&#10; ISO C++&#10; , &#10; library&#10; "/><meta name="keywords" content="&#10; ISO C++&#10; , &#10; runtime&#10; , &#10; library&#10; "/><link rel="home" href="../index.html" title="The GNU C++ Library"/><link rel="up" href="bk01pt02.html" title="Part II.  Standard Contents"/><link rel="prev" href="algorithms.html" title="Chapter 11.  Algorithms"/><link rel="next" href="generalized_numeric_operations.html" title="Generalized Operations"/></head><body><div class="navheader"><table width="100%" summary="Navigation header"><tr><th colspan="3" align="center">Chapter 12.  Numerics </th></tr><tr><td align="left"><a accesskey="p" href="algorithms.html">Prev</a> </td><th width="60%" align="center">Part II.  Standard Contents </th><td align="right"> <a accesskey="n" href="generalized_numeric_operations.html">Next</a></td></tr></table><hr/></div><div class="chapter" title="Chapter 12.  Numerics"><div class="titlepage"><div><div><h2 class="title"><a id="std.numerics"/>Chapter 12.  Numerics <a id="id504587" class="indexterm"/> </h2></div></div></div><div class="toc"><p><strong>Table of Contents</strong></p><dl><dt><span class="section"><a href="numerics.html#std.numerics.complex">Complex</a></span></dt><dd><dl><dt><span class="section"><a href="numerics.html#numerics.complex.processing">complex Processing</a></span></dt></dl></dd><dt><span class="section"><a href="generalized_numeric_operations.html">Generalized Operations</a></span></dt><dt><span class="section"><a href="numerics_and_c.html">Interacting with C</a></span></dt><dd><dl><dt><span class="section"><a href="numerics_and_c.html#numerics.c.array">Numerics vs. 
Arrays</a></span></dt><dt><span class="section"><a href="numerics_and_c.html#numerics.c.c99">C99</a></span></dt></dl></dd></dl></div><div class="section" title="Complex"><div class="titlepage"><div><div><h2 class="title"><a id="std.numerics.complex"/>Complex</h2></div></div></div><p> </p><div class="section" title="complex Processing"><div class="titlepage"><div><div><h3 class="title"><a id="numerics.complex.processing"/>complex Processing</h3></div></div></div><p> </p><p>Using <code class="code">complex&lt;&gt;</code> becomes even more comple- er, sorry, <span class="emphasis"><em>complicated</em></span>, with the not-quite-gratuitously-incompatible addition of complex types to the C language. David Tribble has compiled a list of C++98 and C99 conflict points; his description of C's new type versus those of C++ and how to get them playing together nicely is <a class="link" href="http://david.tribble.com/text/cdiffs.htm#C99-complex">here</a>. </p><p><code class="code">complex&lt;&gt;</code> is intended to be instantiated with a floating-point type. As long as you meet that and some other basic requirements, then the resulting instantiation has all of the usual math operators defined, as well as definitions of <code class="code">op&lt;&lt;</code> and <code class="code">op&gt;&gt;</code> that work with iostreams: <code class="code">op&lt;&lt;</code> prints <code class="code">(u,v)</code> and <code class="code">op&gt;&gt;</code> can read <code class="code">u</code>, <code class="code">(u)</code>, and <code class="code">(u,v)</code>. </p></div></div></div><div class="navfooter"><hr/><table width="100%" summary="Navigation footer"><tr><td align="left"><a accesskey="p" href="algorithms.html">Prev</a> </td><td align="center"><a accesskey="u" href="bk01pt02.html">Up</a></td><td align="right"> <a accesskey="n" href="generalized_numeric_operations.html">Next</a></td></tr><tr><td align="left" valign="top">Chapter 11.  
Algorithms  </td><td align="center"><a accesskey="h" href="../index.html">Home</a></td><td align="right" valign="top"> Generalized Operations</td></tr></table></div></body></html>
MicroTrustRepos/microkernel
src/l4/pkg/libstdc++-v3/contrib/libstdc++-v3-4.7/doc/html/manual/numerics.html
HTML
gpl-2.0
4,256
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mfd/msm-adie-codec.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/debugfs.h> #include <asm/uaccess.h> #include <mach/qdsp5v2/snddev_icodec.h> #include <mach/qdsp5v2/audio_dev_ctl.h> #include <mach/qdsp5v2/audio_interct.h> #include <mach/qdsp5v2/mi2s.h> #include <mach/qdsp5v2/afe.h> #include <mach/qdsp5v2/lpa.h> #include <mach/qdsp5v2/marimba_profile.h> #include <mach/vreg.h> #include <mach/pmic.h> #include <linux/wakelock.h> #include <mach/debug_mm.h> #include <mach/rpc_pmapp.h> #include <mach/qdsp5v2/audio_acdb_def.h> #include <linux/slab.h> #define SMPS_AUDIO_PLAYBACK_ID "AUPB" #define SMPS_AUDIO_RECORD_ID "AURC" #define SNDDEV_ICODEC_PCM_SZ 32 /* 16 bit / sample stereo mode */ #define SNDDEV_ICODEC_MUL_FACTOR 3 /* Multi by 8 Shift by 3 */ #define SNDDEV_ICODEC_CLK_RATE(freq) \ (((freq) * (SNDDEV_ICODEC_PCM_SZ)) << (SNDDEV_ICODEC_MUL_FACTOR)) #ifdef CONFIG_DEBUG_FS static struct adie_codec_action_unit debug_rx_actions[] = HANDSET_RX_8000_OSR_256; static struct adie_codec_action_unit debug_tx_lb_actions[] = { { ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF }, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x01, 0x01)}, { 
ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x01, 0x00) }, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x30, 0x30)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xfc, 0xfc)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x13, 0xfc, 0x58)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xff, 0x65)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x15, 0xff, 0x64)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xff, 0x5C)}, { ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY }, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xF0, 0xd0)}, { ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x14, 0x14)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xff, 0x00)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x50, 0x40)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x91, 0xFF, 0x01)}, /* Start loop back */ { ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x10, 0x30)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x14, 0x00)}, { ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xff, 0x00)} }; static struct adie_codec_action_unit debug_tx_actions[] = HANDSET_TX_8000_OSR_256; static struct adie_codec_hwsetting_entry debug_rx_settings[] = { { .freq_plan = 8000, .osr = 256, .actions = debug_rx_actions, .action_sz = ARRAY_SIZE(debug_rx_actions), } }; static struct adie_codec_hwsetting_entry debug_tx_settings[] = { { .freq_plan = 8000, .osr = 256, .actions = debug_tx_actions, .action_sz = ARRAY_SIZE(debug_tx_actions), } }; static struct adie_codec_hwsetting_entry debug_tx_lb_settings[] = { { .freq_plan = 8000, .osr = 256, .actions = debug_tx_lb_actions, .action_sz = ARRAY_SIZE(debug_tx_lb_actions), } }; static struct 
adie_codec_dev_profile debug_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = debug_rx_settings, .setting_sz = ARRAY_SIZE(debug_rx_settings), }; static struct adie_codec_dev_profile debug_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = debug_tx_settings, .setting_sz = ARRAY_SIZE(debug_tx_settings), }; static struct adie_codec_dev_profile debug_tx_lb_profile = { .path_type = ADIE_CODEC_TX, .settings = debug_tx_lb_settings, .setting_sz = ARRAY_SIZE(debug_tx_lb_settings), }; #endif /* CONFIG_DEBUG_FS */ /* Context for each internal codec sound device */ struct snddev_icodec_state { struct snddev_icodec_data *data; struct adie_codec_path *adie_path; u32 sample_rate; u32 enabled; }; /* Global state for the driver */ struct snddev_icodec_drv_state { struct mutex rx_lock; struct mutex tx_lock; u32 rx_active; /* ensure one rx device at a time */ u32 tx_active; /* ensure one tx device at a time */ struct clk *rx_mclk; struct clk *rx_sclk; struct clk *tx_mclk; struct clk *tx_sclk; struct clk *lpa_codec_clk; struct clk *lpa_core_clk; struct clk *lpa_p_clk; struct lpa_drv *lpa; struct wake_lock rx_idlelock; struct wake_lock tx_idlelock; }; static struct snddev_icodec_drv_state snddev_icodec_drv; #if 0//!defined(CONFIG_LGE_MODEL_E739) #ifdef CONFIG_LGE_AUDIO_LOOPBACK /* LGE_CHANGE * added for Audio loopback * 2010-02-24, bob.cho@lge.com */ #include "lge_audio_amp.h" #include <linux/delay.h> #define LB_DISABLE 0x00 #define LB_ENABLE 0x01 #define LB_PATH_HANDSET 0x10 #define LB_PATH_HEADSET 0x20 #define LB_PATH_SPEAKER 0x30 #define LB_IS_ENABLE(x) (x&0x0F) /******************************************************************************* * headset loopback Rx section * ********************************************************************************/ static struct adie_codec_action_unit headset_mono_rx_8KHz_osr256_actions[] = HEADSET_RX_LEGACY_8000_OSR_256; static struct adie_codec_hwsetting_entry headset_lb_rx_settings[] = { { .freq_plan = 8000, .osr = 256, .actions = 
headset_mono_rx_8KHz_osr256_actions, .action_sz = ARRAY_SIZE(headset_mono_rx_8KHz_osr256_actions), } }; static struct adie_codec_dev_profile lb_headset_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = headset_lb_rx_settings, .setting_sz = ARRAY_SIZE(headset_lb_rx_settings), }; /******************************************************************************* * headset loopback Tx section * ********************************************************************************/ static struct adie_codec_action_unit headset_mono_tx_8KHz_osr256_actions[] = { {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x01, 0x01)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x01, 0x00)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x30, 0x30)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xfc, 0xfc)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x13, 0xfc, 0x58)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xff, 0x65)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x15, 0xfc, 0x64)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x5D)}, {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFf, 0xc8)}, {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x14, 0x14)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xff, 0x00)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x50, 0x40)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x91, 0xFF, 0x01)}, {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x10, 0x10)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x14, 0x00)}, {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xff, 0x00)}, 
}; static struct adie_codec_hwsetting_entry headset_lb_tx_settings[] = { { .freq_plan = 8000, .osr = 256, .actions = headset_mono_tx_8KHz_osr256_actions, .action_sz = ARRAY_SIZE(headset_mono_tx_8KHz_osr256_actions), } }; static struct adie_codec_dev_profile lb_headset_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = headset_lb_tx_settings, .setting_sz = ARRAY_SIZE(headset_lb_tx_settings), }; /******************************************************************************* * speaker loopback Tx section * ********************************************************************************/ static struct adie_codec_action_unit ispeaker_tx_8KHz_osr256_actions[] = {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x01, 0x01)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x01, 0x00)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x30, 0x30)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xfc, 0xfc)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x13, 0xfc, 0x58)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xff, 0x65)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x15, 0xff, 0x64)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xff, 0x5E)}, {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC1)}, {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x14, 0x14)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xff, 0x00)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x50, 0x40)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x91, 0xFF, 0x01)}, /*start loopback*/ {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x10, 0x30)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x14, 
0x00)}, {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xff, 0x00)}}; static struct adie_codec_hwsetting_entry speaker_lb_tx_settings[] = { { .freq_plan = 8000, .osr = 256, .actions = ispeaker_tx_8KHz_osr256_actions, .action_sz = ARRAY_SIZE(ispeaker_tx_8KHz_osr256_actions), } }; static struct adie_codec_dev_profile lb_speaker_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = speaker_lb_tx_settings, .setting_sz = ARRAY_SIZE(speaker_lb_tx_settings), }; /******************************************************************************* * handset loopback Rx section * ********************************************************************************/ static struct adie_codec_action_unit handset_rx_8KHz_osr256_actions[] = HANDSET_RX_8000_OSR_256; static struct adie_codec_hwsetting_entry handset_lb_rx_settings[] = { { .freq_plan = 8000, .osr = 256, .actions = handset_rx_8KHz_osr256_actions, .action_sz = ARRAY_SIZE(handset_rx_8KHz_osr256_actions), } }; static struct adie_codec_dev_profile lb_handset_rx_profile = { .path_type = ADIE_CODEC_RX, .settings = handset_lb_rx_settings, .setting_sz = ARRAY_SIZE(handset_lb_rx_settings), }; /******************************************************************************* * handset loopback Tx section * ********************************************************************************/ static struct adie_codec_action_unit handset_tx_8KHz_osr256_actions[] = { { ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF }, { ADIE_CODEC_ACTION_ENTRY,ADIE_CODEC_PACK_ENTRY(0x80, 0x01, 0x01)}, { ADIE_CODEC_ACTION_ENTRY,ADIE_CODEC_PACK_ENTRY(0x80, 0x01, 0x00) }, { ADIE_CODEC_ACTION_ENTRY,ADIE_CODEC_PACK_ENTRY(0x8A, 0x30, 0x30)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xfc, 0xfc)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x13, 0xfc, 0x58)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xff, 0x65)}, { ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x15, 0xff, 0x64)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xff, 0x5C)}, { ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY }, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xF0, 0xd0)}, { ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x14, 0x14)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xff, 0x00)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x50, 0x40)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x91, 0xFF, 0x01)}, /* Start loop back */ { ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x10, 0x30)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x14, 0x00)}, { ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, { ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xff, 0x00)} }; static struct adie_codec_hwsetting_entry handset_lb_tx_settings[] = { { .freq_plan = 8000, .osr = 256, .actions = handset_tx_8KHz_osr256_actions, .action_sz = ARRAY_SIZE(handset_tx_8KHz_osr256_actions), } }; static struct adie_codec_dev_profile lb_handset_tx_profile = { .path_type = ADIE_CODEC_TX, .settings = handset_lb_tx_settings, .setting_sz = ARRAY_SIZE(handset_lb_tx_settings), }; static int lb_status = 0; static int lb_suspend = 0; static struct adie_codec_path *lb_rx_adie; static struct adie_codec_path *lb_tx_adie; static void set_loopback_mode(u32 loop) { int trc; struct msm_afe_config afe_config; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; struct lpa_codec_config lpa_config; if (loop & LB_ENABLE) { /* adie config profile for loopback*/ struct adie_codec_dev_profile *rx_profile; struct adie_codec_dev_profile *tx_profile; int timeout; timeout = 10; while (drv->rx_active || drv->tx_active ) { msleep(1000); if (--timeout < 0) return; } switch (loop & 0xF0) { case LB_PATH_HANDSET 
: rx_profile = &lb_handset_rx_profile; tx_profile = &lb_handset_tx_profile; break; case LB_PATH_HEADSET : rx_profile = &lb_headset_rx_profile; tx_profile = &lb_headset_tx_profile; break; case LB_PATH_SPEAKER : rx_profile = &lb_handset_rx_profile; tx_profile = &lb_speaker_tx_profile; break; default : return; } /* Vote for SMPS mode*/ pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM); /* enable MI2S RX master block */ /* enable MI2S RX bit clock */ trc = clk_set_rate(drv->rx_mclk, SNDDEV_ICODEC_CLK_RATE(8000)); if (IS_ERR_VALUE(trc)) MM_ERR("failed to set clk rate\n"); clk_enable(drv->rx_mclk); clk_enable(drv->rx_sclk); clk_enable(drv->lpa_p_clk); clk_enable(drv->lpa_codec_clk); clk_enable(drv->lpa_core_clk); /* Enable LPA sub system */ drv->lpa = lpa_get(); if (!drv->lpa) MM_ERR("failed to enable lpa\n"); lpa_config.sample_rate = 8000; lpa_config.sample_width = 16; lpa_config.output_interface = LPA_OUTPUT_INTF_WB_CODEC; lpa_config.num_channels = 1; lpa_cmd_codec_config(drv->lpa, &lpa_config); /* Set audio interconnect reg to LPA */ audio_interct_codec(AUDIO_INTERCT_LPA); mi2s_set_codec_output_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT); MM_INFO("configure ADIE RX path\n"); /* Configure ADIE */ adie_codec_open(rx_profile, &lb_rx_adie); adie_codec_setpath(lb_rx_adie, 8000, 256); lpa_cmd_enable_codec(drv->lpa, 1); /* Start AFE for RX */ afe_config.sample_rate = 0x8; afe_config.channel_mode = 1; afe_config.volume = AFE_VOLUME_UNITY; MM_INFO("enable afe\n"); trc = afe_enable(AFE_HW_PATH_CODEC_RX, &afe_config); if (IS_ERR_VALUE(trc)) MM_ERR("fail to enable afe RX\n"); adie_codec_proceed_stage(lb_rx_adie, ADIE_CODEC_DIGITAL_READY); adie_codec_proceed_stage(lb_rx_adie, ADIE_CODEC_DIGITAL_ANALOG_READY); // ? 
/* Set amp for headset path */ if ((loop & 0xF0) == LB_PATH_HEADSET) lge_amp_hp_phone_on(); /* Vote for PWM mode*/ pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM); MM_INFO("Enable Handset Mic bias\n"); if ((loop & 0xF0) == LB_PATH_HANDSET) pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_PWM_TCXO); else if ((loop & 0xF0) == LB_PATH_SPEAKER) pmic_hsed_enable(PM_HSED_CONTROLLER_1, PM_HSED_ENABLE_PWM_TCXO); /* enable MI2S TX master block */ /* enable MI2S TX bit clock */ clk_set_rate(drv->tx_mclk, SNDDEV_ICODEC_CLK_RATE(8000)); clk_enable(drv->tx_mclk); clk_enable(drv->tx_sclk); /* Set MI2S */ mi2s_set_codec_input_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT); MM_INFO("configure ADIE TX path\n"); /* Configure ADIE */ adie_codec_open(tx_profile, &lb_tx_adie); adie_codec_setpath(lb_tx_adie, 8000, 256); adie_codec_proceed_stage(lb_tx_adie, ADIE_CODEC_DIGITAL_READY); adie_codec_proceed_stage(lb_tx_adie, // ? ADIE_CODEC_DIGITAL_ANALOG_READY); /* Start AFE for TX */ afe_config.sample_rate = 0x8; afe_config.channel_mode = 1; afe_config.volume = AFE_VOLUME_UNITY; trc = afe_enable(AFE_HW_PATH_CODEC_TX, &afe_config); if (IS_ERR_VALUE(trc)) MM_ERR("failed to enable AFE TX\n"); /* Set the volume level to non unity, to avoid loopback effect */ afe_device_volume_ctrl(AFE_HW_PATH_CODEC_RX, 0x0500); /* enable afe loopback */ /* afe_loopback(1); */ MM_INFO("loopback enabled %d\n", loop); lb_status = loop; lb_suspend = 0; } else { /* disable afe loopback */ /* afe_loopback(0); */ /* Remove the vote for SMPS mode*/ pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE); /* Disable ADIE */ adie_codec_proceed_stage(lb_rx_adie, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(lb_rx_adie); /* Disable AFE for RX */ afe_disable(AFE_HW_PATH_CODEC_RX); /* Disable LPA Sub system */ lpa_cmd_enable_codec(drv->lpa, 0); lpa_put(drv->lpa); /* Disable LPA clocks */ clk_disable(drv->lpa_p_clk); clk_disable(drv->lpa_codec_clk); 
clk_disable(drv->lpa_core_clk); /* Disable MI2S RX master block */ /* Disable MI2S RX bit clock */ clk_disable(drv->rx_sclk); clk_disable(drv->rx_mclk); pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE); /* Disable AFE for TX */ afe_disable(AFE_HW_PATH_CODEC_TX); /* Disable ADIE */ adie_codec_proceed_stage(lb_tx_adie, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(lb_tx_adie); /* Power Off amp for headset path */ if ((lb_status & 0xF0) == LB_PATH_HEADSET) lge_amp_off_all(); /* Disable MI2S TX master block */ /* Disable MI2S TX bit clock */ clk_disable(drv->tx_sclk); clk_disable(drv->tx_mclk); MM_INFO("Disable Handset Mic bias\n"); if ((lb_status & 0xF0) == LB_PATH_HANDSET) pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_OFF); else if ((lb_status & 0xF0) == LB_PATH_SPEAKER) pmic_hsed_enable(PM_HSED_CONTROLLER_1, PM_HSED_ENABLE_OFF); MM_INFO("AFE loopback disabled\n"); lb_status = 0; } } static ssize_t store_loopback_value(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int lb = 0; sscanf(buf, "%d", &lb); /* skip when same status */ if (lb == lb_status) return count; /* ignore disabling when in disable status */ if (!LB_IS_ENABLE(lb) && !LB_IS_ENABLE(lb_status)) return count; /* disable loopback when loopback is already enabled*/ if (LB_IS_ENABLE(lb) && LB_IS_ENABLE(lb_status)) set_loopback_mode(lb_status & 0xF0); /* set loopback */ set_loopback_mode(lb); return count; } static ssize_t show_loopback_value(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", lb_status); } static DEVICE_ATTR(loopback, S_IRUGO|S_IWUGO, show_loopback_value, store_loopback_value); #endif /*CONFIG_LGE_AUDIO_LOOPBACK*/ #endif static int snddev_icodec_open_rx(struct snddev_icodec_state *icodec) { int trc, err; int smps_mode = PMAPP_SMPS_MODE_VOTE_PWM; struct msm_afe_config afe_config; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; struct lpa_codec_config lpa_config; 
wake_lock(&drv->rx_idlelock); if ((icodec->data->acdb_id == ACDB_ID_HEADSET_SPKR_MONO) || (icodec->data->acdb_id == ACDB_ID_HEADSET_SPKR_STEREO)) { /* Vote PMAPP_SMPS_MODE_VOTE_PFM for headset */ smps_mode = PMAPP_SMPS_MODE_VOTE_PFM; MM_DBG("snddev_icodec_open_rx: PMAPP_SMPS_MODE_VOTE_PFM \n"); } else MM_DBG("snddev_icodec_open_rx: PMAPP_SMPS_MODE_VOTE_PWM \n"); /* Vote for SMPS mode*/ err = pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID, PMAPP_VREG_S4, smps_mode); if (err != 0) MM_ERR("pmapp_smps_mode_vote error %d\n", err); /* enable MI2S RX master block */ /* enable MI2S RX bit clock */ trc = clk_set_rate(drv->rx_mclk, SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate)); if (IS_ERR_VALUE(trc)) goto error_invalid_freq; clk_enable(drv->rx_mclk); clk_enable(drv->rx_sclk); /* clk_set_rate(drv->lpa_codec_clk, 1); */ /* Remove if use pcom */ clk_enable(drv->lpa_p_clk); clk_enable(drv->lpa_codec_clk); clk_enable(drv->lpa_core_clk); /* Enable LPA sub system */ drv->lpa = lpa_get(); if (!drv->lpa) goto error_lpa; lpa_config.sample_rate = icodec->sample_rate; lpa_config.sample_width = 16; lpa_config.output_interface = LPA_OUTPUT_INTF_WB_CODEC; lpa_config.num_channels = icodec->data->channel_mode; lpa_cmd_codec_config(drv->lpa, &lpa_config); /* Set audio interconnect reg to LPA */ audio_interct_codec(AUDIO_INTERCT_LPA); /* Set MI2S */ mi2s_set_codec_output_path((icodec->data->channel_mode == 2 ? 
MI2S_CHAN_STEREO : MI2S_CHAN_MONO_PACKED), WT_16_BIT); if (icodec->data->voltage_on) icodec->data->voltage_on(); /* Configure ADIE */ trc = adie_codec_open(icodec->data->profile, &icodec->adie_path); if (IS_ERR_VALUE(trc)) goto error_adie; /* OSR default to 256, can be changed for power optimization * If OSR is to be changed, need clock API for setting the divider */ adie_codec_setpath(icodec->adie_path, icodec->sample_rate, 256); /* Start AFE */ afe_config.sample_rate = icodec->sample_rate / 1000; afe_config.channel_mode = icodec->data->channel_mode; afe_config.volume = AFE_VOLUME_UNITY; trc = afe_enable(AFE_HW_PATH_CODEC_RX, &afe_config); if (IS_ERR_VALUE(trc)) goto error_afe; lpa_cmd_enable_codec(drv->lpa, 1); /* Enable ADIE */ adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_READY); adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_ANALOG_READY); /* Enable power amplifier */ if (icodec->data->pamp_on) icodec->data->pamp_on(); icodec->enabled = 1; wake_unlock(&drv->rx_idlelock); return 0; error_afe: adie_codec_close(icodec->adie_path); icodec->adie_path = NULL; error_adie: lpa_put(drv->lpa); error_lpa: clk_disable(drv->lpa_p_clk); clk_disable(drv->lpa_codec_clk); clk_disable(drv->lpa_core_clk); clk_disable(drv->rx_sclk); clk_disable(drv->rx_mclk); error_invalid_freq: MM_ERR("encounter error\n"); wake_unlock(&drv->rx_idlelock); return -ENODEV; } static int snddev_icodec_open_tx(struct snddev_icodec_state *icodec) { int trc; int i, err; struct msm_afe_config afe_config; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;; wake_lock(&drv->tx_idlelock); /* Vote for PWM mode*/ err = pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM); if (err != 0) MM_ERR("pmapp_smps_mode_vote error %d\n", err); /* Reuse pamp_on for TX platform-specific setup */ if (icodec->data->pamp_on) icodec->data->pamp_on(); for (i = 0; i < icodec->data->pmctl_id_sz; i++) { pmic_hsed_enable(icodec->data->pmctl_id[i], 
PM_HSED_ENABLE_PWM_TCXO); } /* enable MI2S TX master block */ /* enable MI2S TX bit clock */ trc = clk_set_rate(drv->tx_mclk, SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate)); if (IS_ERR_VALUE(trc)) goto error_invalid_freq; clk_enable(drv->tx_mclk); clk_enable(drv->tx_sclk); /* Set MI2S */ mi2s_set_codec_input_path((icodec->data->channel_mode == REAL_STEREO_CHANNEL_MODE ? MI2S_CHAN_STEREO : (icodec->data->channel_mode == 2 ? MI2S_CHAN_STEREO : MI2S_CHAN_MONO_RAW)), WT_16_BIT); /* Configure ADIE */ trc = adie_codec_open(icodec->data->profile, &icodec->adie_path); if (IS_ERR_VALUE(trc)) goto error_adie; /* Enable ADIE */ adie_codec_setpath(icodec->adie_path, icodec->sample_rate, 256); adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_READY); adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_ANALOG_READY); /* Start AFE */ afe_config.sample_rate = icodec->sample_rate / 1000; afe_config.channel_mode = icodec->data->channel_mode; afe_config.volume = AFE_VOLUME_UNITY; trc = afe_enable(AFE_HW_PATH_CODEC_TX, &afe_config); if (IS_ERR_VALUE(trc)) goto error_afe; icodec->enabled = 1; wake_unlock(&drv->tx_idlelock); return 0; error_afe: adie_codec_close(icodec->adie_path); icodec->adie_path = NULL; error_adie: clk_disable(drv->tx_sclk); clk_disable(drv->tx_mclk); error_invalid_freq: /* Disable mic bias */ for (i = 0; i < icodec->data->pmctl_id_sz; i++) { pmic_hsed_enable(icodec->data->pmctl_id[i], PM_HSED_ENABLE_OFF); } if (icodec->data->pamp_off) icodec->data->pamp_off(); MM_ERR("encounter error\n"); wake_unlock(&drv->tx_idlelock); return -ENODEV; } static int snddev_icodec_close_rx(struct snddev_icodec_state *icodec) { int err; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; wake_lock(&drv->rx_idlelock); /* Remove the vote for SMPS mode*/ err = pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE); if (err != 0) MM_ERR("pmapp_smps_mode_vote error %d\n", err); /* Disable power amplifier */ if 
(icodec->data->pamp_off) icodec->data->pamp_off(); /* Disable ADIE */ adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(icodec->adie_path); icodec->adie_path = NULL; afe_disable(AFE_HW_PATH_CODEC_RX); if (icodec->data->voltage_off) icodec->data->voltage_off(); /* Disable LPA Sub system */ lpa_cmd_enable_codec(drv->lpa, 0); lpa_put(drv->lpa); /* Disable LPA clocks */ clk_disable(drv->lpa_p_clk); clk_disable(drv->lpa_codec_clk); clk_disable(drv->lpa_core_clk); /* Disable MI2S RX master block */ /* Disable MI2S RX bit clock */ clk_disable(drv->rx_sclk); clk_disable(drv->rx_mclk); icodec->enabled = 0; wake_unlock(&drv->rx_idlelock); return 0; } static int snddev_icodec_close_tx(struct snddev_icodec_state *icodec) { struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; int i, err; wake_lock(&drv->tx_idlelock); /* Remove the vote for SMPS mode*/ err = pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE); if (err != 0) MM_ERR("pmapp_smps_mode_vote error %d\n", err); afe_disable(AFE_HW_PATH_CODEC_TX); /* Disable ADIE */ adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(icodec->adie_path); icodec->adie_path = NULL; /* Disable MI2S TX master block */ /* Disable MI2S TX bit clock */ clk_disable(drv->tx_sclk); clk_disable(drv->tx_mclk); /* Disable mic bias */ for (i = 0; i < icodec->data->pmctl_id_sz; i++) { pmic_hsed_enable(icodec->data->pmctl_id[i], PM_HSED_ENABLE_OFF); } /* Reuse pamp_off for TX platform-specific setup */ if (icodec->data->pamp_off) icodec->data->pamp_off(); icodec->enabled = 0; wake_unlock(&drv->tx_idlelock); return 0; } static int snddev_icodec_set_device_volume_impl( struct msm_snddev_info *dev_info, u32 volume) { struct snddev_icodec_state *icodec; u8 afe_path_id; int rc = 0; icodec = dev_info->private_data; if (icodec->data->capability & SNDDEV_CAP_RX) afe_path_id = AFE_HW_PATH_CODEC_RX; else afe_path_id = AFE_HW_PATH_CODEC_TX; if 
(icodec->data->dev_vol_type & SNDDEV_DEV_VOL_DIGITAL) {
		/* Digital volume is applied inside the ADIE codec path;
		 * REAL_STEREO is mapped to 2 channels for the codec call. */
		rc = adie_codec_set_device_digital_volume(icodec->adie_path,
				icodec->data->channel_mode ==
				REAL_STEREO_CHANNEL_MODE ?
				2 : icodec->data->channel_mode, volume);
		if (rc < 0) {
			MM_ERR("unable to set_device_digital_volume for"
				"%s volume in percentage = %u\n",
				dev_info->name, volume);
			return rc;
		}
	} else if (icodec->data->dev_vol_type & SNDDEV_DEV_VOL_ANALOG) {
		/* Analog volume path, same channel-count mapping. */
		rc = adie_codec_set_device_analog_volume(icodec->adie_path,
				icodec->data->channel_mode ==
				REAL_STEREO_CHANNEL_MODE ?
				2 : icodec->data->channel_mode, volume);
		if (rc < 0) {
			MM_ERR("unable to set_device_analog_volume for"
				"%s volume in percentage = %u\n",
				dev_info->name, volume);
			return rc;
		}
	} else {
		/* Device advertises neither digital nor analog control. */
		MM_ERR("Invalid device volume control\n");
		return -EPERM;
	}
	return rc;
}

/*
 * Close an internal-codec device.
 *
 * Dispatches to the RX or TX teardown helper depending on the device
 * capability, under the matching driver mutex.  Returns -EINVAL for a
 * NULL dev_info, -EPERM if the corresponding path is not active, else
 * the helper's result.  The active flag is only cleared when the helper
 * succeeds.
 */
static int snddev_icodec_close(struct msm_snddev_info *dev_info)
{
	int rc = 0;
	struct snddev_icodec_state *icodec;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
	if (!dev_info) {
		rc = -EINVAL;
		goto error;
	}
	icodec = dev_info->private_data;
	if (icodec->data->capability & SNDDEV_CAP_RX) {
		mutex_lock(&drv->rx_lock);
		if (!drv->rx_active) {
			/* RX path was never opened (or already closed). */
			mutex_unlock(&drv->rx_lock);
			rc = -EPERM;
			goto error;
		}
		rc = snddev_icodec_close_rx(icodec);
		if (!IS_ERR_VALUE(rc))
			drv->rx_active = 0;
		mutex_unlock(&drv->rx_lock);
	} else {
		mutex_lock(&drv->tx_lock);
		if (!drv->tx_active) {
			/* TX path was never opened (or already closed). */
			mutex_unlock(&drv->tx_lock);
			rc = -EPERM;
			goto error;
		}
		rc = snddev_icodec_close_tx(icodec);
		if (!IS_ERR_VALUE(rc))
			drv->tx_active = 0;
		mutex_unlock(&drv->tx_lock);
	}
#if 0//!defined(CONFIG_LGE_MODEL_E739)
/* LGE_CHANGE
 * added for Audio loopback
 * 2010-02-24, bob.cho@lge.com
 */
	/* (compiled out) re-enable a loopback that open() suspended once
	 * both RX and TX paths are idle again */
	if (lb_suspend && drv->rx_active == 0 && drv->tx_active == 0) {
		/* enable loopback mode */
		MM_INFO("snddev_icodec_close : loopback enabled\n");
		set_loopback_mode(lb_suspend);
		lb_suspend = 0;
	}
#endif
error:
	return rc;
}

/*
 * Open an internal-codec device (RX or TX); continues on the
 * following lines.
 */
static int snddev_icodec_open(struct msm_snddev_info *dev_info)
{
	int rc = 0;
	struct snddev_icodec_state *icodec;
	struct
snddev_icodec_drv_state *drv = &snddev_icodec_drv; if (!dev_info) { rc = -EINVAL; goto error; } #if 0//!defined(CONFIG_LGE_MODEL_E739) /* LGE_CHANGE * added for Audio loopback * 2010-02-24, bob.cho@lge.com */ if (lb_suspend == 0 && LB_IS_ENABLE(lb_status)) { MM_INFO("snddev_icodec_open : loopback off\n"); /* disable loopback mode */ lb_suspend = lb_status; set_loopback_mode(lb_status&0xF0); msleep(100); } #endif icodec = dev_info->private_data; if (icodec->data->capability & SNDDEV_CAP_RX) { mutex_lock(&drv->rx_lock); if (drv->rx_active) { mutex_unlock(&drv->rx_lock); rc = -EBUSY; goto error; } #ifdef CONFIG_LGE_DOMESTIC { extern void set_QTRcal_data(void); set_QTRcal_data(); } #endif rc = snddev_icodec_open_rx(icodec); if (!IS_ERR_VALUE(rc)) { drv->rx_active = 1; if ((icodec->data->dev_vol_type & ( SNDDEV_DEV_VOL_DIGITAL | SNDDEV_DEV_VOL_ANALOG))) rc = snddev_icodec_set_device_volume_impl( dev_info, dev_info->dev_volume); if (IS_ERR_VALUE(rc)) { MM_ERR("Failed to set device volume" " impl for rx device\n"); snddev_icodec_close(dev_info); mutex_unlock(&drv->rx_lock); goto error; } } mutex_unlock(&drv->rx_lock); } else { mutex_lock(&drv->tx_lock); if (drv->tx_active) { mutex_unlock(&drv->tx_lock); rc = -EBUSY; goto error; } rc = snddev_icodec_open_tx(icodec); if (!IS_ERR_VALUE(rc)) { drv->tx_active = 1; if ((icodec->data->dev_vol_type & ( SNDDEV_DEV_VOL_DIGITAL | SNDDEV_DEV_VOL_ANALOG))) rc = snddev_icodec_set_device_volume_impl( dev_info, dev_info->dev_volume); if (IS_ERR_VALUE(rc)) { MM_ERR("Failed to set device volume" " impl for tx device\n"); snddev_icodec_close(dev_info); mutex_unlock(&drv->tx_lock); goto error; } } mutex_unlock(&drv->tx_lock); } error: return rc; } static int snddev_icodec_check_freq(u32 req_freq) { int rc = -EINVAL; if ((req_freq != 0) && (req_freq >= 8000) && (req_freq <= 48000)) { if ((req_freq == 8000) || (req_freq == 11025) || (req_freq == 12000) || (req_freq == 16000) || (req_freq == 22050) || (req_freq == 24000) || (req_freq == 32000) 
|| (req_freq == 44100) || (req_freq == 48000)) { rc = 0; } else MM_INFO("Unsupported Frequency:%d\n", req_freq); } return rc; } static int snddev_icodec_set_freq(struct msm_snddev_info *dev_info, u32 rate) { int rc; struct snddev_icodec_state *icodec; if (!dev_info) { rc = -EINVAL; goto error; } icodec = dev_info->private_data; if (adie_codec_freq_supported(icodec->data->profile, rate) != 0) { rc = -EINVAL; goto error; } else { if (snddev_icodec_check_freq(rate) != 0) { rc = -EINVAL; goto error; } else icodec->sample_rate = rate; } if (icodec->enabled) { snddev_icodec_close(dev_info); snddev_icodec_open(dev_info); } return icodec->sample_rate; error: return rc; } static int snddev_icodec_enable_sidetone(struct msm_snddev_info *dev_info, u32 enable) { int rc = 0; struct snddev_icodec_state *icodec; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; if (!dev_info) { MM_ERR("invalid dev_info\n"); rc = -EINVAL; goto error; } icodec = dev_info->private_data; if (icodec->data->capability & SNDDEV_CAP_RX) { mutex_lock(&drv->rx_lock); if (!drv->rx_active || !dev_info->opened) { MM_ERR("dev not active\n"); rc = -EPERM; mutex_unlock(&drv->rx_lock); goto error; } rc = adie_codec_enable_sidetone(icodec->adie_path, enable); mutex_unlock(&drv->rx_lock); } else { rc = -EINVAL; MM_ERR("rx device only\n"); } error: return rc; } int snddev_icodec_set_device_volume(struct msm_snddev_info *dev_info, u32 volume) { struct snddev_icodec_state *icodec; struct mutex *lock; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; int rc = -EPERM; if (!dev_info) { MM_INFO("device not intilized.\n"); return -EINVAL; } icodec = dev_info->private_data; if (!(icodec->data->dev_vol_type & (SNDDEV_DEV_VOL_DIGITAL | SNDDEV_DEV_VOL_ANALOG))) { MM_INFO("device %s does not support device volume " "control.", dev_info->name); return -EPERM; } dev_info->dev_volume = volume; if (icodec->data->capability & SNDDEV_CAP_RX) lock = &drv->rx_lock; else lock = &drv->tx_lock; mutex_lock(lock); rc = 
snddev_icodec_set_device_volume_impl(dev_info, dev_info->dev_volume); mutex_unlock(lock); return rc; } static int snddev_icodec_probe(struct platform_device *pdev) { int rc = 0, i; struct snddev_icodec_data *pdata; struct msm_snddev_info *dev_info; struct snddev_icodec_state *icodec; if (!pdev || !pdev->dev.platform_data) { printk(KERN_ALERT "Invalid caller \n"); rc = -1; goto error; } pdata = pdev->dev.platform_data; if ((pdata->capability & SNDDEV_CAP_RX) && (pdata->capability & SNDDEV_CAP_TX)) { MM_ERR("invalid device data either RX or TX\n"); goto error; } icodec = kzalloc(sizeof(struct snddev_icodec_state), GFP_KERNEL); if (!icodec) { rc = -ENOMEM; goto error; } dev_info = kmalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); if (!dev_info) { kfree(icodec); rc = -ENOMEM; goto error; } dev_info->name = pdata->name; dev_info->copp_id = pdata->copp_id; dev_info->acdb_id = pdata->acdb_id; dev_info->private_data = (void *) icodec; dev_info->dev_ops.open = snddev_icodec_open; dev_info->dev_ops.close = snddev_icodec_close; dev_info->dev_ops.set_freq = snddev_icodec_set_freq; dev_info->dev_ops.set_device_volume = snddev_icodec_set_device_volume; dev_info->capability = pdata->capability; dev_info->opened = 0; msm_snddev_register(dev_info); icodec->data = pdata; icodec->sample_rate = pdata->default_sample_rate; dev_info->sample_rate = pdata->default_sample_rate; if (pdata->capability & SNDDEV_CAP_RX) { for (i = 0; i < VOC_RX_VOL_ARRAY_NUM; i++) { dev_info->max_voc_rx_vol[i] = pdata->max_voice_rx_vol[i]; dev_info->min_voc_rx_vol[i] = pdata->min_voice_rx_vol[i]; } /*sidetone is enabled only for the device which property set for side tone*/ if (pdata->property & SIDE_TONE_MASK) dev_info->dev_ops.enable_sidetone = snddev_icodec_enable_sidetone; else dev_info->dev_ops.enable_sidetone = NULL; } else { dev_info->dev_ops.enable_sidetone = NULL; } #if 0//!defined(CONFIG_LGE_MODEL_E739) /* LGE_CHANGE * added for Audio loopback * 2010-02-24, bob.cho@lge.com */ /* Register sysfs 
hooks */ rc = device_create_file(&pdev->dev, &dev_attr_loopback); if (rc) { printk(KERN_ALERT "sysfs register failed for loopback attr\n"); goto error; } #endif /*CONFIG_LGE_AUDIO_LOOPBACK*/ error: return rc; } static int snddev_icodec_remove(struct platform_device *pdev) { return 0; } static struct platform_driver snddev_icodec_driver = { .probe = snddev_icodec_probe, .remove = snddev_icodec_remove, .driver = { .name = "snddev_icodec" } }; #ifdef CONFIG_DEBUG_FS static struct dentry *debugfs_sdev_dent; static struct dentry *debugfs_afelb; static struct dentry *debugfs_adielb; static struct adie_codec_path *debugfs_rx_adie; static struct adie_codec_path *debugfs_tx_adie; static int snddev_icodec_debug_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; MM_INFO("snddev_icodec: debug intf %s\n", (char *) file->private_data); return 0; } static void debugfs_adie_loopback(u32 loop) { struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; if (loop) { /* enable MI2S RX master block */ /* enable MI2S RX bit clock */ clk_set_rate(drv->rx_mclk, SNDDEV_ICODEC_CLK_RATE(8000)); clk_enable(drv->rx_mclk); clk_enable(drv->rx_sclk); MM_INFO("configure ADIE RX path\n"); /* Configure ADIE */ adie_codec_open(&debug_rx_profile, &debugfs_rx_adie); adie_codec_setpath(debugfs_rx_adie, 8000, 256); adie_codec_proceed_stage(debugfs_rx_adie, ADIE_CODEC_DIGITAL_ANALOG_READY); MM_INFO("Enable Handset Mic bias\n"); pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_PWM_TCXO); /* enable MI2S TX master block */ /* enable MI2S TX bit clock */ clk_set_rate(drv->tx_mclk, SNDDEV_ICODEC_CLK_RATE(8000)); clk_enable(drv->tx_mclk); clk_enable(drv->tx_sclk); MM_INFO("configure ADIE TX path\n"); /* Configure ADIE */ adie_codec_open(&debug_tx_lb_profile, &debugfs_tx_adie); adie_codec_setpath(debugfs_tx_adie, 8000, 256); adie_codec_proceed_stage(debugfs_tx_adie, ADIE_CODEC_DIGITAL_ANALOG_READY); } else { /* Disable ADIE */ adie_codec_proceed_stage(debugfs_rx_adie, 
ADIE_CODEC_DIGITAL_OFF); adie_codec_close(debugfs_rx_adie); adie_codec_proceed_stage(debugfs_tx_adie, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(debugfs_tx_adie); pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_OFF); /* Disable MI2S RX master block */ /* Disable MI2S RX bit clock */ clk_disable(drv->rx_sclk); clk_disable(drv->rx_mclk); /* Disable MI2S TX master block */ /* Disable MI2S TX bit clock */ clk_disable(drv->tx_sclk); clk_disable(drv->tx_mclk); } } static void debugfs_afe_loopback(u32 loop) { int trc; struct msm_afe_config afe_config; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; struct lpa_codec_config lpa_config; if (loop) { /* Vote for SMPS mode*/ pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM); /* enable MI2S RX master block */ /* enable MI2S RX bit clock */ trc = clk_set_rate(drv->rx_mclk, SNDDEV_ICODEC_CLK_RATE(8000)); if (IS_ERR_VALUE(trc)) MM_ERR("failed to set clk rate\n"); clk_enable(drv->rx_mclk); clk_enable(drv->rx_sclk); clk_enable(drv->lpa_p_clk); clk_enable(drv->lpa_codec_clk); clk_enable(drv->lpa_core_clk); /* Enable LPA sub system */ drv->lpa = lpa_get(); if (!drv->lpa) MM_ERR("failed to enable lpa\n"); lpa_config.sample_rate = 8000; lpa_config.sample_width = 16; lpa_config.output_interface = LPA_OUTPUT_INTF_WB_CODEC; lpa_config.num_channels = 1; lpa_cmd_codec_config(drv->lpa, &lpa_config); /* Set audio interconnect reg to LPA */ audio_interct_codec(AUDIO_INTERCT_LPA); mi2s_set_codec_output_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT); MM_INFO("configure ADIE RX path\n"); /* Configure ADIE */ adie_codec_open(&debug_rx_profile, &debugfs_rx_adie); adie_codec_setpath(debugfs_rx_adie, 8000, 256); lpa_cmd_enable_codec(drv->lpa, 1); /* Start AFE for RX */ afe_config.sample_rate = 0x8; afe_config.channel_mode = 1; afe_config.volume = AFE_VOLUME_UNITY; MM_INFO("enable afe\n"); trc = afe_enable(AFE_HW_PATH_CODEC_RX, &afe_config); if (IS_ERR_VALUE(trc)) MM_ERR("fail to enable afe RX\n"); 
adie_codec_proceed_stage(debugfs_rx_adie, ADIE_CODEC_DIGITAL_READY); adie_codec_proceed_stage(debugfs_rx_adie, ADIE_CODEC_DIGITAL_ANALOG_READY); /* Vote for PWM mode*/ pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM); MM_INFO("Enable Handset Mic bias\n"); pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_PWM_TCXO); /* enable MI2S TX master block */ /* enable MI2S TX bit clock */ clk_set_rate(drv->tx_mclk, SNDDEV_ICODEC_CLK_RATE(8000)); clk_enable(drv->tx_mclk); clk_enable(drv->tx_sclk); /* Set MI2S */ mi2s_set_codec_input_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT); MM_INFO("configure ADIE TX path\n"); /* Configure ADIE */ adie_codec_open(&debug_tx_profile, &debugfs_tx_adie); adie_codec_setpath(debugfs_tx_adie, 8000, 256); adie_codec_proceed_stage(debugfs_tx_adie, ADIE_CODEC_DIGITAL_READY); adie_codec_proceed_stage(debugfs_tx_adie, ADIE_CODEC_DIGITAL_ANALOG_READY); /* Start AFE for TX */ afe_config.sample_rate = 0x8; afe_config.channel_mode = 1; afe_config.volume = AFE_VOLUME_UNITY; trc = afe_enable(AFE_HW_PATH_CODEC_TX, &afe_config); if (IS_ERR_VALUE(trc)) MM_ERR("failed to enable AFE TX\n"); /* Set the volume level to non unity, to avoid loopback effect */ afe_device_volume_ctrl(AFE_HW_PATH_CODEC_RX, 0x0500); /* enable afe loopback */ afe_loopback(1); MM_INFO("AFE loopback enabled\n"); } else { /* disable afe loopback */ afe_loopback(0); /* Remove the vote for SMPS mode*/ pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE); /* Disable ADIE */ adie_codec_proceed_stage(debugfs_rx_adie, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(debugfs_rx_adie); /* Disable AFE for RX */ afe_disable(AFE_HW_PATH_CODEC_RX); /* Disable LPA Sub system */ lpa_cmd_enable_codec(drv->lpa, 0); lpa_put(drv->lpa); /* Disable LPA clocks */ clk_disable(drv->lpa_p_clk); clk_disable(drv->lpa_codec_clk); clk_disable(drv->lpa_core_clk); /* Disable MI2S RX master block */ /* Disable MI2S RX bit clock */ clk_disable(drv->rx_sclk); 
clk_disable(drv->rx_mclk); pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE); /* Disable AFE for TX */ afe_disable(AFE_HW_PATH_CODEC_TX); /* Disable ADIE */ adie_codec_proceed_stage(debugfs_tx_adie, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(debugfs_tx_adie); /* Disable MI2S TX master block */ /* Disable MI2S TX bit clock */ clk_disable(drv->tx_sclk); clk_disable(drv->tx_mclk); pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_OFF); MM_INFO("AFE loopback disabled\n"); } } static ssize_t snddev_icodec_debug_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char *lb_str = filp->private_data; char cmd; if (get_user(cmd, ubuf)) return -EFAULT; MM_INFO("%s %c\n", lb_str, cmd); if (!strcmp(lb_str, "adie_loopback")) { switch (cmd) { case '1': debugfs_adie_loopback(1); break; case '0': debugfs_adie_loopback(0); break; } } else if (!strcmp(lb_str, "afe_loopback")) { switch (cmd) { case '1': debugfs_afe_loopback(1); break; case '0': debugfs_afe_loopback(0); break; } } return cnt; } static const struct file_operations snddev_icodec_debug_fops = { .open = snddev_icodec_debug_open, .write = snddev_icodec_debug_write }; #endif static int __init snddev_icodec_init(void) { s32 rc; struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv; rc = platform_driver_register(&snddev_icodec_driver); if (IS_ERR_VALUE(rc)) goto error_platform_driver; icodec_drv->rx_mclk = clk_get(NULL, "mi2s_codec_rx_m_clk"); if (IS_ERR(icodec_drv->rx_mclk)) goto error_rx_mclk; icodec_drv->rx_sclk = clk_get(NULL, "mi2s_codec_rx_s_clk"); if (IS_ERR(icodec_drv->rx_sclk)) goto error_rx_sclk; icodec_drv->tx_mclk = clk_get(NULL, "mi2s_codec_tx_m_clk"); if (IS_ERR(icodec_drv->tx_mclk)) goto error_tx_mclk; icodec_drv->tx_sclk = clk_get(NULL, "mi2s_codec_tx_s_clk"); if (IS_ERR(icodec_drv->tx_sclk)) goto error_tx_sclk; icodec_drv->lpa_codec_clk = clk_get(NULL, "lpa_codec_clk"); if (IS_ERR(icodec_drv->lpa_codec_clk)) goto error_lpa_codec_clk; 
icodec_drv->lpa_core_clk = clk_get(NULL, "lpa_core_clk"); if (IS_ERR(icodec_drv->lpa_core_clk)) goto error_lpa_core_clk; icodec_drv->lpa_p_clk = clk_get(NULL, "lpa_pclk"); if (IS_ERR(icodec_drv->lpa_p_clk)) goto error_lpa_p_clk; #ifdef CONFIG_DEBUG_FS debugfs_sdev_dent = debugfs_create_dir("snddev_icodec", 0); if (debugfs_sdev_dent) { debugfs_afelb = debugfs_create_file("afe_loopback", S_IFREG | S_IWUGO, debugfs_sdev_dent, (void *) "afe_loopback", &snddev_icodec_debug_fops); debugfs_adielb = debugfs_create_file("adie_loopback", S_IFREG | S_IWUGO, debugfs_sdev_dent, (void *) "adie_loopback", &snddev_icodec_debug_fops); } #endif mutex_init(&icodec_drv->rx_lock); mutex_init(&icodec_drv->tx_lock); icodec_drv->rx_active = 0; icodec_drv->tx_active = 0; icodec_drv->lpa = NULL; wake_lock_init(&icodec_drv->tx_idlelock, WAKE_LOCK_IDLE, "snddev_tx_idle"); wake_lock_init(&icodec_drv->rx_idlelock, WAKE_LOCK_IDLE, "snddev_rx_idle"); return 0; error_lpa_p_clk: clk_put(icodec_drv->lpa_core_clk); error_lpa_core_clk: clk_put(icodec_drv->lpa_codec_clk); error_lpa_codec_clk: clk_put(icodec_drv->tx_sclk); error_tx_sclk: clk_put(icodec_drv->tx_mclk); error_tx_mclk: clk_put(icodec_drv->rx_sclk); error_rx_sclk: clk_put(icodec_drv->rx_mclk); error_rx_mclk: platform_driver_unregister(&snddev_icodec_driver); error_platform_driver: MM_ERR("encounter error\n"); return -ENODEV; } static void __exit snddev_icodec_exit(void) { struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv; #ifdef CONFIG_DEBUG_FS if (debugfs_afelb) debugfs_remove(debugfs_afelb); if (debugfs_adielb) debugfs_remove(debugfs_adielb); if (debugfs_sdev_dent) debugfs_remove(debugfs_sdev_dent); #endif platform_driver_unregister(&snddev_icodec_driver); clk_put(icodec_drv->rx_sclk); clk_put(icodec_drv->rx_mclk); clk_put(icodec_drv->tx_sclk); clk_put(icodec_drv->tx_mclk); return; } module_init(snddev_icodec_init); module_exit(snddev_icodec_exit); MODULE_DESCRIPTION("ICodec Sound Device driver"); MODULE_VERSION("1.0"); 
MODULE_LICENSE("GPL v2");
andr00ib/stock-v10f-kernel-e730
arch/arm/mach-msm/qdsp5v2/snddev_icodec.c
C
gpl-2.0
47,326
/* Copyright (c) 2008,2009 Frank Lahm <franklahm@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* HAVE_CONFIG_H */ #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef HAVE_LDAP #define LDAP_DEPRECATED 1 #include <ldap.h> #endif #include <atalk/ldapconfig.h> #include <atalk/uuid.h> #include <atalk/logger.h> #define STRNCMP(a, R, b, l) (strncmp(a,b,l) R 0) static void usage() { printf("Usage: afpldaptest -u <user> | -g <group> | -i <UUID>\n"); } static void parse_ldapconf() { static int inited = 0; if (! inited) { #ifdef HAVE_LDAP /* Parse afp_ldap.conf */ printf("Start parsing afp_ldap.conf\n"); acl_ldap_readconfig(_PATH_ACL_LDAPCONF); printf("Finished parsing afp_ldap.conf\n"); if (ldap_config_valid) { if (ldap_auth_method == LDAP_AUTH_NONE) printf("afp_ldap.conf is ok. Using anonymous bind.\n"); else if (ldap_auth_method == LDAP_AUTH_SIMPLE) printf("afp_ldap.conf is ok. Using simple bind.\n"); else { ldap_config_valid = 0; printf("afp_ldap.conf wants SASL which is not yet supported.\n"); exit(EXIT_FAILURE); } } else { printf("afp_ldap.conf is not ok, not using LDAP. Only local UUID testing available.\n"); } #else printf("Built without LDAP support, only local UUID testing available.\n"); #endif inited = 1; } } int main( int argc, char **argv) { int ret, c; int verbose = 0; atalk_uuid_t uuid; int logsetup = 0; uuidtype_t type; char *name = NULL; while ((c = getopt(argc, argv, ":vu:g:i:")) != -1) { switch(c) { case 'v': if (! 
verbose) { verbose = 1; setuplog("default log_maxdebug /dev/tty"); logsetup = 1; } break; case 'u': if (! logsetup) setuplog("default log_info /dev/tty"); parse_ldapconf(); printf("Searching user: %s\n", optarg); ret = getuuidfromname( optarg, UUID_USER, uuid); if (ret == 0) { printf("User: %s ==> UUID: %s\n", optarg, uuid_bin2string(uuid)); } else { printf("User %s not found.\n", optarg); } break; case 'g': if (! logsetup) setuplog("default log_info /dev/tty"); parse_ldapconf(); printf("Searching group: %s\n", optarg); ret = getuuidfromname( optarg, UUID_GROUP, uuid); if (ret == 0) { printf("Group: %s ==> UUID: %s\n", optarg, uuid_bin2string(uuid)); } else { printf("Group %s not found.\n", optarg); } break; case 'i': if (! logsetup) setuplog("default log_info /dev/tty"); parse_ldapconf(); printf("Searching uuid: %s\n", optarg); uuid_string2bin(optarg, uuid); ret = getnamefromuuid( uuid, &name, &type); if (ret == 0) { switch (type) { case UUID_USER: printf("UUID: %s ==> User: %s\n", optarg, name); break; case UUID_GROUP: printf("UUID: %s ==> Group: %s\n", optarg, name); break; default: printf("???: %s\n", optarg); break; } free(name); } else { printf("UUID: %s not found.\n", optarg); } break; case ':': case '?': case 'h': usage(); exit(EXIT_FAILURE); } } return 0; }
jrmithdobbs/netatalk-2-2-0-p6
bin/misc/uuidtest.c
C
gpl-2.0
4,405
#!/usr/bin/env python2 import dns import dns.edns import dns.flags import dns.message import dns.query class CookiesOption(dns.edns.Option): """Implementation of draft-ietf-dnsop-cookies-09. """ def __init__(self, client, server): super(CookiesOption, self).__init__(10) if len(client) != 8: raise Exception('invalid client cookie length') if server is not None and len(server) != 0 and (len(server) < 8 or len(server) > 32): raise Exception('invalid server cookie length') self.client = client self.server = server def to_wire(self, file): """Create EDNS packet as defined in draft-ietf-dnsop-cookies-09.""" file.write(self.client) if self.server and len(self.server) > 0: file.write(self.server) def from_wire(cls, otype, wire, current, olen): """Read EDNS packet as defined in draft-ietf-dnsop-cookies-09. Returns: An instance of CookiesOption based on the EDNS packet """ data = wire[current:current + olen] if len(data) != 8 and (len(data) < 16 or len(data) > 40): raise Exception('Invalid EDNS Cookies option') client = data[:8] if len(data) > 8: server = data[8:] else: server = None return cls(client, server) from_wire = classmethod(from_wire) def __repr__(self): return '%s(%s, %s)' % ( self.__class__.__name__, self.client, self.server ) def __eq__(self, other): if not isinstance(other, CookiesOption): return False if self.client != other.client: return False if self.server != other.server: return False return True def __ne__(self, other): return not self.__eq__(other) dns.edns._type_to_class[0x000A] = CookiesOption
ahupowerdns/pdns
regression-tests.dnsdist/cookiesoption.py
Python
gpl-2.0
1,937
#! /bin/sh ############################################################################### # # # Copyright (c) 2010 FUJITSU LIMITED # # # # This program is free software; you can redistribute it and/or modify it # # under the terms of the GNU General Public License as published by the Free # # Software Foundation; either version 2 of the License, or (at your option) # # any later version. # # # # Author: Li Zefan <lizf@cn.fujitsu.com> # # # ############################################################################### LOOP=200 should_skip=0 nr_cpus=`tst_ncpus` if [ ! -e "$TRACING_PATH"/function_profile_enabled ]; then should_skip=1 fi # For kernels older than 2.6.36, this testcase can result in # divide-by-zero kernel bug if tst_kvcmp -lt "2.6.36"; then should_skip=1 fi while true; do if [ $should_skip -eq 1 ]; then sleep 2 continue fi cpu=$(tst_random 0 $((nr_cpus - 1))) i=0; while [ $i -lt $LOOP ]; do cat "$TRACING_PATH"/trace_stat/function${cpu} > /dev/null 2>&1 i=$((i + 1)) done sleep 1 done
linux-test-project/ltp
testcases/kernel/tracing/ftrace_test/ftrace_stress/ftrace_trace_stat.sh
Shell
gpl-2.0
1,489
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <HTML ><HEAD ><TITLE >External parsers</TITLE ><META NAME="GENERATOR" CONTENT="Modular DocBook HTML Stylesheet Version 1.79"><LINK REL="HOME" TITLE="DataparkSearch Engine 4.54" HREF="index.en.html"><LINK REL="UP" TITLE="Indexing" HREF="dpsearch-indexing.en.html"><LINK REL="PREVIOUS" TITLE="Servers Table" HREF="dpsearch-srvtable.en.html"><LINK REL="NEXT" TITLE="Other commands are used in indexer.conf" HREF="dpsearch-indexcmd.en.html"><LINK REL="STYLESHEET" TYPE="text/css" HREF="datapark.css"><META NAME="Description" CONTENT="DataparkSearch - Full Featured Web site Open Source Search Engine Software over the Internet and Intranet Web Sites Based on SQL Database. It is a Free search software covered by GNU license."><META NAME="Keywords" CONTENT="shareware, freeware, download, internet, unix, utilities, search engine, text retrieval, knowledge retrieval, text search, information retrieval, database search, mining, intranet, webserver, index, spider, filesearch, meta, free, open source, full-text, udmsearch, website, find, opensource, search, searching, software, udmsearch, engine, indexing, system, web, ftp, http, cgi, php, SQL, MySQL, database, php3, FreeBSD, Linux, Unix, DataparkSearch, MacOS X, Mac OS X, Windows, 2000, NT, 95, 98, GNU, GPL, url, grabbing"></HEAD ><BODY CLASS="SECT1" BGCOLOR="#FFFFFF" TEXT="#000000" LINK="#0000C4" VLINK="#1200B2" ALINK="#C40000" ><!--#include virtual="body-before.html"--><DIV CLASS="NAVHEADER" ><TABLE SUMMARY="Header navigation table" WIDTH="100%" BORDER="0" CELLPADDING="0" CELLSPACING="0" ><TR ><TH COLSPAN="3" ALIGN="center" >DataparkSearch Engine 4.54: Reference manual</TH ></TR ><TR ><TD WIDTH="10%" ALIGN="left" VALIGN="bottom" ><A HREF="dpsearch-srvtable.en.html" ACCESSKEY="P" >Prev</A ></TD ><TD WIDTH="80%" ALIGN="center" VALIGN="bottom" >Chapter 3. 
Indexing</TD ><TD WIDTH="10%" ALIGN="right" VALIGN="bottom" ><A HREF="dpsearch-indexcmd.en.html" ACCESSKEY="N" >Next</A ></TD ></TR ></TABLE ><HR ALIGN="LEFT" WIDTH="100%"></DIV ><DIV CLASS="SECT1" ><H1 CLASS="SECT1" ><A NAME="PARS" >3.9. External parsers</A ></H1 ><A NAME="AEN1417" ></A ><P ><SPAN CLASS="APPLICATION" >DataparkSearch</SPAN > indexer can use external parsers to index various file types (MIME types).</P ><P >Parser is an executable program which converts one of the mime types to <TT CLASS="LITERAL" >text/plain</TT > or <TT CLASS="LITERAL" >text/html</TT >. For example, if you have postscript files, you can use ps2ascii parser (filter), which reads postscript file from stdin and produces ascii to stdout.</P ><DIV CLASS="SECT2" ><H2 CLASS="SECT2" ><A NAME="PARS-SUP" >3.9.1. Supported parser types</A ></H2 ><P >Indexer supports four types of parsers that can:</P ><P ></P ><UL ><LI ><P >read data from stdin and send result to stdout</P ></LI ><LI ><P >read data from file and send result to stdout</P ></LI ><LI ><P >read data from file and send result to file</P ></LI ><LI ><P >read data from stdin and send result to file</P ></LI ></UL ></DIV ><DIV CLASS="SECT2" ><H2 CLASS="SECT2" ><A NAME="PARS-SETUP" >3.9.2. Setting up parsers</A ></H2 ><P ></P ><OL TYPE="1" ><LI ><P >Configure mime types</P ><P >Configure your web server to send appropriate "Content-Type" header. For apache, have a look at <TT CLASS="FILENAME" >mime.types</TT > file, most mime types are already defined there.</P ><P ><A NAME="AEN1444" ></A > If you want to index local files or via <TT CLASS="LITERAL" >ftp</TT > use "AddType" command in <TT CLASS="FILENAME" >indexer.conf</TT > to associate file name extensions with their mime types. For example: <PRE CLASS="PROGRAMLISTING" >AddType text/html *.html</PRE > </P ></LI ><LI ><P ><A NAME="AEN1452" ></A > Add parsers</P ><P >Add lines with parsers definitions. 
Lines have the following format with three arguments: <PRE CLASS="PROGRAMLISTING" >Mime &lt;from_mime&gt; &lt;to_mime&gt; [&lt;command line&gt;]</PRE > </P ><P >For example, the following line defines parser for man pages: <PRE CLASS="PROGRAMLISTING" ># Use deroff for parsing man pages ( *.man ) Mime application/x-troff-man text/plain deroff</PRE > </P ><P >This parser will take data from stdin and output result to stdout.</P ><P >Many parsers can not operate on stdin and require a file to read from. In this case indexer creates a temporary file in /tmp and will remove it when parser exits. Use $1 macro in parser command line to substitute file name. For example, Mime command for "catdoc" MS Word to ASCII converters may look like this: <PRE CLASS="PROGRAMLISTING" >Mime application/msword text/plain "/usr/bin/catdoc -a $1"</PRE > </P ><P >If your parser writes result into output file, use $2 macro. indexer will replace $2 by temporary file name, start parser, read result from this temporary file then remove it. For example: <PRE CLASS="PROGRAMLISTING" >Mime application/msword text/plain "/usr/bin/catdoc -a $1 &#62;$2"</PRE > </P ><P >The parser above will read data from first temporary file and write result to second one. Both temporary files will be removed when parser exists. Note that result of usage of this parser will be absolutely the same with the previous one, but they use different execution mode: file-&gt;stdout and file-&gt;file correspondingly.</P ><P >If the &lt;command line&gt; parameter is omitted this means both MIME type are synonyms. E.g. some sites can supply incorrect type for MP3 files as <KBD CLASS="USERINPUT" >application/mp3</KBD >. You can alter it into correct one <KBD CLASS="USERINPUT" >audio/mpeg</KBD > and therefore process them: <PRE CLASS="PROGRAMLISTING" >Mime application/mp3 audio/mpeg</PRE ></P ></LI ></OL ></DIV ><DIV CLASS="SECT2" ><H2 CLASS="SECT2" ><A NAME="PARSERTIMEOUT" >3.9.3. 
Avoid indexer hang on parser execution</A ></H2 ><P ><A NAME="AEN1472" ></A > To avoid a indexer hang on parser execution, you may specify the amount of time in seconds for parser execution in your <TT CLASS="FILENAME" >indexer.conf</TT > by <TT CLASS="LITERAL" >ParserTimeOut</TT > command. For example: <PRE CLASS="PROGRAMLISTING" >ParserTimeOut 600</PRE ></P ><P >Default value is 300 seconds, i.e. 5 minutes.</P ></DIV ><DIV CLASS="SECT2" ><H2 CLASS="SECT2" ><A NAME="PARS-PIPES" >3.9.4. Pipes in parser's command line</A ></H2 ><P >You can use pipes in parser's command line. For example, these lines will be useful to index gzipped man pages from local disk: <PRE CLASS="PROGRAMLISTING" >AddType application/x-gzipped-man *.1.gz *.2.gz *.3.gz *.4.gz Mime application/x-gzipped-man text/plain "zcat | deroff"</PRE > </P ></DIV ><DIV CLASS="SECT2" ><H2 CLASS="SECT2" ><A NAME="PARS-CHAR" >3.9.5. Charsets and parsers</A ></H2 ><A NAME="AEN1485" ></A ><P >Some parsers can produce output in other charset than given in LocalCharset command. Specify charset to make indexer convert parser's output to proper one. For example, if your catdoc is configured to produce output in windows-1251 charset but LocalCharset is koi8-r, use this command for parsing MS Word documents: <PRE CLASS="PROGRAMLISTING" >Mime application/msword "text/plain; charset=windows-1251" "catdoc -a $1"</PRE > </P ></DIV ><DIV CLASS="SECT2" ><H2 CLASS="SECT2" ><A NAME="PARS-UDMURL" >3.9.6. DPS_URL environment variable</A ></H2 ><A NAME="AEN1492" ></A ><P >When executing a parser <B CLASS="COMMAND" >indexer</B > creates DPS_URL environment variable with an URL being processed as a value. You can use this variable in parser scripts.</P ></DIV ><DIV CLASS="SECT2" ><H2 CLASS="SECT2" ><A NAME="PARS-LINKS" >3.9.7. 
Some third-party parsers</A ></H2 ><A NAME="AEN1498" ></A ><P ></P ><UL ><LI ><P >RPM parser by Mario Lang <CODE CLASS="EMAIL" >&#60;<A HREF="mailto:lang@zid.tu-graz.ac.at" >lang@zid.tu-graz.ac.at</A >&#62;</CODE ></P ><P > /usr/local/bin/rpminfo: <PRE CLASS="PROGRAMLISTING" >#!/bin/bash /usr/bin/rpm -q --queryformat="&lt;html&gt;&lt;head&gt;&lt;title&gt;RPM: %{NAME} %{VERSION}-%{RELEASE} (%{GROUP})&lt;/title&gt;&lt;meta name=\"description\" content=\"%{SUMMARY}\"&gt;&lt;/head&gt;&lt;body&gt; %{DESCRIPTION}\n&lt;/body&gt;&lt;/html&gt;" -p $1</PRE ></P ><P >indexer.conf: <PRE CLASS="PROGRAMLISTING" >Mime application/x-rpm text/html "/usr/local/bin/rpminfo $1"</PRE ></P ><P >It renders to such nice RPM information: <PRE CLASS="PROGRAMLISTING" >3. RPM: mysql 3.20.32a-3 (Applications/Databases) [4] Mysql is a SQL (Structured Query Language) database server. Mysql was written by Michael (monty) Widenius. See the CREDITS file in the distribution for more credits for mysql and related things.... 
(application/x-rpm) 2088855 bytes</PRE ></P ></LI ><LI ><P >catdoc MS Word to text converter</P ><P > <A HREF="http://freshmeat.net/redir/catdoc/1055/url_homepage/" TARGET="_top" >Home page</A >, also listed on <A HREF="http://freshmeat.net/" TARGET="_top" >Freshmeat</A >.</P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime application/msword text/plain "catdoc $1"</PRE ></P ></LI ><LI ><P >xls2csv MS Excel to text converter</P ><P >It is supplied with catdoc.</P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime application/vnd.ms-excel text/plain "xls2csv $1"</PRE ></P ></LI ><LI ><P >pdftotext Adobe PDF converter</P ><P >Supplied with xpdf project.</P ><P > <A HREF="http://freshmeat.net/redir/xpdf/12080/url_homepage/" TARGET="_top" >Homepage</A >, also listed on <A HREF="http://freshmeat.net/" TARGET="_top" >Freshmeat</A >.</P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime application/pdf text/plain "pdftotext $1 -"</PRE ></P ></LI ><LI ><P >unrtf RTF to html converter</P ><P > <A HREF="ftp://ftp.gnu.org/pub/gnu/unrtf/" TARGET="_top" >Homepage</A ></P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime text/rtf* text/html "/usr/local/dpsearch/sbin/unrtf --html $1" Mime application/rtf text/html "/usr/local/dpsearch/sbin/unrtf --html $1"</PRE ></P ></LI ><LI ><P >xlhtml XLS to html converter</P ><P > <A HREF="http://chicago.sourceforge.net/xlhtml/" TARGET="_top" >Homepage</A ></P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime application/vnd.ms-excel text/html "/usr/local/dpsearch/sbin/xlhtml $1"</PRE ></P ></LI ><LI ><P >ppthtml PowerPoint (PPT) to html converter. 
Part of <SPAN CLASS="APPLICATION" >xlhtml 0.5</SPAN >.</P ><P > <A HREF="http://chicago.sourceforge.net/xlhtml/" TARGET="_top" >Homepage</A ></P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime application/vnd.ms-powerpoint text/html "/usr/local/dpsearch/sbin/ppthtml $1"</PRE ></P ></LI ><LI ><P >Using <A HREF="http://wvWare.sourceforge.net/" TARGET="_top" >vwHtml</A > (DOC to html).</P ><P ><TT CLASS="FILENAME" >/usr/local/dpsearch/sbin/0vwHtml.pl</TT >: <PRE CLASS="PROGRAMLISTING" >#!/usr/bin/perl -w $p = $ARGV[1]; $f = $ARGV[1]; $p =~ s/(.*)\/([^\/]*)/$1\//; $f =~ s/(.*)\/([^\/]*)/$2/; system("/usr/local/bin/wvHtml --targetdir=$p $ARGV[0] $f");</PRE ></P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime application/msword text/html "/usr/local/dpsearch/sbin/0wvHtml.pl $1 $2" Mime application/vnd.ms-word text/html "/usr/local/dpsearch/sbin/0wvHtml.pl $1 $2"</PRE ></P ></LI ><LI ><P >swf2html from <A HREF="http://www.macromedia.com/software/flash/download/search_engine/" TARGET="_top" >Flash Search Engine SDK</A ></P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime application/x-shockwave-flash text/html "/usr/local/dpsearch/sbin/swf2html $1"</PRE ></P ></LI ><LI ><P >djvutxt from <A HREF="http://djvu.sourceforge.net/" TARGET="_top" >djvuLibre</A ></P ><P ><TT CLASS="FILENAME" >indexer.conf</TT >: <PRE CLASS="PROGRAMLISTING" >&#13;Mime image/djvu text/plain "/usr/local/bin/djvutxt $1 $2" Mime image/x.djvu text/plain "/usr/local/bin/djvutxt $1 $2" Mime image/x-djvu text/plain "/usr/local/bin/djvutxt $1 $2" Mime image/vnd.djvu text/plain "/usr/local/bin/djvutxt $1 $2"</PRE ></P ></LI ></UL ></DIV ><DIV CLASS="SECT2" ><H2 CLASS="SECT2" ><A NAME="LIBEXTRACTOR" >3.9.8. 
libextractor library</A ></H2 ><A NAME="AEN1579" ></A ><P ><SPAN CLASS="APPLICATION" >DataparkSearch</SPAN > can be build with <A HREF="http://gnunet.org/libextractor/" TARGET="_top" >libextractor library</A >. Using this library, <SPAN CLASS="APPLICATION" >DataparkSearch</SPAN > can index keywords from files of the following formats: PDF, PS, OLE2 (DOC, XLS, PPT), OpenOffice (sxw), StarOffice (sdw), DVI, MAN, FLAC, MP3 (ID3v1 and ID3v2), NSF(E) (NES music), SID (C64 music), OGG, WAV, EXIV2, JPEG, GIF, PNG, TIFF, DEB, RPM, TAR(.GZ), ZIP, ELF, S3M (Scream Tracker 3), XM (eXtended Module), IT (Impulse Tracker), FLV, REAL, RIFF (AVI), MPEG, QT and ASF.</P ><P >To build <SPAN CLASS="APPLICATION" >DataparkSearch</SPAN > with <SPAN CLASS="APPLICATION" >libextractor library</SPAN >, install the library, and then configure and compile <SPAN CLASS="APPLICATION" >DataparkSearch</SPAN >.</P ><P >Bellow the relationship between keyword types of <SPAN CLASS="APPLICATION" >libextractor</SPAN > version prior to 0.6 and <SPAN CLASS="APPLICATION" >DataparkSearch</SPAN >'s section names is given:</P ><DIV CLASS="TABLE" ><A NAME="AEN1592" ></A ><P ><B >Table 3-1. 
Relationship between libextractor's keyword types and DataparkSearch section names</B ></P ><TABLE BORDER="1" CLASS="CALSTABLE" ><COL><COL><THEAD ><TR ><TH >Keyword Type</TH ><TH >Section name</TH ></TR ></THEAD ><TBODY ><TR ><TD >EXTRACTOR_FILENAME</TD ><TD > Filename</TD ></TR ><TR ><TD >EXTRACTOR_MIMETYPE</TD ><TD > Mimetype</TD ></TR ><TR ><TD >EXTRACTOR_TITLE</TD ><TD > Title</TD ></TR ><TR ><TD >EXTRACTOR_AUTHOR</TD ><TD > Author</TD ></TR ><TR ><TD >EXTRACTOR_ARTIST </TD ><TD >Artist</TD ></TR ><TR ><TD >EXTRACTOR_DESCRIPTION </TD ><TD >Description</TD ></TR ><TR ><TD >EXTRACTOR_COMMENT </TD ><TD >Comment</TD ></TR ><TR ><TD >EXTRACTOR_DATE </TD ><TD >Date</TD ></TR ><TR ><TD >EXTRACTOR_PUBLISHER </TD ><TD >Publisher</TD ></TR ><TR ><TD >EXTRACTOR_LANGUAGE </TD ><TD >Content-Language</TD ></TR ><TR ><TD >EXTRACTOR_ALBUM </TD ><TD >Album</TD ></TR ><TR ><TD >EXTRACTOR_GENRE </TD ><TD >Genre</TD ></TR ><TR ><TD >EXTRACTOR_LOCATION </TD ><TD >Location</TD ></TR ><TR ><TD >EXTRACTOR_VERSIONNUMBER </TD ><TD >VersionNumber</TD ></TR ><TR ><TD >EXTRACTOR_ORGANIZATION </TD ><TD >Organization</TD ></TR ><TR ><TD >EXTRACTOR_COPYRIGHT </TD ><TD >Copyright</TD ></TR ><TR ><TD >EXTRACTOR_SUBJECT </TD ><TD >Subject</TD ></TR ><TR ><TD >EXTRACTOR_KEYWORDS </TD ><TD >Meta.Keywords</TD ></TR ><TR ><TD >EXTRACTOR_CONTRIBUTOR </TD ><TD >Contributor</TD ></TR ><TR ><TD >EXTRACTOR_RESOURCE_TYPE </TD ><TD >Resource-Type</TD ></TR ><TR ><TD >EXTRACTOR_FORMAT </TD ><TD >Format</TD ></TR ><TR ><TD >EXTRACTOR_RESOURCE_IDENTIFIER </TD ><TD >Resource-Idendifier</TD ></TR ><TR ><TD >EXTRACTOR_SOURCE </TD ><TD >Source</TD ></TR ><TR ><TD >EXTRACTOR_RELATION </TD ><TD >Relation</TD ></TR ><TR ><TD >EXTRACTOR_COVERAGE </TD ><TD >Coverage</TD ></TR ><TR ><TD >EXTRACTOR_SOFTWARE </TD ><TD >Software</TD ></TR ><TR ><TD >EXTRACTOR_DISCLAIMER </TD ><TD >Disclaimer</TD ></TR ><TR ><TD >EXTRACTOR_WARNING </TD ><TD >Warning</TD ></TR ><TR ><TD >EXTRACTOR_TRANSLATED </TD ><TD >Translated</TD ></TR 
><TR ><TD >EXTRACTOR_CREATION_DATE </TD ><TD >Creation-Date</TD ></TR ><TR ><TD >EXTRACTOR_MODIFICATION_DATE </TD ><TD >Modification-Date</TD ></TR ><TR ><TD >EXTRACTOR_CREATOR </TD ><TD >Creator</TD ></TR ><TR ><TD >EXTRACTOR_PRODUCER </TD ><TD >Producer</TD ></TR ><TR ><TD >EXTRACTOR_PAGE_COUNT </TD ><TD >Page-Count</TD ></TR ><TR ><TD >EXTRACTOR_PAGE_ORIENTATION </TD ><TD >Page-Orientation</TD ></TR ><TR ><TD >EXTRACTOR_PAPER_SIZE </TD ><TD >Paper-Size</TD ></TR ><TR ><TD >EXTRACTOR_USED_FONTS </TD ><TD >Used-Fonts</TD ></TR ><TR ><TD >EXTRACTOR_PAGE_ORDER </TD ><TD >Page-Order</TD ></TR ><TR ><TD >EXTRACTOR_CREATED_FOR </TD ><TD >Created-For</TD ></TR ><TR ><TD >EXTRACTOR_MAGNIFICATION </TD ><TD >Magnification</TD ></TR ><TR ><TD >EXTRACTOR_RELEASE </TD ><TD >Release</TD ></TR ><TR ><TD >EXTRACTOR_GROUP </TD ><TD >Group</TD ></TR ><TR ><TD >EXTRACTOR_SIZE </TD ><TD >Size</TD ></TR ><TR ><TD >EXTRACTOR_SUMMARY </TD ><TD >Summary</TD ></TR ><TR ><TD >EXTRACTOR_PACKAGER </TD ><TD >Packager</TD ></TR ><TR ><TD >EXTRACTOR_VENDOR </TD ><TD >Vendor</TD ></TR ><TR ><TD >EXTRACTOR_LICENSE </TD ><TD >License</TD ></TR ><TR ><TD >EXTRACTOR_DISTRIBUTION </TD ><TD >Distribution</TD ></TR ><TR ><TD >EXTRACTOR_BUILDHOST </TD ><TD >BuildHost</TD ></TR ><TR ><TD >EXTRACTOR_OS </TD ><TD >OS</TD ></TR ><TR ><TD >EXTRACTOR_DEPENDENCY </TD ><TD >Dependency</TD ></TR ><TR ><TD >EXTRACTOR_HASH_MD4 </TD ><TD >Hash-MD4</TD ></TR ><TR ><TD >EXTRACTOR_HASH_MD5 </TD ><TD >Hash-MD5</TD ></TR ><TR ><TD >EXTRACTOR_HASH_SHA0 </TD ><TD >Hash-SHA0</TD ></TR ><TR ><TD >EXTRACTOR_HASH_SHA1 </TD ><TD >Hash-SHA1</TD ></TR ><TR ><TD >EXTRACTOR_HASH_RMD160 </TD ><TD >Hash-RMD160</TD ></TR ><TR ><TD >EXTRACTOR_RESOLUTION </TD ><TD >Resolution</TD ></TR ><TR ><TD >EXTRACTOR_CATEGORY </TD ><TD >Ext.Category</TD ></TR ><TR ><TD >EXTRACTOR_BOOKTITLE </TD ><TD >BookTitle</TD ></TR ><TR ><TD >EXTRACTOR_PRIORITY </TD ><TD >Priority</TD ></TR ><TR ><TD >EXTRACTOR_CONFLICTS </TD ><TD >Conflicts</TD ></TR ><TR 
><TD >EXTRACTOR_REPLACES </TD ><TD >Replaces</TD ></TR ><TR ><TD >EXTRACTOR_PROVIDES </TD ><TD >Provides</TD ></TR ><TR ><TD >EXTRACTOR_CONDUCTOR </TD ><TD >Conductor</TD ></TR ><TR ><TD >EXTRACTOR_INTERPRET </TD ><TD >Interpret</TD ></TR ><TR ><TD >EXTRACTOR_OWNER </TD ><TD >Owner</TD ></TR ><TR ><TD >EXTRACTOR_LYRICS </TD ><TD >Lyrics</TD ></TR ><TR ><TD >EXTRACTOR_MEDIA_TYPE </TD ><TD >Media-Type</TD ></TR ><TR ><TD >EXTRACTOR_CONTACT </TD ><TD >Contact</TD ></TR ><TR ><TD >EXTRACTOR_THUMBNAIL_DATA </TD ><TD >Thumbnail-Data</TD ></TR ><TR ><TD >EXTRACTOR_PUBLICATION_DATE </TD ><TD >Publication-Date</TD ></TR ><TR ><TD >EXTRACTOR_CAMERA_MAKE </TD ><TD >Camera-Make</TD ></TR ><TR ><TD >EXTRACTOR_CAMERA_MODEL </TD ><TD >Camera-Model</TD ></TR ><TR ><TD >EXTRACTOR_EXPOSURE </TD ><TD >Exposure</TD ></TR ><TR ><TD >EXTRACTOR_APERTURE </TD ><TD >Aperture</TD ></TR ><TR ><TD >EXTRACTOR_EXPOSURE_BIAS </TD ><TD >Exposure-Bias</TD ></TR ><TR ><TD >EXTRACTOR_FLASH </TD ><TD >Flash</TD ></TR ><TR ><TD >EXTRACTOR_FLASH_BIAS </TD ><TD >Flash-Bias</TD ></TR ><TR ><TD >EXTRACTOR_FOCAL_LENGTH </TD ><TD >Focal-Length</TD ></TR ><TR ><TD >EXTRACTOR_FOCAL_LENGTH_35MM </TD ><TD >Focal-Length-35MM</TD ></TR ><TR ><TD >EXTRACTOR_ISO_SPEED </TD ><TD >ISO-Speed</TD ></TR ><TR ><TD >EXTRACTOR_EXPOSURE_MODE </TD ><TD >Exposure-Mode</TD ></TR ><TR ><TD >EXTRACTOR_METERING_MODE </TD ><TD >Metering-Mode</TD ></TR ><TR ><TD >EXTRACTOR_MACRO_MODE </TD ><TD >Macro-Mode</TD ></TR ><TR ><TD >EXTRACTOR_IMAGE_QUALITY </TD ><TD >Image-Quality</TD ></TR ><TR ><TD >EXTRACTOR_WHITE_BALANCE </TD ><TD >White-Balance</TD ></TR ><TR ><TD >EXTRACTOR_ORIENTATION </TD ><TD >Orientation</TD ></TR ><TR ><TD >EXTRACTOR_TEMPLATE </TD ><TD >Template</TD ></TR ><TR ><TD >EXTRACTOR_SPLIT </TD ><TD >Split</TD ></TR ><TR ><TD >EXTRACTOR_PRODUCTVERSION </TD ><TD >ProductVersion</TD ></TR ><TR ><TD >EXTRACTOR_LAST_SAVED_BY </TD ><TD >Last-Saved-By</TD ></TR ><TR ><TD >EXTRACTOR_LAST_PRINTED </TD ><TD >Last-Printed</TD 
></TR ><TR ><TD >EXTRACTOR_WORD_COUNT </TD ><TD >Word-Count</TD ></TR ><TR ><TD >EXTRACTOR_CHARACTER_COUNT </TD ><TD >Character-Count</TD ></TR ><TR ><TD >EXTRACTOR_TOTAL_EDITING_TIME </TD ><TD >Total-Editing-Time</TD ></TR ><TR ><TD >EXTRACTOR_THUMBNAILS </TD ><TD >Thumbnails</TD ></TR ><TR ><TD >EXTRACTOR_SECURITY </TD ><TD >Security</TD ></TR ><TR ><TD >EXTRACTOR_CREATED_BY_SOFTWARE </TD ><TD >Created-By-Software</TD ></TR ><TR ><TD >EXTRACTOR_MODIFIED_BY_SOFTWARE </TD ><TD >Modified-By-Software</TD ></TR ><TR ><TD >EXTRACTOR_REVISION_HISTORY </TD ><TD >Revision-History</TD ></TR ><TR ><TD >EXTRACTOR_LOWERCASE </TD ><TD >Lowercase</TD ></TR ><TR ><TD >EXTRACTOR_COMPANY </TD ><TD >Company</TD ></TR ><TR ><TD >EXTRACTOR_GENERATOR </TD ><TD >Generator</TD ></TR ><TR ><TD >EXTRACTOR_CHARACTER_SET </TD ><TD >Meta-Charset</TD ></TR ><TR ><TD >EXTRACTOR_LINE_COUNT </TD ><TD >Line-Count</TD ></TR ><TR ><TD >EXTRACTOR_PARAGRAPH_COUNT </TD ><TD >Paragraph-Count</TD ></TR ><TR ><TD >EXTRACTOR_EDITING_CYCLES </TD ><TD >Editing-Cycles</TD ></TR ><TR ><TD >EXTRACTOR_SCALE </TD ><TD >Scale</TD ></TR ><TR ><TD >EXTRACTOR_MANAGER </TD ><TD >Manager</TD ></TR ><TR ><TD >EXTRACTOR_MOVIE_DIRECTOR </TD ><TD >Movie-Director</TD ></TR ><TR ><TD >EXTRACTOR_DURATION </TD ><TD >Duration</TD ></TR ><TR ><TD >EXTRACTOR_INFORMATION </TD ><TD >Information</TD ></TR ><TR ><TD >EXTRACTOR_FULL_NAME </TD ><TD >Full-Name</TD ></TR ><TR ><TD >EXTRACTOR_CHAPTER </TD ><TD >Chapter</TD ></TR ><TR ><TD >EXTRACTOR_YEAR </TD ><TD >Year</TD ></TR ><TR ><TD >EXTRACTOR_LINK </TD ><TD >Link</TD ></TR ><TR ><TD >EXTRACTOR_MUSIC_CD_IDENTIFIER </TD ><TD >Music-CD-Identifier</TD ></TR ><TR ><TD >EXTRACTOR_PLAY_COUNTER </TD ><TD >Play-Counter</TD ></TR ><TR ><TD >EXTRACTOR_POPULARITY_METER </TD ><TD >Popularity-Meter</TD ></TR ><TR ><TD >EXTRACTOR_CONTENT_TYPE </TD ><TD >Ext.Content-Type</TD ></TR ><TR ><TD >EXTRACTOR_ENCODED_BY </TD ><TD >Encoded-By</TD ></TR ><TR ><TD >EXTRACTOR_TIME </TD ><TD >Time</TD ></TR 
><TR ><TD >EXTRACTOR_MUSICIAN_CREDITS_LIST </TD ><TD >Musician-Credits-List</TD ></TR ><TR ><TD >EXTRACTOR_MOOD </TD ><TD >Mood</TD ></TR ><TR ><TD >EXTRACTOR_FORMAT_VERSION </TD ><TD >Format-Version</TD ></TR ><TR ><TD >EXTRACTOR_TELEVISION_SYSTEM </TD ><TD >Television-System</TD ></TR ><TR ><TD >EXTRACTOR_SONG_COUNT </TD ><TD >Song-Count</TD ></TR ><TR ><TD >EXTRACTOR_STARTING_SONG </TD ><TD >Strting-Song</TD ></TR ><TR ><TD >EXTRACTOR_HARDWARE_DEPENDENCY </TD ><TD >Hardware-Dependency</TD ></TR ><TR ><TD >EXTRACTOR_RIPPER </TD ><TD >Ripper</TD ></TR ><TR ><TD >EXTRACTOR_FILE_SIZE </TD ><TD >File-Size</TD ></TR ><TR ><TD >EXTRACTOR_TRACK_NUMBER </TD ><TD >Track-Number</TD ></TR ><TR ><TD >EXTRACTOR_ISRC </TD ><TD >ISRC</TD ></TR ><TR ><TD >EXTRACTOR_DISC_NUMBER </TD ><TD >Disc-Number</TD ></TR ></TBODY ></TABLE ></DIV ><P >If a section name from the list above doesn't specified in sections.conf, the value of corresponding keyword is written as <CODE CLASS="VARNAME" >body</CODE > section. 
Keywords of unknown type are written as <CODE CLASS="VARNAME" >body</CODE > section as well.</P ><P >For <SPAN CLASS="APPLICATION" >libextractor 0.6.x</SPAN >, the values returned by EXTRACTOR_metatype_to_string function are used as section names.</P ></DIV ></DIV ><DIV CLASS="NAVFOOTER" ><HR ALIGN="LEFT" WIDTH="100%"><TABLE SUMMARY="Footer navigation table" WIDTH="100%" BORDER="0" CELLPADDING="0" CELLSPACING="0" ><TR ><TD WIDTH="33%" ALIGN="left" VALIGN="top" ><A HREF="dpsearch-srvtable.en.html" ACCESSKEY="P" >Prev</A ></TD ><TD WIDTH="34%" ALIGN="center" VALIGN="top" ><A HREF="index.en.html" ACCESSKEY="H" >Home</A ></TD ><TD WIDTH="33%" ALIGN="right" VALIGN="top" ><A HREF="dpsearch-indexcmd.en.html" ACCESSKEY="N" >Next</A ></TD ></TR ><TR ><TD WIDTH="33%" ALIGN="left" VALIGN="top" >Servers Table</TD ><TD WIDTH="34%" ALIGN="center" VALIGN="top" ><A HREF="dpsearch-indexing.en.html" ACCESSKEY="U" >Up</A ></TD ><TD WIDTH="33%" ALIGN="right" VALIGN="top" >Other commands are used in <TT CLASS="FILENAME" >indexer.conf</TT ></TD ></TR ></TABLE ></DIV ><!--#include virtual="body-after.html"--></BODY ></HTML >
lhcardoso/dataparksearch
doc/dpsearch-pars.en.html
HTML
gpl-2.0
23,844
/*
 * OF helpers for the GPIO API
 *
 * Copyright (c) 2007-2008 MontaVista Software, Inc.
 *
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>
#include <linux/gpio/machine.h>

#include "gpiolib.h"

/*
 * gpiochip_find() match callback: a chip matches when it belongs to the
 * device node named by the gpiospec AND its of_xlate callback accepts the
 * specifier (i.e. the specifier is valid for this chip).
 */
static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
{
	struct of_phandle_args *gpiospec = data;

	return chip->gpiodev->dev.of_node == gpiospec->np &&
				chip->of_xlate(chip, gpiospec, NULL) >= 0;
}

/* Find the gpio_chip that can translate @gpiospec, or NULL if none yet. */
static struct gpio_chip *of_find_gpiochip_by_xlate(
					struct of_phandle_args *gpiospec)
{
	return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
}

/*
 * Translate @gpiospec through the chip's of_xlate callback into a GPIO
 * descriptor, optionally filling in @flags.  Returns an ERR_PTR() on a
 * cell-count mismatch or a translation failure.
 */
static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
					struct of_phandle_args *gpiospec,
					enum of_gpio_flags *flags)
{
	int ret;

	if (chip->of_gpio_n_cells != gpiospec->args_count)
		return ERR_PTR(-EINVAL);

	ret = chip->of_xlate(chip, gpiospec, flags);
	if (ret < 0)
		return ERR_PTR(ret);

	return gpiochip_get_desc(chip, ret);
}

/**
 * of_get_named_gpiod_flags() - Get a GPIO descriptor and flags for GPIO API
 * @np:		device node to get GPIO from
 * @propname:	property name containing gpio specifier(s)
 * @index:	index of the GPIO
 * @flags:	a flags pointer to fill in
 *
 * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
 * value on the error condition. If @flags is not NULL the function also fills
 * in flags for the GPIO.
 */
struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
		     const char *propname, int index, enum of_gpio_flags *flags)
{
	struct of_phandle_args gpiospec;
	struct gpio_chip *chip;
	struct gpio_desc *desc;
	int ret;

	ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
					 &gpiospec);
	if (ret) {
		pr_debug("%s: can't parse '%s' property of node '%pOF[%d]'\n",
			__func__, propname, np, index);
		return ERR_PTR(ret);
	}

	/* No matching chip yet: ask the caller to retry after more probing. */
	chip = of_find_gpiochip_by_xlate(&gpiospec);
	if (!chip) {
		desc = ERR_PTR(-EPROBE_DEFER);
		goto out;
	}

	desc = of_xlate_and_get_gpiod_flags(chip, &gpiospec, flags);
	if (IS_ERR(desc))
		goto out;

	pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n",
		 __func__, propname, np, index,
		 PTR_ERR_OR_ZERO(desc));

out:
	/* Drop the reference taken by of_parse_phandle_with_args(). */
	of_node_put(gpiospec.np);

	return desc;
}

/* Integer-number variant of of_get_named_gpiod_flags(). */
int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
			    int index, enum of_gpio_flags *flags)
{
	struct gpio_desc *desc;

	desc = of_get_named_gpiod_flags(np, list_name, index, flags);

	if (IS_ERR(desc))
		return PTR_ERR(desc);
	else
		return desc_to_gpio(desc);
}
EXPORT_SYMBOL(of_get_named_gpio_flags);

/*
 * Look up a GPIO for @dev/@con_id by trying each of the conventional
 * property-name suffixes in turn, then translate the OF flags into
 * gpio_lookup_flags for the consumer API.
 */
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
			       unsigned int idx,
			       enum gpio_lookup_flags *flags)
{
	char prop_name[32]; /* 32 is max size of property name */
	enum of_gpio_flags of_flags;
	struct gpio_desc *desc;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
		if (con_id)
			snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
				 gpio_suffixes[i]);
		else
			snprintf(prop_name, sizeof(prop_name), "%s",
				 gpio_suffixes[i]);

		desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
						&of_flags);
		/* Keep trying other suffixes only on "property not found". */
		if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
			break;
	}

	if (IS_ERR(desc))
		return desc;

	if (of_flags & OF_GPIO_ACTIVE_LOW)
		*flags |= GPIO_ACTIVE_LOW;

	if (of_flags & OF_GPIO_SINGLE_ENDED) {
		if (of_flags & OF_GPIO_OPEN_DRAIN)
			*flags |= GPIO_OPEN_DRAIN;
		else
			*flags |= GPIO_OPEN_SOURCE;
	}

	if (of_flags & OF_GPIO_SLEEP_MAY_LOSE_VALUE)
		*flags |= GPIO_SLEEP_MAY_LOSE_VALUE;

	return desc;
}

/**
 * of_parse_own_gpio() - Get a GPIO hog descriptor, names and flags for GPIO API
 * @np:		device node to get GPIO from
 * @chip:	GPIO chip whose hog is parsed
 * @idx:	Index of the GPIO to parse
 * @name:	GPIO line name
 * @lflags:	gpio_lookup_flags - returned from of_find_gpio() or
 *		of_parse_own_gpio()
 * @dflags:	gpiod_flags - optional GPIO initialization flags
 *
 * Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
 * value on the error condition.
 */
static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
					   struct gpio_chip *chip,
					   unsigned int idx, const char **name,
					   enum gpio_lookup_flags *lflags,
					   enum gpiod_flags *dflags)
{
	struct device_node *chip_np;
	enum of_gpio_flags xlate_flags;
	struct of_phandle_args gpiospec;
	struct gpio_desc *desc;
	unsigned int i;
	u32 tmp;
	int ret;

	chip_np = chip->of_node;
	if (!chip_np)
		return ERR_PTR(-EINVAL);

	xlate_flags = 0;
	*lflags = 0;
	*dflags = 0;

	ret = of_property_read_u32(chip_np, "#gpio-cells", &tmp);
	if (ret)
		return ERR_PTR(ret);

	/* Build a gpiospec for this chip from the hog node's "gpios" cells. */
	gpiospec.np = chip_np;
	gpiospec.args_count = tmp;

	for (i = 0; i < tmp; i++) {
		ret = of_property_read_u32_index(np, "gpios", idx * tmp + i,
						 &gpiospec.args[i]);
		if (ret)
			return ERR_PTR(ret);
	}

	desc = of_xlate_and_get_gpiod_flags(chip, &gpiospec, &xlate_flags);
	if (IS_ERR(desc))
		return desc;

	if (xlate_flags & OF_GPIO_ACTIVE_LOW)
		*lflags |= GPIO_ACTIVE_LOW;

	/* Exactly one hogging state must be specified in the DT node. */
	if (of_property_read_bool(np, "input"))
		*dflags |= GPIOD_IN;
	else if (of_property_read_bool(np, "output-low"))
		*dflags |= GPIOD_OUT_LOW;
	else if (of_property_read_bool(np, "output-high"))
		*dflags |= GPIOD_OUT_HIGH;
	else {
		pr_warn("GPIO line %d (%s): no hogging state specified, bailing out\n",
			desc_to_gpio(desc), np->name);
		return ERR_PTR(-EINVAL);
	}

	/* Fall back to the node name when no "line-name" is given. */
	if (name && of_property_read_string(np, "line-name", name))
		*name = np->name;

	return desc;
}

/**
 * of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
 * @chip:	gpio chip to act on
 *
 * This is only used by of_gpiochip_add to request/set GPIO initial
 * configuration.
 * It returns error if it fails otherwise 0 on success.
 */
static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
{
	struct gpio_desc *desc = NULL;
	struct device_node *np;
	const char *name;
	enum gpio_lookup_flags lflags;
	enum gpiod_flags dflags;
	unsigned int i;
	int ret;

	for_each_available_child_of_node(chip->of_node, np) {
		if (!of_property_read_bool(np, "gpio-hog"))
			continue;

		/* A hog node may describe several lines; stop at first error. */
		for (i = 0;; i++) {
			desc = of_parse_own_gpio(np, chip, i, &name, &lflags,
						 &dflags);
			if (IS_ERR(desc))
				break;

			ret = gpiod_hog(desc, name, lflags, dflags);
			if (ret < 0) {
				/* Balance the iterator's implicit reference. */
				of_node_put(np);
				return ret;
			}
		}
	}

	return 0;
}

/**
 * of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
 * @gc:		pointer to the gpio_chip structure
 * @gpiospec:	GPIO specifier as found in the device tree
 * @flags:	a flags pointer to fill in
 *
 * This is simple translation function, suitable for the most 1:1 mapped
 * GPIO chips. This function performs only one sanity check: whether GPIO
 * is less than ngpios (that is specified in the gpio_chip).
 */
int of_gpio_simple_xlate(struct gpio_chip *gc,
			 const struct of_phandle_args *gpiospec, u32 *flags)
{
	/*
	 * We're discouraging gpio_cells < 2, since that way you'll have to
	 * write your own xlate function (that will have to retrieve the GPIO
	 * number and the flags from a single gpio cell -- this is possible,
	 * but not recommended).
	 */
	if (gc->of_gpio_n_cells < 2) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
		return -EINVAL;

	if (gpiospec->args[0] >= gc->ngpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0];
}
EXPORT_SYMBOL(of_gpio_simple_xlate);

/**
 * of_mm_gpiochip_add_data - Add memory mapped GPIO chip (bank)
 * @np:		device node of the GPIO chip
 * @mm_gc:	pointer to the of_mm_gpio_chip allocated structure
 * @data:	driver data to store in the struct gpio_chip
 *
 * To use this function you should allocate and fill mm_gc with:
 *
 * 1) In the gpio_chip structure:
 *    - all the callbacks
 *    - of_gpio_n_cells
 *    - of_xlate callback (optional)
 *
 * 3) In the of_mm_gpio_chip structure:
 *    - save_regs callback (optional)
 *
 * If succeeded, this function will map bank's memory and will
 * do all necessary work for you. Then you'll able to use .regs
 * to manage GPIOs from the callbacks.
 */
int of_mm_gpiochip_add_data(struct device_node *np,
			    struct of_mm_gpio_chip *mm_gc,
			    void *data)
{
	int ret = -ENOMEM;
	struct gpio_chip *gc = &mm_gc->gc;

	gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
	if (!gc->label)
		goto err0;

	mm_gc->regs = of_iomap(np, 0);
	if (!mm_gc->regs)
		goto err1;

	/* base == -1 lets gpiolib pick a dynamic GPIO number range. */
	gc->base = -1;

	if (mm_gc->save_regs)
		mm_gc->save_regs(mm_gc);

	mm_gc->gc.of_node = np;

	ret = gpiochip_add_data(gc, data);
	if (ret)
		goto err2;

	return 0;
err2:
	iounmap(mm_gc->regs);
err1:
	kfree(gc->label);
err0:
	pr_err("%pOF: GPIO chip registration failed with status %d\n", np, ret);
	return ret;
}
EXPORT_SYMBOL(of_mm_gpiochip_add_data);

/**
 * of_mm_gpiochip_remove - Remove memory mapped GPIO chip (bank)
 * @mm_gc:	pointer to the of_mm_gpio_chip allocated structure
 */
void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc)
{
	struct gpio_chip *gc = &mm_gc->gc;

	if (!mm_gc)
		return;

	gpiochip_remove(gc);
	iounmap(mm_gc->regs);
	kfree(gc->label);
}
EXPORT_SYMBOL(of_mm_gpiochip_remove);

#ifdef CONFIG_PINCTRL
/*
 * Walk the chip node's "gpio-ranges" property and register each entry as
 * either a linear pin range (npins != 0) or a named pin-group range
 * (npins == 0, name taken from "gpio-ranges-group-names").
 */
static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
{
	struct device_node *np = chip->of_node;
	struct of_phandle_args pinspec;
	struct pinctrl_dev *pctldev;
	int index = 0, ret;
	const char *name;
	static const char group_names_propname[] = "gpio-ranges-group-names";
	struct property *group_names;

	if (!np)
		return 0;

	group_names = of_find_property(np, group_names_propname, NULL);

	for (;; index++) {
		ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
				index, &pinspec);
		if (ret)
			break;

		pctldev = of_pinctrl_get(pinspec.np);
		of_node_put(pinspec.np);
		if (!pctldev)
			return -EPROBE_DEFER;

		if (pinspec.args[2]) {
			if (group_names) {
				of_property_read_string_index(np,
						group_names_propname,
						index, &name);
				if (strlen(name)) {
					pr_err("%pOF: Group name of numeric GPIO ranges must be the empty string.\n",
						np);
					break;
				}
			}
			/* npins != 0: linear range */
			ret = gpiochip_add_pin_range(chip,
					pinctrl_dev_get_devname(pctldev),
					pinspec.args[0],
					pinspec.args[1],
					pinspec.args[2]);
			if (ret)
				return ret;
		} else {
			/* npins == 0: special range */
			if (pinspec.args[1]) {
				pr_err("%pOF: Illegal gpio-range format.\n",
					np);
				break;
			}

			if (!group_names) {
				pr_err("%pOF: GPIO group range requested but no %s property.\n",
					np, group_names_propname);
				break;
			}

			ret = of_property_read_string_index(np,
						group_names_propname,
						index, &name);
			if (ret)
				break;

			if (!strlen(name)) {
				pr_err("%pOF: Group name of GPIO group range cannot be the empty string.\n",
					np);
				break;
			}

			ret = gpiochip_add_pingroup_range(chip, pctldev,
						pinspec.args[0], name);
			if (ret)
				return ret;
		}
	}

	return 0;
}

#else
static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; }
#endif

/*
 * Hook a freshly registered gpio_chip into the device tree: inherit the
 * parent's of_node when needed, install the default two-cell translator,
 * register pin ranges and apply any GPIO hogs.
 */
int of_gpiochip_add(struct gpio_chip *chip)
{
	int status;

	if ((!chip->of_node) && (chip->parent))
		chip->of_node = chip->parent->of_node;

	if (!chip->of_node)
		return 0;

	if (!chip->of_xlate) {
		chip->of_gpio_n_cells = 2;
		chip->of_xlate = of_gpio_simple_xlate;
	}

	if (chip->of_gpio_n_cells > MAX_PHANDLE_ARGS)
		return -EINVAL;

	status = of_gpiochip_add_pin_range(chip);
	if (status)
		return status;

	/* If the chip defines names itself, these take precedence */
	if (!chip->names)
		devprop_gpiochip_set_names(chip,
					   of_fwnode_handle(chip->of_node));

	/* Hold a reference for the lifetime of the registration. */
	of_node_get(chip->of_node);

	return of_gpiochip_scan_gpios(chip);
}

void of_gpiochip_remove(struct gpio_chip *chip)
{
	gpiochip_remove_pin_ranges(chip);
	of_node_put(chip->of_node);
}
hannes/linux
drivers/gpio/gpiolib-of.c
C
gpl-2.0
12,354
/**
 * @license Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved.
 * For licensing, see LICENSE.md or http://ckeditor.com/license
 */

( function() {
	// HTML template for a toolbar button. Placeholders in {braces} are
	// substituted by CKEDITOR.template when the button is rendered.
	var template = '<a id="{id}"' +
		' class="cke_button cke_button__{name} cke_button_{state} {cls}"' +
		( CKEDITOR.env.gecko && !CKEDITOR.env.hc ? '' : ' href="javascript:void(\'{titleJs}\')"' ) +
		' title="{title}"' +
		' tabindex="-1"' +
		' hidefocus="true"' +
		' role="button"' +
		' aria-labelledby="{id}_label"' +
		' aria-haspopup="{hasArrow}"' +
		' aria-disabled="{ariaDisabled}"';

	// Some browsers don't cancel key events in the keydown but in the
	// keypress.
	// TODO: Check if really needed.
	if ( CKEDITOR.env.gecko && CKEDITOR.env.mac )
		template += ' onkeypress="return false;"';

	// With Firefox, we need to force the button to redraw, otherwise it
	// will remain in the focus state.
	if ( CKEDITOR.env.gecko )
		template += ' onblur="this.style.cssText = this.style.cssText;"';

	template += ' onkeydown="return CKEDITOR.tools.callFunction({keydownFn},event);"' +
		' onfocus="return CKEDITOR.tools.callFunction({focusFn},event);" ' +
		// On IE the click is fired on mouseup instead, with onclick
		// suppressed, to avoid stealing focus/selection. (#188)
		( CKEDITOR.env.ie ? 'onclick="return false;" onmouseup' : 'onclick' ) +
			'="CKEDITOR.tools.callFunction({clickFn},this);return false;">' +
		'<span class="cke_button_icon cke_button__{iconName}_icon" style="{style}"';

	template += '>&nbsp;</span>' +
		'<span id="{id}_label" class="cke_button_label cke_button__{name}_label" aria-hidden="false">{label}</span>' +
		'{arrowHtml}' +
		'</a>';

	// Drop-down arrow markup; in high-contrast mode a real glyph is used.
	var templateArrow = '<span class="cke_button_arrow">' +
		// BLACK DOWN-POINTING TRIANGLE
		( CKEDITOR.env.hc ? '&#9660;' : '' ) +
		'</span>';

	var btnArrowTpl = CKEDITOR.addTemplate( 'buttonArrow', templateArrow ),
		btnTpl = CKEDITOR.addTemplate( 'button', template );

	CKEDITOR.plugins.add( 'button', {
		lang: 'af,ar,bg,ca,cs,da,de,el,en,en-gb,eo,es,eu,fa,fi,fr,gl,he,hu,id,it,ja,km,ko,ku,lt,nb,nl,pl,pt,pt-br,ro,ru,sk,sl,sq,sv,tr,tt,ug,uk,vi,zh,zh-cn', // %REMOVE_LINE_CORE%
		beforeInit: function( editor ) {
			editor.ui.addHandler( CKEDITOR.UI_BUTTON, CKEDITOR.ui.button.handler );
		}
	} );

	/**
	 * Button UI element.
	 *
	 * @readonly
	 * @property {String} [='button']
	 * @member CKEDITOR
	 */
	CKEDITOR.UI_BUTTON = 'button';

	/**
	 * Represents a button UI element. This class should not be called directly. To
	 * create new buttons use {@link CKEDITOR.ui#addButton} instead.
	 *
	 * @class
	 * @constructor Creates a button class instance.
	 * @param {Object} definition The button definition.
	 */
	CKEDITOR.ui.button = function( definition ) {
		CKEDITOR.tools.extend( this, definition,
		// Set defaults.
		{
			title: definition.label,
			click: definition.click ||
			function( editor ) {
				editor.execCommand( definition.command );
			}
		} );

		this._ = {};
	};

	/**
	 * Represents the button handler object.
	 *
	 * @class
	 * @singleton
	 * @extends CKEDITOR.ui.handlerDefinition
	 */
	CKEDITOR.ui.button.handler = {
		/**
		 * Transforms a button definition in a {@link CKEDITOR.ui.button} instance.
		 *
		 * @member CKEDITOR.ui.button.handler
		 * @param {Object} definition
		 * @returns {CKEDITOR.ui.button}
		 */
		create: function( definition ) {
			return new CKEDITOR.ui.button( definition );
		}
	};

	/** @class CKEDITOR.ui.button */
	CKEDITOR.ui.button.prototype = {
		/**
		 * Renders the button.
		 *
		 * @param {CKEDITOR.editor} editor The editor instance which this button is
		 * to be used by.
		 * @param {Array} output The output array to which the HTML code related to
		 * this button should be appended.
		 */
		render: function( editor, output ) {
			// Recomputes and applies the button state for the current
			// editor mode / active filter / read-only state.
			function updateState() {
				// "this" is a CKEDITOR.ui.button instance.
				var mode = editor.mode;

				if ( mode ) {
					// Restore saved button state.
					var state = this.modes[ mode ] ? modeStates[ mode ] !== undefined ? modeStates[ mode ] : CKEDITOR.TRISTATE_OFF : CKEDITOR.TRISTATE_DISABLED;

					state = editor.readOnly && !this.readOnly ? CKEDITOR.TRISTATE_DISABLED : state;

					this.setState( state );

					// Let plugin to disable button.
					if ( this.refresh )
						this.refresh();
				}
			}

			var env = CKEDITOR.env,
				id = this._.id = CKEDITOR.tools.getNextId(),
				stateName = '',
				command = this.command, // Get the command name.
				clickFn;

			this._.editor = editor;

			// Per-render instance handed back to the caller; also the
			// receiver for the DOM event callbacks registered below.
			var instance = {
				id: id,
				button: this,
				editor: editor,
				focus: function() {
					var element = CKEDITOR.document.getById( id );
					element.focus();
				},
				execute: function() {
					this.button.click( editor );
				},
				attach: function( editor ) {
					this.button.attach( editor );
				}
			};

			var keydownFn = CKEDITOR.tools.addFunction( function( ev ) {
				if ( instance.onkey ) {
					ev = new CKEDITOR.dom.event( ev );
					return ( instance.onkey( instance, ev.getKeystroke() ) !== false );
				}
			} );

			var focusFn = CKEDITOR.tools.addFunction( function( ev ) {
				var retVal;

				if ( instance.onfocus )
					retVal = ( instance.onfocus( instance, new CKEDITOR.dom.event( ev ) ) !== false );

				return retVal;
			} );

			var selLocked = 0;

			instance.clickFn = clickFn = CKEDITOR.tools.addFunction( function() {
				// Restore locked selection in Opera.
				if ( selLocked ) {
					editor.unlockSelection( 1 );
					selLocked = 0;
				}

				instance.execute();

				// Fixed iOS focus issue when your press disabled button (#12381).
				if ( env.iOS ) {
					editor.focus();
				}
			} );

			// Indicate a mode sensitive button.
			if ( this.modes ) {
				// Remembers the state per editor mode so it can be
				// restored when the mode is re-entered.
				var modeStates = {};

				editor.on( 'beforeModeUnload', function() {
					if ( editor.mode && this._.state != CKEDITOR.TRISTATE_DISABLED )
						modeStates[ editor.mode ] = this._.state;
				}, this );

				// Update status when activeFilter, mode or readOnly changes.
				editor.on( 'activeFilterChange', updateState, this );
				editor.on( 'mode', updateState, this );

				// If this button is sensitive to readOnly state, update it accordingly.
				!this.readOnly && editor.on( 'readOnly', updateState, this );

			} else if ( command ) {
				// Get the command instance.
				command = editor.getCommand( command );

				if ( command ) {
					command.on( 'state', function() {
						this.setState( command.state );
					}, this );

					stateName += ( command.state == CKEDITOR.TRISTATE_ON ? 'on' : command.state == CKEDITOR.TRISTATE_DISABLED ? 'disabled' : 'off' );
				}
			}

			// For button that has text-direction awareness on selection path.
			if ( this.directional ) {
				editor.on( 'contentDirChanged', function( evt ) {
					var el = CKEDITOR.document.getById( this._.id ),
						icon = el.getFirst();

					var pathDir = evt.data;

					// Make a minor direction change to become style-able for the skin icon.
					if ( pathDir != editor.lang.dir )
						el.addClass( 'cke_' + pathDir );
					else
						el.removeClass( 'cke_ltr' ).removeClass( 'cke_rtl' );

					// Inline style update for the plugin icon.
					icon.setAttribute( 'style', CKEDITOR.skin.getIconStyle( iconName, pathDir == 'rtl', this.icon, this.iconOffset ) );
				}, this );
			}

			if ( !command )
				stateName += 'off';

			var name = this.name || this.command,
				iconName = name;

			// Check if we're pointing to an icon defined by another command. (#9555)
			if ( this.icon && !( /\./ ).test( this.icon ) ) {
				iconName = this.icon;
				this.icon = null;
			}

			// Values substituted into the button template above.
			var params = {
				id: id,
				name: name,
				iconName: iconName,
				label: this.label,
				cls: this.className || '',
				state: stateName,
				ariaDisabled: stateName == 'disabled' ? 'true' : 'false',
				title: this.title,
				titleJs: env.gecko && !env.hc ? '' : ( this.title || '' ).replace( "'", '' ),
				hasArrow: this.hasArrow ? 'true' : 'false',
				keydownFn: keydownFn,
				focusFn: focusFn,
				clickFn: clickFn,
				style: CKEDITOR.skin.getIconStyle( iconName, ( editor.lang.dir == 'rtl' ), this.icon, this.iconOffset ),
				arrowHtml: this.hasArrow ? btnArrowTpl.output() : ''
			};

			btnTpl.output( params, output );

			if ( this.onRender )
				this.onRender();

			return instance;
		},

		/**
		 * Sets the button state.
		 *
		 * @param {Number} state Indicates the button state. One of {@link CKEDITOR#TRISTATE_ON},
		 * {@link CKEDITOR#TRISTATE_OFF}, or {@link CKEDITOR#TRISTATE_DISABLED}.
		 * @returns {Boolean} Whether the state was actually changed on a rendered element.
		 */
		setState: function( state ) {
			if ( this._.state == state )
				return false;

			this._.state = state;

			var element = CKEDITOR.document.getById( this._.id );

			if ( element ) {
				element.setState( state, 'cke_button' );

				state == CKEDITOR.TRISTATE_DISABLED ?
					element.setAttribute( 'aria-disabled', true ) :
					element.removeAttribute( 'aria-disabled' );

				if ( !this.hasArrow ) {
					// Note: aria-pressed attribute should not be added to menuButton instances. (#11331)
					state == CKEDITOR.TRISTATE_ON ?
						element.setAttribute( 'aria-pressed', true ) :
						element.removeAttribute( 'aria-pressed' );
				} else {
					var newLabel = state == CKEDITOR.TRISTATE_ON ?
						this._.editor.lang.button.selectedLabel.replace( /%1/g, this.label ) : this.label;
					CKEDITOR.document.getById( this._.id + '_label' ).setText( newLabel );
				}

				return true;
			} else {
				return false;
			}
		},

		/**
		 * Gets the button state.
		 *
		 * @returns {Number} The button state. One of {@link CKEDITOR#TRISTATE_ON},
		 * {@link CKEDITOR#TRISTATE_OFF}, or {@link CKEDITOR#TRISTATE_DISABLED}.
		 */
		getState: function() {
			return this._.state;
		},

		/**
		 * Returns this button's {@link CKEDITOR.feature} instance.
		 *
		 * It may be this button instance if it has at least one of
		 * `allowedContent` and `requiredContent` properties. Otherwise,
		 * if a command is bound to this button by the `command` property, then
		 * that command will be returned.
		 *
		 * This method implements the {@link CKEDITOR.feature#toFeature} interface method.
		 *
		 * @since 4.1
		 * @param {CKEDITOR.editor} Editor instance.
		 * @returns {CKEDITOR.feature} The feature.
		 */
		toFeature: function( editor ) {
			if ( this._.feature )
				return this._.feature;

			var feature = this;

			// If button isn't a feature, return command if is bound.
			if ( !this.allowedContent && !this.requiredContent && this.command )
				feature = editor.getCommand( this.command ) || feature;

			return this._.feature = feature;
		}
	};

	/**
	 * Adds a button definition to the UI elements list.
	 *
	 *		editorInstance.ui.addButton( 'MyBold', {
	 *			label: 'My Bold',
	 *			command: 'bold',
	 *			toolbar: 'basicstyles,1'
	 *		} );
	 *
	 * @member CKEDITOR.ui
	 * @param {String} name The button name.
	 * @param {Object} definition The button definition.
	 * @param {String} definition.label The textual part of the button (if visible) and its tooltip.
	 * @param {String} definition.command The command to be executed once the button is activated.
	 * @param {String} definition.toolbar The {@link CKEDITOR.config#toolbarGroups toolbar group} into which
	 * the button will be added. An optional index value (separated by a comma) determines the button position within the group.
	 */
	CKEDITOR.ui.prototype.addButton = function( name, definition ) {
		this.add( name, CKEDITOR.UI_BUTTON, definition );
	};

} )();
lcrojanouninorte/gie_portal
sites/all/libraries/ckeditor/plugins/button/plugin.js
JavaScript
gpl-2.0
11,300
/*
 * Mapping of DWARF debug register numbers into register names.
 *
 * Copyright (C) 2010 Matt Fleming <matt@console-pimps.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

/*
 * <libio.h> was removed from glibc 2.28 and provided nothing this file
 * needs beyond NULL; <stddef.h> supplies NULL portably.
 */
#include <stddef.h>
#include <dwarf-regs.h>

/* Number of entries in sh_regs_table: r0-r15, pc and pr. */
#define SH_MAX_REGS 18

/* DWARF register number -> SH register name, indexed by DWARF number. */
const char *sh_regs_table[SH_MAX_REGS] = {
	"r0",
	"r1",
	"r2",
	"r3",
	"r4",
	"r5",
	"r6",
	"r7",
	"r8",
	"r9",
	"r10",
	"r11",
	"r12",
	"r13",
	"r14",
	"r15",
	"pc",
	"pr",
};

/*
 * Return the architecture register name for DWARF register number @n,
 * or NULL when @n is out of range.
 */
const char *get_arch_regstr(unsigned int n)
{
	/*
	 * Valid indices are 0 .. SH_MAX_REGS - 1; the original "<=" test
	 * allowed n == SH_MAX_REGS and read one element past the table.
	 */
	return (n < SH_MAX_REGS) ? sh_regs_table[n] : NULL;
}
curbthepain/revkernel_us990
tools/perf/arch/sh/util/dwarf-regs.c
C
gpl-2.0
1,317
/****************************************************************************
**
** This file is part of the Qt Extended Opensource Package.
**
** Copyright (C) 2009 Trolltech ASA.
**
** Contact: Qt Extended Information (info@qtextended.org)
**
** This file may be used under the terms of the GNU General Public License
** version 2.0 as published by the Free Software Foundation and appearing
** in the file LICENSE.GPL included in the packaging of this file.
**
** Please review the following information to ensure GNU General Public
** Licensing requirements will be met:
**     http://www.fsf.org/licensing/licenses/info/GPLv2.html.
**
**
****************************************************************************/

#include <qbluetoothaudiogateway.h>
#include <qbluetoothaddress.h>

#include <QVariant>
#include <QString>

/*!
    \class QBluetoothAudioGateway
    \inpublicgroup QtBluetoothModule

    \brief The QBluetoothAudioGateway class provides an interface to a Bluetooth Audio Gateway.

    \ingroup qtopiabluetooth

    The Bluetooth Audio Gateway usually has an audio device associated with
    it.  Clients can also control speaker and microphone volume of the device,
    connect and disconnect remote clients and find status information of the
    audio gateway.

    The QBluetoothAudioGateway is used to control the Headset and Handsfree
    profile implementations in Qtopia.  Both use a control RFCOMM channel
    and a SCO voice data channel.  This class should be used by client
    applications that wish to control the state of Handsfree / Headset
    implementations (e.g. a Bluetooth Audio settings application.)

    \sa QCommInterface
*/

/*!
    Construct a new audio gateway object for \a service and attach
    it to \a parent.  The object will be created in client mode if
    \a mode is Client, or server mode otherwise.

    If \a service is empty, this class will use the first available
    service that supports bluetooth audio gateway.  If there is more
    than one service that supports bluetooth audio gateway, the caller
    should enumerate them with QCommServiceManager::supports()
    and create separate QBluetoothAudioGateway objects for each.

    \sa QAbstractIpcInterfaceGroup::supports()
*/
QBluetoothAudioGateway::QBluetoothAudioGateway(const QString &service,
                                              QObject *parent,
                                              QAbstractIpcInterface::Mode mode)
    : QCommInterface("QBluetoothAudioGateway", service, parent, mode)
{
    // Forward every signal/slot declared on this interface over IPC.
    proxyAll( staticMetaObject );
}

/*!
    Destroys the audio gateway.
*/
QBluetoothAudioGateway::~QBluetoothAudioGateway()
{
}

/*!
    Returns the current speaker volume of the device. The volume
    can be between 0 and 15.

    \sa setSpeakerVolume()
*/
int QBluetoothAudioGateway::speakerVolume() const
{
    // Reads the published IPC value; 0 is the default when unset.
    return value("SpeakerVolume", 0).toInt();
}

/*!
    Returns the current microphone volume of the device. The volume
    can be between 0 and 15.

    \sa setMicrophoneVolume()
*/
int QBluetoothAudioGateway::microphoneVolume() const
{
    return value("MicrophoneVolume", 0).toInt();
}

/*!
    Returns whether the voice data information is being passed to
    the device.  This is true once an SCO connection has been established
    between the remote device (headset) and the audio gateway.  All audio
    information should now be routed over the device associated
    with this audio gateway.

    \sa isConnected(), remotePeer()
*/
bool QBluetoothAudioGateway::audioEnabled() const
{
    return value("AudioEnabled", false).toBool();
}

/*!
    Returns true if a client is connected.  This is true once an
    RFCOMM control connection has been established between the
    remote device and the audio gateway.

    \sa audioEnabled(), remotePeer()
*/
bool QBluetoothAudioGateway::isConnected() const
{
    return value("IsConnected", false).toBool();
}

/*!
    Returns the address of the connected remote device.  If there is
    no connection an invalid QBluetoothAddress is returned.

    \sa isConnected()
*/
QBluetoothAddress QBluetoothAudioGateway::remotePeer() const
{
    return value("RemotePeer").value<QBluetoothAddress>();
}

/*!
    Attempts to establish an Audio Gateway initiated RFCOMM control
    connection to the headset.  The \a addr contains the address of the
    remote device and \a rfcomm_channel contains the service channel
    to connect on.

    The connectResult() signal will be sent once the connection
    succeeds or fails.

    \sa connectResult()
*/
void QBluetoothAudioGateway::connect(const QBluetoothAddress &addr,
                                     int rfcomm_channel)
{
    // Forwarded to the server-side implementation over IPC.
    invoke( SLOT(connect(QBluetoothAddress,int)),
            qVariantFromValue( addr ),
            qVariantFromValue( rfcomm_channel ));
}

/*!
    Disconnect the currently active client from the Audio Gateway.
    The headsetDisconnected() signal will be sent once the headset
    is disconnected().

    \sa headsetDisconnected()
*/
void QBluetoothAudioGateway::disconnect()
{
    invoke( SLOT(disconnect()) );
}

/*!
    Requests the Audio Gateway to notify the remote device to
    update its microphone volume to \a volume.  The volume range
    must be between 0 and 15.

    \sa microphoneVolume()
*/
void QBluetoothAudioGateway::setMicrophoneVolume(int volume)
{
    invoke( SLOT(setMicrophoneVolume(int)), qVariantFromValue(volume));
}

/*!
    Requests the Audio Gateway to notify the remote device to
    update its speaker volume to \a volume.  The volume range
    must be between 0 and 15.

    \sa speakerVolume()
*/
void QBluetoothAudioGateway::setSpeakerVolume(int volume)
{
    invoke( SLOT(setSpeakerVolume(int)), qVariantFromValue(volume) );
}

/*!
    Asks the Audio Gateway to release the SCO audio data connection.
    No audio should be routed through the associated audio device.

    \sa connectAudio()
*/
void QBluetoothAudioGateway::releaseAudio()
{
    invoke( SLOT(releaseAudio()) );
}

/*!
    Asks the Audio Gateway to establish an SCO data connection
    with the currently connected peer.  All audio data should now
    be routed to the associated audio device.

    \sa releaseAudio()
*/
void QBluetoothAudioGateway::connectAudio()
{
    invoke( SLOT(connectAudio()) );
}

/*!
    \fn void QBluetoothAudioGateway::connectResult(bool success, const QString &msg)

    This signal is sent after the connect method has been called
    on the Audio Gateway object.  The \a success parameter is true
    if the connection succeeded, and false otherwise.  If the connection
    failed, the \a msg parameter holds the error string.

    \sa connect()
*/

/*!
    \fn void QBluetoothAudioGateway::newConnection(const QBluetoothAddress &addr)

    This signal is sent whenever a client has connected to the Audio
    Gateway.  It is only sent on remote device initiated connections.
    The \a addr parameter holds the address of the connected remote device.

    \sa connect()
*/

/*!
    \fn void QBluetoothAudioGateway::headsetDisconnected()

    This signal is sent whenever a headset has disconnected from the
    audio gateway.

    \sa disconnect()
*/

/*!
    \fn void QBluetoothAudioGateway::speakerVolumeChanged()

    This signal is sent whenever the speaker volume of the remote
    device has been changed.

    \sa speakerVolume(), setSpeakerVolume()
*/

/*!
    \fn void QBluetoothAudioGateway::microphoneVolumeChanged()

    This signal is sent whenever the microphone volume of the remote
    device has been changed.

    \sa microphoneVolume(), setMicrophoneVolume()
*/

/*!
    \fn void QBluetoothAudioGateway::audioStateChanged()

    This signal is emitted whenever the state of the audio data
    stream has changed.  E.g. the audio stream (SCO connection)
    has been disconnected or connected.  Use audioEnabled()
    to find out the state of the audio stream.

    \sa audioEnabled()
*/
radekp/qtmoko
src/libraries/qtopiacomm/bluetooth/qbluetoothaudiogateway.cpp
C++
gpl-2.0
7,819
/* Copyright (C) 1991, 1992, 1993, 1995, 1997 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <sys/wait.h> #include <sys/types.h> /* Wait for a child to exit. When one does, put its status in *STAT_LOC and return its process ID. For errors return (pid_t) -1. If USAGE is not nil, store information about the child's resource usage (as a `struct rusage') there. If the WUNTRACED bit is set in OPTIONS, return status for stopped children; otherwise don't. */ pid_t __wait3 (stat_loc, options, usage) __WAIT_STATUS stat_loc; int options; struct rusage *usage; { return __wait4 (WAIT_ANY, stat_loc, options, usage); } weak_alias (__wait3, wait3)
ScotSalmon/glibc
sysdeps/unix/bsd/bsd4.4/wait3.c
C
gpl-2.0
1,420
-- 1879 Habla con Bink -- https://es.classic.wowhead.com/quest=1879 SET @ID := 1879; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Oh, puedo ver que tienes una promesa, ¡sí que la tienes! Pero se necesita más que una promesa para ser $gun buen:una buena; $c. Y si no eres $gun buen:una buena; $c, entonces eres $gun:una; $c $gmalo:mala;. Y los magos malos son parte del problema, ¡sí que lo son!$B$BVaya, me estoy adelantando. ¡Empecemos por el principio!', 0), (@ID, 'esMX', 'Oh, puedo ver que tienes una promesa, ¡sí que la tienes! Pero se necesita más que una promesa para ser $gun buen:una buena; $c. Y si no eres $gun buen:una buena; $c, entonces eres $gun:una; $c $gmalo:mala;. Y los magos malos son parte del problema, ¡sí que lo son!$B$BVaya, me estoy adelantando. ¡Empecemos por el principio!', 0); -- 1880 Trastónitor maguitronic -- https://es.classic.wowhead.com/quest=1880 SET @ID := 1880; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Encontraste el trastónitor? ¡Debo recuperarlo antes de que se quede sin suministro de energía!', 0), (@ID, 'esMX', '¿Encontraste el trastónitor? ¡Debo recuperarlo antes de que se quede sin suministro de energía!', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Oh, un día espléndido! Cuando hice mi trastónitor no me di cuenta de lo que iba a depender de él. ¡Ya puedo volver a empezar mis estudios!$B$B¡Gracias, $n! Y toma, estas fueron mis primeras herramientas de $c. Están un poco estropeadas pero funcionan bien. 
Escoge lo que quieras, seguro que cualquiera de las dos cosas te vendrá bien.', 0), (@ID, 'esMX', '¡Oh, un día espléndido! Cuando hice mi trastónitor no me di cuenta de lo que iba a depender de él. ¡Ya puedo volver a empezar mis estudios!$B$B¡Gracias, $n! Y toma, estas fueron mis primeras herramientas de $c. Están un poco estropeadas pero funcionan bien. Escoge lo que quieras, seguro que cualquiera de las dos cosas te vendrá bien.', 0); -- 1881 Habla con Anastasia -- https://es.classic.wowhead.com/quest=1881 SET @ID := 1881; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Saludos, $n. Fuiste $gsabio:sabia; al responder a mi llamada, y aún lo serás más si escuchas lo que voy a decirte...', 0), (@ID, 'esMX', 'Saludos, $n. Fuiste $gsabio:sabia; al responder a mi llamada, y aún lo serás más si escuchas lo que voy a decirte...', 0); -- 1882 La Hacienda Balnir -- https://es.classic.wowhead.com/quest=1882 SET @ID := 1882; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Tienes las Bocas de dragón? Esa clase de flor es muy receptiva a las energías mágicas y estudiar las Bocas de dragón que se crían tan cerca de las Tierras de la Peste podrá indicarnos si la Peste se ha introducido en los Claros de Tirisfal.', 0), (@ID, 'esMX', '¿Tienes las Bocas de dragón? 
Esa clase de flor es muy receptiva a las energías mágicas y estudiar las Bocas de dragón que se crían tan cerca de las Tierras de la Peste podrá indicarnos si la Peste se ha introducido en los Claros de Tirisfal.', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Ah, es un espécimen fantástico. Lo enviaré enseguida para que lo estudien nuestros boticarios y para que puedan determinar la extensión de la corrupción de las Tierras de la Peste.$B$BBien hecho, $n. A cambio de todos tus servicios te doy a elegir entre un orbe o un bastón. Elijas lo que elijas te será muy útil.', 0), (@ID, 'esMX', 'Ah, es un espécimen fantástico. Lo enviaré enseguida para que lo estudien nuestros boticarios y para que puedan determinar la extensión de la corrupción de las Tierras de la Peste.$B$BBien hecho, $n. A cambio de todos tus servicios te doy a elegir entre un orbe o un bastón. Elijas lo que elijas te será muy útil.', 0); -- 1883 Habla con Un'thuwa -- https://es.classic.wowhead.com/quest=1883 SET @ID := 1883; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Ave, $n. Loado seas, has respondido a mi petición de auxilio. Esperemos que estés $gpreparado:preparada; para esta tarea...', 0), (@ID, 'esMX', 'Ave, $n. Loado seas, has respondido a mi petición de auxilio. 
Esperemos que estés $gpreparado:preparada; para esta tarea...', 0); -- 1884 Montones de Ju-Ju -- https://es.classic.wowhead.com/quest=1884 SET @ID := 1884; UPDATE `quest_template_locale` SET `ObjectiveText1` = 'Montones de Juju destruidos', `VerifiedBuild` = 0 WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Debes darte prisa, $n. ¡Destruye los montones de Juju!', 0), (@ID, 'esMX', 'Debes darte prisa, $n. ¡Destruye los montones de Juju!', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Puedo sentir que los montones de Juju de Zalazane han sido destruidos y con tiempo y algo de suerte, el flujo de magia de Durotar volverá a la normalidad.$B$BHas hecho una gran gesta por nuestro pueblo, $n. Y has demostrado tu valor para nosotros, los magos de la Horda.', 0), (@ID, 'esMX', 'Puedo sentir que los montones de Juju de Zalazane han sido destruidos y con tiempo y algo de suerte, el flujo de magia de Durotar volverá a la normalidad.$B$BHas hecho una gran gesta por nuestro pueblo, $n. Y has demostrado tu valor para nosotros, los magos de la Horda.', 0); -- 1885 Mennet Carkad -- https://es.classic.wowhead.com/quest=1885 SET @ID := 1885; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Mmm? No hay duda de que has venido tan pronto te has enterado de que requería tu presencia. Claro que no esperaría otra cosa de $gun futuro mortacechador:una futura mortacechadora;.', 0), (@ID, 'esMX', '¿Mmm? 
No hay duda de que has venido tan pronto te has enterado de que requería tu presencia. Claro que no esperaría otra cosa de $gun futuro mortacechador:una futura mortacechadora;.', 0); -- 1886 Los mortacechadores -- https://es.classic.wowhead.com/quest=1886 SET @ID := 1886; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Lo has logrado?', 0), (@ID, 'esMX', '¿Lo has logrado?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Buen trabajo, $n. Has superado la primera parte de esta prueba. Con un poco de suerte, Andron no se dará cuenta de nada y creerá que eres realmente el mensajero que estaba esperando.', 0), (@ID, 'esMX', 'Buen trabajo, $n. Has superado la primera parte de esta prueba. Con un poco de suerte, Andron no se dará cuenta de nada y creerá que eres realmente el mensajero que estaba esperando.', 0); -- 1898 Los mortacechadores -- https://es.classic.wowhead.com/quest=1898 SET @ID := 1898; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Hola. ¿Hay algo que pueda hacer por ti?', 0), (@ID, 'esMX', 'Hola. ¿Hay algo que pueda hacer por ti?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Ah! Te estaba esperando. Seguro que quienes te envían están satisfechos con la información que les he dado hasta el momento, ¿no?$B$BSupongo que no tiene mucho sentido que te haga esta pregunta pues seguro que desconoces la respuesta. 
No creo que confiaran tanto en $gun simple mensajero:una simple mensajera;...', 0), (@ID, 'esMX', '¡Ah! Te estaba esperando. Seguro que quienes te envían están satisfechos con la información que les he dado hasta el momento, ¿no?$B$BSupongo que no tiene mucho sentido que te haga esta pregunta pues seguro que desconoces la respuesta. No creo que confiaran tanto en $gun simple mensajero:una simple mensajera;...', 0); -- 1899 Los mortacechadores -- https://es.classic.wowhead.com/quest=1899 SET @ID := 1899; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Lo has logrado, $n?', 0), (@ID, 'esMX', '¿Lo has logrado, $n?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Sin duda, has obtenido más información de Andron de la que esperaba. Déjame que le eche un vistazo antes de asignarte tu siguiente tarea.', 0), (@ID, 'esMX', 'Sin duda, has obtenido más información de Andron de la que esperaba. Déjame que le eche un vistazo antes de asignarte tu siguiente tarea.', 0); -- 1918 El elemento contaminado -- https://es.classic.wowhead.com/quest=1918 SET @ID := 1918; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Sí? ¿Necesitas algo?', 0), (@ID, 'esMX', '¿Sí? ¿Necesitas algo?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Te encuentras una asquerosa bola de agua ¿y decides traérmela? ¡¿Quién crees que soy?! 
¿El Anillo de la Tierra o algo así?$B$BDe hecho, esto me da una idea... quizás necesites al Anillo de la Tierra para esto. Vamos, que si contribuye a aclarar lo del lago de una vez por todas, estoy contigo.$B$B¿Sabes quiénes integran el Anillo de la Tierra, $n?', 0), (@ID, 'esMX', 'Te encuentras una asquerosa bola de agua ¿y decides traérmela? ¡¿Quién crees que soy?! ¿El Anillo de la Tierra o algo así?$B$BDe hecho, esto me da una idea... quizás necesites al Anillo de la Tierra para esto. Vamos, que si contribuye a aclarar lo del lago de una vez por todas, estoy contigo.$B$B¿Sabes quiénes integran el Anillo de la Tierra, $n?', 0); -- 1919 Preséntate ante Jennea -- https://es.classic.wowhead.com/quest=1919 SET @ID := 1919; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '$n, tengo una misión para ti, aquí en el distrito de los $c. Escucha atentamente, porque no hay tiempo que perder.', 0), (@ID, 'esMX', '$n, tengo una misión para ti, aquí en el distrito de los $c. Escucha atentamente, porque no hay tiempo que perder.', 0); -- 1920 Investiga lo que pasa en El Ermitaño Taciturno -- https://es.classic.wowhead.com/quest=1920 SET @ID := 1920; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Has capturado a las criaturas, $n? Deben ser estudiadas para determinar el motivo de su llegada.', 0), (@ID, 'esMX', '¿Has capturado a las criaturas, $n? Deben ser estudiadas para determinar el motivo de su llegada.', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Muy bien hecho! 
Estudiaré estas criaturas y, con diligencia y suerte, determinaré cómo se deslizaron en nuestro mundo.$B$BTu valor para la sociedad de magos crece, $n.', 0), (@ID, 'esMX', '¡Muy bien hecho! Estudiaré estas criaturas y, con diligencia y suerte, determinaré cómo se deslizaron en nuestro mundo.$B$BTu valor para la sociedad de magos crece, $n.', 0); -- 1921 Recolección de materiales -- https://es.classic.wowhead.com/quest=1921 SET @ID := 1921; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Hola, $n. Jennea me dijo que vendrías. ¿Tienes algo para mí?', 0), (@ID, 'esMX', 'Hola, $n. Jennea me dijo que vendrías. ¿Tienes algo para mí?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Sí, son unas piezas de lino excelentes. Podré hacer una bonita túnica y cuando entreteja las gemas de la falla, ¡tendremos una prenda realmente digna de $gun:una; $c!', 0), (@ID, 'esMX', 'Sí, son unas piezas de lino excelentes. Podré hacer una bonita túnica y cuando entreteja las gemas de la falla, ¡tendremos una prenda realmente digna de $gun:una; $c!', 0); -- 1938 Tratado de Ur sobre la Magia de las Sombras -- https://es.classic.wowhead.com/quest=1938 SET @ID := 1938; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Tienes el libro, $n? Sus secretos no son para los no instruidos y mirar sus entrañas puede llevar a la perdición y a la ruina.', 0), (@ID, 'esMX', '¿Tienes el libro, $n? 
Sus secretos no son para los no instruidos y mirar sus entrañas puede llevar a la perdición y a la ruina.', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Temía que Morganth hubiera escondido el libro en algún otro sitio o que hubiera encontrado un modo de abrirlo y leer su contenido.$B$BMe alegra saber que no fue así y que los sellos del Tratado de Ur sobre la Magia de las Sombras siguen en su sitio.', 0), (@ID, 'esMX', 'Temía que Morganth hubiera escondido el libro en algún otro sitio o que hubiera encontrado un modo de abrirlo y leer su contenido.$B$BMe alegra saber que no fue así y que los sellos del Tratado de Ur sobre la Magia de las Sombras siguen en su sitio.', 0); -- 1939 Sumo Hechicero Andromath -- https://es.classic.wowhead.com/quest=1939 SET @ID := 1939; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Saludos, $n. Es un placer verte, espero que tengas algo de tiempo para escuchar las palabras de un anciano...', 0), (@ID, 'esMX', 'Saludos, $n. Es un placer verte, espero que tengas algo de tiempo para escuchar las palabras de un anciano...', 0); -- 1940 Seda prístina de araña -- https://es.classic.wowhead.com/quest=1940 SET @ID := 1940; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Ah, sí. Los magos del Sagrario dijeron que vendrías a hablar conmigo. ¿Tienes la seda necesaria?', 0), (@ID, 'esMX', 'Ah, sí. Los magos del Sagrario dijeron que vendrías a hablar conmigo. 
¿Tienes la seda necesaria?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Unos bonitos especimenes. ¡Me pondré manos a la obra!', 0), (@ID, 'esMX', 'Unos bonitos especimenes. ¡Me pondré manos a la obra!', 0); -- 1941 Toga Tejemaná -- https://es.classic.wowhead.com/quest=1941 SET @ID := 1941; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Aquí está tu toga. ¡Es una de las mejores que he hecho nunca! Llévala con orgullo y cuando te pregunten... ¡di que te la hicieron en Diseños Larson!', 0), (@ID, 'esMX', 'Aquí está tu toga. ¡Es una de las mejores que he hecho nunca! Llévala con orgullo y cuando te pregunten... ¡di que te la hicieron en Diseños Larson!', 0); -- 1942 Prendas de nudo astral -- https://es.classic.wowhead.com/quest=1942 SET @ID := 1942; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Ya está lista! La seda que trajiste era tan delicada... que te hice la prenda un poco pequeña para quedarme yo con algo de sobra, ¿no te importa, verdad?$B$BEs una broma. Te quedará perfecta y será ideal tanto para tus aventuras como para una cena formal. ¡Que la disfrutes!', 0), (@ID, 'esMX', '¡Ya está lista! La seda que trajiste era tan delicada... que te hice la prenda un poco pequeña para quedarme yo con algo de sobra, ¿no te importa, verdad?$B$BEs una broma. Te quedará perfecta y será ideal tanto para tus aventuras como para una cena formal. 
¡Que la disfrutes!', 0); -- 1943 Habla con Deino -- https://es.classic.wowhead.com/quest=1943 SET @ID := 1943; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Bien, has venido. Mi tarea no es fácil, pero es vital para la sociedad de magos. Si me ayudas estoy segura de que te merecerá la pena.', 0), (@ID, 'esMX', 'Bien, has venido. Mi tarea no es fácil, pero es vital para la sociedad de magos. Si me ayudas estoy segura de que te merecerá la pena.', 0); -- 1944 Aguas de Xavian -- https://es.classic.wowhead.com/quest=1944 SET @ID := 1944; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Me alegra que hayas regresado con la piel intacta, $n. ¿Tienes la muestra de agua? Estoy ansioso por estudiar sus propiedades.', 0), (@ID, 'esMX', 'Me alegra que hayas regresado con la piel intacta, $n. ¿Tienes la muestra de agua? Estoy ansioso por estudiar sus propiedades.', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Ah, muy bien. ¿Confío en que los sátiros de Xavian te hayan dado pocos problemas?$B$BGracias, $n. Estudiaré esta muestra de agua de Xavian inmediatamente.', 0), (@ID, 'esMX', 'Ah, muy bien. ¿Confío en que los sátiros de Xavian te hayan dado pocos problemas?$B$BGracias, $n. 
Estudiaré esta muestra de agua de Xavian inmediatamente.', 0); -- 1945 Hermanas sonrientes -- https://es.classic.wowhead.com/quest=1945 SET @ID := 1945; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Saludos, $n. Deino me dijo que vendrías y que traerías el cabello de hermana sonriente. ¿Lo has traído?', 0), (@ID, 'esMX', 'Saludos, $n. Deino me dijo que vendrías y que traerías el cabello de hermana sonriente. ¿Lo has traído?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Ah, este cabello será perfecto para tejer una buena túnica, $n. Es una pena que las hermanas sonrientes tuvieran que morir para quitárselo, pero dudo que lo hubieran dado voluntariamente.$B$BUna pena... pero pequeña.', 0), (@ID, 'esMX', 'Ah, este cabello será perfecto para tejer una buena túnica, $n. Es una pena que las hermanas sonrientes tuvieran que morir para quitárselo, pero dudo que lo hubieran dado voluntariamente.$B$BUna pena... pero pequeña.', 0); -- 1946 Prendas de encaje Abisal -- https://es.classic.wowhead.com/quest=1946 SET @ID := 1946; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Aquí tienes tu encaje abisal, $n. Es cómodo y duradero y alberga la magia del cabello de las hermanas sonrientes.$B$BDisfrútalo y si lo encuentras adecuado menciónaselo a Deino, la maga. Me gustaría estar de su lado...', 0), (@ID, 'esMX', 'Aquí tienes tu encaje abisal, $n. Es cómodo y duradero y alberga la magia del cabello de las hermanas sonrientes.$B$BDisfrútalo y si lo encuentras adecuado menciónaselo a Deino, la maga. 
Me gustaría estar de su lado...', 0); -- 1947 Viaje al pantano -- https://es.classic.wowhead.com/quest=1947 SET @ID := 1947; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Silencio! ¡Es un momento muy delicado, no me molestes!$B$BUn momento...', 0), (@ID, 'esMX', '¡Silencio! ¡Es un momento muy delicado, no me molestes!$B$BUn momento...', 0); -- 1948 Objetos de poder -- https://es.classic.wowhead.com/quest=1948 SET @ID := 1948; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Hola, $n. ¿Tienes los objetos que necesito?', 0), (@ID, 'esMX', 'Hola, $n. ¿Tienes los objetos que necesito?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Aja! ¡Tienes el jade y la zarza! ¡Bien hecho!$B$B¿No es humo lo que veo salirte de las orejas? Debías estar demasiado cerca cuando el rayo golpeó el palo de tótem, pero no te preocupes, ¡ser golpeado por un rayo trae buena suerte!', 0), (@ID, 'esMX', '¡Aja! ¡Tienes el jade y la zarza! ¡Bien hecho!$B$B¿No es humo lo que veo salirte de las orejas? Debías estar demasiado cerca cuando el rayo golpeó el palo de tótem, pero no te preocupes, ¡ser golpeado por un rayo trae buena suerte!', 0); -- 1949 Secretos escondidos -- https://es.classic.wowhead.com/quest=1949 SET @ID := 1949; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Qué? ¡Habla más alto! 
Estos viejos oídos ya no son lo que eran.$B$B¡Bien, espero que hayas venido con una jarra fresca de cerveza o con alguna información para la siguiente carrera! Si no es así, más vale que te vayas... ¡porque tengo una varita polimorfizadora que lleva escrito tu nombre!', 0), (@ID, 'esMX', '¿Qué? ¡Habla más alto! Estos viejos oídos ya no son lo que eran.$B$B¡Bien, espero que hayas venido con una jarra fresca de cerveza o con alguna información para la siguiente carrera! Si no es así, más vale que te vayas... ¡porque tengo una varita polimorfizadora que lleva escrito tu nombre!', 0); -- 1950 Santo y seña -- https://es.classic.wowhead.com/quest=1950 SET @ID := 1950; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Que estas esperando? ¡Necesito mi frase mágica!', 0), (@ID, 'esMX', '¿Que estas esperando? ¡Necesito mi frase mágica!', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Ah, ya recuerdo...$B$BMe había olvidado de la pequeña maldición que lancé sobre Johnson. No puedo recordar porqué lo hice pero seguro que le ha venido bien, ¡pequeño ingrato!', 0), (@ID, 'esMX', 'Ah, ya recuerdo...$B$BMe había olvidado de la pequeña maldición que lancé sobre Johnson. No puedo recordar porqué lo hice pero seguro que le ha venido bien, ¡pequeño ingrato!', 0); -- 1951 Rituales de poder -- https://es.classic.wowhead.com/quest=1951 SET @ID := 1951; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Saludos, $n. ¿Encontraste el libro Rituales de Poder?', 0), (@ID, 'esMX', 'Saludos, $n. 
¿Encontraste el libro Rituales de Poder?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Eso es! ¡Es justo lo que necesito para prepararme para el ritual para crear tu varita! Espero que no haya sido una gran molestia encontrarlo...', 0), (@ID, 'esMX', '¡Eso es! ¡Es justo lo que necesito para prepararme para el ritual para crear tu varita! Espero que no haya sido una gran molestia encontrarlo...', 0); -- 1952 Varitas de mago -- https://es.classic.wowhead.com/quest=1952 SET @ID := 1952; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Aquí están las varitas que hice. ¡Elige una!', 0), (@ID, 'esMX', 'Aquí están las varitas que hice. ¡Elige una!', 0); -- 1953 Regresa a la Marisma -- https://es.classic.wowhead.com/quest=1953 SET @ID := 1953; DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Ah, sí. ¿Estás aquí por un orbe o un bastón, $n? Muy bien. Te ayudaré en su construcción.$B$BPero ten cuidado: el proceso es peligroso y podría significar tu muerte.$B$BO mi muerte. Definitivamente la muerte de alguien...$B$BPero esperemos que sea la muerte de otro, ¿eh?', 0), (@ID, 'esMX', 'Ah, sí. ¿Estás aquí por un orbe o un bastón, $n? Muy bien. Te ayudaré en su construcción.$B$BPero ten cuidado: el proceso es peligroso y podría significar tu muerte.$B$BO mi muerte. 
Definitivamente la muerte de alguien...$B$BPero esperemos que sea la muerte de otro, ¿eh?', 0); -- 1954 El orbe infernal -- https://es.classic.wowhead.com/quest=1954 SET @ID := 1954; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Tienes un orbe infernal, $n?', 0), (@ID, 'esMX', '¿Tienes un orbe infernal, $n?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Oh, qué bonito! No, no me refiero a que sea "agradable" como el jugo fresco en un día caluroso.$B$BDe hecho, NO es muy agradable ahora que lo pienso. No el jugo. ¡El orbe!$B$B¡Concéntrate, $n! ¡Necesitamos que te concentres si alguna vez vamos a terminar nuestra tarea!', 0), (@ID, 'esMX', '¡Oh, qué bonito! No, no me refiero a que sea "agradable" como el jugo fresco en un día caluroso.$B$BDe hecho, NO es muy agradable ahora que lo pienso. No el jugo. ¡El orbe!$B$B¡Concéntrate, $n! ¡Necesitamos que te concentres si alguna vez vamos a terminar nuestra tarea!', 0); -- 1955 El exorcismo -- https://es.classic.wowhead.com/quest=1955 SET @ID := 1955; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', 'Tendrás que matar a ese demonio para eliminar su mancha del orbe, $n.', 0), (@ID, 'esMX', 'Tendrás que matar a ese demonio para eliminar su mancha del orbe, $n.', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¡Bien hecho, $n! 
Le diste a ese demonio una lección que no olvidará.$B$BSe lo pensará dos veces antes de asomar su fea cabeza otra vez en este mundo, buscando al estúpido mortal que lo golpeó. Probablemente con amigos esta vez. Amigos grandes y escamosos que no querrían nada más que hacerte pedazos $r.$B$BNo te asusté, ¿verdad... $n?', 0), (@ID, 'esMX', '¡Bien hecho, $n! Le diste a ese demonio una lección que no olvidará.$B$BSe lo pensará dos veces antes de asomar su fea cabeza otra vez en este mundo, buscando al estúpido mortal que lo golpeó. Probablemente con amigos esta vez. Amigos grandes y escamosos que no querrían nada más que hacerte pedazos $r.$B$BNo te asusté, ¿verdad... $n?', 0); -- 1956 Poder en Uldaman -- https://es.classic.wowhead.com/quest=1956 SET @ID := 1956; DELETE FROM `quest_request_items_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_request_items_locale` (`id`, `locale`, `CompletionText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Has estado en Uldaman, $n? ¿Has derrotado al centinela de obsidiana?', 0), (@ID, 'esMX', '¿Has estado en Uldaman, $n? ¿Has derrotado al centinela de obsidiana?', 0); DELETE FROM `quest_offer_reward_locale` WHERE `id` = @ID AND `locale` IN('esES', 'esMX'); INSERT INTO `quest_offer_reward_locale` (`id`, `locale`, `RewardText`, `VerifiedBuild`) VALUES (@ID, 'esES', '¿Lo hiciste? ¡Bien hecho!$B$BEstaba charlando con algunos de mis colegas y cuando apareció tu nombre, dijeron: "¡Sabes que $n seguro que funciona bien bajo presión!"$B$BPero cuando les dije que te enviaba a Uldaman contra el centinela de obsidiana, ¡dijeron que estaba loca! Dijeron: "¡Tabetha, estás loca! ¡No hay forma de que $n pueda enfrentarse a ese centinela!"$B$BY dije: "Ya veremos", y luego dijeron "Sí, ya veremos..."$B$BBueno, seguro que lo verán, ¿no?', 0), (@ID, 'esMX', '¿Lo hiciste? 
¡Bien hecho!$B$BEstaba charlando con algunos de mis colegas y cuando apareció tu nombre, dijeron: "¡Sabes que $n seguro que funciona bien bajo presión!"$B$BPero cuando les dije que te enviaba a Uldaman contra el centinela de obsidiana, ¡dijeron que estaba loca! Dijeron: "¡Tabetha, estás loca! ¡No hay forma de que $n pueda enfrentarse a ese centinela!"$B$BY dije: "Ya veremos", y luego dijeron "Sí, ya veremos..."$B$BBueno, seguro que lo verán, ¿no?', 0);
Odyssey/TrinityCore
sql/old/3.3.5a/world/20121_2021_01_15/2021_01_15_05_world_335.sql
SQL
gpl-2.0
32,289
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   Copyright (c) 2011 Synaptics, Inc.

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   IN THE SOFTWARE.
   ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/battery.h>

/* Variables for F34 functionality (flash memory management) */
static unsigned short SynaF34DataBase;
static unsigned short SynaF34QueryBase;
static unsigned short SynaF01DataBase;
static unsigned short SynaF01CommandBase;

static unsigned short SynaF34Reflash_BlockNum;
static unsigned short SynaF34Reflash_BlockData;
static unsigned short SynaF34ReflashQuery_BootID;
static unsigned short SynaF34ReflashQuery_FlashPropertyQuery;
static unsigned short SynaF34ReflashQuery_FirmwareBlockSize;
static unsigned short SynaF34ReflashQuery_FirmwareBlockCount;
static unsigned short SynaF34ReflashQuery_ConfigBlockSize;
static unsigned short SynaF34ReflashQuery_ConfigBlockCount;

static unsigned short SynaFirmwareBlockSize;
static unsigned short SynaFirmwareBlockCount;
static unsigned long SynaImageSize;

static unsigned short SynaConfigBlockSize;
static unsigned short SynaConfigBlockCount;
static unsigned long SynaConfigImageSize;

static unsigned short SynaBootloadID;

static unsigned short SynaF34_FlashControl;

static u8 *SynafirmwareImgData;
static u8 *SynaconfigImgData;
static u8 *SynalockImgData;
static unsigned int SynafirmwareImgVersion;

static u8 *FirmwareImage;
static u8 *ConfigImage;

static const u8 *SynaFirmwareData;

/* Variables for F51 functionality (vendor-specific feature control) */
static unsigned short F51_Query_Base;
static unsigned short F51_Command_Base;
static unsigned short F51_Control_Base;
static unsigned short F51_Data_Base;
static unsigned short F51_Feature_Ctrl;

/* Variables for F54 functionality (analog test reporting) */
static unsigned short F54_Query_Base;
static unsigned short F54_Command_Base;
static unsigned short F54_Control_Base;
static unsigned short F54_Data_Base;
static unsigned short F54_Data_LowIndex;
static unsigned short F54_Data_HighIndex;
static unsigned short F54_Data_Buffer;
static unsigned short F54_PhysicalTx_Addr;
static unsigned short F54_PhysicalRx_Addr;

static unsigned short F01_Control_Base;
static unsigned short F01_Command_Base;
static unsigned short F01_Data_Base;

static unsigned short F11_Query_Base;
static unsigned short F11_MaxNumberOfTx_Addr;
static unsigned short F11_MaxNumberOfRx_Addr;

#define CFG_F54_TXCOUNT	50
#define CFG_F54_RXCOUNT	50

unsigned char TxChannelUsed[CFG_F54_TXCOUNT];
unsigned char RxChannelUsed[CFG_F54_RXCOUNT];
static unsigned char numberOfTx;
static unsigned char numberOfRx;

/* i2c client and IRQ gpio shared by every helper in this file */
static struct i2c_client *client;
static int gpio_irq;

/* Cache the i2c client so later sysfs/test entry points can talk to the IC */
inline void synaptics_set_i2c_client(struct i2c_client *i2c_client)
{
	client = i2c_client;
}

/* Read @size bytes from RMI register @address into @buf.
 * Returns 0 on success, -1 on i2c failure.
 */
static int readRMI(u8 address, u8 *buf, int size)
{
	if (i2c_master_send(client, &address, 1) < 0)
		return -1;

	if (i2c_master_recv(client, buf, size) < 0)
		return -1;

	return 0;
}

/* Write @size bytes from @buf to RMI register @address.
 * Returns 0 on success, -1 on allocation or i2c failure.
 */
static int writeRMI(u8 address, u8 *buf, int size)
{
	int ret = 0;
	u8 *msg_buf;

	msg_buf = kzalloc(size + 1, GFP_KERNEL);
	/* fix: kzalloc() can fail; the original dereferenced it unchecked */
	if (!msg_buf)
		return -1;

	msg_buf[0] = address;
	memcpy(msg_buf + 1, buf, size);

	if (i2c_master_send(client, msg_buf, size + 1) < 0)
		ret = -1;

	kfree(msg_buf);
	return ret;
}

/* SynaSetup scans the Page Description Table (PDT) and sets up the necessary
 * variables for the reflash process. This function is a "slim" version of the
 * PDT scan function in PDT.c, since only F34 and F01 are needed for reflash.
 */
static void SynaSetup(void)
{
	u8 address;
	u8 buffer[6];

	pr_info("tsp fw. : SynaSetup ++\n");

	/* PDT entries are 6 bytes each, walking down from 0xe9 */
	for (address = 0xe9; address > 0xc0; address = address - 6) {
		readRMI(address, buffer, 6);

		switch (buffer[5]) {
		case 0x34:
			SynaF34DataBase = buffer[3];
			SynaF34QueryBase = buffer[0];
			break;

		case 0x01:
			SynaF01DataBase = buffer[3];
			SynaF01CommandBase = buffer[1];
			break;
		}
	}

	SynaF34Reflash_BlockNum = SynaF34DataBase;
	SynaF34Reflash_BlockData = SynaF34DataBase + 2;
	SynaF34ReflashQuery_BootID = SynaF34QueryBase;
	SynaF34ReflashQuery_FlashPropertyQuery = SynaF34QueryBase + 2;
	SynaF34ReflashQuery_FirmwareBlockSize = SynaF34QueryBase + 3;
	SynaF34ReflashQuery_FirmwareBlockCount = SynaF34QueryBase + 5;
	SynaF34ReflashQuery_ConfigBlockSize = SynaF34QueryBase + 3;
	SynaF34ReflashQuery_ConfigBlockCount = SynaF34QueryBase + 7;

	SynafirmwareImgData = (u8 *)((&SynaFirmwareData[0]) + 0x100);
	SynaconfigImgData = (u8 *)(SynafirmwareImgData + SynaImageSize);
	SynafirmwareImgVersion = (u32)(SynaFirmwareData[7]);

	/* Lock-data offset inside the image depends on the image version */
	switch (SynafirmwareImgVersion) {
	case 2:
		SynalockImgData = (u8 *)((&SynaFirmwareData[0]) + 0xD0);
		break;
	case 3:
	case 4:
		SynalockImgData = (u8 *)((&SynaFirmwareData[0]) + 0xC0);
		break;
	case 5:
		SynalockImgData = (u8 *)((&SynaFirmwareData[0]) + 0xB0);
		break;
	default:
		break;
	}
}

/* SynaInitialize sets up the reflash process */
static void SynaInitialize(void)
{
	u8 uData[2];

	pr_info("tsp fw. : Initializing Reflash Process...\n");

	uData[0] = 0x00;
	writeRMI(0xff, uData, 1);	/* select register page 0 */

	SynaSetup();

	SynafirmwareImgData = &FirmwareImage[0];
	SynaconfigImgData = &ConfigImage[0];

	readRMI(SynaF34ReflashQuery_FirmwareBlockSize, uData, 2);
	SynaFirmwareBlockSize = uData[0] | (uData[1] << 8);
}

/* SynaReadFirmwareInfo reads the F34 query registers and retrieves the block
 * size and count of the firmware section of the image to be reflashed
 */
static void SynaReadFirmwareInfo(void)
{
	u8 uData[2];

	pr_info("tsp fw. : Read Firmware Info\n");

	readRMI(SynaF34ReflashQuery_FirmwareBlockSize, uData, 2);
	SynaFirmwareBlockSize = uData[0] | (uData[1] << 8);

	readRMI(SynaF34ReflashQuery_FirmwareBlockCount, uData, 2);
	SynaFirmwareBlockCount = uData[0] | (uData[1] << 8);
	SynaImageSize = SynaFirmwareBlockCount * SynaFirmwareBlockSize;
}

/* SynaReadConfigInfo reads the F34 query registers and retrieves the block size
 * and count of the configuration section of the image to be reflashed
 */
static void SynaReadConfigInfo(void)
{
	u8 uData[2];

	pr_info("tsp fw. : Read Config Info\n");

	readRMI(SynaF34ReflashQuery_ConfigBlockSize, uData, 2);
	SynaConfigBlockSize = uData[0] | (uData[1] << 8);

	readRMI(SynaF34ReflashQuery_ConfigBlockCount, uData, 2);
	SynaConfigBlockCount = uData[0] | (uData[1] << 8);
	SynaConfigImageSize = SynaConfigBlockCount * SynaConfigBlockSize;
}

/* SynaReadBootloadID reads the F34 query registers and retrieves the bootloader
 * ID of the firmware
 */
static void SynaReadBootloadID(void)
{
	u8 uData[2];

	pr_info("tsp fw. : SynaReadBootloadID\n");

	readRMI(SynaF34ReflashQuery_BootID, uData, 2);
	SynaBootloadID = uData[0] + uData[1] * 0x100;
}

/* SynaWriteBootloadID writes the bootloader ID to the F34 data register to
 * unlock the reflash process
 */
static void SynaWriteBootloadID(void)
{
	u8 uData[2];

	pr_info("tsp fw. : SynaWriteBootloadID\n");

	uData[0] = SynaBootloadID % 0x100;
	uData[1] = SynaBootloadID / 0x100;

	writeRMI(SynaF34Reflash_BlockData, uData, 2);
}

/* SynaEnableFlashing kicks off the reflash process */
static void SynaEnableFlashing(void)
{
	u8 uData;
	u8 uStatus;
	int retry = 0;

	pr_info("\nEnable Reflash...");

	/* Reflash is enabled by first reading the bootloader ID from
	   the firmware and write it back */
	SynaReadBootloadID();
	SynaWriteBootloadID();

	/* Make sure Reflash is not already enabled */
	do {
		readRMI(SynaF34_FlashControl, &uData, 1);
	} while (((uData & 0x0f) != 0x00));

	readRMI(SynaF01DataBase, &uStatus, 1);

	if ((uStatus & 0x40) == 0) {
		/* Write the "Enable Flash Programming command to F34 Control
		   register Wait for ATTN and then clear the ATTN. */
		uData = 0x0f;
		writeRMI(SynaF34_FlashControl, &uData, 1);
		mdelay(300);
		readRMI((SynaF01DataBase + 1), &uStatus, 1);

		/* Scan the PDT again to ensure all register offsets are
		   correct */
		SynaSetup();

		/* Read the "Program Enabled" bit of the F34 Control register,
		   and proceed only if the bit is set. */
		readRMI(SynaF34_FlashControl, &uData, 1);

		/* fix: the original looped forever on a stale value
		 * (`while (uData != 0x80) { ; }` never re-read the register),
		 * hanging the kernel if reflash-enable failed. Re-read with a
		 * bounded number of retries and bail out on timeout.
		 */
		while (uData != 0x80) {
			if (retry++ >= 300) {
				pr_err("tsp fw. : enable reflash failed\n");
				return;
			}
			usleep_range(500, 1000);
			readRMI(SynaF34_FlashControl, &uData, 1);
		}
	}
}

/* SynaWaitATTN waits for ATTN to be asserted within a certain time threshold.
 * The function also checks for the F34 "Program Enabled" bit and clear ATTN
 * accordingly.
 */
static void SynaWaitATTN(void)
{
	u8 uData;
	u8 uStatus;
	int cnt = 0;

	/* wait for the interrupt line to drop (ATTN asserted) */
	while (gpio_get_value(gpio_irq) && cnt++ < 300)
		usleep_range(500, 1000);

	/* NOTE: cnt is shared with the loop above, so both waits together
	 * are bounded by ~300 iterations. */
	do {
		readRMI(SynaF34_FlashControl, &uData, 1);
		usleep_range(500, 1000);
	} while ((uData != 0x80) && (cnt++ < 300));

	/* reading the interrupt status register clears ATTN */
	readRMI((SynaF01DataBase + 1), &uStatus, 1);
}

/* SynaProgramConfiguration writes the configuration section of the image block
 * by block
 */
static void SynaProgramConfiguration(void)
{
	u8 uData[2];
	u8 *puData;
	unsigned short blockNum;

	/* config section lives at fixed offset 0xb100 in the image */
	puData = (u8 *) &SynaFirmwareData[0xb100];

	pr_info("tsp fw. : Program Configuration Section...\n");

	for (blockNum = 0; blockNum < SynaConfigBlockCount; blockNum++) {
		uData[0] = blockNum & 0xff;
		uData[1] = (blockNum & 0xff00) >> 8;

		/* Block by block, write the block number and data to the
		   corresponding F34 data registers */
		writeRMI(SynaF34Reflash_BlockNum, uData, 2);
		writeRMI(SynaF34Reflash_BlockData, puData,
			 SynaConfigBlockSize);
		puData += SynaConfigBlockSize;

		/* Issue the "Write Configuration Block" command */
		uData[0] = 0x06;
		writeRMI(SynaF34_FlashControl, uData, 1);
		SynaWaitATTN();
		pr_info(".");
	}
}

/* SynaFinalizeReflash finalizes the reflash process */
static void SynaFinalizeReflash(void)
{
	u8 uData;
	u8 uStatus;

	pr_info("tsp fw. : Finalizing Reflash..\n");

	/* Issue the "Reset" command to F01 command register to reset the chip
	   This command will also test the new firmware image and check if its
	   is valid */
	uData = 1;
	writeRMI(SynaF01CommandBase, &uData, 1);
	mdelay(300);
	readRMI(SynaF01DataBase, &uData, 1);

	/* Sanity check that the reflash process is still enabled */
	do {
		readRMI(SynaF34_FlashControl, &uStatus, 1);
	} while ((uStatus & 0x0f) != 0x00);

	readRMI((SynaF01DataBase + 1), &uStatus, 1);
	SynaSetup();
	uData = 0;

	/* Check if the "Program Enabled" bit in F01 data register is cleared
	   Reflash is completed, and the image passes testing when the bit is
	   cleared */
	do {
		readRMI(SynaF01DataBase, &uData, 1);
	} while ((uData & 0x40) != 0);

	/* Rescan PDT to update any changed register offsets */
	SynaSetup();
	pr_info("tsp fw. : Reflash Completed. Please reboot.\n");
}

/* SynaFlashFirmwareWrite writes the firmware section of the image block by
 * block
 */
static void SynaFlashFirmwareWrite(void)
{
	u8 *puFirmwareData;
	u8 uData[2];
	unsigned short blockNum;

	pr_info("tsp fw. : SynaFlashFirmwareWrite\n");

	/* firmware payload starts after the 0x100-byte image header */
	puFirmwareData = (u8 *) &SynaFirmwareData[0x100];

	for (blockNum = 0; blockNum < SynaFirmwareBlockCount; ++blockNum) {
		/* Block by block, write the block number and data to the
		   corresponding F34 data registers */
		uData[0] = blockNum & 0xff;
		uData[1] = (blockNum & 0xff00) >> 8;
		writeRMI(SynaF34Reflash_BlockNum, uData, 2);

		writeRMI(SynaF34Reflash_BlockData, puFirmwareData,
			 SynaFirmwareBlockSize);
		puFirmwareData += SynaFirmwareBlockSize;

		/* Issue the "Write Firmware Block" command */
		uData[0] = 2;
		writeRMI(SynaF34_FlashControl, uData, 1);

		SynaWaitATTN();
	}
}

/* SynaProgramFirmware prepares the firmware writing process */
static void SynaProgramFirmware(void)
{
	u8 uData;

	pr_info("tsp fw. : Program Firmware Section...");

	SynaReadBootloadID();
	SynaWriteBootloadID();

	/* command 3 = "Erase All" */
	uData = 3;
	writeRMI(SynaF34_FlashControl, &uData, 1);
	SynaWaitATTN();

	SynaFlashFirmwareWrite();
}

/* commenting this func as we r not using it currently */
#if 0
/* eraseConfigBlock erases the config block */
static void eraseConfigBlock(void)
{
	u8 uData;

	pr_info("tsp fw. : eraseConfigBlock\n");

	/* Erase of config block is done by first entering into
	   bootloader mode */
	SynaReadBootloadID();
	SynaWriteBootloadID();

	/* Command 7 to erase config block */
	uData = 7;
	writeRMI(SynaF34_FlashControl, &uData, 1);

	SynaWaitATTN();
}
#endif

/**
 * synaptics_fw_update - reflash the touch IC with @fw_data
 * @ts_client: i2c client used to talk to the IC
 * @fw_data: complete firmware image (header + firmware + config)
 * @gpio: ATTN interrupt gpio number
 *
 * Returns true on success, false if scratch memory could not be allocated.
 */
bool synaptics_fw_update(struct i2c_client *ts_client,
					const u8 *fw_data, const int gpio)
{
	client = ts_client;
	SynaFirmwareData = fw_data;
	gpio_irq = gpio;

	FirmwareImage = kzalloc(16000, GFP_KERNEL);
	if (FirmwareImage == NULL) {
		pr_err("tsp fw. : alloc fw. memory failed.\n");
		return false;
	}

	ConfigImage = kzalloc(16000, GFP_KERNEL);
	if (ConfigImage == NULL) {
		pr_err("tsp fw. : alloc fw. memory failed.\n");
		/* fix: the original leaked FirmwareImage on this path */
		kfree(FirmwareImage);
		return false;
	}

	SynaInitialize();
	SynaReadConfigInfo();
	SynaReadFirmwareInfo();
	SynaF34_FlashControl = SynaF34DataBase + SynaFirmwareBlockSize + 2;

	SynaEnableFlashing();
	SynaProgramFirmware();
	SynaProgramConfiguration();
	SynaFinalizeReflash();

	kfree(FirmwareImage);
	kfree(ConfigImage);

	pr_info("tsp fw. : fw. update completed.");

	return true;
}

/* PDTscan scans the Page Description Table (PDT) of the current page and
 * records the register bases for F01/F11/F51/F54 used by the test functions.
 */
static void PDTscan(void)
{
	unsigned char address;
	unsigned char buffer[6];

	for (address = 0xe9; address > 0xd0; address = address - 6) {
		readRMI(address, &buffer[0], 6);
		if (!buffer[5])
			break;	/* empty entry ends the table */

		switch (buffer[5]) {
		case 0x01:
			F01_Command_Base = buffer[1];
			F01_Control_Base = buffer[2];
			F01_Data_Base = buffer[3];
			break;
		case 0x11:
			F11_Query_Base = buffer[0];
			F11_MaxNumberOfTx_Addr = F11_Query_Base + 2;
			F11_MaxNumberOfRx_Addr = F11_Query_Base + 3;
			break;
		case 0x51:
			F51_Query_Base = buffer[0];
			F51_Command_Base = buffer[1];
			F51_Control_Base = buffer[2];
			F51_Data_Base = buffer[3];
			F51_Feature_Ctrl = F51_Control_Base + 8;
			break;
		case 0x54:
			F54_Query_Base = buffer[0];
			F54_Command_Base = buffer[1];
			F54_Control_Base = buffer[2];
			F54_Data_Base = buffer[3];
			F54_Data_LowIndex = F54_Data_Base + 1;
			F54_Data_HighIndex = F54_Data_Base + 2;
			F54_Data_Buffer = F54_Data_Base + 3;
			F54_PhysicalRx_Addr = F54_Control_Base + 18;
			break;
		}
	}
}

/* Select RMI register page @page */
static void SetPage(unsigned char page)
{
	/* changing page */
	writeRMI(0xff, &page, 1);
}

/* RegSetup scans pages 0x00/0x01, discovers the used Tx/Rx channels and
 * leaves the device on page 0x01 with only the analog interrupt enabled.
 */
static void RegSetup(void)
{
	unsigned char MaxNumberTx;
	unsigned char MaxNumberRx;
	unsigned char command;
	int i;

	numberOfRx = 0;
	numberOfTx = 0;

	SetPage(0x01);
	PDTscan();	/* scan for page 0x01 */
	SetPage(0x00);
	PDTscan();	/* scan for page 0x00 */

	/* Check Used Rx channels */
	readRMI(F11_MaxNumberOfRx_Addr, &MaxNumberRx, 1);
	SetPage(0x01);
	F54_PhysicalTx_Addr = F54_PhysicalRx_Addr + MaxNumberRx;
	readRMI(F54_PhysicalRx_Addr, &RxChannelUsed[0], MaxNumberRx);

	/* Checking Used Tx channels */
	SetPage(0x00);
	readRMI(F11_MaxNumberOfTx_Addr, &MaxNumberTx, 1);
	SetPage(0x01);
	readRMI(F54_PhysicalTx_Addr, &TxChannelUsed[0], MaxNumberTx);

	/* Check used number of Rx (0xff marks an unused channel) */
	for (i = 0; i < MaxNumberRx; i++) {
		if (RxChannelUsed[i] == 0xff)
			break;
		numberOfRx++;
	}

	/* Check used number of Tx */
	for (i = 0; i < MaxNumberTx; i++) {
		if (TxChannelUsed[i] == 0xff)
			break;
		numberOfTx++;
	}

	/* Enabling only the analog image reporting interrupt, and
	 * turn off the rest */
	SetPage(0x00);
	command = 0x08;
	writeRMI(F01_Control_Base+1, &command, 1);

	SetPage(0x01);
}

/**
 * F54_SetRawCapData - read a raw-capacitance frame into @node_data
 * @ts_client: i2c client for the touch IC
 * @node_data: caller array of numberOfTx*numberOfRx s16 values
 *
 * Returns true on success, false on allocation failure.
 * Side effect: resets the touch IC at the end.
 */
bool F54_SetRawCapData(struct i2c_client *ts_client, s16 *node_data)
{
	u8 *ImageBuffer;
	int i, k, length;
	unsigned char command;

	client = ts_client;

	RegSetup();	/* PDT scan for reg map adress mapping */

	length = numberOfTx * numberOfRx * 2;

	/* Set report mode to to run the AutoScan */
	command = 0x03;
	writeRMI(F54_Data_Base, &command, 1);

	command = 0x00;
	writeRMI(F54_Data_LowIndex, &command, 1);
	writeRMI(F54_Data_HighIndex, &command, 1);

	/* Set the GetReport bit to run the AutoScan */
	command = 0x01;
	writeRMI(F54_Command_Base, &command, 1);

	/* Wait until the command is completed */
	do {
		udelay(1000);
		readRMI(F54_Command_Base, &command, 1);
	} while (command == 0x01);

	ImageBuffer = kmalloc(sizeof(u8) * CFG_F54_TXCOUNT *
					CFG_F54_RXCOUNT * 2, GFP_KERNEL);
	if (ImageBuffer == NULL) {
		pr_err("tsp fw. : alloc fw. memory failed.\n");
		return false;
	}

	/* Read raw_cap data (little-endian s16 per node) */
	readRMI(F54_Data_Buffer, ImageBuffer, length);
	for (i = 0, k = 0; i < numberOfTx * numberOfRx; i++, k += 2)
		node_data[i] = (s16)(ImageBuffer[k] |
					(ImageBuffer[k + 1] << 8));

	/* reset TSP IC */
	SetPage(0x00);
	command = 0x01;
	writeRMI(F01_Command_Base, &command, 1);
	/* NOTE(review): other reset paths in this file wait msleep(160);
	 * 160us here looks suspiciously short -- confirm against datasheet. */
	udelay(160);
	readRMI(F01_Data_Base + 1, &command, 1);

	kfree(ImageBuffer);

	return true;
}

/* (important) should be defined the value(=register address) according to
 * register map 'Multi Metric Noise Mitigation Control' */
#define NOISEMITIGATION	0xB1

/**
 * F54_SetRxToRxData - read the Rx-to-Rx short-test matrix into @node_data
 * @ts_client: i2c client for the touch IC
 * @node_data: caller array of numberOfRx*numberOfRx s16 values
 *
 * Reads the data in two passes (report 0x07 then 0x11), forces a
 * recalibration, and re-enables interrupts. Returns false only on
 * allocation failure.
 */
bool F54_SetRxToRxData(struct i2c_client *ts_client, s16 *node_data)
{
	u8 *ImageBuffer;
	int i, k, length;
	u8 command;

	client = ts_client;

	RegSetup();	/* PDT scan for reg map adress mapping */

	/* Set report mode to run Rx-to-Rx 1st data */
	length = numberOfRx * numberOfTx * 2;
	command = 0x07;
	writeRMI(F54_Data_Base, &command, 1);

	/* NoCDM4 */
	command = 0x01;
	writeRMI(NOISEMITIGATION, &command, 1);

	command = 0x04;
	writeRMI(F54_Command_Base, &command, 1);
	do {
		mdelay(1);
		readRMI(F54_Command_Base, &command, 1);
	} while (command != 0x02);

	command = 0x02;
	writeRMI(F54_Command_Base, &command, 1);
	do {
		mdelay(1);
		readRMI(F54_Command_Base, &command, 1);
	} while (command != 0x00);

	command = 0x00;
	writeRMI(F54_Data_LowIndex, &command, 1);
	writeRMI(F54_Data_HighIndex, &command, 1);

	/* Set the GetReport bit to run Tx-to-Tx */
	command = 0x01;
	writeRMI(F54_Command_Base, &command, 1);

	/* Wait until the command is completed */
	do {
		mdelay(1);
		readRMI(F54_Command_Base, &command, 1);
	} while (command != 0x00);

	ImageBuffer = kmalloc(sizeof(u8) * CFG_F54_TXCOUNT *
					CFG_F54_RXCOUNT * 2, GFP_KERNEL);
	if (ImageBuffer == NULL) {
		pr_err("tsp fw. : alloc fw. memory failed.\n");
		return false;
	}

	readRMI(F54_Data_Buffer, &ImageBuffer[0], length);

	for (i = 0, k = 0; i < numberOfTx * numberOfRx; i++, k += 2) {
		node_data[i] = (s16)((ImageBuffer[k] |
					(ImageBuffer[k + 1] << 8)));
	}

	/* Set report mode to run Rx-to-Rx 2nd data */
	length = numberOfRx * (numberOfRx - numberOfTx) * 2;
	command = 0x11;
	writeRMI(F54_Data_Base, &command, 1);

	command = 0x00;
	writeRMI(F54_Data_LowIndex, &command, 1);
	writeRMI(F54_Data_HighIndex, &command, 1);

	/* Set the GetReport bit to run Tx-to-Tx */
	command = 0x01;
	writeRMI(F54_Command_Base, &command, 1);

	/* Wait until the command is completed */
	do {
		mdelay(1);
		readRMI(F54_Command_Base, &command, 1);
	} while (command != 0x00);

	readRMI(F54_Data_Buffer, &ImageBuffer[0], length);

	for (i = 0, k = 0; i < (numberOfRx - numberOfTx) * numberOfRx;
							i++, k += 2)
		node_data[(numberOfTx * numberOfRx) + i] =
			(s16)(ImageBuffer[k] | (ImageBuffer[k + 1] << 8));

	/* Set the Force Cal */
	command = 0x02;
	writeRMI(F54_Command_Base, &command, 1);

	do {
		mdelay(1);
		readRMI(F54_Command_Base, &command, 1);
	} while (command != 0x00);

	/* enable all the interrupts */
	SetPage(0x00);
	command = 0x01;
	writeRMI(F01_Command_Base, &command, 1);
	msleep(160);

	/* Read Interrupt status register to Interrupt line goes to high */
	readRMI(F01_Data_Base+1, &command, 1);

	kfree(ImageBuffer);

	return true;
}

/**
 * F54_TxToTest() - tx to tx, tx to gnd channel short test.
 * @ts_client : i2c client for i2c comm. with TS IC.
 * @node_data : array pointer test value be stored.
 * @mode : command that select return value tx-to-tx or tx-to-gnd.
 *	normal value : '0' at tx-to-tx test
 *		       '1' at tx-to-gnd test
 */
#define TX_TO_TX_TEST_MODE	0x05
#define TX_TO_GND_TEST_MODE	0x10

bool F54_TxToTest(struct i2c_client *ts_client, s16 *node_data, int mode)
{
	u8 ImageBuffer[CFG_F54_TXCOUNT] = {0, };
	u8 ImageArray[CFG_F54_TXCOUNT] = {0, };
	u8 command;
	int i, k, length, shift;

	client = ts_client;

	RegSetup();	/* PDT scan for reg map adress mapping */

	length = (numberOfTx + 7) / 8;	/* one bit per Tx channel */

	/* Set report mode to run Tx-to-Tx */
	command = mode;
	writeRMI(F54_Data_Base, &command, 1);

	command = 0x00;
	writeRMI(F54_Data_LowIndex, &command, 1);
	writeRMI(F54_Data_HighIndex, &command, 1);

	/* Set the GetReport bit to run Tx-to-Tx */
	command = 0x01;
	writeRMI(F54_Command_Base, &command, 1);

	/* Wait until the command is completed */
	do {
		mdelay(1);
		readRMI(F54_Command_Base, &command, 1);
	} while (command != 0x00);

	readRMI(F54_Data_Buffer, &ImageBuffer[0], length);

	if (mode == TX_TO_TX_TEST_MODE) {
		/* One bit per transmitter channel */
		for (i = 0, k = 0; i < numberOfTx; i++) {
			k = i / 8;
			shift = i % 8;
			node_data[i] = ImageBuffer[k] & (1 << shift);
		}
	} else if (mode == TX_TO_GND_TEST_MODE) {
		/* One bit per transmitter channel */
		for (i = 0, k = 0; i < length * 8; i++) {
			k = i / 8;
			shift = i % 8;
			if (ImageBuffer[k] & (1 << shift))
				ImageArray[i] = 1;
		}
		for (i = 0; i < numberOfTx; i++)
			node_data[i] = ImageArray[TxChannelUsed[i]];
	}

	/* enable all the interrupts */
	SetPage(0x00);
	command = 0x01;
	writeRMI(F01_Command_Base, &command, 1);
	msleep(160);

	/* Read Interrupt status register to Interrupt line goes to high */
	readRMI(F01_Data_Base+1, &command, 1);

	return true;
}

/**
 * synaptics_set_low_temp_bit - set/clear the F51 low-temperature feature bit
 * @set: true to set the bit, false to leave it unchanged (OR with 0)
 *
 * Returns 0 on success, -1 if no i2c client has been registered.
 */
int synaptics_set_low_temp_bit(const bool set)
{
	u8 command;

	if (!client) {
		pr_err("tsp: %s: Can't find i2c client info.\n", __func__);
		return -1;
	}

	SetPage(0x04);
	PDTscan();

	readRMI(F51_Feature_Ctrl, &command, 1);
	command |= set ? 0x80 : 0x00;
	writeRMI(F51_Feature_Ctrl, &command, 1);

	SetPage(0x00);

	return 0;
}
1N4148/android_kernel_samsung_espresso10
drivers/input/touchscreen/vendor/synaptics.c
C
gpl-2.0
22,730
/* Copyright (C) 1992-1998, 2000 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* Modified for uClibc by Erik Andersen */

#include <_lfs_64.h>
#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/types.h>
#include "dirstream.h"

libc_hidden_proto(memcpy)
libc_hidden_proto(opendir)
libc_hidden_proto(closedir)
libc_hidden_proto(qsort)
libc_hidden_proto(readdir64)

/* scandir64 - LFS64 variant of POSIX scandir().
 *
 * Reads directory @dir, keeps entries for which @selector returns nonzero
 * (or all entries when @selector is NULL), stores a malloc'd array of
 * malloc'd dirent64 copies in *@namelist, sorted with qsort using @compar
 * when @compar is non-NULL.
 *
 * Returns the number of entries collected, or -1 with errno set on error
 * (opendir failure, readdir64 failure, or out of memory).  On error all
 * intermediate allocations are freed; on success the caller owns them.
 */
int scandir64(const char *dir, struct dirent64 ***namelist,
	int (*selector) (const struct dirent64 *),
	int (*compar) (const void *, const void *))
{
    DIR *dp = opendir (dir);
    struct dirent64 *current;
    struct dirent64 **names = NULL;
    size_t names_size = 0, pos;
    int save;

    if (dp == NULL)
	return -1;

    /* errno is saved and zeroed so that, after the read loop, a nonzero
     * errno unambiguously means readdir64 or an allocation failed. */
    save = errno;
    __set_errno (0);

    pos = 0;
    while ((current = readdir64 (dp)) != NULL)
	if (selector == NULL || (*selector) (current))
	{
	    struct dirent64 *vnew;
	    size_t dsize;

	    /* Ignore errors from selector or readdir64 */
	    __set_errno (0);

	    /* Grow the pointer array geometrically (10, 20, 40, ...). */
	    if (unlikely(pos == names_size))
	    {
		struct dirent64 **new;
		if (names_size == 0)
		    names_size = 10;
		else
		    names_size *= 2;
		new = (struct dirent64 **) realloc (names,
					names_size * sizeof (struct dirent64 *));
		if (new == NULL)
		    break;	/* realloc sets errno; caught below */
		names = new;
	    }

	    /* Copy only the bytes actually used by this entry's name. */
	    dsize = &current->d_name[_D_ALLOC_NAMLEN (current)] - (char *) current;
	    vnew = (struct dirent64 *) malloc (dsize);
	    if (vnew == NULL)
		break;		/* malloc sets errno; caught below */

	    names[pos++] = (struct dirent64 *) memcpy (vnew, current, dsize);
	}

    /* Nonzero errno here means readdir64/realloc/malloc failed above:
     * free everything collected so far and report the failure. */
    if (unlikely(errno != 0))
    {
	save = errno;
	closedir (dp);
	while (pos > 0)
	    free (names[--pos]);
	free (names);
	__set_errno (save);
	return -1;
    }

    closedir (dp);
    __set_errno (save);	/* restore caller's errno on success */

    /* Sort the list if we have a comparison function to sort with.  */
    if (compar != NULL)
	qsort (names, pos, sizeof (struct dirent64 *), compar);
    *namelist = names;
    return pos;
}
rhuitl/uClinux
uClibc/libc/misc/dirent/scandir64.c
C
gpl-2.0
2,741
/*
    Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies)

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public License
    along with this library; see the file COPYING.LIB.  If not, write to
    the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
    Boston, MA 02110-1301, USA.
*/

#include "config.h"

#include "PlatformVideoWindow.h"

#if ENABLE(VIDEO) && USE(GSTREAMER) && USE(NATIVE_FULLSCREEN_VIDEO)

#include "HTMLVideoElement.h"
#include "PlatformVideoWindowPrivate.h"

#include <QCursor>
#include <QGuiApplication>
#include <QKeyEvent>
#include <QPalette>

using namespace WebCore;

#ifndef QT_NO_CURSOR
// How long the mouse cursor stays visible after the last mouse move.
static const int gHideMouseCursorDelay = 3000;
#endif

// Application-modal fullscreen window that hosts native fullscreen video.
// Space toggles play/pause, Escape (or double-click) requests close, and
// the mouse cursor auto-hides after a period of inactivity.
FullScreenVideoWindow::FullScreenVideoWindow()
    : m_mediaElement(0)
{
    setModality(Qt::ApplicationModal);
#ifndef QT_NO_CURSOR
    m_cursorTimer.setSingleShot(true);
    connect(&m_cursorTimer, SIGNAL(timeout()), this, SLOT(hideCursor()));
#endif
}

// Attach the media element the keyboard shortcuts operate on.
// The element is borrowed, not owned.
void FullScreenVideoWindow::setVideoElement(HTMLVideoElement* element)
{
    m_mediaElement = element;
}

// Space toggles play/pause on the attached element; Escape emits closed().
// All keys are also forwarded to the base class.
void FullScreenVideoWindow::keyPressEvent(QKeyEvent* ev)
{
    if (m_mediaElement && ev->key() == Qt::Key_Space) {
        if (!m_mediaElement->paused())
            m_mediaElement->pause();
        else
            m_mediaElement->play();
    } else if (ev->key() == Qt::Key_Escape)
        emit closed();
    QWindow::keyPressEvent(ev);
}

// Mouse move re-shows the (auto-hidden) cursor; double-click closes the
// window; on Close the override cursor is restored so the application
// cursor state is not leaked.
bool FullScreenVideoWindow::event(QEvent* ev)
{
    switch (ev->type()) {
    case QEvent::MouseMove:
        showCursor();
        ev->accept();
        return true;
    case QEvent::MouseButtonDblClick:
        emit closed();
        ev->accept();
        return true;
    case QEvent::Close:
#ifndef QT_NO_CURSOR
        m_cursorTimer.stop();
#endif
#ifndef QT_NO_CURSOR
        QGuiApplication::restoreOverrideCursor();
#endif
        break;
    default:
        break;
    }
    return QWindow::event(ev);
}

// Enter fullscreen, raise above other windows, and start with the cursor
// hidden (it reappears on mouse move).
void FullScreenVideoWindow::showFullScreen()
{
    QWindow::showFullScreen();
    raise();
    hideCursor();
}

// Hide the cursor application-wide via an override cursor.
void FullScreenVideoWindow::hideCursor()
{
#ifndef QT_NO_CURSOR
    QGuiApplication::setOverrideCursor(QCursor(Qt::BlankCursor));
#endif
}

// Restore the cursor and (re)arm the auto-hide timer.
void FullScreenVideoWindow::showCursor()
{
#ifndef QT_NO_CURSOR
    QGuiApplication::restoreOverrideCursor();
    m_cursorTimer.start(gHideMouseCursorDelay);
#endif
}

// Creates and shows the native fullscreen window and records its native
// window id for the GStreamer video sink.
PlatformVideoWindow::PlatformVideoWindow()
{
    QWindow* win = new FullScreenVideoWindow();
    m_window = win;
    win->setFlags(win->flags() | Qt::FramelessWindowHint);
    // FIXME: Port to Qt 5.
    win->showFullScreen();
    m_videoWindowId = win->winId();
}

PlatformVideoWindow::~PlatformVideoWindow()
{
    delete m_window;
    m_videoWindowId = 0;
}

// No overlay preparation is needed on this platform.
void PlatformVideoWindow::prepareForOverlay(GstMessage*)
{
}
#endif // ENABLE(VIDEO) && USE(GSTREAMER) && USE(NATIVE_FULLSCREEN_VIDEO)
166MMX/openjdk.java.net-openjfx-8u40-rt
modules/web/src/main/native/Source/WebCore/platform/graphics/gstreamer/PlatformVideoWindowQt.cpp
C++
gpl-2.0
3,376
/*
 *  Shared Transport Line discipline driver Core
 *	Init Manager module responsible for GPIO control
 *	and firmware download
 *  Copyright (C) 2009-2010 Texas Instruments
 *  Author: Pavan Savoy <pavan_savoy@ti.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#define pr_fmt(fmt) "(stk) :" fmt
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/gpio.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/tty.h>

#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
#include <linux/serial_core.h>

#define MAX_ST_DEVICES	3	/* Imagine 1 on each UART for now */
static struct platform_device *st_kim_devices[MAX_ST_DEVICES];

/**********************************************************************/
/* internal functions */

/**
 * st_get_plat_device -
 *	function which returns the reference to the platform device
 *	requested by id. As of now only 1 such device exists (id=0)
 *	the context requesting for reference can get the id to be
 *	requested by a. The protocol driver which is registering or
 *	b. the tty device which is opened.
 */
static struct platform_device *st_get_plat_device(int id)
{
	return st_kim_devices[id];
}

/**
 * validate_firmware_response -
 *	function to return whether the firmware response was proper
 *	in case of error don't complete so that waiting for proper
 *	response times out
 *
 *	Byte 5 of the response is the HCI command-complete status;
 *	nonzero means the chip rejected the script command, so the
 *	skb is dropped and the downloader keeps waiting.
 */
void validate_firmware_response(struct kim_data_s *kim_gdata)
{
	struct sk_buff *skb = kim_gdata->rx_skb;
	if (unlikely(skb->data[5] != 0)) {
		pr_err("no proper response during fw download");
		pr_err("data6 %x", skb->data[5]);
		kfree_skb(skb);
		return;		/* keep waiting for the proper response */
	}

	/* becos of all the script being downloaded */
	complete_all(&kim_gdata->kim_rcvd);
	kfree_skb(skb);
}

/* check for data len received inside kim_int_recv
 * most often hit the last case to update state to waiting for data
 *
 * Returns the payload length to collect next, or 0 when the packet is
 * finished (or was dropped for being too large for the skb).
 */
static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
{
	register int room = skb_tailroom(kim_gdata->rx_skb);

	pr_debug("len %d room %d", len, room);

	if (!len) {
		/* zero-length payload: the event is complete as-is */
		validate_firmware_response(kim_gdata);
	} else if (len > room) {
		/* Received packet's payload length is larger.
		 * We can't accommodate it in created skb.
		 */
		pr_err("Data length is too large len %d room %d", len,
			   room);
		kfree_skb(kim_gdata->rx_skb);
	} else {
		/* Packet header has non-zero payload length and
		 * we have enough space in created skb. Lets read
		 * payload data */
		kim_gdata->rx_state = ST_W4_DATA;
		kim_gdata->rx_count = len;
		return len;
	}

	/* Change ST LL state to continue to process next
	 * packet */
	kim_gdata->rx_state = ST_W4_PACKET_TYPE;
	kim_gdata->rx_skb = NULL;
	kim_gdata->rx_count = 0;

	return 0;
}

/**
 * kim_int_recv - receive function called during firmware download
 *	firmware download responses on different UART drivers
 *	have been observed to come in bursts of different
 *	tty_receive and hence the logic
 *
 *	Small state machine: ST_W4_PACKET_TYPE (expect 0x04 HCI event
 *	marker) -> ST_W4_HEADER (2-byte event header) -> ST_W4_DATA
 *	(payload), carrying partial packets across calls via
 *	rx_state/rx_count/rx_skb.
 */
void kim_int_recv(struct kim_data_s *kim_gdata,
	const unsigned char *data, long count)
{
	const unsigned char *ptr;
	int len = 0, type = 0;
	unsigned char *plen;

	pr_debug("%s", __func__);
	/* Decode received bytes here */
	ptr = data;
	if (unlikely(ptr == NULL)) {
		pr_err(" received null from TTY ");
		return;
	}

	while (count) {
		if (kim_gdata->rx_count) {
			/* continue filling the packet begun in a previous
			 * burst */
			len = min_t(unsigned int, kim_gdata->rx_count, count);
			memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
			kim_gdata->rx_count -= len;
			count -= len;
			ptr += len;

			if (kim_gdata->rx_count)
				continue;

			/* Check ST RX state machine , where are we? */
			switch (kim_gdata->rx_state) {
				/* Waiting for complete packet ? */
			case ST_W4_DATA:
				pr_debug("Complete pkt received");
				validate_firmware_response(kim_gdata);
				kim_gdata->rx_state = ST_W4_PACKET_TYPE;
				kim_gdata->rx_skb = NULL;
				continue;
				/* Waiting for Bluetooth event header ? */
			case ST_W4_HEADER:
				plen =
				(unsigned char *)&kim_gdata->rx_skb->data[1];
				pr_debug("event hdr: plen 0x%02x\n", *plen);
				kim_check_data_len(kim_gdata, *plen);
				continue;
			}	/* end of switch */
		}	/* end of if rx_state */
		switch (*ptr) {
			/* Bluetooth event packet? */
		case 0x04:
			kim_gdata->rx_state = ST_W4_HEADER;
			kim_gdata->rx_count = 2;	/* event code + plen */
			type = *ptr;
			break;
		default:
			pr_info("unknown packet");
			ptr++;
			count--;
			continue;
		}
		ptr++;
		count--;
		kim_gdata->rx_skb =
			alloc_skb(1024+8, GFP_ATOMIC);
		if (!kim_gdata->rx_skb) {
			pr_err("can't allocate mem for new packet");
			kim_gdata->rx_state = ST_W4_PACKET_TYPE;
			kim_gdata->rx_count = 0;
			return;
		}
		skb_reserve(kim_gdata->rx_skb, 8);
		kim_gdata->rx_skb->cb[0] = 4;
		kim_gdata->rx_skb->cb[1] = 0;
	}
	return;
}

/* read_local_version - query the chip's HCI version register and derive
 * the firmware script name "TIInit_<chip>.<maj>.<min>.bts" into
 * @bts_scr_name (caller-supplied, at least 30 bytes).
 * Returns 0 on success, -EIO/-ETIMEDOUT on failure.
 */
static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
{
	unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
	const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };

	pr_debug("%s", __func__);

	INIT_COMPLETION(kim_gdata->kim_rcvd);
	if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
		pr_err("kim: couldn't write 4 bytes");
		return -EIO;
	}

	if (!wait_for_completion_timeout
	    (&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME))) {
		pr_err(" waiting for ver info- timed out ");
		return -ETIMEDOUT;
	}
	INIT_COMPLETION(kim_gdata->kim_rcvd);

	/* version word layout: [15] maj bit3, [12:10] chip,
	 * [9:7] maj bits, [6:0] min */
	version =
		MAKEWORD(kim_gdata->resp_buffer[13],
				kim_gdata->resp_buffer[14]);
	chip = (version & 0x7C00) >> 10;
	min_ver = (version & 0x007F);
	maj_ver = (version & 0x0380) >> 7;

	if (version & 0x8000)
		maj_ver |= 0x0008;

	sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver);

	/* to be accessed later via sysfs entry */
	kim_gdata->version.full = version;
	kim_gdata->version.chip = chip;
	kim_gdata->version.maj_ver = maj_ver;
	kim_gdata->version.min_ver = min_ver;

	pr_info("%s", bts_scr_name);
	return 0;
}

/* skip_change_remote_baud - advance *@ptr / shrink *@len past the
 * change-remote-baud action AND its following wait-event action, since
 * the host is not actually changing the baud rate during download.
 */
void skip_change_remote_baud(unsigned char **ptr, long *len)
{
	unsigned char *nxt_action, *cur_action;
	cur_action = *ptr;

	nxt_action = cur_action + sizeof(struct bts_action) +
		((struct bts_action *) cur_action)->size;

	if (((struct bts_action *) nxt_action)->type != ACTION_WAIT_EVENT) {
		pr_err("invalid action after change remote baud command");
	} else {
		*ptr = *ptr + sizeof(struct bts_action) +
			((struct bts_action *)cur_action)->size;
		*len = *len - (sizeof(struct bts_action) +
				((struct bts_action *)cur_action)->size);
		/* warn user on not commenting these in firmware */
		pr_warn("skipping the wait event of change remote baud");
	}
}

/**
 * download_firmware -
 *	internal function which parses through the .bts firmware
 *	script file intreprets SEND, DELAY actions only as of now
 *
 *	Walks the action list after the bts_header: for each SEND it
 *	waits for UART tx room, writes the command and arms the
 *	completion; WAIT_EVENT blocks on that completion; DELAY sleeps.
 *	Returns 0 on success or a negative errno; the firmware blob is
 *	released on every exit path.
 */
static long download_firmware(struct kim_data_s *kim_gdata)
{
	long err = 0;
	long len = 0;
	unsigned char *ptr = NULL;
	unsigned char *action_ptr = NULL;
	unsigned char bts_scr_name[30] = { 0 };	/* 30 char long bts scr name? */
	int wr_room_space;
	int cmd_size;
	unsigned long timeout;

	err = read_local_version(kim_gdata, bts_scr_name);
	if (err != 0) {
		pr_err("kim: failed to read local ver");
		return err;
	}
	err =
	    request_firmware(&kim_gdata->fw_entry, bts_scr_name,
			     &kim_gdata->kim_pdev->dev);
	if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) ||
		     (kim_gdata->fw_entry->size == 0))) {
		pr_err(" request_firmware failed(errno %ld) for %s", err,
			   bts_scr_name);
		return -EINVAL;
	}
	ptr = (void *)kim_gdata->fw_entry->data;
	len = kim_gdata->fw_entry->size;
	/* bts_header to remove out magic number and
	 * version */
	ptr += sizeof(struct bts_header);
	len -= sizeof(struct bts_header);

	while (len > 0 && ptr) {
		pr_debug(" action size %d, type %d ",
			   ((struct bts_action *)ptr)->size,
			   ((struct bts_action *)ptr)->type);

		switch (((struct bts_action *)ptr)->type) {
		case ACTION_SEND_COMMAND:	/* action send */
			pr_debug("S");
			action_ptr = &(((struct bts_action *)ptr)->data[0]);
			if (unlikely
			    (((struct hci_command *)action_ptr)->opcode ==
			     0xFF36)) {
				/* ignore remote change
				 * baud rate HCI VS command */
				pr_warn("change remote baud"
				    " rate command in firmware");
				skip_change_remote_baud(&ptr, &len);
				break;
			}
			/*
			 * Make sure we have enough free space in uart
			 * tx buffer to write current firmware command
			 */
			cmd_size = ((struct bts_action *)ptr)->size;
			timeout = jiffies + msecs_to_jiffies(CMD_WR_TIME);
			do {
				wr_room_space =
					st_get_uart_wr_room(kim_gdata->core_data);
				if (wr_room_space < 0) {
					pr_err("Unable to get free "
							"space info from uart tx buffer");
					release_firmware(kim_gdata->fw_entry);
					return wr_room_space;
				}
				mdelay(1); /* wait 1ms before checking room */
			} while ((wr_room_space < cmd_size) &&
					time_before(jiffies, timeout));

			/* Timeout happened ? */
			if (time_after_eq(jiffies, timeout)) {
				pr_err("Timeout while waiting for free "
						"free space in uart tx buffer");
				release_firmware(kim_gdata->fw_entry);
				return -ETIMEDOUT;
			}
			/* reinit completion before sending for the
			 * relevant wait
			 */
			INIT_COMPLETION(kim_gdata->kim_rcvd);

			/*
			 * Free space found in uart buffer, call st_int_write
			 * to send current firmware command to the uart tx
			 * buffer.
			 */
			err = st_int_write(kim_gdata->core_data,
			((struct bts_action_send *)action_ptr)->data,
					   ((struct bts_action *)ptr)->size);
			if (unlikely(err < 0)) {
				release_firmware(kim_gdata->fw_entry);
				return err;
			}
			/*
			 * Check number of bytes written to the uart tx buffer
			 * and requested command write size
			 */
			if (err != cmd_size) {
				pr_err("Number of bytes written to uart "
						"tx buffer are not matching with "
						"requested cmd write size");
				release_firmware(kim_gdata->fw_entry);
				return -EIO;
			}
			break;
		case ACTION_WAIT_EVENT:  /* wait */
			pr_debug("W");
			if (!wait_for_completion_timeout
					(&kim_gdata->kim_rcvd,
					 msecs_to_jiffies(CMD_RESP_TIME))) {
				pr_err("response timeout during fw download ");
				/* timed out */
				release_firmware(kim_gdata->fw_entry);
				return -ETIMEDOUT;
			}
			INIT_COMPLETION(kim_gdata->kim_rcvd);
			break;
		case ACTION_DELAY:	/* sleep */
			pr_info("sleep command in scr");
			action_ptr = &(((struct bts_action *)ptr)->data[0]);
			mdelay(((struct bts_action_delay *)action_ptr)->msec);
			break;
		}
		/* advance to the next action record */
		len =
		    len - (sizeof(struct bts_action) +
			   ((struct bts_action *)ptr)->size);
		ptr =
		    ptr + sizeof(struct bts_action) +
		    ((struct bts_action *)ptr)->size;
	}
	/* fw download complete */
	release_firmware(kim_gdata->fw_entry);
	return 0;
}
/**********************************************************************/ /* functions called from ST core */ /* called from ST Core, when REG_IN_PROGRESS (registration in progress) * can be because of * 1. response to read local version * 2. during send/recv's of firmware download */ void st_kim_recv(void *disc_data, const unsigned char *data, long count) { struct st_data_s *st_gdata = (struct st_data_s *)disc_data; struct kim_data_s *kim_gdata = st_gdata->kim_data; /* copy to local buffer */ if (unlikely(data[4] == 0x01 && data[5] == 0x10 && data[0] == 0x04)) { /* must be the read_ver_cmd */ memcpy(kim_gdata->resp_buffer, data, count); complete_all(&kim_gdata->kim_rcvd); return; } else { kim_int_recv(kim_gdata, data, count); /* either completes or times out */ } return; } /* to signal completion of line discipline installation * called from ST Core, upon tty_open */ void st_kim_complete(void *kim_data) { struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; complete(&kim_gdata->ldisc_installed); } /** * st_kim_start - called from ST Core upon 1st registration * This involves toggling the chip enable gpio, reading * the firmware version from chip, forming the fw file name * based on the chip version, requesting the fw, parsing it * and perform download(send/recv). 
*/ long st_kim_start(void *kim_data) { long err = 0; long retry = POR_RETRY_COUNT; struct ti_st_plat_data *pdata; struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; pr_info(" %s", __func__); pdata = kim_gdata->kim_pdev->dev.platform_data; do { /* platform specific enabling code here */ wake_lock(&kim_gdata->core_data->st_wk_lock); /* Configure BT nShutdown to HIGH state */ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); mdelay(5); /* FIXME: a proper toggle */ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); mdelay(100); /* re-initialize the completion */ INIT_COMPLETION(kim_gdata->ldisc_installed); /* send notification to UIM */ kim_gdata->ldisc_install = 1; pr_info("ldisc_install = 1"); sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); /* wait for ldisc to be installed */ err = wait_for_completion_timeout(&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); if (!err) { /* timeout */ pr_err("line disc installation timed out "); kim_gdata->ldisc_install = 0; pr_info("ldisc_install = 0"); sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); /* the following wait is never going to be completed, * since the ldisc was never installed, hence serving * as a mdelay of LDISC_TIME msecs */ err = wait_for_completion_timeout (&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); err = -ETIMEDOUT; continue; } else { /* ldisc installed now */ pr_info(" line discipline installed "); err = download_firmware(kim_gdata); if (err != 0) { pr_err("download firmware failed"); kim_gdata->ldisc_install = 0; pr_info("ldisc_install = 0"); sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); /* this wait might be completed, though in the * tty_close() since the ldisc is already * installed */ err = wait_for_completion_timeout (&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); err = -EINVAL; continue; } else { /* on success don't retry */ break; } } } while (retry--); return err; } /** * st_kim_stop - called from ST Core, on 
the last un-registration * toggle low the chip enable gpio */ long st_kim_stop(void *kim_data) { long err = 0; struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; INIT_COMPLETION(kim_gdata->ldisc_installed); /* Flush any pending characters in the driver and discipline. */ tty_ldisc_flush(kim_gdata->core_data->tty); tty_driver_flush_buffer(kim_gdata->core_data->tty); /* send uninstall notification to UIM */ pr_info("ldisc_install = 0"); kim_gdata->ldisc_install = 0; sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); /* wait for ldisc to be un-installed */ err = wait_for_completion_timeout(&kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); if (!err) { /* timeout */ pr_err(" timed out waiting for ldisc to be un-installed"); return -ETIMEDOUT; } /* By default configure BT nShutdown to LOW state */ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); mdelay(1); gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); mdelay(1); gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); /* platform specific disable */ wake_unlock(&kim_gdata->core_data->st_wk_lock); return err; } /**********************************************************************/ /* functions called from subsystems */ /* called when debugfs entry is read from */ static int show_version(struct seq_file *s, void *unused) { struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full, kim_gdata->version.chip, kim_gdata->version.maj_ver, kim_gdata->version.min_ver); return 0; } static int show_list(struct seq_file *s, void *unused) { struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; kim_st_list_protocols(kim_gdata->core_data, s); return 0; } static ssize_t show_install(struct device *dev, struct device_attribute *attr, char *buf) { struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", kim_data->ldisc_install); } static ssize_t show_dev_name(struct device *dev, struct device_attribute *attr, char 
*buf) { struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", kim_data->dev_name); } static ssize_t show_baud_rate(struct device *dev, struct device_attribute *attr, char *buf) { struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%ld\n", kim_data->baud_rate); } static ssize_t show_flow_cntrl(struct device *dev, struct device_attribute *attr, char *buf) { struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", kim_data->flow_cntrl); } /* structures specific for sysfs entries */ static struct kobj_attribute ldisc_install = __ATTR(install, 0444, (void *)show_install, NULL); static struct kobj_attribute uart_dev_name = __ATTR(dev_name, 0444, (void *)show_dev_name, NULL); static struct kobj_attribute uart_baud_rate = __ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL); static struct kobj_attribute uart_flow_cntrl = __ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL); static struct attribute *uim_attrs[] = { &ldisc_install.attr, &uart_dev_name.attr, &uart_baud_rate.attr, &uart_flow_cntrl.attr, NULL, }; static struct attribute_group uim_attr_grp = { .attrs = uim_attrs, }; /** * st_kim_ref - reference the core's data * This references the per-ST platform device in the arch/xx/ * board-xx.c file. 
* This would enable multiple such platform devices to exist * on a given platform */ void st_kim_ref(struct st_data_s **core_data, int id) { struct platform_device *pdev; struct kim_data_s *kim_gdata; /* get kim_gdata reference from platform device */ pdev = st_get_plat_device(id); if (!pdev) { *core_data = NULL; return; } kim_gdata = dev_get_drvdata(&pdev->dev); *core_data = kim_gdata->core_data; } static int kim_version_open(struct inode *i, struct file *f) { return single_open(f, show_version, i->i_private); } static int kim_list_open(struct inode *i, struct file *f) { return single_open(f, show_list, i->i_private); } static const struct file_operations version_debugfs_fops = { /* version info */ .open = kim_version_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations list_debugfs_fops = { /* protocols info */ .open = kim_list_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /**********************************************************************/ /* functions called from platform device driver subsystem * need to have a relevant platform device entry in the platform's * board-*.c file */ struct dentry *kim_debugfs_dir; static int kim_probe(struct platform_device *pdev) { long status; struct kim_data_s *kim_gdata; struct ti_st_plat_data *pdata = pdev->dev.platform_data; if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) { /* multiple devices could exist */ st_kim_devices[pdev->id] = pdev; } else { /* platform's sure about existence of 1 device */ st_kim_devices[0] = pdev; } kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); if (!kim_gdata) { pr_err("no mem to allocate"); return -ENOMEM; } dev_set_drvdata(&pdev->dev, kim_gdata); status = st_core_init(&kim_gdata->core_data); if (status != 0) { pr_err(" ST core init failed"); return -EIO; } /* refer to itself */ kim_gdata->core_data->kim_data = kim_gdata; wake_lock_init(&kim_gdata->core_data->st_wk_lock, WAKE_LOCK_SUSPEND, 
"st_wake_lock"); /* Claim the chip enable nShutdown gpio from the system */ kim_gdata->nshutdown = pdata->nshutdown_gpio; status = gpio_request(kim_gdata->nshutdown, "kim"); if (unlikely(status)) { pr_err(" gpio %ld request failed ", kim_gdata->nshutdown); return status; } /* Configure nShutdown GPIO as output=0 */ status = gpio_direction_output(kim_gdata->nshutdown, 0); if (unlikely(status)) { pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown); return status; } /* get reference of pdev for request_firmware */ kim_gdata->kim_pdev = pdev; init_completion(&kim_gdata->kim_rcvd); init_completion(&kim_gdata->ldisc_installed); status = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp); if (status) { pr_err("failed to create sysfs entries"); return status; } /* copying platform data */ strncpy(kim_gdata->dev_name, pdata->dev_name, UART_DEV_NAME_LEN); kim_gdata->flow_cntrl = pdata->flow_cntrl; kim_gdata->baud_rate = pdata->baud_rate; pr_info("sysfs entries created\n"); kim_debugfs_dir = debugfs_create_dir("ti-st", NULL); if (IS_ERR(kim_debugfs_dir)) { pr_err(" debugfs entries creation failed "); kim_debugfs_dir = NULL; return -EIO; } debugfs_create_file("version", S_IRUGO, kim_debugfs_dir, kim_gdata, &version_debugfs_fops); debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir, kim_gdata, &list_debugfs_fops); pr_info(" debugfs entries created "); return 0; } static int kim_remove(struct platform_device *pdev) { /* free the GPIOs requested */ struct ti_st_plat_data *pdata = pdev->dev.platform_data; struct kim_data_s *kim_gdata; kim_gdata = dev_get_drvdata(&pdev->dev); /* Free the Bluetooth/FM/GPIO * nShutdown gpio from the system */ gpio_free(pdata->nshutdown_gpio); pr_info("nshutdown GPIO Freed"); debugfs_remove_recursive(kim_debugfs_dir); sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); pr_info("sysfs entries removed"); wake_lock_destroy(&kim_gdata->core_data->st_wk_lock); kim_gdata->kim_pdev = NULL; st_core_exit(kim_gdata->core_data); kfree(kim_gdata); 
kim_gdata = NULL; return 0; } int kim_suspend(struct platform_device *pdev, pm_message_t state) { struct kim_data_s *kim_gdata; struct st_data_s *core_data; struct uart_state *uart_state; struct uart_port *uport; kim_gdata = dev_get_drvdata(&pdev->dev); core_data = kim_gdata->core_data; if (st_ll_getstate(core_data) != ST_LL_INVALID) { uart_state = core_data->tty->driver_data; uport = uart_state->uart_port; #ifdef CONFIG_BT_TIBLUESLEEP pr_info(" Bluesleep Start"); bluesleep_start(uport); #endif } return 0; } int kim_resume(struct platform_device *pdev) { return 0; } /**********************************************************************/ /* entry point for ST KIM module, called in from ST Core */ static struct platform_driver kim_platform_driver = { .probe = kim_probe, .remove = kim_remove, .suspend = kim_suspend, .resume = kim_resume, .driver = { .name = "kim", .owner = THIS_MODULE, }, }; static int __init st_kim_init(void) { return platform_driver_register(&kim_platform_driver); } static void __exit st_kim_deinit(void) { platform_driver_unregister(&kim_platform_driver); } module_init(st_kim_init); module_exit(st_kim_deinit); MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>"); MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips "); MODULE_LICENSE("GPL");
TheYorickable/tf300t_jb_kernel
drivers/misc/ti-st/st_kim.c
C
gpl-2.0
23,852
/* * intel_menlow.c - Intel menlow Driver for thermal management extension * * Copyright (C) 2008 Intel Corp * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com> * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This driver creates the sys I/F for programming the sensors. * It also implements the driver for intel menlow memory controller (hardware * id is INT0002) which makes use of the platform specific ACPI methods * to get/set bandwidth. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/thermal.h> #include <linux/acpi.h> MODULE_AUTHOR("Thomas Sujith"); MODULE_AUTHOR("Zhang Rui"); MODULE_DESCRIPTION("Intel Menlow platform specific driver"); MODULE_LICENSE("GPL"); /* * Memory controller device control */ #define MEMORY_GET_BANDWIDTH "GTHS" #define MEMORY_SET_BANDWIDTH "STHS" #define MEMORY_ARG_CUR_BANDWIDTH 1 #define MEMORY_ARG_MAX_BANDWIDTH 0 static void intel_menlow_unregister_sensor(void); /* * GTHS returning 'n' would mean that [0,n-1] states are supported * In that case max_cstate would be n-1 * GTHS returning '0' would mean that no bandwidth control states are supported */ static int memory_get_max_bandwidth(struct thermal_cooling_device *cdev, unsigned long *max_state) { struct acpi_device *device = cdev->devdata; acpi_handle handle = device->handle; unsigned long long value; struct acpi_object_list arg_list; union acpi_object arg; acpi_status status = AE_OK; arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = MEMORY_ARG_MAX_BANDWIDTH; status = acpi_evaluate_integer(handle, MEMORY_GET_BANDWIDTH, &arg_list, &value); if (ACPI_FAILURE(status)) return -EFAULT; if (!value) return -EINVAL; *max_state = value - 1; return 0; } static int memory_get_cur_bandwidth(struct thermal_cooling_device *cdev, unsigned long *value) { struct acpi_device *device = cdev->devdata; acpi_handle handle = device->handle; unsigned long long result; struct acpi_object_list arg_list; union acpi_object arg; acpi_status status = AE_OK; arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = MEMORY_ARG_CUR_BANDWIDTH; status = acpi_evaluate_integer(handle, MEMORY_GET_BANDWIDTH, &arg_list, &result); if (ACPI_FAILURE(status)) return -EFAULT; *value = result; return 0; } 
static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev, unsigned long state) { struct acpi_device *device = cdev->devdata; acpi_handle handle = device->handle; struct acpi_object_list arg_list; union acpi_object arg; acpi_status status; unsigned long long temp; unsigned long max_state; if (memory_get_max_bandwidth(cdev, &max_state)) return -EFAULT; if (state > max_state) return -EINVAL; arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = state; status = acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list, &temp); pr_info("Bandwidth value was %ld: status is %d\n", state, status); if (ACPI_FAILURE(status)) return -EFAULT; return 0; } static struct thermal_cooling_device_ops memory_cooling_ops = { .get_max_state = memory_get_max_bandwidth, .get_cur_state = memory_get_cur_bandwidth, .set_cur_state = memory_set_cur_bandwidth, }; /* * Memory Device Management */ static int intel_menlow_memory_add(struct acpi_device *device) { int result = -ENODEV; struct thermal_cooling_device *cdev; if (!device) return -EINVAL; if (!acpi_has_method(device->handle, MEMORY_GET_BANDWIDTH)) goto end; if (!acpi_has_method(device->handle, MEMORY_SET_BANDWIDTH)) goto end; cdev = thermal_cooling_device_register("Memory controller", device, &memory_cooling_ops); if (IS_ERR(cdev)) { result = PTR_ERR(cdev); goto end; } device->driver_data = cdev; result = sysfs_create_link(&device->dev.kobj, &cdev->device.kobj, "thermal_cooling"); if (result) goto unregister; result = sysfs_create_link(&cdev->device.kobj, &device->dev.kobj, "device"); if (result) { sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); goto unregister; } end: return result; unregister: thermal_cooling_device_unregister(cdev); return result; } static int intel_menlow_memory_remove(struct acpi_device *device) { struct thermal_cooling_device *cdev = acpi_driver_data(device); if (!device || !cdev) return -EINVAL; sysfs_remove_link(&device->dev.kobj, 
"thermal_cooling"); sysfs_remove_link(&cdev->device.kobj, "device"); thermal_cooling_device_unregister(cdev); return 0; } static const struct acpi_device_id intel_menlow_memory_ids[] = { {"INT0002", 0}, {"", 0}, }; static struct acpi_driver intel_menlow_memory_driver = { .name = "intel_menlow_thermal_control", .ids = intel_menlow_memory_ids, .ops = { .add = intel_menlow_memory_add, .remove = intel_menlow_memory_remove, }, }; /* * Sensor control on menlow platform */ #define THERMAL_AUX0 0 #define THERMAL_AUX1 1 #define GET_AUX0 "GAX0" #define GET_AUX1 "GAX1" #define SET_AUX0 "SAX0" #define SET_AUX1 "SAX1" struct intel_menlow_attribute { struct device_attribute attr; struct device *device; acpi_handle handle; struct list_head node; }; static LIST_HEAD(intel_menlow_attr_list); static DEFINE_MUTEX(intel_menlow_attr_lock); /* * sensor_get_auxtrip - get the current auxtrip value from sensor * @name: Thermalzone name * @auxtype : AUX0/AUX1 * @buf: syfs buffer */ static int sensor_get_auxtrip(acpi_handle handle, int index, unsigned long long *value) { acpi_status status; if ((index != 0 && index != 1) || !value) return -EINVAL; status = acpi_evaluate_integer(handle, index ? GET_AUX1 : GET_AUX0, NULL, value); if (ACPI_FAILURE(status)) return -EIO; return 0; } /* * sensor_set_auxtrip - set the new auxtrip value to sensor * @name: Thermalzone name * @auxtype : AUX0/AUX1 * @buf: syfs buffer */ static int sensor_set_auxtrip(acpi_handle handle, int index, int value) { acpi_status status; union acpi_object arg = { ACPI_TYPE_INTEGER }; struct acpi_object_list args = { 1, &arg }; unsigned long long temp; if (index != 0 && index != 1) return -EINVAL; status = acpi_evaluate_integer(handle, index ? GET_AUX0 : GET_AUX1, NULL, &temp); if (ACPI_FAILURE(status)) return -EIO; if ((index && value < temp) || (!index && value > temp)) return -EINVAL; arg.integer.value = value; status = acpi_evaluate_integer(handle, index ? 
SET_AUX1 : SET_AUX0, &args, &temp); if (ACPI_FAILURE(status)) return -EIO; /* do we need to check the return value of SAX0/SAX1 ? */ return 0; } #define to_intel_menlow_attr(_attr) \ container_of(_attr, struct intel_menlow_attribute, attr) static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr, char *buf, int idx) { struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); unsigned long long value; int result; result = sensor_get_auxtrip(attr->handle, idx, &value); return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value)); } static ssize_t aux0_show(struct device *dev, struct device_attribute *dev_attr, char *buf) { return aux_show(dev, dev_attr, buf, 0); } static ssize_t aux1_show(struct device *dev, struct device_attribute *dev_attr, char *buf) { return aux_show(dev, dev_attr, buf, 1); } static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr, const char *buf, size_t count, int idx) { struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr); int value; int result; /*Sanity check; should be a positive integer */ if (!sscanf(buf, "%d", &value)) return -EINVAL; if (value < 0) return -EINVAL; result = sensor_set_auxtrip(attr->handle, idx, CELSIUS_TO_DECI_KELVIN(value)); return result ? 
result : count; } static ssize_t aux0_store(struct device *dev, struct device_attribute *dev_attr, const char *buf, size_t count) { return aux_store(dev, dev_attr, buf, count, 0); } static ssize_t aux1_store(struct device *dev, struct device_attribute *dev_attr, const char *buf, size_t count) { return aux_store(dev, dev_attr, buf, count, 1); } /* BIOS can enable/disable the thermal user application in dabney platform */ #define BIOS_ENABLED "\\_TZ.GSTS" static ssize_t bios_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { acpi_status status; unsigned long long bios_enabled; status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &bios_enabled); if (ACPI_FAILURE(status)) return -ENODEV; return sprintf(buf, "%s\n", bios_enabled ? "enabled" : "disabled"); } static int intel_menlow_add_one_attribute(char *name, umode_t mode, void *show, void *store, struct device *dev, acpi_handle handle) { struct intel_menlow_attribute *attr; int result; attr = kzalloc(sizeof(struct intel_menlow_attribute), GFP_KERNEL); if (!attr) return -ENOMEM; sysfs_attr_init(&attr->attr.attr); /* That is consistent naming :D */ attr->attr.attr.name = name; attr->attr.attr.mode = mode; attr->attr.show = show; attr->attr.store = store; attr->device = dev; attr->handle = handle; result = device_create_file(dev, &attr->attr); if (result) { kfree(attr); return result; } mutex_lock(&intel_menlow_attr_lock); list_add_tail(&attr->node, &intel_menlow_attr_list); mutex_unlock(&intel_menlow_attr_lock); return 0; } static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_status status; acpi_handle dummy; struct thermal_zone_device *thermal; int result; result = acpi_bus_get_private_data(handle, (void **)&thermal); if (result) return 0; /* _TZ must have the AUX0/1 methods */ status = acpi_get_handle(handle, GET_AUX0, &dummy); if (ACPI_FAILURE(status)) return (status == AE_NOT_FOUND) ? 
AE_OK : status; status = acpi_get_handle(handle, SET_AUX0, &dummy); if (ACPI_FAILURE(status)) return (status == AE_NOT_FOUND) ? AE_OK : status; result = intel_menlow_add_one_attribute("aux0", 0644, aux0_show, aux0_store, &thermal->device, handle); if (result) return AE_ERROR; status = acpi_get_handle(handle, GET_AUX1, &dummy); if (ACPI_FAILURE(status)) goto aux1_not_found; status = acpi_get_handle(handle, SET_AUX1, &dummy); if (ACPI_FAILURE(status)) goto aux1_not_found; result = intel_menlow_add_one_attribute("aux1", 0644, aux1_show, aux1_store, &thermal->device, handle); if (result) { intel_menlow_unregister_sensor(); return AE_ERROR; } /* * create the "dabney_enabled" attribute which means the user app * should be loaded or not */ result = intel_menlow_add_one_attribute("bios_enabled", 0444, bios_enabled_show, NULL, &thermal->device, handle); if (result) { intel_menlow_unregister_sensor(); return AE_ERROR; } return AE_OK; aux1_not_found: if (status == AE_NOT_FOUND) return AE_OK; intel_menlow_unregister_sensor(); return status; } static void intel_menlow_unregister_sensor(void) { struct intel_menlow_attribute *pos, *next; mutex_lock(&intel_menlow_attr_lock); list_for_each_entry_safe(pos, next, &intel_menlow_attr_list, node) { list_del(&pos->node); device_remove_file(pos->device, &pos->attr); kfree(pos); } mutex_unlock(&intel_menlow_attr_lock); return; } static int __init intel_menlow_module_init(void) { int result = -ENODEV; acpi_status status; unsigned long long enable; if (acpi_disabled) return result; /* Looking for the \_TZ.GSTS method */ status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &enable); if (ACPI_FAILURE(status) || !enable) return -ENODEV; /* Looking for ACPI device MEM0 with hardware id INT0002 */ result = acpi_bus_register_driver(&intel_menlow_memory_driver); if (result) return result; /* Looking for sensors in each ACPI thermal zone */ status = acpi_walk_namespace(ACPI_TYPE_THERMAL, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 
intel_menlow_register_sensor, NULL, NULL, NULL); if (ACPI_FAILURE(status)) { acpi_bus_unregister_driver(&intel_menlow_memory_driver); return -ENODEV; } return 0; } static void __exit intel_menlow_module_exit(void) { acpi_bus_unregister_driver(&intel_menlow_memory_driver); intel_menlow_unregister_sensor(); } module_init(intel_menlow_module_init); module_exit(intel_menlow_module_exit);
minipli/linux-grsec
drivers/platform/x86/intel_menlow.c
C
gpl-2.0
13,497
/* Copyright (c) 2003-2012, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ (function() { var meta = { editorFocus : false, modes : { wysiwyg:1, source:1 } }; var blurCommand = { exec : function( editor ) { editor.container.focusNext( true, editor.tabIndex ); } }; var blurBackCommand = { exec : function( editor ) { editor.container.focusPrevious( true, editor.tabIndex ); } }; function selectNextCellCommand( backward ) { return { editorFocus : false, canUndo : false, modes : { wysiwyg : 1 }, exec : function( editor ) { if ( editor.focusManager.hasFocus ) { var sel = editor.getSelection(), ancestor = sel.getCommonAncestor(), cell; if ( ( cell = ( ancestor.getAscendant( 'td', true ) || ancestor.getAscendant( 'th', true ) ) ) ) { var resultRange = new CKEDITOR.dom.range( editor.document ), next = CKEDITOR.tools.tryThese( function() { var row = cell.getParent(), next = row.$.cells[ cell.$.cellIndex + ( backward ? - 1 : 1 ) ]; // Invalid any empty value. next.parentNode.parentNode; return next; }, function() { var row = cell.getParent(), table = row.getAscendant( 'table' ), nextRow = table.$.rows[ row.$.rowIndex + ( backward ? - 1 : 1 ) ]; return nextRow.cells[ backward? nextRow.cells.length -1 : 0 ]; }); // Clone one more row at the end of table and select the first newly established cell. if ( ! ( next || backward ) ) { var table = cell.getAscendant( 'table' ).$, cells = cell.getParent().$.cells; var newRow = new CKEDITOR.dom.element( table.insertRow( -1 ), editor.document ); for ( var i = 0, count = cells.length ; i < count; i++ ) { var newCell = newRow.append( new CKEDITOR.dom.element( cells[ i ], editor.document ).clone( false, false ) ); !CKEDITOR.env.ie && newCell.appendBogus(); } resultRange.moveToElementEditStart( newRow ); } else if ( next ) { next = new CKEDITOR.dom.element( next ); resultRange.moveToElementEditStart( next ); // Avoid selecting empty block makes the cursor blind. 
if ( !( resultRange.checkStartOfBlock() && resultRange.checkEndOfBlock() ) ) resultRange.selectNodeContents( next ); } else return true; resultRange.select( true ); return true; } } return false; } }; } CKEDITOR.plugins.add( 'tab', { requires : [ 'keystrokes' ], init : function( editor ) { var tabTools = editor.config.enableTabKeyTools !== false, tabSpaces = editor.config.tabSpaces || 0, tabText = ''; while ( tabSpaces-- ) tabText += '\xa0'; if ( tabText ) { editor.on( 'key', function( ev ) { if ( ev.data.keyCode == 9 ) // TAB { editor.insertHtml( tabText ); ev.cancel(); } }); } if ( tabTools ) { editor.on( 'key', function( ev ) { if ( ev.data.keyCode == 9 && editor.execCommand( 'selectNextCell' ) || // TAB ev.data.keyCode == ( CKEDITOR.SHIFT + 9 ) && editor.execCommand( 'selectPreviousCell' ) ) // SHIFT+TAB ev.cancel(); }); } if ( CKEDITOR.env.webkit || CKEDITOR.env.gecko ) { editor.on( 'key', function( ev ) { var keyCode = ev.data.keyCode; if ( keyCode == 9 && !tabText ) // TAB { ev.cancel(); editor.execCommand( 'blur' ); } if ( keyCode == ( CKEDITOR.SHIFT + 9 ) ) // SHIFT+TAB { editor.execCommand( 'blurBack' ); ev.cancel(); } }); } editor.addCommand( 'blur', CKEDITOR.tools.extend( blurCommand, meta ) ); editor.addCommand( 'blurBack', CKEDITOR.tools.extend( blurBackCommand, meta ) ); editor.addCommand( 'selectNextCell', selectNextCellCommand() ); editor.addCommand( 'selectPreviousCell', selectNextCellCommand( true ) ); } }); })(); /** * Moves the UI focus to the element following this element in the tabindex * order. * @example * var element = CKEDITOR.document.getById( 'example' ); * element.focusNext(); */ CKEDITOR.dom.element.prototype.focusNext = function( ignoreChildren, indexToUse ) { var $ = this.$, curTabIndex = ( indexToUse === undefined ? 
this.getTabIndex() : indexToUse ), passedCurrent, enteredCurrent, elected, electedTabIndex, element, elementTabIndex; if ( curTabIndex <= 0 ) { // If this element has tabindex <= 0 then we must simply look for any // element following it containing tabindex=0. element = this.getNextSourceNode( ignoreChildren, CKEDITOR.NODE_ELEMENT ); while ( element ) { if ( element.isVisible() && element.getTabIndex() === 0 ) { elected = element; break; } element = element.getNextSourceNode( false, CKEDITOR.NODE_ELEMENT ); } } else { // If this element has tabindex > 0 then we must look for: // 1. An element following this element with the same tabindex. // 2. The first element in source other with the lowest tabindex // that is higher than this element tabindex. // 3. The first element with tabindex=0. element = this.getDocument().getBody().getFirst(); while ( ( element = element.getNextSourceNode( false, CKEDITOR.NODE_ELEMENT ) ) ) { if ( !passedCurrent ) { if ( !enteredCurrent && element.equals( this ) ) { enteredCurrent = true; // Ignore this element, if required. if ( ignoreChildren ) { if ( !( element = element.getNextSourceNode( true, CKEDITOR.NODE_ELEMENT ) ) ) break; passedCurrent = 1; } } else if ( enteredCurrent && !this.contains( element ) ) passedCurrent = 1; } if ( !element.isVisible() || ( elementTabIndex = element.getTabIndex() ) < 0 ) continue; if ( passedCurrent && elementTabIndex == curTabIndex ) { elected = element; break; } if ( elementTabIndex > curTabIndex && ( !elected || !electedTabIndex || elementTabIndex < electedTabIndex ) ) { elected = element; electedTabIndex = elementTabIndex; } else if ( !elected && elementTabIndex === 0 ) { elected = element; electedTabIndex = elementTabIndex; } } } if ( elected ) elected.focus(); }; /** * Moves the UI focus to the element before this element in the tabindex order. 
* @example * var element = CKEDITOR.document.getById( 'example' ); * element.focusPrevious(); */ CKEDITOR.dom.element.prototype.focusPrevious = function( ignoreChildren, indexToUse ) { var $ = this.$, curTabIndex = ( indexToUse === undefined ? this.getTabIndex() : indexToUse ), passedCurrent, enteredCurrent, elected, electedTabIndex = 0, elementTabIndex; var element = this.getDocument().getBody().getLast(); while ( ( element = element.getPreviousSourceNode( false, CKEDITOR.NODE_ELEMENT ) ) ) { if ( !passedCurrent ) { if ( !enteredCurrent && element.equals( this ) ) { enteredCurrent = true; // Ignore this element, if required. if ( ignoreChildren ) { if ( !( element = element.getPreviousSourceNode( true, CKEDITOR.NODE_ELEMENT ) ) ) break; passedCurrent = 1; } } else if ( enteredCurrent && !this.contains( element ) ) passedCurrent = 1; } if ( !element.isVisible() || ( elementTabIndex = element.getTabIndex() ) < 0 ) continue; if ( curTabIndex <= 0 ) { // If this element has tabindex <= 0 then we must look for: // 1. An element before this one containing tabindex=0. // 2. The last element with the highest tabindex. if ( passedCurrent && elementTabIndex === 0 ) { elected = element; break; } if ( elementTabIndex > electedTabIndex ) { elected = element; electedTabIndex = elementTabIndex; } } else { // If this element has tabindex > 0 we must look for: // 1. An element preceeding this one, with the same tabindex. // 2. The last element in source other with the highest tabindex // that is lower than this element tabindex. if ( passedCurrent && elementTabIndex == curTabIndex ) { elected = element; break; } if ( elementTabIndex < curTabIndex && ( !elected || elementTabIndex > electedTabIndex ) ) { elected = element; electedTabIndex = elementTabIndex; } } } if ( elected ) elected.focus(); }; /** * Intructs the editor to add a number of spaces (&amp;nbsp;) to the text when * hitting the TAB key. 
If set to zero, the TAB key will be used to move the * cursor focus to the next element in the page, out of the editor focus. * @name CKEDITOR.config.tabSpaces * @type Number * @default 0 * @example * config.tabSpaces = 4; */ /** * Allow context-sensitive tab key behaviors, including the following scenarios: * <h5>When selection is anchored inside <b>table cells</b>:</h5> * <ul> * <li>If TAB is pressed, select the contents of the "next" cell. If in the last cell in the table, add a new row to it and focus its first cell.</li> * <li>If SHIFT+TAB is pressed, select the contents of the "previous" cell. Do nothing when it's in the first cell.</li> * </ul> * @name CKEDITOR.config.enableTabKeyTools * @type Boolean * @default true * @example * config.enableTabKeyTools = false; */ // If the TAB key is not supposed to be enabled for navigation, the following // settings could be used alternatively: // config.keystrokes.push( // [ CKEDITOR.ALT + 38 /*Arrow Up*/, 'selectPreviousCell' ], // [ CKEDITOR.ALT + 40 /*Arrow Down*/, 'selectNextCell' ] // );
eltmc/drupal-website
sites/all/libraries/ckeditor/_source/plugins/tab/plugin.js
JavaScript
gpl-2.0
10,104
<?php namespace Drupal\Core\Render\Element; use Drupal\Core\Form\FormStateInterface; use Drupal\Component\Utility\Html as HtmlUtility; /** * Provides a form element for a set of radio buttons. * * Properties: * - #options: An associative array, where the keys are the returned values for * each radio button, and the values are the labels next to each radio button. * * Usage example: * @code * $form['settings']['active'] = array( * '#type' => 'radios', * '#title' => $this->t('Poll status'), * '#default_value' => 1, * '#options' => array(0 => $this->t('Closed'), 1 => $this->t('Active')), * ); * @endcode * * @see \Drupal\Core\Render\Element\Checkboxes * @see \Drupal\Core\Render\Element\Radio * @see \Drupal\Core\Render\Element\Select * * @FormElement("radios") */ class Radios extends FormElement { use CompositeFormElementTrait; /** * {@inheritdoc} */ public function getInfo() { $class = get_class($this); return array( '#input' => TRUE, '#process' => array( array($class, 'processRadios'), ), '#theme_wrappers' => array('radios'), '#pre_render' => array( array($class, 'preRenderCompositeFormElement'), ), ); } /** * Expands a radios element into individual radio elements. */ public static function processRadios(&$element, FormStateInterface $form_state, &$complete_form) { if (count($element['#options']) > 0) { $weight = 0; foreach ($element['#options'] as $key => $choice) { // Maintain order of options as defined in #options, in case the element // defines custom option sub-elements, but does not define all option // sub-elements. $weight += 0.001; $element += array($key => array()); // Generate the parents as the autogenerator does, so we will have a // unique id for each radio button. $parents_for_id = array_merge($element['#parents'], array($key)); $element[$key] += array( '#type' => 'radio', '#title' => $choice, // The key is sanitized in Drupal\Core\Template\Attribute during output // from the theme function. '#return_value' => $key, // Use default or FALSE. 
A value of FALSE means that the radio button is // not 'checked'. '#default_value' => isset($element['#default_value']) ? $element['#default_value'] : FALSE, '#attributes' => $element['#attributes'], '#parents' => $element['#parents'], '#id' => HtmlUtility::getUniqueId('edit-' . implode('-', $parents_for_id)), '#ajax' => isset($element['#ajax']) ? $element['#ajax'] : NULL, // Errors should only be shown on the parent radios element. '#error_no_message' => TRUE, '#weight' => $weight, ); } } return $element; } /** * {@inheritdoc} */ public static function valueCallback(&$element, $input, FormStateInterface $form_state) { if ($input !== FALSE) { // When there's user input (including NULL), return it as the value. // However, if NULL is submitted, FormBuilder::handleInputElement() will // apply the default value, and we want that validated against #options // unless it's empty. (An empty #default_value, such as NULL or FALSE, can // be used to indicate that no radio button is selected by default.) if (!isset($input) && !empty($element['#default_value'])) { $element['#needs_validation'] = TRUE; } return $input; } else { // For default value handling, simply return #default_value. Additionally, // for a NULL default value, set #has_garbage_value to prevent // FormBuilder::handleInputElement() converting the NULL to an empty // string, so that code can distinguish between nothing selected and the // selection of a radio button whose value is an empty string. $value = isset($element['#default_value']) ? $element['#default_value'] : NULL; if (!isset($value)) { $element['#has_garbage_value'] = TRUE; } return $value; } } }
angrycactus/social-commerce
core/lib/Drupal/Core/Render/Element/Radios.php
PHP
gpl-2.0
4,166
<?php namespace Drupal\datetime\Plugin\Field\FieldWidget; use Drupal\Core\Entity\EntityStorageInterface; use Drupal\Core\Field\FieldItemListInterface; use Drupal\Core\Field\FieldDefinitionInterface; use Drupal\Core\Form\FormStateInterface; use Drupal\Core\Plugin\ContainerFactoryPluginInterface; use Drupal\datetime\Plugin\Field\FieldType\DateTimeItem; use Symfony\Component\DependencyInjection\ContainerInterface; /** * Plugin implementation of the 'datetime_default' widget. * * @FieldWidget( * id = "datetime_default", * label = @Translation("Date and time"), * field_types = { * "datetime" * } * ) */ class DateTimeDefaultWidget extends DateTimeWidgetBase implements ContainerFactoryPluginInterface { /** * The date format storage. * * @var \Drupal\Core\Entity\EntityStorageInterface */ protected $dateStorage; /** * {@inheritdoc} */ public function __construct($plugin_id, $plugin_definition, FieldDefinitionInterface $field_definition, array $settings, array $third_party_settings, EntityStorageInterface $date_storage) { parent::__construct($plugin_id, $plugin_definition, $field_definition, $settings, $third_party_settings); $this->dateStorage = $date_storage; } /** * {@inheritdoc} */ public static function create(ContainerInterface $container, array $configuration, $plugin_id, $plugin_definition) { return new static( $plugin_id, $plugin_definition, $configuration['field_definition'], $configuration['settings'], $configuration['third_party_settings'], $container->get('entity.manager')->getStorage('date_format') ); } /** * {@inheritdoc} */ public function formElement(FieldItemListInterface $items, $delta, array $element, array &$form, FormStateInterface $form_state) { $element = parent::formElement($items, $delta, $element, $form, $form_state); // Identify the type of date and time elements to use. 
switch ($this->getFieldSetting('datetime_type')) { case DateTimeItem::DATETIME_TYPE_DATE: $date_type = 'date'; $time_type = 'none'; $date_format = $this->dateStorage->load('html_date')->getPattern(); $time_format = ''; break; default: $date_type = 'date'; $time_type = 'time'; $date_format = $this->dateStorage->load('html_date')->getPattern(); $time_format = $this->dateStorage->load('html_time')->getPattern(); break; } $element['value'] += array( '#date_date_format' => $date_format, '#date_date_element' => $date_type, '#date_date_callbacks' => array(), '#date_time_format' => $time_format, '#date_time_element' => $time_type, '#date_time_callbacks' => array(), ); return $element; } }
ArnoldHub/blurrywalkabouts
core/core/modules/datetime/src/Plugin/Field/FieldWidget/DateTimeDefaultWidget.php
PHP
gpl-2.0
2,805
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks */ #include <linux/cpu.h> #include <linux/delay.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/sched.h> #include <linux/module.h> #include <asm/mmu_context.h> #include <asm/time.h> #include <asm/setup.h> #include <asm/octeon/octeon.h> #include "octeon_boot.h" volatile unsigned long octeon_processor_boot = 0xff; volatile unsigned long octeon_processor_sp; volatile unsigned long octeon_processor_gp; #ifdef CONFIG_HOTPLUG_CPU uint64_t octeon_bootloader_entry_addr; EXPORT_SYMBOL(octeon_bootloader_entry_addr); #endif static irqreturn_t mailbox_interrupt(int irq, void *dev_id) { const int coreid = cvmx_get_core_num(); uint64_t action; /* Load the mailbox register to figure out what we're supposed to do */ action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff; /* Clear the mailbox to clear the interrupt */ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) generic_smp_call_function_interrupt(); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); /* Check if we've been told to flush the icache */ if (action & SMP_ICACHE_FLUSH) asm volatile ("synci 0($0)\n"); return IRQ_HANDLED; } /** * Cause the function described by call_data to be executed on the passed * cpu. When the function has finished, increment the finished field of * call_data. 
*/ void octeon_send_ipi_single(int cpu, unsigned int action) { int coreid = cpu_logical_map(cpu); /* pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu, coreid, action); */ cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action); } static inline void octeon_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; for_each_cpu(i, mask) octeon_send_ipi_single(i, action); } /** * Detect available CPUs, populate cpu_possible_mask */ static void octeon_smp_hotplug_setup(void) { #ifdef CONFIG_HOTPLUG_CPU struct linux_app_boot_info *labi; if (!setup_max_cpus) return; labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); if (labi->labi_signature != LABI_SIGNATURE) { pr_info("The bootloader on this board does not support HOTPLUG_CPU."); return; } octeon_bootloader_entry_addr = labi->InitTLBStart_addr; #endif } static void octeon_smp_setup(void) { const int coreid = cvmx_get_core_num(); int cpus; int id; int core_mask = octeon_get_boot_coremask(); #ifdef CONFIG_HOTPLUG_CPU unsigned int num_cores = cvmx_octeon_num_cores(); #endif /* The present CPUs are initially just the boot cpu (CPU 0). */ for (id = 0; id < NR_CPUS; id++) { set_cpu_possible(id, id == 0); set_cpu_present(id, id == 0); } __cpu_number_map[coreid] = 0; __cpu_logical_map[0] = coreid; /* The present CPUs get the lowest CPU numbers. */ cpus = 1; for (id = 0; id < NR_CPUS; id++) { if ((id != coreid) && (core_mask & (1 << id))) { set_cpu_possible(cpus, true); set_cpu_present(cpus, true); __cpu_number_map[id] = cpus; __cpu_logical_map[cpus] = id; cpus++; } } #ifdef CONFIG_HOTPLUG_CPU /* * The possible CPUs are all those present on the chip. We * will assign CPU numbers for possible cores as well. Cores * are always consecutively numberd from 0. 
*/ for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr && id < num_cores && id < NR_CPUS; id++) { if (!(core_mask & (1 << id))) { set_cpu_possible(cpus, true); __cpu_number_map[id] = cpus; __cpu_logical_map[cpus] = id; cpus++; } } #endif octeon_smp_hotplug_setup(); } /** * Firmware CPU startup hook * */ static void octeon_boot_secondary(int cpu, struct task_struct *idle) { int count; pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu, cpu_logical_map(cpu)); octeon_processor_sp = __KSTK_TOS(idle); octeon_processor_gp = (unsigned long)(task_thread_info(idle)); octeon_processor_boot = cpu_logical_map(cpu); mb(); count = 10000; while (octeon_processor_sp && count) { /* Waiting for processor to get the SP and GP */ udelay(1); count--; } if (count == 0) pr_err("Secondary boot timeout\n"); } /** * After we've done initial boot, this function is called to allow the * board code to clean up state, if needed */ static void octeon_init_secondary(void) { unsigned int sr; sr = set_c0_status(ST0_BEV); write_c0_ebase((u32)ebase); write_c0_status(sr); octeon_check_cpu_bist(); octeon_init_cvmcount(); octeon_irq_setup_secondary(); } /** * Callout to firmware before smp_init * */ void octeon_prepare_cpus(unsigned int max_cpus) { /* * Only the low order mailbox bits are used for IPIs, leave * the other bits alone. */ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI", mailbox_interrupt)) { panic("Cannot request_irq(OCTEON_IRQ_MBOX0)"); } } /** * Last chance for the board code to finish SMP initialization before * the CPU is "online". */ static void octeon_smp_finish(void) { octeon_user_io_init(); /* to generate the first CPU timer interrupt */ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); local_irq_enable(); } #ifdef CONFIG_HOTPLUG_CPU /* State of each CPU. 
*/ DEFINE_PER_CPU(int, cpu_state); static int octeon_cpu_disable(void) { unsigned int cpu = smp_processor_id(); if (cpu == 0) return -EBUSY; if (!octeon_bootloader_entry_addr) return -ENOTSUPP; set_cpu_online(cpu, false); cpumask_clear_cpu(cpu, &cpu_callin_map); octeon_fixup_irqs(); flush_cache_all(); local_flush_tlb_all(); return 0; } static void octeon_cpu_die(unsigned int cpu) { int coreid = cpu_logical_map(cpu); uint32_t mask, new_mask; const struct cvmx_bootmem_named_block_desc *block_desc; while (per_cpu(cpu_state, cpu) != CPU_DEAD) cpu_relax(); /* * This is a bit complicated strategics of getting/settig available * cores mask, copied from bootloader */ mask = 1 << coreid; /* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */ block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME); if (!block_desc) { struct linux_app_boot_info *labi; labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); labi->avail_coremask |= mask; new_mask = labi->avail_coremask; } else { /* alternative, already initialized */ uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK); *p |= mask; new_mask = *p; } pr_info("Reset core %d. 
Available Coremask = 0x%x \n", coreid, new_mask); mb(); cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid); cvmx_write_csr(CVMX_CIU_PP_RST, 0); } void play_dead(void) { int cpu = cpu_number_map(cvmx_get_core_num()); idle_task_exit(); octeon_processor_boot = 0xff; per_cpu(cpu_state, cpu) = CPU_DEAD; mb(); while (1) /* core will be reset here */ ; } extern void kernel_entry(unsigned long arg1, ...); static void start_after_reset(void) { kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */ } static int octeon_update_boot_vector(unsigned int cpu) { int coreid = cpu_logical_map(cpu); uint32_t avail_coremask; const struct cvmx_bootmem_named_block_desc *block_desc; struct boot_init_vector *boot_vect = (struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR); block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME); if (!block_desc) { struct linux_app_boot_info *labi; labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER); avail_coremask = labi->avail_coremask; labi->avail_coremask &= ~(1 << coreid); } else { /* alternative, already initialized */ avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED( block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK); } if (!(avail_coremask & (1 << coreid))) { /* core not available, assume, that catched by simple-executive */ cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid); cvmx_write_csr(CVMX_CIU_PP_RST, 0); } boot_vect[coreid].app_start_func_addr = (uint32_t) (unsigned long) start_after_reset; boot_vect[coreid].code_addr = octeon_bootloader_entry_addr; mb(); cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask); return 0; } static int octeon_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; switch (action) { case CPU_UP_PREPARE: octeon_update_boot_vector(cpu); break; case CPU_ONLINE: pr_info("Cpu %d online\n", cpu); break; case CPU_DEAD: break; } return NOTIFY_OK; } static int 
register_cavium_notifier(void) { hotcpu_notifier(octeon_cpu_callback, 0); return 0; } late_initcall(register_cavium_notifier); #endif /* CONFIG_HOTPLUG_CPU */ struct plat_smp_ops octeon_smp_ops = { .send_ipi_single = octeon_send_ipi_single, .send_ipi_mask = octeon_send_ipi_mask, .init_secondary = octeon_init_secondary, .smp_finish = octeon_smp_finish, .boot_secondary = octeon_boot_secondary, .smp_setup = octeon_smp_setup, .prepare_cpus = octeon_prepare_cpus, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = octeon_cpu_disable, .cpu_die = octeon_cpu_die, #endif };
lukego/linux
arch/mips/cavium-octeon/smp.c
C
gpl-2.0
9,478
/* * sound/oss/sequencer.c * * The sequencer personality manager. */ /* * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. */ /* * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed) * Alan Cox : reformatted and fixed a pair of null pointer bugs */ #include <linux/kmod.h> #include <linux/spinlock.h> #include "sound_config.h" #include "midi_ctrl.h" #include "sleep.h" static int sequencer_ok; static struct sound_timer_operations *tmr; static int tmr_no = -1; /* Currently selected timer */ static int pending_timer = -1; /* For timer change operation */ extern unsigned long seq_time; static int obsolete_api_used; static DEFINE_SPINLOCK(lock); /* * Local counts for number of synth and MIDI devices. These are initialized * by the sequencer_open. */ static int max_mididev; static int max_synthdev; /* * The seq_mode gives the operating mode of the sequencer: * 1 = level1 (the default) * 2 = level2 (extended capabilities) */ #define SEQ_1 1 #define SEQ_2 2 static int seq_mode = SEQ_1; static DECLARE_WAIT_QUEUE_HEAD(seq_sleeper); static DECLARE_WAIT_QUEUE_HEAD(midi_sleeper); static int midi_opened[MAX_MIDI_DEV]; static int midi_written[MAX_MIDI_DEV]; static unsigned long prev_input_time; static int prev_event_time; #include "tuning.h" #define EV_SZ 8 #define IEV_SZ 8 static unsigned char *queue; static unsigned char *iqueue; static volatile int qhead, qtail, qlen; static volatile int iqhead, iqtail, iqlen; static volatile int seq_playing; static volatile int sequencer_busy; static int output_threshold; static long pre_event_timeout; static unsigned synth_open_mask; static int seq_queue(unsigned char *note, char nonblock); static void seq_startplay(void); static int seq_sync(void); static void seq_reset(void); #if MAX_SYNTH_DEV > 15 #error Too many synthesizer devices enabled. 
#endif int sequencer_read(int dev, struct file *file, char __user *buf, int count) { int c = count, p = 0; int ev_len; unsigned long flags; dev = dev >> 4; ev_len = seq_mode == SEQ_1 ? 4 : 8; spin_lock_irqsave(&lock,flags); if (!iqlen) { spin_unlock_irqrestore(&lock,flags); if (file->f_flags & O_NONBLOCK) { return -EAGAIN; } oss_broken_sleep_on(&midi_sleeper, pre_event_timeout); spin_lock_irqsave(&lock,flags); if (!iqlen) { spin_unlock_irqrestore(&lock,flags); return 0; } } while (iqlen && c >= ev_len) { char *fixit = (char *) &iqueue[iqhead * IEV_SZ]; spin_unlock_irqrestore(&lock,flags); if (copy_to_user(&(buf)[p], fixit, ev_len)) return count - c; p += ev_len; c -= ev_len; spin_lock_irqsave(&lock,flags); iqhead = (iqhead + 1) % SEQ_MAX_QUEUE; iqlen--; } spin_unlock_irqrestore(&lock,flags); return count - c; } static void sequencer_midi_output(int dev) { /* * Currently NOP */ } void seq_copy_to_input(unsigned char *event_rec, int len) { unsigned long flags; /* * Verify that the len is valid for the current mode. 
*/ if (len != 4 && len != 8) return; if ((seq_mode == SEQ_1) != (len == 4)) return; if (iqlen >= (SEQ_MAX_QUEUE - 1)) return; /* Overflow */ spin_lock_irqsave(&lock,flags); memcpy(&iqueue[iqtail * IEV_SZ], event_rec, len); iqlen++; iqtail = (iqtail + 1) % SEQ_MAX_QUEUE; wake_up(&midi_sleeper); spin_unlock_irqrestore(&lock,flags); } EXPORT_SYMBOL(seq_copy_to_input); static void sequencer_midi_input(int dev, unsigned char data) { unsigned int tstamp; unsigned char event_rec[4]; if (data == 0xfe) /* Ignore active sensing */ return; tstamp = jiffies - seq_time; if (tstamp != prev_input_time) { tstamp = (tstamp << 8) | SEQ_WAIT; seq_copy_to_input((unsigned char *) &tstamp, 4); prev_input_time = tstamp; } event_rec[0] = SEQ_MIDIPUTC; event_rec[1] = data; event_rec[2] = dev; event_rec[3] = 0; seq_copy_to_input(event_rec, 4); } void seq_input_event(unsigned char *event_rec, int len) { unsigned long this_time; if (seq_mode == SEQ_2) this_time = tmr->get_time(tmr_no); else this_time = jiffies - seq_time; if (this_time != prev_input_time) { unsigned char tmp_event[8]; tmp_event[0] = EV_TIMING; tmp_event[1] = TMR_WAIT_ABS; tmp_event[2] = 0; tmp_event[3] = 0; *(unsigned int *) &tmp_event[4] = this_time; seq_copy_to_input(tmp_event, 8); prev_input_time = this_time; } seq_copy_to_input(event_rec, len); } EXPORT_SYMBOL(seq_input_event); int sequencer_write(int dev, struct file *file, const char __user *buf, int count) { unsigned char event_rec[EV_SZ], ev_code; int p = 0, c, ev_size; int mode = translate_mode(file); dev = dev >> 4; if (mode == OPEN_READ) return -EIO; c = count; while (c >= 4) { if (copy_from_user((char *) event_rec, &(buf)[p], 4)) goto out; ev_code = event_rec[0]; if (ev_code == SEQ_FULLSIZE) { int err, fmt; dev = *(unsigned short *) &event_rec[2]; if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL) return -ENXIO; if (!(synth_open_mask & (1 << dev))) return -ENXIO; fmt = (*(short *) &event_rec[0]) & 0xffff; err = synth_devs[dev]->load_patch(dev, fmt, buf 
+ p, c, 0); if (err < 0) return err; return err; } if (ev_code >= 128) { if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED) { printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code); return -EINVAL; } ev_size = 8; if (c < ev_size) { if (!seq_playing) seq_startplay(); return count - c; } if (copy_from_user((char *)&event_rec[4], &(buf)[p + 4], 4)) goto out; } else { if (seq_mode == SEQ_2) { printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n"); return -EINVAL; } ev_size = 4; if (event_rec[0] != SEQ_MIDIPUTC) obsolete_api_used = 1; } if (event_rec[0] == SEQ_MIDIPUTC) { if (!midi_opened[event_rec[2]]) { int err, mode; int dev = event_rec[2]; if (dev >= max_mididev || midi_devs[dev]==NULL) { /*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/ return -ENXIO; } mode = translate_mode(file); if ((err = midi_devs[dev]->open(dev, mode, sequencer_midi_input, sequencer_midi_output)) < 0) { seq_reset(); printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev); return err; } midi_opened[dev] = 1; } } if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 
1 : 0))) { int processed = count - c; if (!seq_playing) seq_startplay(); if (!processed && (file->f_flags & O_NONBLOCK)) return -EAGAIN; else return processed; } p += ev_size; c -= ev_size; } if (!seq_playing) seq_startplay(); out: return count; } static int seq_queue(unsigned char *note, char nonblock) { /* * Test if there is space in the queue */ if (qlen >= SEQ_MAX_QUEUE) if (!seq_playing) seq_startplay(); /* * Give chance to drain the queue */ if (!nonblock && qlen >= SEQ_MAX_QUEUE && !waitqueue_active(&seq_sleeper)) { /* * Sleep until there is enough space on the queue */ oss_broken_sleep_on(&seq_sleeper, MAX_SCHEDULE_TIMEOUT); } if (qlen >= SEQ_MAX_QUEUE) { return 0; /* * To be sure */ } memcpy(&queue[qtail * EV_SZ], note, EV_SZ); qtail = (qtail + 1) % SEQ_MAX_QUEUE; qlen++; return 1; } static int extended_event(unsigned char *q) { int dev = q[2]; if (dev < 0 || dev >= max_synthdev) return -ENXIO; if (!(synth_open_mask & (1 << dev))) return -ENXIO; switch (q[1]) { case SEQ_NOTEOFF: synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]); break; case SEQ_NOTEON: if (q[4] > 127 && q[4] != 255) return 0; if (q[5] == 0) { synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]); break; } synth_devs[dev]->start_note(dev, q[3], q[4], q[5]); break; case SEQ_PGMCHANGE: synth_devs[dev]->set_instr(dev, q[3], q[4]); break; case SEQ_AFTERTOUCH: synth_devs[dev]->aftertouch(dev, q[3], q[4]); break; case SEQ_BALANCE: synth_devs[dev]->panning(dev, q[3], (char) q[4]); break; case SEQ_CONTROLLER: synth_devs[dev]->controller(dev, q[3], q[4], (short) (q[5] | (q[6] << 8))); break; case SEQ_VOLMODE: if (synth_devs[dev]->volume_method != NULL) synth_devs[dev]->volume_method(dev, q[3]); break; default: return -EINVAL; } return 0; } static int find_voice(int dev, int chn, int note) { unsigned short key; int i; key = (chn << 8) | (note + 1); for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++) if (synth_devs[dev]->alloc.map[i] == key) return i; return -1; } static int alloc_voice(int dev, int 
chn, int note) { unsigned short key; int voice; key = (chn << 8) | (note + 1); voice = synth_devs[dev]->alloc_voice(dev, chn, note, &synth_devs[dev]->alloc); synth_devs[dev]->alloc.map[voice] = key; synth_devs[dev]->alloc.alloc_times[voice] = synth_devs[dev]->alloc.timestamp++; return voice; } static void seq_chn_voice_event(unsigned char *event_rec) { #define dev event_rec[1] #define cmd event_rec[2] #define chn event_rec[3] #define note event_rec[4] #define parm event_rec[5] int voice = -1; if ((int) dev > max_synthdev || synth_devs[dev] == NULL) return; if (!(synth_open_mask & (1 << dev))) return; if (!synth_devs[dev]) return; if (seq_mode == SEQ_2) { if (synth_devs[dev]->alloc_voice) voice = find_voice(dev, chn, note); if (cmd == MIDI_NOTEON && parm == 0) { cmd = MIDI_NOTEOFF; parm = 64; } } switch (cmd) { case MIDI_NOTEON: if (note > 127 && note != 255) /* Not a seq2 feature */ return; if (voice == -1 && seq_mode == SEQ_2 && synth_devs[dev]->alloc_voice) { /* Internal synthesizer (FM, GUS, etc) */ voice = alloc_voice(dev, chn, note); } if (voice == -1) voice = chn; if (seq_mode == SEQ_2 && (int) dev < num_synths) { /* * The MIDI channel 10 is a percussive channel. Use the note * number to select the proper patch (128 to 255) to play. 
*/ if (chn == 9) { synth_devs[dev]->set_instr(dev, voice, 128 + note); synth_devs[dev]->chn_info[chn].pgm_num = 128 + note; } synth_devs[dev]->setup_voice(dev, voice, chn); } synth_devs[dev]->start_note(dev, voice, note, parm); break; case MIDI_NOTEOFF: if (voice == -1) voice = chn; synth_devs[dev]->kill_note(dev, voice, note, parm); break; case MIDI_KEY_PRESSURE: if (voice == -1) voice = chn; synth_devs[dev]->aftertouch(dev, voice, parm); break; default:; } #undef dev #undef cmd #undef chn #undef note #undef parm } static void seq_chn_common_event(unsigned char *event_rec) { unsigned char dev = event_rec[1]; unsigned char cmd = event_rec[2]; unsigned char chn = event_rec[3]; unsigned char p1 = event_rec[4]; /* unsigned char p2 = event_rec[5]; */ unsigned short w14 = *(short *) &event_rec[6]; if ((int) dev > max_synthdev || synth_devs[dev] == NULL) return; if (!(synth_open_mask & (1 << dev))) return; if (!synth_devs[dev]) return; switch (cmd) { case MIDI_PGM_CHANGE: if (seq_mode == SEQ_2) { if (chn > 15) break; synth_devs[dev]->chn_info[chn].pgm_num = p1; if ((int) dev >= num_synths) synth_devs[dev]->set_instr(dev, chn, p1); } else synth_devs[dev]->set_instr(dev, chn, p1); break; case MIDI_CTL_CHANGE: if (seq_mode == SEQ_2) { if (chn > 15 || p1 > 127) break; synth_devs[dev]->chn_info[chn].controllers[p1] = w14 & 0x7f; if (p1 < 32) /* Setting MSB should clear LSB to 0 */ synth_devs[dev]->chn_info[chn].controllers[p1 + 32] = 0; if ((int) dev < num_synths) { int val = w14 & 0x7f; int i, key; if (p1 < 64) /* Combine MSB and LSB */ { val = ((synth_devs[dev]-> chn_info[chn].controllers[p1 & ~32] & 0x7f) << 7) | (synth_devs[dev]-> chn_info[chn].controllers[p1 | 32] & 0x7f); p1 &= ~32; } /* Handle all playing notes on this channel */ key = ((int) chn << 8); for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++) if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key) synth_devs[dev]->controller(dev, i, p1, val); } else synth_devs[dev]->controller(dev, chn, p1, w14); } else /* 
Mode 1 */ synth_devs[dev]->controller(dev, chn, p1, w14); break; case MIDI_PITCH_BEND: if (seq_mode == SEQ_2) { if (chn > 15) break; synth_devs[dev]->chn_info[chn].bender_value = w14; if ((int) dev < num_synths) { /* Handle all playing notes on this channel */ int i, key; key = (chn << 8); for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++) if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key) synth_devs[dev]->bender(dev, i, w14); } else synth_devs[dev]->bender(dev, chn, w14); } else /* MODE 1 */ synth_devs[dev]->bender(dev, chn, w14); break; default:; } } static int seq_timing_event(unsigned char *event_rec) { unsigned char cmd = event_rec[1]; unsigned int parm = *(int *) &event_rec[4]; if (seq_mode == SEQ_2) { int ret; if ((ret = tmr->event(tmr_no, event_rec)) == TIMER_ARMED) if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) wake_up(&seq_sleeper); return ret; } switch (cmd) { case TMR_WAIT_REL: parm += prev_event_time; /* * NOTE! No break here. Execution of TMR_WAIT_REL continues in the * next case (TMR_WAIT_ABS) */ case TMR_WAIT_ABS: if (parm > 0) { long time; time = parm; prev_event_time = time; seq_playing = 1; request_sound_timer(time); if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) wake_up(&seq_sleeper); return TIMER_ARMED; } break; case TMR_START: seq_time = jiffies; prev_input_time = 0; prev_event_time = 0; break; case TMR_STOP: break; case TMR_CONTINUE: break; case TMR_TEMPO: break; case TMR_ECHO: if (seq_mode == SEQ_2) seq_copy_to_input(event_rec, 8); else { parm = (parm << 8 | SEQ_ECHO); seq_copy_to_input((unsigned char *) &parm, 4); } break; default:; } return TIMER_NOT_ARMED; } static void seq_local_event(unsigned char *event_rec) { unsigned char cmd = event_rec[1]; unsigned int parm = *((unsigned int *) &event_rec[4]); switch (cmd) { case LOCL_STARTAUDIO: DMAbuf_start_devices(parm); break; default:; } } static void seq_sysex_message(unsigned char *event_rec) { unsigned int dev = event_rec[1]; int i, l = 0; unsigned char *buf = &event_rec[2]; if (dev 
> max_synthdev) return; if (!(synth_open_mask & (1 << dev))) return; if (!synth_devs[dev]) return; l = 0; for (i = 0; i < 6 && buf[i] != 0xff; i++) l = i + 1; if (!synth_devs[dev]->send_sysex) return; if (l > 0) synth_devs[dev]->send_sysex(dev, buf, l); } static int play_event(unsigned char *q) { /* * NOTE! This routine returns * 0 = normal event played. * 1 = Timer armed. Suspend playback until timer callback. * 2 = MIDI output buffer full. Restore queue and suspend until timer */ unsigned int *delay; switch (q[0]) { case SEQ_NOTEOFF: if (synth_open_mask & (1 << 0)) if (synth_devs[0]) synth_devs[0]->kill_note(0, q[1], 255, q[3]); break; case SEQ_NOTEON: if (q[4] < 128 || q[4] == 255) if (synth_open_mask & (1 << 0)) if (synth_devs[0]) synth_devs[0]->start_note(0, q[1], q[2], q[3]); break; case SEQ_WAIT: delay = (unsigned int *) q; /* * Bytes 1 to 3 are containing the * * delay in 'ticks' */ *delay = (*delay >> 8) & 0xffffff; if (*delay > 0) { long time; seq_playing = 1; time = *delay; prev_event_time = time; request_sound_timer(time); if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) wake_up(&seq_sleeper); /* * The timer is now active and will reinvoke this function * after the timer expires. Return to the caller now. */ return 1; } break; case SEQ_PGMCHANGE: if (synth_open_mask & (1 << 0)) if (synth_devs[0]) synth_devs[0]->set_instr(0, q[1], q[2]); break; case SEQ_SYNCTIMER: /* * Reset timer */ seq_time = jiffies; prev_input_time = 0; prev_event_time = 0; break; case SEQ_MIDIPUTC: /* * Put a midi character */ if (midi_opened[q[2]]) { int dev; dev = q[2]; if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL) break; if (!midi_devs[dev]->outputc(dev, q[1])) { /* * Output FIFO is full. Wait one timer cycle and try again. 
*/ seq_playing = 1; request_sound_timer(-1); return 2; } else midi_written[dev] = 1; } break; case SEQ_ECHO: seq_copy_to_input(q, 4); /* * Echo back to the process */ break; case SEQ_PRIVATE: if ((int) q[1] < max_synthdev) synth_devs[q[1]]->hw_control(q[1], q); break; case SEQ_EXTENDED: extended_event(q); break; case EV_CHN_VOICE: seq_chn_voice_event(q); break; case EV_CHN_COMMON: seq_chn_common_event(q); break; case EV_TIMING: if (seq_timing_event(q) == TIMER_ARMED) { return 1; } break; case EV_SEQ_LOCAL: seq_local_event(q); break; case EV_SYSEX: seq_sysex_message(q); break; default:; } return 0; } /* called also as timer in irq context */ static void seq_startplay(void) { int this_one, action; unsigned long flags; while (qlen > 0) { spin_lock_irqsave(&lock,flags); qhead = ((this_one = qhead) + 1) % SEQ_MAX_QUEUE; qlen--; spin_unlock_irqrestore(&lock,flags); seq_playing = 1; if ((action = play_event(&queue[this_one * EV_SZ]))) { /* Suspend playback. Next timer routine invokes this routine again */ if (action == 2) { qlen++; qhead = this_one; } return; } } seq_playing = 0; if ((SEQ_MAX_QUEUE - qlen) >= output_threshold) wake_up(&seq_sleeper); } static void reset_controllers(int dev, unsigned char *controller, int update_dev) { int i; for (i = 0; i < 128; i++) controller[i] = ctrl_def_values[i]; } static void setup_mode2(void) { int dev; max_synthdev = num_synths; for (dev = 0; dev < num_midis; dev++) { if (midi_devs[dev] && midi_devs[dev]->converter != NULL) { synth_devs[max_synthdev++] = midi_devs[dev]->converter; } } for (dev = 0; dev < max_synthdev; dev++) { int chn; synth_devs[dev]->sysex_ptr = 0; synth_devs[dev]->emulation = 0; for (chn = 0; chn < 16; chn++) { synth_devs[dev]->chn_info[chn].pgm_num = 0; reset_controllers(dev, synth_devs[dev]->chn_info[chn].controllers,0); synth_devs[dev]->chn_info[chn].bender_value = (1 << 7); /* Neutral */ synth_devs[dev]->chn_info[chn].bender_range = 200; } } max_mididev = 0; seq_mode = SEQ_2; } int sequencer_open(int dev, 
struct file *file) { int retval, mode, i; int level, tmp; if (!sequencer_ok) sequencer_init(); level = ((dev & 0x0f) == SND_DEV_SEQ2) ? 2 : 1; dev = dev >> 4; mode = translate_mode(file); if (!sequencer_ok) { /* printk("Sound card: sequencer not initialized\n");*/ return -ENXIO; } if (dev) /* Patch manager device (obsolete) */ return -ENXIO; if(synth_devs[dev] == NULL) request_module("synth0"); if (mode == OPEN_READ) { if (!num_midis) { /*printk("Sequencer: No MIDI devices. Input not possible\n");*/ sequencer_busy = 0; return -ENXIO; } } if (sequencer_busy) { return -EBUSY; } sequencer_busy = 1; obsolete_api_used = 0; max_mididev = num_midis; max_synthdev = num_synths; pre_event_timeout = MAX_SCHEDULE_TIMEOUT; seq_mode = SEQ_1; if (pending_timer != -1) { tmr_no = pending_timer; pending_timer = -1; } if (tmr_no == -1) /* Not selected yet */ { int i, best; best = -1; for (i = 0; i < num_sound_timers; i++) if (sound_timer_devs[i] && sound_timer_devs[i]->priority > best) { tmr_no = i; best = sound_timer_devs[i]->priority; } if (tmr_no == -1) /* Should not be */ tmr_no = 0; } tmr = sound_timer_devs[tmr_no]; if (level == 2) { if (tmr == NULL) { /*printk("sequencer: No timer for level 2\n");*/ sequencer_busy = 0; return -ENXIO; } setup_mode2(); } if (!max_synthdev && !max_mididev) { sequencer_busy=0; return -ENXIO; } synth_open_mask = 0; for (i = 0; i < max_mididev; i++) { midi_opened[i] = 0; midi_written[i] = 0; } for (i = 0; i < max_synthdev; i++) { if (synth_devs[i]==NULL) continue; if (!try_module_get(synth_devs[i]->owner)) continue; if ((tmp = synth_devs[i]->open(i, mode)) < 0) { printk(KERN_WARNING "Sequencer: Warning! 
Cannot open synth device #%d (%d)\n", i, tmp); if (synth_devs[i]->midi_dev) printk(KERN_WARNING "(Maps to MIDI dev #%d)\n", synth_devs[i]->midi_dev); } else { synth_open_mask |= (1 << i); if (synth_devs[i]->midi_dev) midi_opened[synth_devs[i]->midi_dev] = 1; } } seq_time = jiffies; prev_input_time = 0; prev_event_time = 0; if (seq_mode == SEQ_1 && (mode == OPEN_READ || mode == OPEN_READWRITE)) { /* * Initialize midi input devices */ for (i = 0; i < max_mididev; i++) if (!midi_opened[i] && midi_devs[i]) { if (!try_module_get(midi_devs[i]->owner)) continue; if ((retval = midi_devs[i]->open(i, mode, sequencer_midi_input, sequencer_midi_output)) >= 0) { midi_opened[i] = 1; } } } if (seq_mode == SEQ_2) { if (try_module_get(tmr->owner)) tmr->open(tmr_no, seq_mode); } init_waitqueue_head(&seq_sleeper); init_waitqueue_head(&midi_sleeper); output_threshold = SEQ_MAX_QUEUE / 2; return 0; } static void seq_drain_midi_queues(void) { int i, n; /* * Give the Midi drivers time to drain their output queues */ n = 1; while (!signal_pending(current) && n) { n = 0; for (i = 0; i < max_mididev; i++) if (midi_opened[i] && midi_written[i]) if (midi_devs[i]->buffer_status != NULL) if (midi_devs[i]->buffer_status(i)) n++; /* * Let's have a delay */ if (n) oss_broken_sleep_on(&seq_sleeper, HZ/10); } } void sequencer_release(int dev, struct file *file) { int i; int mode = translate_mode(file); dev = dev >> 4; /* * Wait until the queue is empty (if we don't have nonblock) */ if (mode != OPEN_READ && !(file->f_flags & O_NONBLOCK)) { while (!signal_pending(current) && qlen > 0) { seq_sync(); oss_broken_sleep_on(&seq_sleeper, 3*HZ); /* Extra delay */ } } if (mode != OPEN_READ) seq_drain_midi_queues(); /* * Ensure the output queues are empty */ seq_reset(); if (mode != OPEN_READ) seq_drain_midi_queues(); /* * Flush the all notes off messages */ for (i = 0; i < max_synthdev; i++) { if (synth_open_mask & (1 << i)) /* * Actually opened */ if (synth_devs[i]) { synth_devs[i]->close(i); 
module_put(synth_devs[i]->owner); if (synth_devs[i]->midi_dev) midi_opened[synth_devs[i]->midi_dev] = 0; } } for (i = 0; i < max_mididev; i++) { if (midi_opened[i]) { midi_devs[i]->close(i); module_put(midi_devs[i]->owner); } } if (seq_mode == SEQ_2) { tmr->close(tmr_no); module_put(tmr->owner); } if (obsolete_api_used) printk(KERN_WARNING "/dev/music: Obsolete (4 byte) API was used by %s\n", current->comm); sequencer_busy = 0; } static int seq_sync(void) { if (qlen && !seq_playing && !signal_pending(current)) seq_startplay(); if (qlen > 0) oss_broken_sleep_on(&seq_sleeper, HZ); return qlen; } static void midi_outc(int dev, unsigned char data) { /* * NOTE! Calls sleep(). Don't call this from interrupt. */ int n; unsigned long flags; /* * This routine sends one byte to the Midi channel. * If the output FIFO is full, it waits until there * is space in the queue */ n = 3 * HZ; /* Timeout */ spin_lock_irqsave(&lock,flags); while (n && !midi_devs[dev]->outputc(dev, data)) { oss_broken_sleep_on(&seq_sleeper, HZ/25); n--; } spin_unlock_irqrestore(&lock,flags); } static void seq_reset(void) { /* * NOTE! Calls sleep(). Don't call this from interrupt. */ int i; int chn; unsigned long flags; sound_stop_timer(); seq_time = jiffies; prev_input_time = 0; prev_event_time = 0; qlen = qhead = qtail = 0; iqlen = iqhead = iqtail = 0; for (i = 0; i < max_synthdev; i++) if (synth_open_mask & (1 << i)) if (synth_devs[i]) synth_devs[i]->reset(i); if (seq_mode == SEQ_2) { for (chn = 0; chn < 16; chn++) for (i = 0; i < max_synthdev; i++) if (synth_open_mask & (1 << i)) if (synth_devs[i]) { synth_devs[i]->controller(i, chn, 123, 0); /* All notes off */ synth_devs[i]->controller(i, chn, 121, 0); /* Reset all ctl */ synth_devs[i]->bender(i, chn, 1 << 13); /* Bender off */ } } else /* seq_mode == SEQ_1 */ { for (i = 0; i < max_mididev; i++) if (midi_written[i]) /* * Midi used. 
Some notes may still be playing */ { /* * Sending just a ACTIVE SENSING message should be enough to stop all * playing notes. Since there are devices not recognizing the * active sensing, we have to send some all notes off messages also. */ midi_outc(i, 0xfe); for (chn = 0; chn < 16; chn++) { midi_outc(i, (unsigned char) (0xb0 + (chn & 0x0f))); /* control change */ midi_outc(i, 0x7b); /* All notes off */ midi_outc(i, 0); /* Dummy parameter */ } midi_devs[i]->close(i); midi_written[i] = 0; midi_opened[i] = 0; } } seq_playing = 0; spin_lock_irqsave(&lock,flags); if (waitqueue_active(&seq_sleeper)) { /* printk( "Sequencer Warning: Unexpected sleeping process - Waking up\n"); */ wake_up(&seq_sleeper); } spin_unlock_irqrestore(&lock,flags); } static void seq_panic(void) { /* * This routine is called by the application in case the user * wants to reset the system to the default state. */ seq_reset(); /* * Since some of the devices don't recognize the active sensing and * all notes off messages, we have to shut all notes manually. 
* * TO BE IMPLEMENTED LATER */ /* * Also return the controllers to their default states */ } int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *arg) { int midi_dev, orig_dev, val, err; int mode = translate_mode(file); struct synth_info inf; struct seq_event_rec event_rec; unsigned long flags; int __user *p = arg; orig_dev = dev = dev >> 4; switch (cmd) { case SNDCTL_TMR_TIMEBASE: case SNDCTL_TMR_TEMPO: case SNDCTL_TMR_START: case SNDCTL_TMR_STOP: case SNDCTL_TMR_CONTINUE: case SNDCTL_TMR_METRONOME: case SNDCTL_TMR_SOURCE: if (seq_mode != SEQ_2) return -EINVAL; return tmr->ioctl(tmr_no, cmd, arg); case SNDCTL_TMR_SELECT: if (seq_mode != SEQ_2) return -EINVAL; if (get_user(pending_timer, p)) return -EFAULT; if (pending_timer < 0 || pending_timer >= num_sound_timers || sound_timer_devs[pending_timer] == NULL) { pending_timer = -1; return -EINVAL; } val = pending_timer; break; case SNDCTL_SEQ_PANIC: seq_panic(); return -EINVAL; case SNDCTL_SEQ_SYNC: if (mode == OPEN_READ) return 0; while (qlen > 0 && !signal_pending(current)) seq_sync(); return qlen ? 
-EINTR : 0; case SNDCTL_SEQ_RESET: seq_reset(); return 0; case SNDCTL_SEQ_TESTMIDI: if (__get_user(midi_dev, p)) return -EFAULT; if (midi_dev < 0 || midi_dev >= max_mididev || !midi_devs[midi_dev]) return -ENXIO; if (!midi_opened[midi_dev] && (err = midi_devs[midi_dev]->open(midi_dev, mode, sequencer_midi_input, sequencer_midi_output)) < 0) return err; midi_opened[midi_dev] = 1; return 0; case SNDCTL_SEQ_GETINCOUNT: if (mode == OPEN_WRITE) return 0; val = iqlen; break; case SNDCTL_SEQ_GETOUTCOUNT: if (mode == OPEN_READ) return 0; val = SEQ_MAX_QUEUE - qlen; break; case SNDCTL_SEQ_GETTIME: if (seq_mode == SEQ_2) return tmr->ioctl(tmr_no, cmd, arg); val = jiffies - seq_time; break; case SNDCTL_SEQ_CTRLRATE: /* * If *arg == 0, just return the current rate */ if (seq_mode == SEQ_2) return tmr->ioctl(tmr_no, cmd, arg); if (get_user(val, p)) return -EFAULT; if (val != 0) return -EINVAL; val = HZ; break; case SNDCTL_SEQ_RESETSAMPLES: case SNDCTL_SYNTH_REMOVESAMPLE: case SNDCTL_SYNTH_CONTROL: if (get_user(dev, p)) return -EFAULT; if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL) return -ENXIO; if (!(synth_open_mask & (1 << dev)) && !orig_dev) return -EBUSY; return synth_devs[dev]->ioctl(dev, cmd, arg); case SNDCTL_SEQ_NRSYNTHS: val = max_synthdev; break; case SNDCTL_SEQ_NRMIDIS: val = max_mididev; break; case SNDCTL_SYNTH_MEMAVL: if (get_user(dev, p)) return -EFAULT; if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL) return -ENXIO; if (!(synth_open_mask & (1 << dev)) && !orig_dev) return -EBUSY; val = synth_devs[dev]->ioctl(dev, cmd, arg); break; case SNDCTL_FM_4OP_ENABLE: if (get_user(dev, p)) return -EFAULT; if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL) return -ENXIO; if (!(synth_open_mask & (1 << dev))) return -ENXIO; synth_devs[dev]->ioctl(dev, cmd, arg); return 0; case SNDCTL_SYNTH_INFO: if (get_user(dev, &((struct synth_info __user *)arg)->device)) return -EFAULT; if (dev < 0 || dev >= max_synthdev) return -ENXIO; if 
(!(synth_open_mask & (1 << dev)) && !orig_dev) return -EBUSY; return synth_devs[dev]->ioctl(dev, cmd, arg); /* Like SYNTH_INFO but returns ID in the name field */ case SNDCTL_SYNTH_ID: if (get_user(dev, &((struct synth_info __user *)arg)->device)) return -EFAULT; if (dev < 0 || dev >= max_synthdev) return -ENXIO; if (!(synth_open_mask & (1 << dev)) && !orig_dev) return -EBUSY; memcpy(&inf, synth_devs[dev]->info, sizeof(inf)); strlcpy(inf.name, synth_devs[dev]->id, sizeof(inf.name)); inf.device = dev; return copy_to_user(arg, &inf, sizeof(inf))?-EFAULT:0; case SNDCTL_SEQ_OUTOFBAND: if (copy_from_user(&event_rec, arg, sizeof(event_rec))) return -EFAULT; spin_lock_irqsave(&lock,flags); play_event(event_rec.arr); spin_unlock_irqrestore(&lock,flags); return 0; case SNDCTL_MIDI_INFO: if (get_user(dev, &((struct midi_info __user *)arg)->device)) return -EFAULT; if (dev < 0 || dev >= max_mididev || !midi_devs[dev]) return -ENXIO; midi_devs[dev]->info.device = dev; return copy_to_user(arg, &midi_devs[dev]->info, sizeof(struct midi_info))?-EFAULT:0; case SNDCTL_SEQ_THRESHOLD: if (get_user(val, p)) return -EFAULT; if (val < 1) val = 1; if (val >= SEQ_MAX_QUEUE) val = SEQ_MAX_QUEUE - 1; output_threshold = val; return 0; case SNDCTL_MIDI_PRETIME: if (get_user(val, p)) return -EFAULT; if (val < 0) val = 0; val = (HZ * val) / 10; pre_event_timeout = val; break; default: if (mode == OPEN_READ) return -EIO; if (!synth_devs[0]) return -ENXIO; if (!(synth_open_mask & (1 << 0))) return -ENXIO; if (!synth_devs[0]->ioctl) return -EINVAL; return synth_devs[0]->ioctl(0, cmd, arg); } return put_user(val, p); } /* No kernel lock - we're using the global irq lock here */ unsigned int sequencer_poll(int dev, struct file *file, poll_table * wait) { unsigned long flags; unsigned int mask = 0; dev = dev >> 4; spin_lock_irqsave(&lock,flags); /* input */ poll_wait(file, &midi_sleeper, wait); if (iqlen) mask |= POLLIN | POLLRDNORM; /* output */ poll_wait(file, &seq_sleeper, wait); if 
((SEQ_MAX_QUEUE - qlen) >= output_threshold) mask |= POLLOUT | POLLWRNORM; spin_unlock_irqrestore(&lock,flags); return mask; } void sequencer_timer(unsigned long dummy) { seq_startplay(); } EXPORT_SYMBOL(sequencer_timer); int note_to_freq(int note_num) { /* * This routine converts a midi note to a frequency (multiplied by 1000) */ int note, octave, note_freq; static int notes[] = { 261632, 277189, 293671, 311132, 329632, 349232, 369998, 391998, 415306, 440000, 466162, 493880 }; #define BASE_OCTAVE 5 octave = note_num / 12; note = note_num % 12; note_freq = notes[note]; if (octave < BASE_OCTAVE) note_freq >>= (BASE_OCTAVE - octave); else if (octave > BASE_OCTAVE) note_freq <<= (octave - BASE_OCTAVE); /* * note_freq >>= 1; */ return note_freq; } EXPORT_SYMBOL(note_to_freq); unsigned long compute_finetune(unsigned long base_freq, int bend, int range, int vibrato_cents) { unsigned long amount; int negative, semitones, cents, multiplier = 1; if (!bend) return base_freq; if (!range) return base_freq; if (!base_freq) return base_freq; if (range >= 8192) range = 8192; bend = bend * range / 8192; /* Convert to cents */ bend += vibrato_cents; if (!bend) return base_freq; negative = bend < 0 ? 
1 : 0; if (bend < 0) bend *= -1; if (bend > range) bend = range; /* if (bend > 2399) bend = 2399; */ while (bend > 2399) { multiplier *= 4; bend -= 2400; } semitones = bend / 100; cents = bend % 100; amount = (int) (semitone_tuning[semitones] * multiplier * cent_tuning[cents]) / 10000; if (negative) return (base_freq * 10000) / amount; /* Bend down */ else return (base_freq * amount) / 10000; /* Bend up */ } EXPORT_SYMBOL(compute_finetune); void sequencer_init(void) { if (sequencer_ok) return; queue = vmalloc(SEQ_MAX_QUEUE * EV_SZ); if (queue == NULL) { printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n"); return; } iqueue = vmalloc(SEQ_MAX_QUEUE * IEV_SZ); if (iqueue == NULL) { printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n"); vfree(queue); return; } sequencer_ok = 1; } EXPORT_SYMBOL(sequencer_init); void sequencer_unload(void) { vfree(queue); vfree(iqueue); queue = iqueue = NULL; }
arfvidsonUIS/staging-unsub
sound/oss/sequencer.c
C
gpl-2.0
34,391
.glyphicon-spin,a .glyphicon-spin{display:inline-block}.alert a,.field--label,.file{font-weight:700}.file,.file-link{width:100%}.tabs-left>.nav-tabs>li:focus,.tabs-left>.nav-tabs>li>a:focus,.tabs-right>.nav-tabs>li:focus,.tabs-right>.nav-tabs>li>a:focus{outline:0}.panel-title:focus,.panel-title:hover,a .glyphicon-spin{text-decoration:none}.image-widget.row,.region-help .block,.tabledrag-changed-warning{overflow:hidden}.alert-sm{padding:5px 10px}.alert-danger a,.alert-danger a:focus,.alert-danger a:hover,.alert-info a,.alert-info a:focus,.alert-info a:hover,.alert-success a,.alert-success a:focus,.alert-success a:hover,.alert-warning a,.alert-warning a:focus,.alert-warning a:hover{color:#e6e6e6}@-webkit-keyframes glyphicon-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@-o-keyframes glyphicon-spin{0%{-o-transform:rotate(0);transform:rotate(0)}100%{-o-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes glyphicon-spin{0%{-webkit-transform:rotate(0);-o-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(359deg);-o-transform:rotate(359deg);transform:rotate(359deg)}}.glyphicon-spin{-webkit-animation:glyphicon-spin 1s infinite linear;-o-animation:glyphicon-spin 1s infinite linear;animation:glyphicon-spin 1s infinite linear}html.js .btn .ajax-throbber{margin-left:.5em;margin-right:-.25em}html.js .form-item .input-group-addon .glyphicon{color:#7A8288;opacity:.5;-webkit-transition:150ms color,150ms opacity;-o-transition:150ms color,150ms opacity;transition:150ms color,150ms opacity}html.js .form-item .input-group-addon .glyphicon.glyphicon-spin{color:#7A8288;opacity:1}html.js .form-item .input-group-addon .input-group-addon{background-color:#fff}html.js .ajax-new-content:empty{display:none!important}.field--label-inline .field--items,.field--label-inline .field--label{float:left}.field--label-inline .field--items,.field--label-inline 
.field--label,.field--label-inline>.field--item{padding-right:.5em}[dir=rtl] .field--label-inline .field--items,[dir=rtl] .field--label-inline .field--label{padding-left:.5em;padding-right:0}.field--label-inline .field--label::after{content:':'}.file{display:table;font-size:75%;margin:5px 0}.file-icon,.file-link,.file-size,.file>.tabledrag-changed{display:table-cell;vertical-align:middle}.file>span{background:#fff;color:#7A8288;border-bottom:1px solid #ccc;border-top:1px solid #ccc}.file>span:first-child{border-left:1px solid #ccc}.file>span:last-child{border-right:1px solid #ccc}.file>.tabledrag-changed{background:#f89406;border-radius:0;color:#fff;padding:0 1em;top:0}.file>.tabledrag-changed,.file>.tabledrag-changed:last-child{border:1px solid #d05a05}.file-icon{font-size:150%;padding:.25em .5em;text-align:center}.file-link a,.file-link a:active,.file-link a:focus,.file-link a:hover{color:inherit}.file-size{padding:0 1em;text-align:right;white-space:pre}.filter-wrapper{background-color:#2e3338;border:1px solid rgba(0,0,0,.6);border-top:0;border-radius:0 0 4px 4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05);margin-bottom:0;padding:10px;height:51px}.filter-help{float:right;line-height:1;margin:.5em 0 0}.nav.nav-tabs.filter-formats{margin-bottom:15px}table .checkbox.form-no-label,table .radio.form-no-label{margin-bottom:0;margin-top:0}.select-wrapper{display:inline-block;position:relative;width:100%}.form-inline .select-wrapper{width:auto}.input-group .select-wrapper{display:table-cell}.input-group .select-wrapper:first-child .form-control:first-child{border-bottom-left-radius:4px;border-top-left-radius:4px}.input-group .select-wrapper:last-child .form-control:first-child{border-bottom-right-radius:4px;border-top-right-radius:4px}.select-wrapper select{-webkit-appearance:none;-moz-appearance:none;appearance:none;line-height:1;padding-right:2em}.select-wrapper 
select::-ms-expand{opacity:0}.select-wrapper:after{color:#7A8288;content:'▼';font-style:normal;font-weight:400;line-height:1;margin-top:-.5em;padding-right:.5em;pointer-events:none;position:absolute;right:0;top:50%;z-index:10}.has-glyphicons .select-wrapper:after{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;content:'\e114';display:inline-block;font-family:'Glyphicons Halflings'}.has-error .select-wrapper:after,.has-success .select-wrapper:after,.has-warning .select-wrapper:after{color:#fff}.form-required:after{background-image:url(../images/required.svg);-webkit-background-size:10px 7px;background-size:10px 7px;content:"";display:inline-block;line-height:1;height:7px;width:10px}.form-actions .btn,.form-actions .btn-group{margin-right:10px}.form-actions .btn-group .btn{margin-right:0}a.icon-before .glyphicon{margin-right:.25em}a.icon-after .glyphicon{margin-left:.25em}.btn.icon-before .glyphicon{margin-left:-.25em;margin-right:.25em}.btn.icon-after .glyphicon{margin-left:.25em;margin-right:-.25em}body{position:relative}body.navbar-is-static-top{margin-top:0}body.navbar-is-fixed-top{margin-top:65px}body.navbar-is-fixed-bottom{padding-bottom:65px}@media screen and (max-width:767px){body.toolbar-vertical.navbar-is-fixed-bottom .toolbar-bar,body.toolbar-vertical.navbar-is-fixed-top .toolbar-bar{position:fixed}body.toolbar-vertical.navbar-is-fixed-bottom header,body.toolbar-vertical.navbar-is-fixed-top header{z-index:500}body.toolbar-vertical.navbar-is-fixed-top header{top:39px}}@media screen and (min-width:768px){body{margin-top:15px}.navbar.container{max-width:720px}}@media screen and (min-width:992px){.navbar.container{max-width:940px}}@media screen and (min-width:1200px){.navbar.container{max-width:1140px}}.node-preview-container{margin-top:-15px}.node-preview-form-select{padding:15px}.panel-title{display:block;margin:-10px -15px;padding:10px 
15px}.panel-title,.panel-title:focus,.panel-title:hover,.panel-title:hover:focus{color:inherit}.progress-wrapper{margin-bottom:15px}.progress-wrapper:last-child .progress{margin-bottom:5px}.progress-wrapper .message{font-weight:700;margin-bottom:5px}.progress-wrapper .percentage,.progress-wrapper .progress-label{font-size:12px}.progress-wrapper .progress-bar{min-width:2em}.tabledrag-toggle-weight{float:right;margin:1px 2px 1px 10px}.tabledrag-changed-warning{margin:0}.tabledrag-handle{color:#7A8288;cursor:move;float:left;font-size:125%;line-height:1;margin:-10px 0 0 -10px;padding:10px}.tabledrag-handle:focus,.tabledrag-handle:hover{color:#7A8288}.indentation{float:left;height:1.7em;margin:-.4em .2em -.4em -.4em;padding:.42em 0 .42em .6em;width:20px}[dir=rtl] .indentation{float:right;margin:-.4em -.4em -.4em .2em;padding:.42em .6em .42em 0}.local-actions{margin:10px 0 10px -5px}.tabs--secondary{margin:10px 0 5px}.tabbable{margin-bottom:20px}.tabs-below>.nav-tabs,.tabs-left>.nav-tabs,.tabs-right>.nav-tabs{border-bottom:0}.tabs-below>.nav-tabs .summary,.tabs-left>.nav-tabs .summary,.tabs-right>.nav-tabs .summary{color:#7A8288;font-size:12px}.tab-pane>.panel-heading{display:none}.tab-content>.active{display:block}.tabs-below>.nav-tabs{border-top:1px solid #1c1e22}.tabs-below>.nav-tabs>li{margin-top:-1px;margin-bottom:0}.tabs-below>.nav-tabs>li>a{border-radius:0 0 4px 4px}.tabs-below>.nav-tabs>li>a:focus,.tabs-below>.nav-tabs>li>a:hover{border-top-color:#1c1e22;border-bottom-color:transparent}.tabs-below>.nav-tabs>.active>a,.tabs-below>.nav-tabs>.active>a:focus,.tabs-below>.nav-tabs>.active>a:hover{border-color:transparent #1c1e22 #1c1e22}.tabs-left>.nav-tabs,.tabs-right>.nav-tabs{padding-bottom:20px;width:220px}.tabs-left>.nav-tabs>li,.tabs-right>.nav-tabs>li{float:none}.tabs-left>.nav-tabs>li>a,.tabs-right>.nav-tabs>li>a{margin-right:0;margin-bottom:3px}.form-group:last-child,.panel:last-child,.popover ol:last-child,.popover 
ul:last-child,p:last-child{margin-bottom:0}.tabs-left>.tab-content,.tabs-right>.tab-content{border-radius:0 4px 4px;border:1px solid #1c1e22;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05);overflow:hidden;padding:10px 15px}.tabs-left>.nav-tabs{float:left;margin-right:-1px}.tabs-left>.nav-tabs>li>a{border-radius:4px 0 0 4px}.tabs-left>.nav-tabs>li>a:focus,.tabs-left>.nav-tabs>li>a:hover{border-color:#1c1e22}.tabs-left>.nav-tabs>.active>a,.tabs-left>.nav-tabs>.active>a:focus,.tabs-left>.nav-tabs>.active>a:hover{border-color:#1c1e22 transparent #1c1e22 #1c1e22;-webkit-box-shadow:-1px 1px 1px rgba(0,0,0,.05);box-shadow:-1px 1px 1px rgba(0,0,0,.05)}.tabs-right>.nav-tabs{float:right;margin-left:-1px}.tabs-right>.nav-tabs>li>a{border-radius:0 4px 4px 0}.tabs-right>.nav-tabs>li>a:focus,.tabs-right>.nav-tabs>li>a:hover{border-color:#1c1e22;-webkit-box-shadow:1px 1px 1px rgba(0,0,0,.05);box-shadow:1px 1px 1px rgba(0,0,0,.05)}.tabs-right>.nav-tabs>.active>a,.tabs-right>.nav-tabs>.active>a:focus,.tabs-right>.nav-tabs>.active>a:hover{border-color:#1c1e22 #1c1e22 #1c1e22 transparent}body.toolbar-fixed .toolbar-oriented .toolbar-bar{z-index:1031}body.toolbar-fixed .navbar-fixed-top{top:39px}body.toolbar-fixed.toolbar-horizontal.toolbar-tray-open .navbar-fixed-top{top:79px}body.toolbar-fixed.toolbar-vertical.toolbar-tray-open .navbar-fixed-top{left:240px}body.toolbar-fixed.toolbar-vertical.toolbar-tray-open.toolbar-fixed{margin-left:240px}body.toolbar-fixed.toolbar-vertical.toolbar-tray-open.toolbar-fixed .toolbar-tray{padding-bottom:40px}body.toolbar-fixed.toolbar-vertical.toolbar-tray-open.toolbar-fixed .toolbar-tray,body.toolbar-fixed.toolbar-vertical.toolbar-tray-open.toolbar-fixed .toolbar-tray>.toolbar-lining:before{width:240px}.ui-autocomplete{background:#3A3F44;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #272B30;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px 
rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175);color:inherit;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;list-style:none;min-width:160px;padding:5px 0;text-align:left;z-index:1000}.ui-autocomplete .ui-menu-item{border:0;border-radius:0;clear:both;color:#C8C8C8;cursor:pointer;display:block;font-weight:400;line-height:1.42857143;margin:0;outline:0;padding:3px 20px;text-decoration:none;white-space:nowrap}.ui-autocomplete .ui-menu-item.ui-state-active,.ui-autocomplete .ui-menu-item.ui-state-focus,.ui-autocomplete .ui-menu-item.ui-state-hover{background:#272B30;color:#fff}ol,ul{padding-left:1.5em}.page-header{margin-top:0}.footer{margin-top:45px;padding-top:35px;padding-bottom:36px;border-top:1px solid #E5E5E5}.region-help>.glyphicon{font-size:18px;float:left;margin:-.05em .5em 0 0}.control-group .help-inline,.help-block{color:#7A8288;font-size:12px;margin:5px 0 10px;padding:0}.control-group .help-inline:first-child,.help-block:first-child{margin-top:0}
mahak891/Brewed
themes/main/bootstrap/css/3.1.0/overrides-slate.min.css
CSS
gpl-2.0
10,727
/* * nosun4c.c: This file is a bunch of dummies for SMP compiles, * so that it does not need sun4c and avoid ifdefs. * * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <asm/pgtable.h> static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n"; /* Dummies */ struct sun4c_mmu_ring { unsigned long xxx1[3]; unsigned char xxx2[2]; int xxx3; }; struct sun4c_mmu_ring sun4c_kernel_ring; struct sun4c_mmu_ring sun4c_kfree_ring; unsigned long sun4c_kernel_faults; unsigned long *sun4c_memerr_reg; static void __init should_not_happen(void) { prom_printf(shouldnothappen); prom_halt(); } unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem) { should_not_happen(); return 0; } void __init ld_mmu_sun4c(void) { should_not_happen(); } void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly) { } void sun4c_unmapioaddr(unsigned long virt_addr) { } void sun4c_complete_all_stores(void) { } pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address) { return NULL; } pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address) { return NULL; } void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { } void __init sun4c_probe_vac(void) { should_not_happen(); } void __init sun4c_probe_memerr_reg(void) { should_not_happen(); }
ziozzang/kernel-rhel6
arch/sparc/mm/nosun4c.c
C
gpl-2.0
1,500
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * FDDI-type device handling. * * Version: @(#)fddi.c 1.0.0 08/12/96 * * Authors: Lawrence V. Stefani, <stefani@lkg.dec.com> * * fddi.c is based on previous eth.c and tr.c work by * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Florian La Roche, <rzsfl@rz.uni-sb.de> * Alan Cox, <gw4pts@gw4pts.ampr.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes * Alan Cox : New arp/rebuild header * Maciej W. Rozycki : IPv6 support */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/fddidevice.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <net/arp.h> #include <net/sock.h> /* * Create the FDDI MAC header for an arbitrary protocol layer * * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp) */ static int fddi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len) { int hl = FDDI_K_SNAP_HLEN; struct fddihdr *fddi; if(type != ETH_P_IP && type != ETH_P_IPV6 && type != ETH_P_ARP) hl=FDDI_K_8022_HLEN-3; fddi = (struct fddihdr *)skb_push(skb, hl); fddi->fc = FDDI_FC_K_ASYNC_LLC_DEF; if(type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP) { fddi->hdr.llc_snap.dsap = FDDI_EXTENDED_SAP; fddi->hdr.llc_snap.ssap = FDDI_EXTENDED_SAP; fddi->hdr.llc_snap.ctrl = 
FDDI_UI_CMD; fddi->hdr.llc_snap.oui[0] = 0x00; fddi->hdr.llc_snap.oui[1] = 0x00; fddi->hdr.llc_snap.oui[2] = 0x00; fddi->hdr.llc_snap.ethertype = htons(type); } /* Set the source and destination hardware addresses */ if (saddr != NULL) memcpy(fddi->saddr, saddr, dev->addr_len); else memcpy(fddi->saddr, dev->dev_addr, dev->addr_len); if (daddr != NULL) { memcpy(fddi->daddr, daddr, dev->addr_len); return hl; } return -hl; } /* * Determine the packet's protocol ID and fill in skb fields. * This routine is called before an incoming packet is passed * up. It's used to fill in specific skb fields and to set * the proper pointer to the start of packet data (skb->data). */ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev) { struct fddihdr *fddi = (struct fddihdr *)skb->data; __be16 type; /* * Set mac.raw field to point to FC byte, set data field to point * to start of packet data. Assume 802.2 SNAP frames for now. */ skb->dev = dev; skb_reset_mac_header(skb); /* point to frame control (FC) */ if(fddi->hdr.llc_8022_1.dsap==0xe0) { skb_pull(skb, FDDI_K_8022_HLEN-3); type = htons(ETH_P_802_2); } else { skb_pull(skb, FDDI_K_SNAP_HLEN); /* adjust for 21 byte header */ type=fddi->hdr.llc_snap.ethertype; } /* Set packet type based on destination address and flag settings */ if (*fddi->daddr & 0x01) { if (memcmp(fddi->daddr, dev->broadcast, FDDI_K_ALEN) == 0) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; } else if (dev->flags & IFF_PROMISC) { if (memcmp(fddi->daddr, dev->dev_addr, FDDI_K_ALEN)) skb->pkt_type = PACKET_OTHERHOST; } /* Assume 802.2 SNAP frames, for now */ return type; } EXPORT_SYMBOL(fddi_type_trans); int fddi_change_mtu(struct net_device *dev, int new_mtu) { if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN)) return -EINVAL; dev->mtu = new_mtu; return 0; } EXPORT_SYMBOL(fddi_change_mtu); static const struct header_ops fddi_header_ops = { .create = fddi_header, }; static void fddi_setup(struct net_device 
*dev) { dev->header_ops = &fddi_header_ops; dev->type = ARPHRD_FDDI; dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */ dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */ dev->addr_len = FDDI_K_ALEN; dev->tx_queue_len = 100; /* Long queues on FDDI */ dev->flags = IFF_BROADCAST | IFF_MULTICAST; memset(dev->broadcast, 0xFF, FDDI_K_ALEN); } /** * alloc_fddidev - Register FDDI device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this FDDI device * * Fill in the fields of the device structure with FDDI-generic values. * * Constructs a new net device, complete with a private data area of * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for * this private data area. */ struct net_device *alloc_fddidev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "fddi%d", NET_NAME_UNKNOWN, fddi_setup); } EXPORT_SYMBOL(alloc_fddidev); MODULE_LICENSE("GPL");
gospo/net-next
net/802/fddi.c
C
gpl-2.0
5,203
/* * Copyright (C) 2008 Sebastian Haas (initial chardev implementation) * Copyright (C) 2010 Markus Plessing <plessing@ems-wuensche.com> * Rework for mainline by Oliver Hartkopp <socketcan@hartkopp.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <linux/can.h> #include <linux/can/dev.h> #include "sja1000.h" #define DRV_NAME "ems_pcmcia" MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards"); MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card"); MODULE_LICENSE("GPL v2"); #define EMS_PCMCIA_MAX_CHAN 2 struct ems_pcmcia_card { int channels; struct pcmcia_device *pcmcia_dev; struct net_device *net_dev[EMS_PCMCIA_MAX_CHAN]; void __iomem *base_addr; }; #define EMS_PCMCIA_CAN_CLOCK (16000000 / 2) /* * The board configuration is probably following: * RX1 is connected to ground. * TX1 is not connected. * CLKO is not connected. * Setting the OCR register to 0xDA is a good idea. * This means normal output mode , push-pull and the correct polarity. */ #define EMS_PCMCIA_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) /* * In the CDR register, you should set CBP to 1. * You will probably also want to set the clock divider value to 7 * (meaning direct oscillator output) because the second SJA1000 chip * is driven by the first one CLKOUT output. 
*/ #define EMS_PCMCIA_CDR (CDR_CBP | CDR_CLKOUT_MASK) #define EMS_PCMCIA_MEM_SIZE 4096 /* Size of the remapped io-memory */ #define EMS_PCMCIA_CAN_BASE_OFFSET 0x100 /* Offset where controllers starts */ #define EMS_PCMCIA_CAN_CTRL_SIZE 0x80 /* Memory size for each controller */ #define EMS_CMD_RESET 0x00 /* Perform a reset of the card */ #define EMS_CMD_MAP 0x03 /* Map CAN controllers into card' memory */ #define EMS_CMD_UMAP 0x02 /* Unmap CAN controllers from card' memory */ static struct pcmcia_device_id ems_pcmcia_tbl[] = { PCMCIA_DEVICE_PROD_ID123("EMS_T_W", "CPC-Card", "V2.0", 0xeab1ea23, 0xa338573f, 0xe4575800), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, ems_pcmcia_tbl); static u8 ems_pcmcia_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + port); } static void ems_pcmcia_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + port); } static irqreturn_t ems_pcmcia_interrupt(int irq, void *dev_id) { struct ems_pcmcia_card *card = dev_id; struct net_device *dev; irqreturn_t retval = IRQ_NONE; int i, again; /* Card not present */ if (readw(card->base_addr) != 0xAA55) return IRQ_HANDLED; do { again = 0; /* Check interrupt for each channel */ for (i = 0; i < card->channels; i++) { dev = card->net_dev[i]; if (!dev) continue; if (sja1000_interrupt(irq, dev) == IRQ_HANDLED) again = 1; } /* At least one channel handled the interrupt */ if (again) retval = IRQ_HANDLED; } while (again); return retval; } /* * Check if a CAN controller is present at the specified location * by trying to set 'em into the PeliCAN mode */ static inline int ems_pcmcia_check_chan(struct sja1000_priv *priv) { /* Make sure SJA1000 is in reset mode */ ems_pcmcia_write_reg(priv, SJA1000_MOD, 1); ems_pcmcia_write_reg(priv, SJA1000_CDR, CDR_PELICAN); /* read reset-values */ if (ems_pcmcia_read_reg(priv, SJA1000_CDR) == CDR_PELICAN) return 1; return 0; } static void ems_pcmcia_del_card(struct pcmcia_device *pdev) { struct 
ems_pcmcia_card *card = pdev->priv; struct net_device *dev; int i; free_irq(pdev->irq, card); for (i = 0; i < card->channels; i++) { dev = card->net_dev[i]; if (!dev) continue; printk(KERN_INFO "%s: removing %s on channel #%d\n", DRV_NAME, dev->name, i); unregister_sja1000dev(dev); free_sja1000dev(dev); } writeb(EMS_CMD_UMAP, card->base_addr); iounmap(card->base_addr); kfree(card); pdev->priv = NULL; } /* * Probe PCI device for EMS CAN signature and register each available * CAN channel to SJA1000 Socket-CAN subsystem. */ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) { struct sja1000_priv *priv; struct net_device *dev; struct ems_pcmcia_card *card; int err, i; /* Allocating card structures to hold addresses, ... */ card = kzalloc(sizeof(struct ems_pcmcia_card), GFP_KERNEL); if (!card) return -ENOMEM; pdev->priv = card; card->channels = 0; card->base_addr = ioremap(base, EMS_PCMCIA_MEM_SIZE); if (!card->base_addr) { err = -ENOMEM; goto failure_cleanup; } /* Check for unique EMS CAN signature */ if (readw(card->base_addr) != 0xAA55) { err = -ENODEV; goto failure_cleanup; } /* Request board reset */ writeb(EMS_CMD_RESET, card->base_addr); /* Make sure CAN controllers are mapped into card's memory space */ writeb(EMS_CMD_MAP, card->base_addr); /* Detect available channels */ for (i = 0; i < EMS_PCMCIA_MAX_CHAN; i++) { dev = alloc_sja1000dev(0); if (!dev) { err = -ENOMEM; goto failure_cleanup; } card->net_dev[i] = dev; priv = netdev_priv(dev); priv->priv = card; SET_NETDEV_DEV(dev, &pdev->dev); priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; priv->reg_base = card->base_addr + EMS_PCMCIA_CAN_BASE_OFFSET + (i * EMS_PCMCIA_CAN_CTRL_SIZE); /* Check if channel is present */ if (ems_pcmcia_check_chan(priv)) { priv->read_reg = ems_pcmcia_read_reg; priv->write_reg = ems_pcmcia_write_reg; priv->can.clock.freq = EMS_PCMCIA_CAN_CLOCK; priv->ocr = EMS_PCMCIA_OCR; priv->cdr = EMS_PCMCIA_CDR; priv->flags |= SJA1000_CUSTOM_IRQ_HANDLER; /* Register 
SJA1000 device */ err = register_sja1000dev(dev); if (err) { free_sja1000dev(dev); goto failure_cleanup; } card->channels++; printk(KERN_INFO "%s: registered %s on channel " "#%d at 0x%p, irq %d\n", DRV_NAME, dev->name, i, priv->reg_base, dev->irq); } else free_sja1000dev(dev); } err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, DRV_NAME, card); if (!err) return 0; failure_cleanup: ems_pcmcia_del_card(pdev); return err; } /* * Setup PCMCIA socket and probe for EMS CPC-CARD */ static int ems_pcmcia_probe(struct pcmcia_device *dev) { int csval; /* General socket configuration */ dev->config_flags |= CONF_ENABLE_IRQ; dev->config_index = 1; dev->config_regs = PRESENT_OPTION; /* The io structure describes IO port mapping */ dev->resource[0]->end = 16; dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; dev->resource[1]->end = 16; dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; dev->io_lines = 5; /* Allocate a memory window */ dev->resource[2]->flags = (WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE); dev->resource[2]->start = dev->resource[2]->end = 0; csval = pcmcia_request_window(dev, dev->resource[2], 0); if (csval) { dev_err(&dev->dev, "pcmcia_request_window failed (err=%d)\n", csval); return 0; } csval = pcmcia_map_mem_page(dev, dev->resource[2], dev->config_base); if (csval) { dev_err(&dev->dev, "pcmcia_map_mem_page failed (err=%d)\n", csval); return 0; } csval = pcmcia_enable_device(dev); if (csval) { dev_err(&dev->dev, "pcmcia_enable_device failed (err=%d)\n", csval); return 0; } ems_pcmcia_add_card(dev, dev->resource[2]->start); return 0; } /* * Release claimed resources */ static void ems_pcmcia_remove(struct pcmcia_device *dev) { ems_pcmcia_del_card(dev); pcmcia_disable_device(dev); } static struct pcmcia_driver ems_pcmcia_driver = { .name = DRV_NAME, .probe = ems_pcmcia_probe, .remove = ems_pcmcia_remove, .id_table = ems_pcmcia_tbl, }; module_pcmcia_driver(ems_pcmcia_driver);
mukulsoni/android_kernel_cyanogen_msm8916
drivers/net/can/sja1000/ems_pcmcia.c
C
gpl-2.0
8,248
/*
 * V9FS FID Management
 *
 * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2005, 2006 by Eric Van Hensbergen <ericvh@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"

/*
 * Link @fid onto the hlist kept in dentry->d_fsdata.
 * Caller must hold dentry->d_lock.
 */
static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
{
	hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);
}

/**
 * v9fs_fid_add - add a fid to a dentry
 * @dentry: dentry that the fid is being added to
 * @fid: fid to add
 *
 * Takes dentry->d_lock around the list insertion.
 */
void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
{
	spin_lock(&dentry->d_lock);
	__add_fid(dentry, fid);
	spin_unlock(&dentry->d_lock);
}

/**
 * v9fs_fid_find - retrieve a fid that belongs to the specified uid
 * @dentry: dentry to look for fid in
 * @uid: return fid that belongs to the specified user
 * @any: if non-zero, return any fid associated with the dentry
 *
 * Returns the matching fid, or NULL if none is attached.  The returned
 * fid is not reference-counted here; it remains owned by the dentry.
 */
static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
{
	struct p9_fid *fid, *ret;

	p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
		 dentry->d_name.name, dentry, from_kuid(&init_user_ns, uid),
		 any);
	ret = NULL;
	/* we'll recheck under lock if there's anything to look in */
	if (dentry->d_fsdata) {
		struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata;
		spin_lock(&dentry->d_lock);
		hlist_for_each_entry(fid, h, dlist) {
			if (any || uid_eq(fid->uid, uid)) {
				ret = fid;
				break;
			}
		}
		spin_unlock(&dentry->d_lock);
	}

	return ret;
}

/*
 * Fill @names with pointers to each path component name from the root
 * down to @dentry, and return the number of components (or -ENOMEM).
 *
 * We need to hold v9ses->rename_sem as long as we hold references
 * to returned path array. Array element contain pointers to
 * dentry names.
 */
static int build_path_from_dentry(struct v9fs_session_info *v9ses,
				  struct dentry *dentry, char ***names)
{
	int n = 0, i;
	char **wnames;
	struct dentry *ds;

	/* First pass: count the components above @dentry */
	for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent)
		n++;

	wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL);
	if (!wnames)
		goto err_out;

	/* Second pass: fill the array root-first (walk up, store backwards) */
	for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent)
		wnames[i] = (char  *)ds->d_name.name;

	*names = wnames;
	return n;
err_out:
	return -ENOMEM;
}

/*
 * Core fid lookup: return a fid for @dentry belonging to @uid (or any
 * user if @any), walking from the parent, the root, or a fresh attach
 * as needed.  On success the new fid is attached to the dentry; on a
 * race with unlink (d_unhashed) the fid is clunked and -ENOENT returned.
 */
static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
					       kuid_t uid, int any)
{
	struct dentry *ds;
	char **wnames, *uname;
	int i, n, l, clone, access;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid, *old_fid = NULL;

	v9ses = v9fs_dentry2v9ses(dentry);
	access = v9ses->flags & V9FS_ACCESS_MASK;
	fid = v9fs_fid_find(dentry, uid, any);
	if (fid)
		return fid;
	/*
	 * we don't have a matching fid. To do a TWALK we need
	 * parent fid. We need to prevent rename when we want to
	 * look at the parent.
	 */
	down_read(&v9ses->rename_sem);
	ds = dentry->d_parent;
	fid = v9fs_fid_find(ds, uid, any);
	if (fid) {
		/* Found the parent fid do a lookup with that */
		fid = p9_client_walk(fid, 1, (char **)&dentry->d_name.name, 1);
		goto fid_out;
	}
	up_read(&v9ses->rename_sem);

	/* start from the root and try to do a lookup */
	fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any);
	if (!fid) {
		/* the user is not attached to the fs yet */
		if (access == V9FS_ACCESS_SINGLE)
			return ERR_PTR(-EPERM);

		/* legacy 9P2000 requires a user name; .u and .L do not */
		if (v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses))
				uname = NULL;
		else
			uname = v9ses->uname;

		fid = p9_client_attach(v9ses->clnt, NULL, uname, uid,
				       v9ses->aname);
		if (IS_ERR(fid))
			return fid;

		v9fs_fid_add(dentry->d_sb->s_root, fid);
	}
	/* If we are root ourself just return that */
	if (dentry->d_sb->s_root == dentry)
		return fid;
	/*
	 * Do a multipath walk with attached root.
	 * When walking parent we need to make sure we
	 * don't have a parallel rename happening
	 */
	down_read(&v9ses->rename_sem);
	n  = build_path_from_dentry(v9ses, dentry, &wnames);
	if (n < 0) {
		fid = ERR_PTR(n);
		goto err_out;
	}
	clone = 1;
	i = 0;
	while (i < n) {
		/* walk at most P9_MAXWELEM components per TWALK message */
		l = min(n - i, P9_MAXWELEM);
		/*
		 * We need to hold rename lock when doing a multipath
		 * walk to ensure none of the path components change
		 */
		fid = p9_client_walk(fid, l, &wnames[i], clone);
		if (IS_ERR(fid)) {
			if (old_fid) {
				/*
				 * If we fail, clunk fid which are mapping
				 * to path component and not the last component
				 * of the path.
				 */
				p9_client_clunk(old_fid);
			}
			kfree(wnames);
			goto err_out;
		}
		old_fid = fid;
		i += l;
		/* after the first hop we keep walking the same fid */
		clone = 0;
	}
	kfree(wnames);
fid_out:
	if (!IS_ERR(fid)) {
		spin_lock(&dentry->d_lock);
		if (d_unhashed(dentry)) {
			/* dentry went away under us: drop the fid */
			spin_unlock(&dentry->d_lock);
			p9_client_clunk(fid);
			fid = ERR_PTR(-ENOENT);
		} else {
			__add_fid(dentry, fid);
			spin_unlock(&dentry->d_lock);
		}
	}
err_out:
	up_read(&v9ses->rename_sem);
	return fid;
}

/**
 * v9fs_fid_lookup - lookup for a fid, try to walk if not found
 * @dentry: dentry to look for fid in
 *
 * Look for a fid in the specified dentry for the current user.
 * If no fid is found, try to create one walking from a fid from the parent
 * dentry (if it has one), or the root dentry. If the user haven't accessed
 * the fs yet, attach now and walk from the root.
 */
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
{
	kuid_t uid;
	int  any, access;
	struct v9fs_session_info *v9ses;

	v9ses = v9fs_dentry2v9ses(dentry);
	access = v9ses->flags & V9FS_ACCESS_MASK;
	/* pick the uid (and wildcard policy) from the session access mode */
	switch (access) {
	case V9FS_ACCESS_SINGLE:
	case V9FS_ACCESS_USER:
	case V9FS_ACCESS_CLIENT:
		uid = current_fsuid();
		any = 0;
		break;

	case V9FS_ACCESS_ANY:
		uid = v9ses->uid;
		any = 1;
		break;

	default:
		uid = INVALID_UID;
		any = 0;
		break;
	}
	return v9fs_fid_lookup_with_uid(dentry, uid, any);
}

/*
 * Return a fresh clone (zero-element walk) of the current user's fid
 * for @dentry, or an ERR_PTR.  The clone is owned by the caller.
 */
struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
{
	struct p9_fid *fid, *ret;

	fid = v9fs_fid_lookup(dentry);
	if (IS_ERR(fid))
		return fid;

	ret = p9_client_walk(fid, 0, NULL, 1);
	return ret;
}

/*
 * Same as v9fs_fid_clone() but for an explicit @uid instead of the
 * current user.
 */
static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, kuid_t uid)
{
	struct p9_fid *fid, *ret;

	fid = v9fs_fid_lookup_with_uid(dentry, uid, 0);
	if (IS_ERR(fid))
		return fid;

	ret = p9_client_walk(fid, 0, NULL, 1);
	return ret;
}

/*
 * Obtain a fid, opened O_RDWR as root, used only to write back dirty
 * pages for @dentry.  Returns the open fid or an ERR_PTR.
 */
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
{
	int err;
	struct p9_fid *fid;

	fid = v9fs_fid_clone_with_uid(dentry, GLOBAL_ROOT_UID);
	if (IS_ERR(fid))
		goto error_out;
	/*
	 * writeback fid will only be used to write back the
	 * dirty pages. We always request for the open fid in read-write
	 * mode so that a partial page write which result in page
	 * read can work.
	 */
	err = p9_client_open(fid, O_RDWR);
	if (err < 0) {
		p9_client_clunk(fid);
		fid = ERR_PTR(err);
		goto error_out;
	}
error_out:
	return fid;
}
DerRomtester/android_kernel_oneplus_msm8974-3.10
fs/9p/fid.c
C
gpl-2.0
7,397
/*
 * videobuf2-memops.c - generic memory handling routines for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/file.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>

/**
 * vb2_get_vma() - acquire and lock the virtual memory area
 * @vma:	given virtual memory area
 *
 * This function attempts to acquire an area mapped in the userspace for
 * the duration of a hardware operation. The area is "locked" by performing
 * the same set of operation that are done when process calls fork() and
 * memory areas are duplicated.
 *
 * Returns a copy of a virtual memory region on success or NULL.
 */
struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (vma_copy == NULL)
		return NULL;

	/* mirror fork(): give the area's owner a chance to take a reference */
	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	/* pin the backing file so it cannot go away while we hold the copy */
	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	/* the copy is standalone: detach it from the mm and the vma list */
	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}
EXPORT_SYMBOL_GPL(vb2_get_vma);

/**
 * vb2_put_vma() - release a userspace virtual memory area
 * @vma:	virtual memory region associated with the area to be released
 *
 * This function releases the previously acquired memory area after a hardware
 * operation.  Counterpart of vb2_get_vma(): drops the vm_ops and file
 * references taken there and frees the vma copy itself.
 */
void vb2_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}
EXPORT_SYMBOL_GPL(vb2_put_vma);

/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return locked copy of struct vm_area for the given area
 * @res_pa:	will return physical address for the given vaddr
 *
 * This function will go through memory area of size @size mapped at @vaddr and
 * verify that the underlying physical pages are contiguous. If they are
 * contiguous the virtual memory area is locked and a @res_vma is filled with
 * the copy and @res_pa set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;

	start = vaddr;
	/* remember the sub-page offset so it can be re-applied at the end */
	offset = start & ~PAGE_MASK;
	end = start + size;

	vma = find_vma(mm, start);

	/* the whole [start, end) range must lie inside a single vma */
	if (vma == NULL || vma->vm_end < end)
		return -EFAULT;

	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		int ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			return ret;

		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1)
			/* gap between consecutive pages: not contiguous */
			return -EFAULT;

		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL)
		return -ENOMEM;

	*res_pa = pa + offset;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);

/**
 * vb2_mmap_pfn_range() - map physical pages to userspace
 * @vma:	virtual memory region for the mapping
 * @paddr:	starting physical address of the memory to be mapped
 * @size:	size of the memory to be mapped
 * @vm_ops:	vm operations to be assigned to the created area
 * @priv:	private data to be associated with the area
 *
 * Returns 0 on success.
 */
int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
		       unsigned long size,
		       const struct vm_operations_struct *vm_ops,
		       void *priv)
{
	int ret;

	/* never map more than the userspace vma can hold */
	size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	if (ret) {
		printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_private_data	= priv;
	vma->vm_ops		= vm_ops;

	/* take the initial reference on behalf of the new mapping */
	vma->vm_ops->open(vma);

	pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
			__func__, paddr, vma->vm_start, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range);

/**
 * vb2_common_vm_open() - increase refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function adds another user to the provided vma. It expects
 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
 */
static void vb2_common_vm_open(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
	       __func__, h, atomic_read(h->refcount), vma->vm_start,
	       vma->vm_end);

	atomic_inc(h->refcount);
}

/**
 * vb2_common_vm_close() - decrease refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function releases the user from the provided vma. It expects
 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
 */
static void vb2_common_vm_close(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
	       __func__, h, atomic_read(h->refcount), vma->vm_start,
	       vma->vm_end);

	/* h->put() decrements the refcount and frees the buffer at zero */
	h->put(h->arg);
}

/**
 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
 * video buffers
 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,
	.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);

MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
Evervolv/android_kernel_samsung_d2
drivers/media/video/videobuf2-memops.c
C
gpl-2.0
6,171
<?php

/**
 * @file
 * Default theme implementation for entities.
 *
 * Available variables:
 * - $content: An array of comment items. Use render($content) to print them all, or
 *   print a subset such as render($content['field_example']). Use
 *   hide($content['field_example']) to temporarily suppress the printing of a
 *   given element.
 * - $title: The (sanitized) entity label.
 * - $url: Direct url of the current entity if specified.
 * - $page: Flag for the full page state; when TRUE the title is omitted
 *   because the page template already prints it.
 * - $classes: String of classes that can be used to style contextually through
 *   CSS. It can be manipulated through the variable $classes_array from
 *   preprocess functions. By default the following classes are available, where
 *   the parts enclosed by {} are replaced by the appropriate values:
 *   - entity-{ENTITY_TYPE}
 *   - {ENTITY_TYPE}-{BUNDLE}
 *
 * Other variables:
 * - $classes_array: Array of html class attribute values. It is flattened
 *   into a string within the variable $classes.
 *
 * @see template_preprocess()
 * @see template_preprocess_entity()
 * @see template_process()
 */
?>
<div class="<?php print $classes; ?> clearfix"<?php print $attributes; ?>>
  <?php if (!$page): ?>
    <h2<?php print $title_attributes; ?>>
      <?php if ($url): ?>
        <a href="<?php print $url; ?>"><?php print $title; ?></a>
      <?php else: ?>
        <?php print $title; ?>
      <?php endif; ?>
    </h2>
  <?php endif; ?>

  <div class="content"<?php print $content_attributes; ?>>
    <?php print render($content); ?>
  </div>
</div>
mysticw/mwd
sites/all/modules/contrib/entity/theme/entity.tpl.php
PHP
gpl-2.0
1,559
/* * Real Time Clock interface for Linux * * Copyright (C) 1996 Paul Gortmaker * * This driver allows use of the real time clock (built into * nearly all computers) from user space. It exports the /dev/rtc * interface supporting various ioctl() and also the * /proc/driver/rtc pseudo-file for status information. * * The ioctls can be used to set the interrupt behaviour and * generation rate from the RTC via IRQ 8. Then the /dev/rtc * interface can be used to make use of these timer interrupts, * be they interval or alarm based. * * The /dev/rtc interface will block on reads until an interrupt * has been received. If a RTC interrupt has already happened, * it will output an unsigned long and then block. The output value * contains the interrupt status in the low byte and the number of * interrupts since the last read in the remaining high bytes. The * /dev/rtc interface can also be used with the select(2) call. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Based on other minimal char device drivers, like Alan's * watchdog, Ted's random, etc. etc. * * 1.07 Paul Gortmaker. * 1.08 Miquel van Smoorenburg: disallow certain things on the * DEC Alpha as the CMOS clock is also used for other things. * 1.09 Nikita Schmidt: epoch support and some Alpha cleanup. * 1.09a Pete Zaitcev: Sun SPARC * 1.09b Jeff Garzik: Modularize, init cleanup * 1.09c Jeff Garzik: SMP cleanup * 1.10 Paul Barton-Davis: add support for async I/O * 1.10a Andrea Arcangeli: Alpha updates * 1.10b Andrew Morton: SMP lock fix * 1.10c Cesar Barros: SMP locking fixes and cleanup * 1.10d Paul Gortmaker: delete paranoia check in rtc_exit * 1.10e Maciej W. Rozycki: Handle DECstation's year weirdness. 
* 1.11 Takashi Iwai: Kernel access functions * rtc_register/rtc_unregister/rtc_control * 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init * 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer * CONFIG_HPET_EMULATE_RTC * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly. * 1.12ac Alan Cox: Allow read access to the day of week register * 1.12b David John: Remove calls to the BKL. */ #define RTC_VERSION "1.12b" /* * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with * interrupts disabled. Due to the index-port/data-port (0x70/0x71) * design of the RTC, we don't want two different things trying to * get to it at once. (e.g. the periodic 11 min sync from * kernel/time/ntp.c vs. this driver.) */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/mc146818rtc.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/sysctl.h> #include <linux/wait.h> #include <linux/bcd.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/ratelimit.h> #include <asm/current.h> #ifdef CONFIG_X86 #include <asm/hpet.h> #endif #ifdef CONFIG_SPARC32 #include <linux/of.h> #include <linux/of_device.h> #include <asm/io.h> static unsigned long rtc_port; static int rtc_irq; #endif #ifdef CONFIG_HPET_EMULATE_RTC #undef RTC_IRQ #endif #ifdef RTC_IRQ static int rtc_has_irq = 1; #endif #ifndef CONFIG_HPET_EMULATE_RTC #define is_hpet_enabled() 0 #define hpet_set_alarm_time(hrs, min, sec) 0 #define hpet_set_periodic_freq(arg) 0 #define hpet_mask_rtc_irq_bit(arg) 0 #define hpet_set_rtc_irq_bit(arg) 0 #define hpet_rtc_timer_init() do { } while (0) #define hpet_rtc_dropped_irq() 0 #define hpet_register_irq_handler(h) ({ 0; }) #define hpet_unregister_irq_handler(h) ({ 0; }) 
#ifdef RTC_IRQ static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) { return 0; } #endif #endif /* * We sponge a minor off of the misc major. No need slurping * up another valuable major dev number for this. If you add * an ioctl, make sure you don't conflict with SPARC's RTC * ioctls. */ static struct fasync_struct *rtc_async_queue; static DECLARE_WAIT_QUEUE_HEAD(rtc_wait); #ifdef RTC_IRQ static void rtc_dropped_irq(unsigned long data); static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0); #endif static ssize_t rtc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos); static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static void rtc_get_rtc_time(struct rtc_time *rtc_tm); #ifdef RTC_IRQ static unsigned int rtc_poll(struct file *file, poll_table *wait); #endif static void get_rtc_alm_time(struct rtc_time *alm_tm); #ifdef RTC_IRQ static void set_rtc_irq_bit_locked(unsigned char bit); static void mask_rtc_irq_bit_locked(unsigned char bit); static inline void set_rtc_irq_bit(unsigned char bit) { spin_lock_irq(&rtc_lock); set_rtc_irq_bit_locked(bit); spin_unlock_irq(&rtc_lock); } static void mask_rtc_irq_bit(unsigned char bit) { spin_lock_irq(&rtc_lock); mask_rtc_irq_bit_locked(bit); spin_unlock_irq(&rtc_lock); } #endif #ifdef CONFIG_PROC_FS static int rtc_proc_open(struct inode *inode, struct file *file); #endif /* * Bits in rtc_status. (6 bits of room for future expansion) */ #define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ #define RTC_TIMER_ON 0x02 /* missed irq timer active */ /* * rtc_status is never changed by rtc_interrupt, and ioctl/open/close is * protected by the spin lock rtc_lock. 
However, ioctl can still disable the * timer in rtc_status and then with del_timer after the interrupt has read * rtc_status but before mod_timer is called, which would then reenable the * timer (but you would need to have an awful timing before you'd trip on it) */ static unsigned long rtc_status; /* bitmapped status byte. */ static unsigned long rtc_freq; /* Current periodic IRQ rate */ static unsigned long rtc_irq_data; /* our output to the world */ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */ #ifdef RTC_IRQ /* * rtc_task_lock nests inside rtc_lock. */ static DEFINE_SPINLOCK(rtc_task_lock); static rtc_task_t *rtc_callback; #endif /* * If this driver ever becomes modularised, it will be really nice * to make the epoch retain its value across module reload... */ static unsigned long epoch = 1900; /* year corresponding to 0x00 */ static const unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; /* * Returns true if a clock update is in progress */ static inline unsigned char rtc_is_updating(void) { unsigned long flags; unsigned char uip; spin_lock_irqsave(&rtc_lock, flags); uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); spin_unlock_irqrestore(&rtc_lock, flags); return uip; } #ifdef RTC_IRQ /* * A very tiny interrupt handler. It runs with IRQF_DISABLED set, * but there is possibility of conflicting with the set_rtc_mmss() * call (the rtc irq and the timer irq can easily run at the same * time in two different CPUs). So we need to serialize * accesses to the chip with the rtc_lock spinlock that each * architecture should implement in the timer code. * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.) */ static irqreturn_t rtc_interrupt(int irq, void *dev_id) { /* * Can be an alarm interrupt, update complete interrupt, * or a periodic interrupt. We store the status in the * low byte and the number of interrupts received since * the last read in the remainder of rtc_irq_data. 
*/ spin_lock(&rtc_lock); rtc_irq_data += 0x100; rtc_irq_data &= ~0xff; if (is_hpet_enabled()) { /* * In this case it is HPET RTC interrupt handler * calling us, with the interrupt information * passed as arg1, instead of irq. */ rtc_irq_data |= (unsigned long)irq & 0xF0; } else { rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); } if (rtc_status & RTC_TIMER_ON) mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); spin_unlock(&rtc_lock); /* Now do the rest of the actions */ spin_lock(&rtc_task_lock); if (rtc_callback) rtc_callback->func(rtc_callback->private_data); spin_unlock(&rtc_task_lock); wake_up_interruptible(&rtc_wait); kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); return IRQ_HANDLED; } #endif /* * sysctl-tuning infrastructure. */ static ctl_table rtc_table[] = { { .procname = "max-user-freq", .data = &rtc_max_user_freq, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static ctl_table rtc_root[] = { { .procname = "rtc", .mode = 0555, .child = rtc_table, }, { } }; static ctl_table dev_root[] = { { .procname = "dev", .mode = 0555, .child = rtc_root, }, { } }; static struct ctl_table_header *sysctl_header; static int __init init_sysctl(void) { sysctl_header = register_sysctl_table(dev_root); return 0; } static void __exit cleanup_sysctl(void) { unregister_sysctl_table(sysctl_header); } /* * Now all the various file operations that we export. */ static ssize_t rtc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { #ifndef RTC_IRQ return -EIO; #else DECLARE_WAITQUEUE(wait, current); unsigned long data; ssize_t retval; if (rtc_has_irq == 0) return -EIO; /* * Historically this function used to assume that sizeof(unsigned long) * is the same in userspace and kernelspace. This lead to problems * for configurations with multiple ABIs such a the MIPS o32 and 64 * ABIs supported on the same kernel. 
So now we support read of both * 4 and 8 bytes and assume that's the sizeof(unsigned long) in the * userspace ABI. */ if (count != sizeof(unsigned int) && count != sizeof(unsigned long)) return -EINVAL; add_wait_queue(&rtc_wait, &wait); do { /* First make it right. Then make it fast. Putting this whole * block within the parentheses of a while would be too * confusing. And no, xchg() is not the answer. */ __set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&rtc_lock); data = rtc_irq_data; rtc_irq_data = 0; spin_unlock_irq(&rtc_lock); if (data != 0) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto out; } if (signal_pending(current)) { retval = -ERESTARTSYS; goto out; } schedule(); } while (1); if (count == sizeof(unsigned int)) { retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int); } else { retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long); } if (!retval) retval = count; out: __set_current_state(TASK_RUNNING); remove_wait_queue(&rtc_wait, &wait); return retval; #endif } static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) { struct rtc_time wtime; #ifdef RTC_IRQ if (rtc_has_irq == 0) { switch (cmd) { case RTC_AIE_OFF: case RTC_AIE_ON: case RTC_PIE_OFF: case RTC_PIE_ON: case RTC_UIE_OFF: case RTC_UIE_ON: case RTC_IRQP_READ: case RTC_IRQP_SET: return -EINVAL; }; } #endif switch (cmd) { #ifdef RTC_IRQ case RTC_AIE_OFF: /* Mask alarm int. enab. bit */ { mask_rtc_irq_bit(RTC_AIE); return 0; } case RTC_AIE_ON: /* Allow alarm interrupts. */ { set_rtc_irq_bit(RTC_AIE); return 0; } case RTC_PIE_OFF: /* Mask periodic int. enab. 
bit */ { /* can be called from isr via rtc_control() */ unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); mask_rtc_irq_bit_locked(RTC_PIE); if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } spin_unlock_irqrestore(&rtc_lock, flags); return 0; } case RTC_PIE_ON: /* Allow periodic ints */ { /* can be called from isr via rtc_control() */ unsigned long flags; /* * We don't really want Joe User enabling more * than 64Hz of interrupts on a multi-user machine. */ if (!kernel && (rtc_freq > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE))) return -EACCES; spin_lock_irqsave(&rtc_lock, flags); if (!(rtc_status & RTC_TIMER_ON)) { mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); rtc_status |= RTC_TIMER_ON; } set_rtc_irq_bit_locked(RTC_PIE); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } case RTC_UIE_OFF: /* Mask ints from RTC updates. */ { mask_rtc_irq_bit(RTC_UIE); return 0; } case RTC_UIE_ON: /* Allow ints for RTC updates. */ { set_rtc_irq_bit(RTC_UIE); return 0; } #endif case RTC_ALM_READ: /* Read the present alarm time */ { /* * This returns a struct rtc_time. Reading >= 0xc0 * means "don't care" or "match all". Only the tm_hour, * tm_min, and tm_sec values are filled in. */ memset(&wtime, 0, sizeof(struct rtc_time)); get_rtc_alm_time(&wtime); break; } case RTC_ALM_SET: /* Store a time into the alarm */ { /* * This expects a struct rtc_time. Writing 0xff means * "don't care" or "match all". Only the tm_hour, * tm_min and tm_sec are used. 
*/ unsigned char hrs, min, sec; struct rtc_time alm_tm; if (copy_from_user(&alm_tm, (struct rtc_time __user *)arg, sizeof(struct rtc_time))) return -EFAULT; hrs = alm_tm.tm_hour; min = alm_tm.tm_min; sec = alm_tm.tm_sec; spin_lock_irq(&rtc_lock); if (hpet_set_alarm_time(hrs, min, sec)) { /* * Fallthru and set alarm time in CMOS too, * so that we will get proper value in RTC_ALM_READ */ } if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { if (sec < 60) sec = bin2bcd(sec); else sec = 0xff; if (min < 60) min = bin2bcd(min); else min = 0xff; if (hrs < 24) hrs = bin2bcd(hrs); else hrs = 0xff; } CMOS_WRITE(hrs, RTC_HOURS_ALARM); CMOS_WRITE(min, RTC_MINUTES_ALARM); CMOS_WRITE(sec, RTC_SECONDS_ALARM); spin_unlock_irq(&rtc_lock); return 0; } case RTC_RD_TIME: /* Read the time/date from RTC */ { memset(&wtime, 0, sizeof(struct rtc_time)); rtc_get_rtc_time(&wtime); break; } case RTC_SET_TIME: /* Set the RTC */ { struct rtc_time rtc_tm; unsigned char mon, day, hrs, min, sec, leap_yr; unsigned char save_control, save_freq_select; unsigned int yrs; #ifdef CONFIG_MACH_DECSTATION unsigned int real_yrs; #endif if (!capable(CAP_SYS_TIME)) return -EACCES; if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg, sizeof(struct rtc_time))) return -EFAULT; yrs = rtc_tm.tm_year + 1900; mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */ day = rtc_tm.tm_mday; hrs = rtc_tm.tm_hour; min = rtc_tm.tm_min; sec = rtc_tm.tm_sec; if (yrs < 1970) return -EINVAL; leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); if ((mon > 12) || (day == 0)) return -EINVAL; if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) return -EINVAL; if ((hrs >= 24) || (min >= 60) || (sec >= 60)) return -EINVAL; yrs -= epoch; if (yrs > 255) /* They are unsigned */ return -EINVAL; spin_lock_irq(&rtc_lock); #ifdef CONFIG_MACH_DECSTATION real_yrs = yrs; yrs = 72; /* * We want to keep the year set to 73 until March * for non-leap years, so that Feb, 29th is handled * correctly. 
*/ if (!leap_yr && mon < 3) { real_yrs--; yrs = 73; } #endif /* These limits and adjustments are independent of * whether the chip is in binary mode or not. */ if (yrs > 169) { spin_unlock_irq(&rtc_lock); return -EINVAL; } if (yrs >= 100) yrs -= 100; if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); day = bin2bcd(day); mon = bin2bcd(mon); yrs = bin2bcd(yrs); } save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); #endif CMOS_WRITE(yrs, RTC_YEAR); CMOS_WRITE(mon, RTC_MONTH); CMOS_WRITE(day, RTC_DAY_OF_MONTH); CMOS_WRITE(hrs, RTC_HOURS); CMOS_WRITE(min, RTC_MINUTES); CMOS_WRITE(sec, RTC_SECONDS); CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); spin_unlock_irq(&rtc_lock); return 0; } #ifdef RTC_IRQ case RTC_IRQP_READ: /* Read the periodic IRQ rate. */ { return put_user(rtc_freq, (unsigned long __user *)arg); } case RTC_IRQP_SET: /* Set periodic IRQ rate. */ { int tmp = 0; unsigned char val; /* can be called from isr via rtc_control() */ unsigned long flags; /* * The max we can do is 8192Hz. */ if ((arg < 2) || (arg > 8192)) return -EINVAL; /* * We don't really want Joe User generating more * than 64Hz of interrupts on a multi-user machine. */ if (!kernel && (arg > rtc_max_user_freq) && !capable(CAP_SYS_RESOURCE)) return -EACCES; while (arg > (1<<tmp)) tmp++; /* * Check that the input was really a power of 2. 
*/ if (arg != (1<<tmp)) return -EINVAL; rtc_freq = arg; spin_lock_irqsave(&rtc_lock, flags); if (hpet_set_periodic_freq(arg)) { spin_unlock_irqrestore(&rtc_lock, flags); return 0; } val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0; val |= (16 - tmp); CMOS_WRITE(val, RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } #endif case RTC_EPOCH_READ: /* Read the epoch. */ { return put_user(epoch, (unsigned long __user *)arg); } case RTC_EPOCH_SET: /* Set the epoch. */ { /* * There were no RTC clocks before 1900. */ if (arg < 1900) return -EINVAL; if (!capable(CAP_SYS_TIME)) return -EACCES; epoch = arg; return 0; } default: return -ENOTTY; } return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0; } static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; ret = rtc_do_ioctl(cmd, arg, 0); return ret; } /* * We enforce only one user at a time here with the open/close. * Also clear the previous interrupt data on an open, and clean * up things on a close. */ static int rtc_open(struct inode *inode, struct file *file) { spin_lock_irq(&rtc_lock); if (rtc_status & RTC_IS_OPEN) goto out_busy; rtc_status |= RTC_IS_OPEN; rtc_irq_data = 0; spin_unlock_irq(&rtc_lock); return 0; out_busy: spin_unlock_irq(&rtc_lock); return -EBUSY; } static int rtc_fasync(int fd, struct file *filp, int on) { return fasync_helper(fd, filp, on, &rtc_async_queue); } static int rtc_release(struct inode *inode, struct file *file) { #ifdef RTC_IRQ unsigned char tmp; if (rtc_has_irq == 0) goto no_irq; /* * Turn off all interrupts once the device is no longer * in use, and clear the data. 
*/ spin_lock_irq(&rtc_lock); if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { tmp = CMOS_READ(RTC_CONTROL); tmp &= ~RTC_PIE; tmp &= ~RTC_AIE; tmp &= ~RTC_UIE; CMOS_WRITE(tmp, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); } if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } spin_unlock_irq(&rtc_lock); no_irq: #endif spin_lock_irq(&rtc_lock); rtc_irq_data = 0; rtc_status &= ~RTC_IS_OPEN; spin_unlock_irq(&rtc_lock); return 0; } #ifdef RTC_IRQ static unsigned int rtc_poll(struct file *file, poll_table *wait) { unsigned long l; if (rtc_has_irq == 0) return 0; poll_wait(file, &rtc_wait, wait); spin_lock_irq(&rtc_lock); l = rtc_irq_data; spin_unlock_irq(&rtc_lock); if (l != 0) return POLLIN | POLLRDNORM; return 0; } #endif int rtc_register(rtc_task_t *task) { #ifndef RTC_IRQ return -EIO; #else if (task == NULL || task->func == NULL) return -EINVAL; spin_lock_irq(&rtc_lock); if (rtc_status & RTC_IS_OPEN) { spin_unlock_irq(&rtc_lock); return -EBUSY; } spin_lock(&rtc_task_lock); if (rtc_callback) { spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return -EBUSY; } rtc_status |= RTC_IS_OPEN; rtc_callback = task; spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return 0; #endif } EXPORT_SYMBOL(rtc_register); int rtc_unregister(rtc_task_t *task) { #ifndef RTC_IRQ return -EIO; #else unsigned char tmp; spin_lock_irq(&rtc_lock); spin_lock(&rtc_task_lock); if (rtc_callback != task) { spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return -ENXIO; } rtc_callback = NULL; /* disable controls */ if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { tmp = CMOS_READ(RTC_CONTROL); tmp &= ~RTC_PIE; tmp &= ~RTC_AIE; tmp &= ~RTC_UIE; CMOS_WRITE(tmp, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); } if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } rtc_status &= ~RTC_IS_OPEN; spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return 0; #endif } EXPORT_SYMBOL(rtc_unregister); int 
rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg) { #ifndef RTC_IRQ return -EIO; #else unsigned long flags; if (cmd != RTC_PIE_ON && cmd != RTC_PIE_OFF && cmd != RTC_IRQP_SET) return -EINVAL; spin_lock_irqsave(&rtc_task_lock, flags); if (rtc_callback != task) { spin_unlock_irqrestore(&rtc_task_lock, flags); return -ENXIO; } spin_unlock_irqrestore(&rtc_task_lock, flags); return rtc_do_ioctl(cmd, arg, 1); #endif } EXPORT_SYMBOL(rtc_control); /* * The various file operations we support. */ static const struct file_operations rtc_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = rtc_read, #ifdef RTC_IRQ .poll = rtc_poll, #endif .unlocked_ioctl = rtc_ioctl, .open = rtc_open, .release = rtc_release, .fasync = rtc_fasync, }; static struct miscdevice rtc_dev = { .minor = RTC_MINOR, .name = "rtc", .fops = &rtc_fops, }; #ifdef CONFIG_PROC_FS static const struct file_operations rtc_proc_fops = { .owner = THIS_MODULE, .open = rtc_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif static resource_size_t rtc_size; static struct resource * __init rtc_request_region(resource_size_t size) { struct resource *r; if (RTC_IOMAPPED) r = request_region(RTC_PORT(0), size, "rtc"); else r = request_mem_region(RTC_PORT(0), size, "rtc"); if (r) rtc_size = size; return r; } static void rtc_release_region(void) { if (RTC_IOMAPPED) release_region(RTC_PORT(0), rtc_size); else release_mem_region(RTC_PORT(0), rtc_size); } static int __init rtc_init(void) { #ifdef CONFIG_PROC_FS struct proc_dir_entry *ent; #endif #if defined(__alpha__) || defined(__mips__) unsigned int year, ctrl; char *guess = NULL; #endif #ifdef CONFIG_SPARC32 struct device_node *ebus_dp; struct platform_device *op; #else void *r; #ifdef RTC_IRQ irq_handler_t rtc_int_handler_ptr; #endif #endif #ifdef CONFIG_SPARC32 for_each_node_by_name(ebus_dp, "ebus") { struct device_node *dp; for (dp = ebus_dp; dp; dp = dp->sibling) { if (!strcmp(dp->name, "rtc")) { op = 
of_find_device_by_node(dp); if (op) { rtc_port = op->resource[0].start; rtc_irq = op->irqs[0]; goto found; } } } } rtc_has_irq = 0; printk(KERN_ERR "rtc_init: no PC rtc found\n"); return -EIO; found: if (!rtc_irq) { rtc_has_irq = 0; goto no_irq; } /* * XXX Interrupt pin #7 in Espresso is shared between RTC and * PCI Slot 2 INTA# (and some INTx# in Slot 1). */ if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) { rtc_has_irq = 0; printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); return -EIO; } no_irq: #else r = rtc_request_region(RTC_IO_EXTENT); /* * If we've already requested a smaller range (for example, because * PNPBIOS or ACPI told us how the device is configured), the request * above might fail because it's too big. * * If so, request just the range we actually use. */ if (!r) r = rtc_request_region(RTC_IO_EXTENT_USED); if (!r) { #ifdef RTC_IRQ rtc_has_irq = 0; #endif printk(KERN_ERR "rtc: I/O resource %lx is not free.\n", (long)(RTC_PORT(0))); return -EIO; } #ifdef RTC_IRQ if (is_hpet_enabled()) { int err; rtc_int_handler_ptr = hpet_rtc_interrupt; err = hpet_register_irq_handler(rtc_interrupt); if (err != 0) { printk(KERN_WARNING "hpet_register_irq_handler failed " "in rtc_init()."); return err; } } else { rtc_int_handler_ptr = rtc_interrupt; } if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) { /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ rtc_has_irq = 0; printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); rtc_release_region(); return -EIO; } hpet_rtc_timer_init(); #endif #endif /* CONFIG_SPARC32 vs. 
others */ if (misc_register(&rtc_dev)) { #ifdef RTC_IRQ free_irq(RTC_IRQ, NULL); hpet_unregister_irq_handler(rtc_interrupt); rtc_has_irq = 0; #endif rtc_release_region(); return -ENODEV; } #ifdef CONFIG_PROC_FS ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops); if (!ent) printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); #endif #if defined(__alpha__) || defined(__mips__) rtc_freq = HZ; /* Each operating system on an Alpha uses its own epoch. Let's try to guess which one we are using now. */ if (rtc_is_updating() != 0) msleep(20); spin_lock_irq(&rtc_lock); year = CMOS_READ(RTC_YEAR); ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irq(&rtc_lock); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) year = bcd2bin(year); /* This should never happen... */ if (year < 20) { epoch = 2000; guess = "SRM (post-2000)"; } else if (year >= 20 && year < 48) { epoch = 1980; guess = "ARC console"; } else if (year >= 48 && year < 72) { epoch = 1952; guess = "Digital UNIX"; #if defined(__mips__) } else if (year >= 72 && year < 74) { epoch = 2000; guess = "Digital DECstation"; #else } else if (year >= 70) { epoch = 1900; guess = "Standard PC (1900)"; #endif } if (guess) printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch); #endif #ifdef RTC_IRQ if (rtc_has_irq == 0) goto no_irq2; spin_lock_irq(&rtc_lock); rtc_freq = 1024; if (!hpet_set_periodic_freq(rtc_freq)) { /* * Initialize periodic frequency to CMOS reset default, * which is 1024Hz */ CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT); } spin_unlock_irq(&rtc_lock); no_irq2: #endif (void) init_sysctl(); printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n"); return 0; } static void __exit rtc_exit(void) { cleanup_sysctl(); remove_proc_entry("driver/rtc", NULL); misc_deregister(&rtc_dev); #ifdef CONFIG_SPARC32 if (rtc_has_irq) free_irq(rtc_irq, &rtc_port); #else rtc_release_region(); #ifdef RTC_IRQ if (rtc_has_irq) { free_irq(RTC_IRQ, NULL); 
hpet_unregister_irq_handler(hpet_rtc_interrupt); } #endif #endif /* CONFIG_SPARC32 */ } module_init(rtc_init); module_exit(rtc_exit); #ifdef RTC_IRQ /* * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. * (usually during an IDE disk interrupt, with IRQ unmasking off) * Since the interrupt handler doesn't get called, the IRQ status * byte doesn't get read, and the RTC stops generating interrupts. * A timer is set, and will call this function if/when that happens. * To get it out of this stalled state, we just read the status. * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost. * (You *really* shouldn't be trying to use a non-realtime system * for something that requires a steady > 1KHz signal anyways.) */ static void rtc_dropped_irq(unsigned long data) { unsigned long freq; spin_lock_irq(&rtc_lock); if (hpet_rtc_dropped_irq()) { spin_unlock_irq(&rtc_lock); return; } /* Just in case someone disabled the timer from behind our back... */ if (rtc_status & RTC_TIMER_ON) mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); rtc_irq_data += ((rtc_freq/HZ)<<8); rtc_irq_data &= ~0xff; rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */ freq = rtc_freq; spin_unlock_irq(&rtc_lock); printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq); /* Now we have new data */ wake_up_interruptible(&rtc_wait); kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); } #endif #ifdef CONFIG_PROC_FS /* * Info exported via "/proc/driver/rtc". */ static int rtc_proc_show(struct seq_file *seq, void *v) { #define YN(bit) ((ctrl & bit) ? "yes" : "no") #define NY(bit) ((ctrl & bit) ? "no" : "yes") struct rtc_time tm; unsigned char batt, ctrl; unsigned long freq; spin_lock_irq(&rtc_lock); batt = CMOS_READ(RTC_VALID) & RTC_VRT; ctrl = CMOS_READ(RTC_CONTROL); freq = rtc_freq; spin_unlock_irq(&rtc_lock); rtc_get_rtc_time(&tm); /* * There is no way to tell if the luser has the RTC set for local * time or for Universal Standard Time (GMT). 
Probably local though. */ seq_printf(seq, "rtc_time\t: %02d:%02d:%02d\n" "rtc_date\t: %04d-%02d-%02d\n" "rtc_epoch\t: %04lu\n", tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch); get_rtc_alm_time(&tm); /* * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will * match any value for that particular field. Values that are * greater than a valid time, but less than 0xc0 shouldn't appear. */ seq_puts(seq, "alarm\t\t: "); if (tm.tm_hour <= 24) seq_printf(seq, "%02d:", tm.tm_hour); else seq_puts(seq, "**:"); if (tm.tm_min <= 59) seq_printf(seq, "%02d:", tm.tm_min); else seq_puts(seq, "**:"); if (tm.tm_sec <= 59) seq_printf(seq, "%02d\n", tm.tm_sec); else seq_puts(seq, "**\n"); seq_printf(seq, "DST_enable\t: %s\n" "BCD\t\t: %s\n" "24hr\t\t: %s\n" "square_wave\t: %s\n" "alarm_IRQ\t: %s\n" "update_IRQ\t: %s\n" "periodic_IRQ\t: %s\n" "periodic_freq\t: %ld\n" "batt_status\t: %s\n", YN(RTC_DST_EN), NY(RTC_DM_BINARY), YN(RTC_24H), YN(RTC_SQWE), YN(RTC_AIE), YN(RTC_UIE), YN(RTC_PIE), freq, batt ? "okay" : "dead"); return 0; #undef YN #undef NY } static int rtc_proc_open(struct inode *inode, struct file *file) { return single_open(file, rtc_proc_show, NULL); } #endif static void rtc_get_rtc_time(struct rtc_time *rtc_tm) { unsigned long uip_watchdog = jiffies, flags; unsigned char ctrl; #ifdef CONFIG_MACH_DECSTATION unsigned int real_year; #endif /* * read RTC once any update in progress is done. The update * can take just over 2ms. We wait 20ms. There is no need to * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. * If you need to know *exactly* when a second has started, enable * periodic update complete interrupts, (via ioctl) and then * immediately read /dev/rtc which will block until you get the IRQ. * Once the read clears, read the RTC time (again via ioctl). Easy. 
*/ while (rtc_is_updating() != 0 && time_before(jiffies, uip_watchdog + 2*HZ/100)) cpu_relax(); /* * Only the values that we read from the RTC are set. We leave * tm_wday, tm_yday and tm_isdst untouched. Note that while the * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is * only updated by the RTC when initially set to a non-zero value. */ spin_lock_irqsave(&rtc_lock, flags); rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS); rtc_tm->tm_min = CMOS_READ(RTC_MINUTES); rtc_tm->tm_hour = CMOS_READ(RTC_HOURS); rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); rtc_tm->tm_mon = CMOS_READ(RTC_MONTH); rtc_tm->tm_year = CMOS_READ(RTC_YEAR); /* Only set from 2.6.16 onwards */ rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK); #ifdef CONFIG_MACH_DECSTATION real_year = CMOS_READ(RTC_DEC_YEAR); #endif ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irqrestore(&rtc_lock, flags); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday); } #ifdef CONFIG_MACH_DECSTATION rtc_tm->tm_year += real_year - 72; #endif /* * Account for differences between how the RTC uses the values * and how they are defined in a struct rtc_time; */ rtc_tm->tm_year += epoch - 1900; if (rtc_tm->tm_year <= 69) rtc_tm->tm_year += 100; rtc_tm->tm_mon--; } static void get_rtc_alm_time(struct rtc_time *alm_tm) { unsigned char ctrl; /* * Only the values that we read from the RTC are set. That * means only tm_hour, tm_min, and tm_sec. 
*/ spin_lock_irq(&rtc_lock); alm_tm->tm_sec = CMOS_READ(RTC_SECONDS_ALARM); alm_tm->tm_min = CMOS_READ(RTC_MINUTES_ALARM); alm_tm->tm_hour = CMOS_READ(RTC_HOURS_ALARM); ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irq(&rtc_lock); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec); alm_tm->tm_min = bcd2bin(alm_tm->tm_min); alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour); } } #ifdef RTC_IRQ /* * Used to disable/enable interrupts for any one of UIE, AIE, PIE. * Rumour has it that if you frob the interrupt enable/disable * bits in RTC_CONTROL, you should read RTC_INTR_FLAGS, to * ensure you actually start getting interrupts. Probably for * compatibility with older/broken chipset RTC implementations. * We also clear out any old irq data after an ioctl() that * meddles with the interrupt enable/disable bits. */ static void mask_rtc_irq_bit_locked(unsigned char bit) { unsigned char val; if (hpet_mask_rtc_irq_bit(bit)) return; val = CMOS_READ(RTC_CONTROL); val &= ~bit; CMOS_WRITE(val, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); rtc_irq_data = 0; } static void set_rtc_irq_bit_locked(unsigned char bit) { unsigned char val; if (hpet_set_rtc_irq_bit(bit)) return; val = CMOS_READ(RTC_CONTROL); val |= bit; CMOS_WRITE(val, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); rtc_irq_data = 0; } #endif MODULE_AUTHOR("Paul Gortmaker"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(RTC_MINOR);
nvertigo/AK-OnePone
drivers/char/rtc.c
C
gpl-2.0
34,303
/* * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> * * Licensed under the terms of the GNU GPL License version 2. * * Library for common functions for Intel SpeedStep v.1 and v.2 support * * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <asm/msr.h> #include <asm/tsc.h> #include "speedstep-lib.h" #define PFX "speedstep-lib: " #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK static int relaxed_check; #else #define relaxed_check 0 #endif /********************************************************************* * GET PROCESSOR CORE SPEED IN KHZ * *********************************************************************/ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) { /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ struct { unsigned int ratio; /* Frequency Multiplier (x10) */ u8 bitmap; /* power on configuration bits [27, 25:22] (in MSR 0x2a) */ } msr_decode_mult[] = { { 30, 0x01 }, { 35, 0x05 }, { 40, 0x02 }, { 45, 0x06 }, { 50, 0x00 }, { 55, 0x04 }, { 60, 0x0b }, { 65, 0x0f }, { 70, 0x09 }, { 75, 0x0d }, { 80, 0x0a }, { 85, 0x26 }, { 90, 0x20 }, { 100, 0x2b }, { 0, 0xff } /* error or unknown value */ }; /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ struct { unsigned int value; /* Front Side Bus speed in MHz */ u8 bitmap; /* power on configuration bits [18: 19] (in MSR 0x2a) */ } msr_decode_fsb[] = { { 66, 0x0 }, { 100, 0x2 }, { 133, 0x1 }, { 0, 0xff} }; u32 msr_lo, msr_tmp; int i = 0, j = 0; /* read MSR 0x2a - we only need the low 32 bits */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); msr_tmp = msr_lo; /* decode the FSB */ msr_tmp &= 0x00c0000; msr_tmp >>= 18; while (msr_tmp != msr_decode_fsb[i].bitmap) { if (msr_decode_fsb[i].bitmap == 0xff) return 0; i++; } /* decode the 
multiplier */ if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { pr_debug("workaround for early PIIIs\n"); msr_lo &= 0x03c00000; } else msr_lo &= 0x0bc00000; msr_lo >>= 22; while (msr_lo != msr_decode_mult[j].bitmap) { if (msr_decode_mult[j].bitmap == 0xff) return 0; j++; } pr_debug("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; } static unsigned int pentiumM_get_frequency(void) { u32 msr_lo, msr_tmp; rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); /* see table B-2 of 24547212.pdf */ if (msr_lo & 0x00040000) { printk(KERN_DEBUG PFX "PM - invalid FSB: 0x%x 0x%x\n", msr_lo, msr_tmp); return 0; } msr_tmp = (msr_lo >> 22) & 0x1f; pr_debug("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000)); return msr_tmp * 100 * 1000; } static unsigned int pentium_core_get_frequency(void) { u32 fsb = 0; u32 msr_lo, msr_tmp; int ret; rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp); /* see table B-2 of 25366920.pdf */ switch (msr_lo & 0x07) { case 5: fsb = 100000; break; case 1: fsb = 133333; break; case 3: fsb = 166667; break; case 2: fsb = 200000; break; case 0: fsb = 266667; break; case 4: fsb = 333333; break; default: printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value"); } rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); msr_tmp = (msr_lo >> 22) & 0x1f; pr_debug("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb)); ret = (msr_tmp * fsb); return ret; } static unsigned int pentium4_get_frequency(void) { struct cpuinfo_x86 *c = &boot_cpu_data; u32 msr_lo, msr_hi, mult; unsigned int fsb = 0; unsigned int ret; u8 fsb_code; /* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency * to System Bus Frequency Ratio Field in the Processor Frequency * Configuration Register of the MSR. 
Therefore the current * frequency cannot be calculated and has to be measured. */ if (c->x86_model < 2) return cpu_khz; rdmsr(0x2c, msr_lo, msr_hi); pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); /* decode the FSB: see IA-32 Intel (C) Architecture Software * Developer's Manual, Volume 3: System Prgramming Guide, * revision #12 in Table B-1: MSRs in the Pentium 4 and * Intel Xeon Processors, on page B-4 and B-5. */ fsb_code = (msr_lo >> 16) & 0x7; switch (fsb_code) { case 0: fsb = 100 * 1000; break; case 1: fsb = 13333 * 10; break; case 2: fsb = 200 * 1000; break; } if (!fsb) printk(KERN_DEBUG PFX "couldn't detect FSB speed. " "Please send an e-mail to <linux@brodo.de>\n"); /* Multiplier. */ mult = msr_lo >> 24; pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult)); ret = (fsb * mult); return ret; } /* Warning: may get called from smp_call_function_single. */ unsigned int speedstep_get_frequency(enum speedstep_processor processor) { switch (processor) { case SPEEDSTEP_CPU_PCORE: return pentium_core_get_frequency(); case SPEEDSTEP_CPU_PM: return pentiumM_get_frequency(); case SPEEDSTEP_CPU_P4D: case SPEEDSTEP_CPU_P4M: return pentium4_get_frequency(); case SPEEDSTEP_CPU_PIII_T: case SPEEDSTEP_CPU_PIII_C: case SPEEDSTEP_CPU_PIII_C_EARLY: return pentium3_get_frequency(processor); default: return 0; }; return 0; } EXPORT_SYMBOL_GPL(speedstep_get_frequency); /********************************************************************* * DETECT SPEEDSTEP-CAPABLE PROCESSOR * *********************************************************************/ /* Keep in sync with the x86_cpu_id tables in the different modules */ unsigned int speedstep_detect_processor(void) { struct cpuinfo_x86 *c = &cpu_data(0); u32 ebx, msr_lo, msr_hi; pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model); if ((c->x86_vendor != X86_VENDOR_INTEL) || ((c->x86 != 6) && (c->x86 != 0xF))) return 0; if (c->x86 == 0xF) { /* Intel Mobile Pentium 4-M * or Intel Mobile 
Pentium 4 with 533 MHz FSB */ if (c->x86_model != 2) return 0; ebx = cpuid_ebx(0x00000001); ebx &= 0x000000FF; pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); switch (c->x86_mask) { case 4: /* * B-stepping [M-P4-M] * sample has ebx = 0x0f, production has 0x0e. */ if ((ebx == 0x0e) || (ebx == 0x0f)) return SPEEDSTEP_CPU_P4M; break; case 7: /* * C-stepping [M-P4-M] * needs to have ebx=0x0e, else it's a celeron: * cf. 25130917.pdf / page 7, footnote 5 even * though 25072120.pdf / page 7 doesn't say * samples are only of B-stepping... */ if (ebx == 0x0e) return SPEEDSTEP_CPU_P4M; break; case 9: /* * D-stepping [M-P4-M or M-P4/533] * * this is totally strange: CPUID 0x0F29 is * used by M-P4-M, M-P4/533 and(!) Celeron CPUs. * The latter need to be sorted out as they don't * support speedstep. * Celerons with CPUID 0x0F29 may have either * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything * specific. * M-P4-Ms may have either ebx=0xe or 0xf [see above] * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf] * also, M-P4M HTs have ebx=0x8, too * For now, they are distinguished by the model_id * string */ if ((ebx == 0x0e) || (strstr(c->x86_model_id, "Mobile Intel(R) Pentium(R) 4") != NULL)) return SPEEDSTEP_CPU_P4M; break; default: break; } return 0; } switch (c->x86_model) { case 0x0B: /* Intel PIII [Tualatin] */ /* cpuid_ebx(1) is 0x04 for desktop PIII, * 0x06 for mobile PIII-M */ ebx = cpuid_ebx(0x00000001); pr_debug("ebx is %x\n", ebx); ebx &= 0x000000FF; if (ebx != 0x06) return 0; /* So far all PIII-M processors support SpeedStep. See * Intel's 24540640.pdf of June 2003 */ return SPEEDSTEP_CPU_PIII_T; case 0x08: /* Intel PIII [Coppermine] */ /* all mobile PIII Coppermines have FSB 100 MHz * ==> sort out a few desktop PIIIs. 
*/ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); msr_lo &= 0x00c0000; if (msr_lo != 0x0080000) return 0; /* * If the processor is a mobile version, * platform ID has bit 50 set * it has SpeedStep technology if either * bit 56 or 57 is set */ rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi); if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) { if (c->x86_mask == 0x01) { pr_debug("early PIII version\n"); return SPEEDSTEP_CPU_PIII_C_EARLY; } else return SPEEDSTEP_CPU_PIII_C; } default: return 0; } } EXPORT_SYMBOL_GPL(speedstep_detect_processor); /********************************************************************* * DETECT SPEEDSTEP SPEEDS * *********************************************************************/ unsigned int speedstep_get_freqs(enum speedstep_processor processor, unsigned int *low_speed, unsigned int *high_speed, unsigned int *transition_latency, void (*set_state) (unsigned int state)) { unsigned int prev_speed; unsigned int ret = 0; unsigned long flags; struct timeval tv1, tv2; if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) return -EINVAL; pr_debug("trying to determine both speeds\n"); /* get current speed */ prev_speed = speedstep_get_frequency(processor); if (!prev_speed) return -EIO; pr_debug("previous speed is %u\n", prev_speed); local_irq_save(flags); /* switch to low state */ set_state(SPEEDSTEP_LOW); *low_speed = speedstep_get_frequency(processor); if (!*low_speed) { ret = -EIO; goto out; } pr_debug("low speed is %u\n", *low_speed); /* start latency measurement */ if (transition_latency) do_gettimeofday(&tv1); /* switch to high state */ set_state(SPEEDSTEP_HIGH); /* end latency measurement */ if (transition_latency) do_gettimeofday(&tv2); *high_speed = speedstep_get_frequency(processor); if (!*high_speed) { ret = -EIO; goto out; } pr_debug("high speed is 
%u\n", *high_speed); if (*low_speed == *high_speed) { ret = -ENODEV; goto out; } /* switch to previous state, if necessary */ if (*high_speed != prev_speed) set_state(SPEEDSTEP_LOW); if (transition_latency) { *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + tv2.tv_usec - tv1.tv_usec; pr_debug("transition latency is %u uSec\n", *transition_latency); /* convert uSec to nSec and add 20% for safety reasons */ *transition_latency *= 1200; /* check if the latency measurement is too high or too low * and set it to a safe value (500uSec) in that case */ if (*transition_latency > 10000000 || *transition_latency < 50000) { printk(KERN_WARNING PFX "frequency transition " "measured seems out of range (%u " "nSec), falling back to a safe one of" "%u nSec.\n", *transition_latency, 500000); *transition_latency = 500000; } } out: local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(speedstep_get_freqs); #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK module_param(relaxed_check, int, 0444); MODULE_PARM_DESC(relaxed_check, "Don't do all checks for speedstep capability."); #endif MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); MODULE_LICENSE("GPL");
GrandPrime/android_kernel_samsung_fortuna
drivers/cpufreq/speedstep-lib.c
C
gpl-2.0
11,866
/* * docproc is a simple preprocessor for the template files * used as placeholders for the kernel internal documentation. * docproc is used for documentation-frontend and * dependency-generator. * The two usages have in common that they require * some knowledge of the .tmpl syntax, therefore they * are kept together. * * documentation-frontend * Scans the template file and call kernel-doc for * all occurrences of ![EIF]file * Beforehand each referenced file is scanned for * any symbols that are exported via these macros: * EXPORT_SYMBOL(), EXPORT_SYMBOL_GPL(), & * EXPORT_SYMBOL_GPL_FUTURE() * This is used to create proper -function and * -nofunction arguments in calls to kernel-doc. * Usage: docproc doc file.tmpl * * dependency-generator: * Scans the template file and list all files * referenced in a format recognized by make. * Usage: docproc depend file.tmpl * Writes dependency information to stdout * in the following format: * file.tmpl src.c src2.c * The filenames are obtained from the following constructs: * !Efilename * !Ifilename * !Dfilename * !Ffilename * !Pfilename * */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <unistd.h> #include <limits.h> #include <errno.h> #include <sys/types.h> #include <sys/wait.h> /* exitstatus is used to keep track of any failing calls to kernel-doc, * but execution continues. 
*/ int exitstatus = 0; typedef void DFL(char *); DFL *defaultline; typedef void FILEONLY(char * file); FILEONLY *internalfunctions; FILEONLY *externalfunctions; FILEONLY *symbolsonly; FILEONLY *findall; typedef void FILELINE(char * file, char * line); FILELINE * singlefunctions; FILELINE * entity_system; FILELINE * docsection; #define MAXLINESZ 2048 #define MAXFILES 250 #define KERNELDOCPATH "scripts/" #define KERNELDOC "kernel-doc" #define DOCBOOK "-docbook" #define LIST "-list" #define FUNCTION "-function" #define NOFUNCTION "-nofunction" #define NODOCSECTIONS "-no-doc-sections" static char *srctree, *kernsrctree; static char **all_list = NULL; static int all_list_len = 0; static void consume_symbol(const char *sym) { int i; for (i = 0; i < all_list_len; i++) { if (!all_list[i]) continue; if (strcmp(sym, all_list[i])) continue; all_list[i] = NULL; break; } } static void usage (void) { fprintf(stderr, "Usage: docproc {doc|depend} file\n"); fprintf(stderr, "Input is read from file.tmpl. 
Output is sent to stdout\n"); fprintf(stderr, "doc: frontend when generating kernel documentation\n"); fprintf(stderr, "depend: generate list of files referenced within file\n"); fprintf(stderr, "Environment variable SRCTREE: absolute path to sources.\n"); fprintf(stderr, " KBUILD_SRC: absolute path to kernel source tree.\n"); } /* * Execute kernel-doc with parameters given in svec */ static void exec_kernel_doc(char **svec) { pid_t pid; int ret; char real_filename[PATH_MAX + 1]; /* Make sure output generated so far are flushed */ fflush(stdout); switch (pid=fork()) { case -1: perror("fork"); exit(1); case 0: memset(real_filename, 0, sizeof(real_filename)); strncat(real_filename, kernsrctree, PATH_MAX); strncat(real_filename, "/" KERNELDOCPATH KERNELDOC, PATH_MAX - strlen(real_filename)); execvp(real_filename, svec); fprintf(stderr, "exec "); perror(real_filename); exit(1); default: waitpid(pid, &ret ,0); } if (WIFEXITED(ret)) exitstatus |= WEXITSTATUS(ret); else exitstatus = 0xff; } /* Types used to create list of all exported symbols in a number of files */ struct symbols { char *name; }; struct symfile { char *filename; struct symbols *symbollist; int symbolcnt; }; struct symfile symfilelist[MAXFILES]; int symfilecnt = 0; static void add_new_symbol(struct symfile *sym, char * symname) { sym->symbollist = realloc(sym->symbollist, (sym->symbolcnt + 1) * sizeof(char *)); sym->symbollist[sym->symbolcnt++].name = strdup(symname); } /* Add a filename to the list */ static struct symfile * add_new_file(char * filename) { symfilelist[symfilecnt++].filename = strdup(filename); return &symfilelist[symfilecnt - 1]; } /* Check if file already are present in the list */ static struct symfile * filename_exist(char * filename) { int i; for (i=0; i < symfilecnt; i++) if (strcmp(symfilelist[i].filename, filename) == 0) return &symfilelist[i]; return NULL; } /* * List all files referenced within the template file. * Files are separated by tabs. 
*/ static void adddep(char * file) { printf("\t%s", file); } static void adddep2(char * file, char * line) { line = line; adddep(file); } static void noaction(char * line) { line = line; } static void noaction2(char * file, char * line) { file = file; line = line; } /* Echo the line without further action */ static void printline(char * line) { printf("%s", line); } /* * Find all symbols in filename that are exported with EXPORT_SYMBOL & * EXPORT_SYMBOL_GPL (& EXPORT_SYMBOL_GPL_FUTURE implicitly). * All symbols located are stored in symfilelist. */ static void find_export_symbols(char * filename) { FILE * fp; struct symfile *sym; char line[MAXLINESZ]; if (filename_exist(filename) == NULL) { char real_filename[PATH_MAX + 1]; memset(real_filename, 0, sizeof(real_filename)); strncat(real_filename, srctree, PATH_MAX); strncat(real_filename, "/", PATH_MAX - strlen(real_filename)); strncat(real_filename, filename, PATH_MAX - strlen(real_filename)); sym = add_new_file(filename); fp = fopen(real_filename, "r"); if (fp == NULL) { fprintf(stderr, "docproc: "); perror(real_filename); exit(1); } while (fgets(line, MAXLINESZ, fp)) { char *p; char *e; if (((p = strstr(line, "EXPORT_SYMBOL_GPL")) != NULL) || ((p = strstr(line, "EXPORT_SYMBOL")) != NULL)) { /* Skip EXPORT_SYMBOL{_GPL} */ while (isalnum(*p) || *p == '_') p++; /* Remove parentheses & additional whitespace */ while (isspace(*p)) p++; if (*p != '(') continue; /* Syntax error? */ else p++; while (isspace(*p)) p++; e = p; while (isalnum(*e) || *e == '_') e++; *e = '\0'; add_new_symbol(sym, p); } } fclose(fp); } } /* * Document all external or internal functions in a file. * Call kernel-doc with following parameters: * kernel-doc -docbook -nofunction function_name1 filename * Function names are obtained from all the src files * by find_export_symbols. 
* intfunc uses -nofunction * extfunc uses -function */ static void docfunctions(char * filename, char * type) { int i,j; int symcnt = 0; int idx = 0; char **vec; for (i=0; i <= symfilecnt; i++) symcnt += symfilelist[i].symbolcnt; vec = malloc((2 + 2 * symcnt + 3) * sizeof(char *)); if (vec == NULL) { perror("docproc: "); exit(1); } vec[idx++] = KERNELDOC; vec[idx++] = DOCBOOK; vec[idx++] = NODOCSECTIONS; for (i=0; i < symfilecnt; i++) { struct symfile * sym = &symfilelist[i]; for (j=0; j < sym->symbolcnt; j++) { vec[idx++] = type; consume_symbol(sym->symbollist[j].name); vec[idx++] = sym->symbollist[j].name; } } vec[idx++] = filename; vec[idx] = NULL; printf("<!-- %s -->\n", filename); exec_kernel_doc(vec); fflush(stdout); free(vec); } static void intfunc(char * filename) { docfunctions(filename, NOFUNCTION); } static void extfunc(char * filename) { docfunctions(filename, FUNCTION); } /* * Document specific function(s) in a file. * Call kernel-doc with the following parameters: * kernel-doc -docbook -function function1 [-function function2] */ static void singfunc(char * filename, char * line) { char *vec[200]; /* Enough for specific functions */ int i, idx = 0; int startofsym = 1; vec[idx++] = KERNELDOC; vec[idx++] = DOCBOOK; /* Split line up in individual parameters preceded by FUNCTION */ for (i=0; line[i]; i++) { if (isspace(line[i])) { line[i] = '\0'; startofsym = 1; continue; } if (startofsym) { startofsym = 0; vec[idx++] = FUNCTION; vec[idx++] = &line[i]; } } for (i = 0; i < idx; i++) { if (strcmp(vec[i], FUNCTION)) continue; consume_symbol(vec[i + 1]); } vec[idx++] = filename; vec[idx] = NULL; exec_kernel_doc(vec); } /* * Insert specific documentation section from a file. 
* Call kernel-doc with the following parameters: * kernel-doc -docbook -function "doc section" filename */ static void docsect(char *filename, char *line) { char *vec[6]; /* kerneldoc -docbook -function "section" file NULL */ char *s; for (s = line; *s; s++) if (*s == '\n') *s = '\0'; if (asprintf(&s, "DOC: %s", line) < 0) { perror("asprintf"); exit(1); } consume_symbol(s); free(s); vec[0] = KERNELDOC; vec[1] = DOCBOOK; vec[2] = FUNCTION; vec[3] = line; vec[4] = filename; vec[5] = NULL; exec_kernel_doc(vec); } static void find_all_symbols(char *filename) { char *vec[4]; /* kerneldoc -list file NULL */ pid_t pid; int ret, i, count, start; char real_filename[PATH_MAX + 1]; int pipefd[2]; char *data, *str; size_t data_len = 0; vec[0] = KERNELDOC; vec[1] = LIST; vec[2] = filename; vec[3] = NULL; if (pipe(pipefd)) { perror("pipe"); exit(1); } switch (pid=fork()) { case -1: perror("fork"); exit(1); case 0: close(pipefd[0]); dup2(pipefd[1], 1); memset(real_filename, 0, sizeof(real_filename)); strncat(real_filename, kernsrctree, PATH_MAX); strncat(real_filename, "/" KERNELDOCPATH KERNELDOC, PATH_MAX - strlen(real_filename)); execvp(real_filename, vec); fprintf(stderr, "exec "); perror(real_filename); exit(1); default: close(pipefd[1]); data = malloc(4096); do { while ((ret = read(pipefd[0], data + data_len, 4096)) > 0) { data_len += ret; data = realloc(data, data_len + 4096); } } while (ret == -EAGAIN); if (ret != 0) { perror("read"); exit(1); } waitpid(pid, &ret ,0); } if (WIFEXITED(ret)) exitstatus |= WEXITSTATUS(ret); else exitstatus = 0xff; count = 0; /* poor man's strtok, but with counting */ for (i = 0; i < data_len; i++) { if (data[i] == '\n') { count++; data[i] = '\0'; } } start = all_list_len; all_list_len += count; all_list = realloc(all_list, sizeof(char *) * all_list_len); str = data; for (i = 0; i < data_len && start != all_list_len; i++) { if (data[i] == '\0') { all_list[start] = str; str = data + i + 1; start++; } } } /* * Parse file, calling action specific 
functions for: * 1) Lines containing !E * 2) Lines containing !I * 3) Lines containing !D * 4) Lines containing !F * 5) Lines containing !P * 6) Lines containing !C * 7) Default lines - lines not matching the above */ static void parse_file(FILE *infile) { char line[MAXLINESZ]; char * s; while (fgets(line, MAXLINESZ, infile)) { if (line[0] == '!') { s = line + 2; switch (line[1]) { case 'E': while (*s && !isspace(*s)) s++; *s = '\0'; externalfunctions(line+2); break; case 'I': while (*s && !isspace(*s)) s++; *s = '\0'; internalfunctions(line+2); break; case 'D': while (*s && !isspace(*s)) s++; *s = '\0'; symbolsonly(line+2); break; case 'F': /* filename */ while (*s && !isspace(*s)) s++; *s++ = '\0'; /* function names */ while (isspace(*s)) s++; singlefunctions(line +2, s); break; case 'P': /* filename */ while (*s && !isspace(*s)) s++; *s++ = '\0'; /* DOC: section name */ while (isspace(*s)) s++; docsection(line + 2, s); break; case 'C': while (*s && !isspace(*s)) s++; *s = '\0'; if (findall) findall(line+2); break; default: defaultline(line); } } else { defaultline(line); } } fflush(stdout); } int main(int argc, char *argv[]) { FILE * infile; int i; srctree = getenv("SRCTREE"); if (!srctree) srctree = getcwd(NULL, 0); kernsrctree = getenv("KBUILD_SRC"); if (!kernsrctree || !*kernsrctree) kernsrctree = srctree; if (argc != 3) { usage(); exit(1); } /* Open file, exit on error */ infile = fopen(argv[2], "r"); if (infile == NULL) { fprintf(stderr, "docproc: "); perror(argv[2]); exit(2); } if (strcmp("doc", argv[1]) == 0) { /* Need to do this in two passes. * First pass is used to collect all symbols exported * in the various files; * Second pass generate the documentation. 
* This is required because some functions are declared * and exported in different files :-(( */ /* Collect symbols */ defaultline = noaction; internalfunctions = find_export_symbols; externalfunctions = find_export_symbols; symbolsonly = find_export_symbols; singlefunctions = noaction2; docsection = noaction2; findall = find_all_symbols; parse_file(infile); /* Rewind to start from beginning of file again */ fseek(infile, 0, SEEK_SET); defaultline = printline; internalfunctions = intfunc; externalfunctions = extfunc; symbolsonly = printline; singlefunctions = singfunc; docsection = docsect; findall = NULL; parse_file(infile); for (i = 0; i < all_list_len; i++) { if (!all_list[i]) continue; fprintf(stderr, "Warning: didn't use docs for %s\n", all_list[i]); } } else if (strcmp("depend", argv[1]) == 0) { /* Create first part of dependency chain * file.tmpl */ printf("%s\t", argv[2]); defaultline = noaction; internalfunctions = adddep; externalfunctions = adddep; symbolsonly = adddep; singlefunctions = adddep2; docsection = adddep2; findall = adddep; parse_file(infile); printf("\n"); } else { fprintf(stderr, "Unknown option: %s\n", argv[1]); exit(1); } fclose(infile); fflush(stdout); return exitstatus; }
jekkos/android_kernel_htc_msm8960
scripts/docproc.c
C
gpl-2.0
14,478
/* * linux/drivers/char/ppdev.c * * This is the code behind /dev/parport* -- it allows a user-space * application to use the parport subsystem. * * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * A /dev/parportx device node represents an arbitrary device * on port 'x'. The following operations are possible: * * open do nothing, set up default IEEE 1284 protocol to be COMPAT * close release port and unregister device (if necessary) * ioctl * EXCL register device exclusively (may fail) * CLAIM (register device first time) parport_claim_or_block * RELEASE parport_release * SETMODE set the IEEE 1284 protocol to use for read/write * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be * confused with ioctl(fd, SETPHASER, &stun). ;-) * DATADIR data_forward / data_reverse * WDATA write_data * RDATA read_data * WCONTROL write_control * RCONTROL read_control * FCONTROL frob_control * RSTATUS read_status * NEGOT parport_negotiate * YIELD parport_yield_blocking * WCTLONIRQ on interrupt, set control lines * CLRIRQ clear (and return) interrupt count * SETTIME sets device timeout (struct timeval) * GETTIME gets device timeout (struct timeval) * GETMODES gets hardware supported modes (unsigned int) * GETMODE gets the current IEEE1284 mode * GETPHASE gets the current IEEE1284 phase * GETFLAGS gets current (user-visible) flags * SETFLAGS sets current (user-visible) flags * read/write read or write in current IEEE 1284 protocol * select wait for interrupt (in readfds) * * Changes: * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999. 
* * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25 * - On error, copy_from_user and copy_to_user do not return -EFAULT, * They return the positive number of bytes *not* copied due to address * space errors. * * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001. * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001 */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> #include <linux/ctype.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/major.h> #include <linux/ppdev.h> #include <linux/mutex.h> #include <linux/uaccess.h> #define PP_VERSION "ppdev: user-space parallel port driver" #define CHRDEV "ppdev" struct pp_struct { struct pardevice * pdev; wait_queue_head_t irq_wait; atomic_t irqc; unsigned int flags; int irqresponse; unsigned char irqctl; struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; }; /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) /* Other constants */ #define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */ #define PP_BUFFER_SIZE 1024 #define PARDEVICE_MAX 8 /* ROUND_UP macro from fs/select.c */ #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) static DEFINE_MUTEX(pp_do_mutex); static inline void pp_enable_irq (struct pp_struct *pp) { struct parport *port = pp->pdev->port; port->ops->enable_irq (port); } static ssize_t pp_read (struct file * file, char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_read = 0; struct parport *pport; int mode; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } /* Trivial case. 
*/ if (count == 0) return 0; kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_read == 0) { ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); if (mode == IEEE1284_MODE_EPP) { /* various specials for EPP mode */ int flags = 0; size_t (*fn)(struct parport *, void *, size_t, int); if (pp->flags & PP_W91284PIC) { flags |= PARPORT_W91284PIC; } if (pp->flags & PP_FASTREAD) { flags |= PARPORT_EPP_FAST; } if (pport->ieee1284.mode & IEEE1284_ADDR) { fn = pport->ops->epp_read_addr; } else { fn = pport->ops->epp_read_data; } bytes_read = (*fn)(pport, kbuffer, need, flags); } else { bytes_read = parport_read (pport, kbuffer, need); } if (bytes_read != 0) break; if (file->f_flags & O_NONBLOCK) { bytes_read = -EAGAIN; break; } if (signal_pending (current)) { bytes_read = -ERESTARTSYS; break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) bytes_read = -EFAULT; kfree (kbuffer); pp_enable_irq (pp); return bytes_read; } static ssize_t pp_write (struct file * file, const char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_written = 0; ssize_t wrote; int mode; struct parport *pport; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? 
PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_written < count) { ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); if (copy_from_user (kbuffer, buf + bytes_written, n)) { bytes_written = -EFAULT; break; } if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { /* do a fast EPP write */ if (pport->ieee1284.mode & IEEE1284_ADDR) { wrote = pport->ops->epp_write_addr (pport, kbuffer, n, PARPORT_EPP_FAST); } else { wrote = pport->ops->epp_write_data (pport, kbuffer, n, PARPORT_EPP_FAST); } } else { wrote = parport_write (pp->pdev->port, kbuffer, n); } if (wrote <= 0) { if (!bytes_written) { bytes_written = wrote; } break; } bytes_written += wrote; if (file->f_flags & O_NONBLOCK) { if (!bytes_written) bytes_written = -EAGAIN; break; } if (signal_pending (current)) { if (!bytes_written) { bytes_written = -EINTR; } break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); kfree (kbuffer); pp_enable_irq (pp); return bytes_written; } static void pp_irq (void *private) { struct pp_struct *pp = private; if (pp->irqresponse) { parport_write_control (pp->pdev->port, pp->irqctl); pp->irqresponse = 0; } atomic_inc (&pp->irqc); wake_up_interruptible (&pp->irq_wait); } static int register_device (int minor, struct pp_struct *pp) { struct parport *port; struct pardevice * pdev = NULL; char *name; int fl; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) return -ENOMEM; port = parport_find_number (minor); if (!port) { printk (KERN_WARNING "%s: no associated port!\n", name); kfree (name); return -ENXIO; } fl = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; pdev = parport_register_device (port, name, NULL, NULL, pp_irq, fl, pp); parport_put_port (port); if (!pdev) { printk (KERN_WARNING "%s: failed to register device!\n", name); kfree (name); return -ENXIO; } pp->pdev = pdev; pr_debug("%s: registered pardevice\n", name); return 0; } static enum ieee1284_phase init_phase (int mode) { switch (mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR)) { case IEEE1284_MODE_NIBBLE: case IEEE1284_MODE_BYTE: return IEEE1284_PH_REV_IDLE; } return IEEE1284_PH_FWD_IDLE; } static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; struct parport * port; void __user *argp = (void __user *)arg; /* First handle the cases that don't take arguments. */ switch (cmd) { case PPCLAIM: { struct ieee1284_info *info; int ret; if (pp->flags & PP_CLAIMED) { pr_debug(CHRDEV "%x: you've already got it!\n", minor); return -EINVAL; } /* Deferred device registration. */ if (!pp->pdev) { int err = register_device (minor, pp); if (err) { return err; } } ret = parport_claim_or_block (pp->pdev); if (ret < 0) return ret; pp->flags |= PP_CLAIMED; /* For interrupt-reporting to work, we need to be * informed of each interrupt. */ pp_enable_irq (pp); /* We may need to fix up the state machine. */ info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; pp->default_inactivity = parport_set_timeout (pp->pdev, 0); parport_set_timeout (pp->pdev, pp->default_inactivity); return 0; } case PPEXCL: if (pp->pdev) { pr_debug(CHRDEV "%x: too late for PPEXCL; " "already registered\n", minor); if (pp->flags & PP_EXCL) /* But it's not really an error. */ return 0; /* There's no chance of making the driver happy. */ return -EINVAL; } /* Just remember to register the device exclusively * when we finally do the registration. 
*/ pp->flags |= PP_EXCL; return 0; case PPSETMODE: { int mode; if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; /* FIXME: validate mode */ pp->state.mode = mode; pp->state.phase = init_phase (mode); if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.mode = mode; pp->pdev->port->ieee1284.phase = pp->state.phase; } return 0; } case PPGETMODE: { int mode; if (pp->flags & PP_CLAIMED) { mode = pp->pdev->port->ieee1284.mode; } else { mode = pp->state.mode; } if (copy_to_user (argp, &mode, sizeof (mode))) { return -EFAULT; } return 0; } case PPSETPHASE: { int phase; if (copy_from_user (&phase, argp, sizeof (phase))) { return -EFAULT; } /* FIXME: validate phase */ pp->state.phase = phase; if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.phase = phase; } return 0; } case PPGETPHASE: { int phase; if (pp->flags & PP_CLAIMED) { phase = pp->pdev->port->ieee1284.phase; } else { phase = pp->state.phase; } if (copy_to_user (argp, &phase, sizeof (phase))) { return -EFAULT; } return 0; } case PPGETMODES: { unsigned int modes; port = parport_find_number (minor); if (!port) return -ENODEV; modes = port->modes; parport_put_port(port); if (copy_to_user (argp, &modes, sizeof (modes))) { return -EFAULT; } return 0; } case PPSETFLAGS: { int uflags; if (copy_from_user (&uflags, argp, sizeof (uflags))) { return -EFAULT; } pp->flags &= ~PP_FLAGMASK; pp->flags |= (uflags & PP_FLAGMASK); return 0; } case PPGETFLAGS: { int uflags; uflags = pp->flags & PP_FLAGMASK; if (copy_to_user (argp, &uflags, sizeof (uflags))) { return -EFAULT; } return 0; } } /* end switch() */ /* Everything else requires the port to be claimed, so check * that now. 
*/ if ((pp->flags & PP_CLAIMED) == 0) { pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } port = pp->pdev->port; switch (cmd) { struct ieee1284_info *info; unsigned char reg; unsigned char mask; int mode; int ret; struct timeval par_timeout; long to_jiffies; case PPRSTATUS: reg = parport_read_status (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRDATA: reg = parport_read_data (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRCONTROL: reg = parport_read_control (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPYIELD: parport_yield_blocking (pp->pdev); return 0; case PPRELEASE: /* Save the state machine's state. */ info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); pp->flags &= ~PP_CLAIMED; return 0; case PPWCONTROL: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_control (port, reg); return 0; case PPWDATA: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_data (port, reg); return 0; case PPFCONTROL: if (copy_from_user (&mask, argp, sizeof (mask))) return -EFAULT; if (copy_from_user (&reg, 1 + (unsigned char __user *) arg, sizeof (reg))) return -EFAULT; parport_frob_control (port, mask, reg); return 0; case PPDATADIR: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; if (mode) port->ops->data_reverse (port); else port->ops->data_forward (port); return 0; case PPNEGOT: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; switch ((ret = parport_negotiate (port, mode))) { case 0: break; case -1: /* handshake failed, peripheral not IEEE 1284 */ ret = -EIO; break; case 1: /* handshake succeeded, peripheral rejected mode */ ret = -ENXIO; break; } pp_enable_irq (pp); return ret; case PPWCTLONIRQ: if (copy_from_user 
(&reg, argp, sizeof (reg))) return -EFAULT; /* Remember what to set the control lines to, for next * time we get an interrupt. */ pp->irqctl = reg; pp->irqresponse = 1; return 0; case PPCLRIRQ: ret = atomic_read (&pp->irqc); if (copy_to_user (argp, &ret, sizeof (ret))) return -EFAULT; atomic_sub (ret, &pp->irqc); return 0; case PPSETTIME: if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { return -EFAULT; } /* Convert to jiffies, place in pp->pdev->timeout */ if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { return -EINVAL; } to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); to_jiffies += par_timeout.tv_sec * (long)HZ; if (to_jiffies <= 0) { return -EINVAL; } pp->pdev->timeout = to_jiffies; return 0; case PPGETTIME: to_jiffies = pp->pdev->timeout; memset(&par_timeout, 0, sizeof(par_timeout)); par_timeout.tv_sec = to_jiffies / HZ; par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) return -EFAULT; return 0; default: pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); return -EINVAL; } /* Keep the compiler happy */ return 0; } static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&pp_do_mutex); ret = pp_do_ioctl(file, cmd, arg); mutex_unlock(&pp_do_mutex); return ret; } static int pp_open (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp; if (minor >= PARPORT_MAX) return -ENXIO; pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); if (!pp) return -ENOMEM; pp->state.mode = IEEE1284_MODE_COMPAT; pp->state.phase = init_phase (pp->state.mode); pp->flags = 0; pp->irqresponse = 0; atomic_set (&pp->irqc, 0); init_waitqueue_head (&pp->irq_wait); /* Defer the actual device registration until the first claim. * That way, we know whether or not the driver wants to have * exclusive access to the port (PPEXCL). 
*/ pp->pdev = NULL; file->private_data = pp; return 0; } static int pp_release (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp = file->private_data; int compat_negot; compat_negot = 0; if (!(pp->flags & PP_CLAIMED) && pp->pdev && (pp->state.mode != IEEE1284_MODE_COMPAT)) { struct ieee1284_info *info; /* parport released, but not in compatibility mode */ parport_claim_or_block (pp->pdev); pp->flags |= PP_CLAIMED; info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; compat_negot = 1; } else if ((pp->flags & PP_CLAIMED) && pp->pdev && (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { compat_negot = 2; } if (compat_negot) { parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); pr_debug(CHRDEV "%x: negotiated back to compatibility " "mode because user-space forgot\n", minor); } if (pp->flags & PP_CLAIMED) { struct ieee1284_info *info; info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); if (compat_negot != 1) { pr_debug(CHRDEV "%x: released pardevice " "because user-space forgot\n", minor); } } if (pp->pdev) { const char *name = pp->pdev->name; parport_unregister_device (pp->pdev); kfree (name); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } kfree (pp); return 0; } /* No kernel lock held - fine */ static unsigned int pp_poll (struct file * file, poll_table * wait) { struct pp_struct *pp = file->private_data; unsigned int mask = 0; poll_wait (file, &pp->irq_wait, wait); if (atomic_read (&pp->irqc)) mask |= POLLIN | POLLRDNORM; return mask; } static struct class *ppdev_class; static const struct file_operations pp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, 
.unlocked_ioctl = pp_ioctl, .open = pp_open, .release = pp_release, }; static void pp_attach(struct parport *port) { device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), NULL, "parport%d", port->number); } static void pp_detach(struct parport *port) { device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); } static struct parport_driver pp_driver = { .name = CHRDEV, .attach = pp_attach, .detach = pp_detach, }; static int __init ppdev_init (void) { int err = 0; if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { printk (KERN_WARNING CHRDEV ": unable to get major %d\n", PP_MAJOR); return -EIO; } ppdev_class = class_create(THIS_MODULE, CHRDEV); if (IS_ERR(ppdev_class)) { err = PTR_ERR(ppdev_class); goto out_chrdev; } if (parport_register_driver(&pp_driver)) { printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); goto out_class; } printk (KERN_INFO PP_VERSION "\n"); goto out; out_class: class_destroy(ppdev_class); out_chrdev: unregister_chrdev(PP_MAJOR, CHRDEV); out: return err; } static void __exit ppdev_cleanup (void) { /* Clean up all parport stuff */ parport_unregister_driver(&pp_driver); class_destroy(ppdev_class); unregister_chrdev (PP_MAJOR, CHRDEV); } module_init(ppdev_init); module_exit(ppdev_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
CaptainThrowback/kernel_htc_e8
drivers/char/ppdev.c
C
gpl-2.0
19,633
/* * HighPoint RR3xxx/4xxx controller driver for Linux * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Please report bugs/comments/suggestions to linux@highpoint-tech.com * * For more information, visit http://www.highpoint-tech.com */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <linux/gfp.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/div64.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_host.h> #include "hptiop.h" MODULE_AUTHOR("HighPoint Technologies, Inc."); MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver"); static char driver_name[] = "hptiop"; static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver"; static const char driver_ver[] = "v1.6 (090910)"; static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec); static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, struct hpt_iop_request_scsi_command *req); static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag); static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag); static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec) { u32 req 
= 0; int i; for (i = 0; i < millisec; i++) { req = readl(&hba->u.itl.iop->inbound_queue); if (req != IOPMU_QUEUE_EMPTY) break; msleep(1); } if (req != IOPMU_QUEUE_EMPTY) { writel(req, &hba->u.itl.iop->outbound_queue); readl(&hba->u.itl.iop->outbound_intstatus); return 0; } return -1; } static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec) { return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec); } static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag) { if (tag & IOPMU_QUEUE_ADDR_HOST_BIT) hptiop_host_request_callback_itl(hba, tag & ~IOPMU_QUEUE_ADDR_HOST_BIT); else hptiop_iop_request_callback_itl(hba, tag); } static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba) { u32 req; while ((req = readl(&hba->u.itl.iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) { if (req & IOPMU_QUEUE_MASK_HOST_BITS) hptiop_request_callback_itl(hba, req); else { struct hpt_iop_request_header __iomem * p; p = (struct hpt_iop_request_header __iomem *) ((char __iomem *)hba->u.itl.iop + req); if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) { if (readl(&p->context)) hptiop_request_callback_itl(hba, req); else writel(1, &p->context); } else hptiop_request_callback_itl(hba, req); } } } static int iop_intr_itl(struct hptiop_hba *hba) { struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop; void __iomem *plx = hba->u.itl.plx; u32 status; int ret = 0; if (plx && readl(plx + 0x11C5C) & 0xf) writel(1, plx + 0x11C60); status = readl(&iop->outbound_intstatus); if (status & IOPMU_OUTBOUND_INT_MSG0) { u32 msg = readl(&iop->outbound_msgaddr0); dprintk("received outbound msg %x\n", msg); writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus); hptiop_message_callback(hba, msg); ret = 1; } if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_itl(hba); ret = 1; } return ret; } static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu) { u32 outbound_tail = readl(&mu->outbound_tail); u32 outbound_head = readl(&mu->outbound_head); if 
(outbound_tail != outbound_head) { u64 p; memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8); outbound_tail++; if (outbound_tail == MVIOP_QUEUE_LEN) outbound_tail = 0; writel(outbound_tail, &mu->outbound_tail); return p; } else return 0; } static void mv_inbound_write(u64 p, struct hptiop_hba *hba) { u32 inbound_head = readl(&hba->u.mv.mu->inbound_head); u32 head = inbound_head + 1; if (head == MVIOP_QUEUE_LEN) head = 0; memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8); writel(head, &hba->u.mv.mu->inbound_head); writel(MVIOP_MU_INBOUND_INT_POSTQUEUE, &hba->u.mv.regs->inbound_doorbell); } static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag) { u32 req_type = (tag >> 5) & 0x7; struct hpt_iop_request_scsi_command *req; dprintk("hptiop_request_callback_mv: tag=%llx\n", tag); BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0); switch (req_type) { case IOP_REQUEST_TYPE_GET_CONFIG: case IOP_REQUEST_TYPE_SET_CONFIG: hba->msg_done = 1; break; case IOP_REQUEST_TYPE_SCSI_COMMAND: req = hba->reqs[tag >> 8].req_virt; if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)) req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS); hptiop_finish_scsi_req(hba, tag>>8, req); break; default: break; } } static int iop_intr_mv(struct hptiop_hba *hba) { u32 status; int ret = 0; status = readl(&hba->u.mv.regs->outbound_doorbell); writel(~status, &hba->u.mv.regs->outbound_doorbell); if (status & MVIOP_MU_OUTBOUND_INT_MSG) { u32 msg; msg = readl(&hba->u.mv.mu->outbound_msg); dprintk("received outbound msg %x\n", msg); hptiop_message_callback(hba, msg); ret = 1; } if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { u64 tag; while ((tag = mv_outbound_read(hba->u.mv.mu))) hptiop_request_callback_mv(hba, tag); ret = 1; } return ret; } static int iop_send_sync_request_itl(struct hptiop_hba *hba, void __iomem *_req, u32 millisec) { struct hpt_iop_request_header __iomem *req = _req; u32 i; writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags); 
writel(0, &req->context); writel((unsigned long)req - (unsigned long)hba->u.itl.iop, &hba->u.itl.iop->inbound_queue); readl(&hba->u.itl.iop->outbound_intstatus); for (i = 0; i < millisec; i++) { iop_intr_itl(hba); if (readl(&req->context)) return 0; msleep(1); } return -1; } static int iop_send_sync_request_mv(struct hptiop_hba *hba, u32 size_bits, u32 millisec) { struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req; u32 i; hba->msg_done = 0; reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST); mv_inbound_write(hba->u.mv.internal_req_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba); for (i = 0; i < millisec; i++) { iop_intr_mv(hba); if (hba->msg_done) return 0; msleep(1); } return -1; } static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg) { writel(msg, &hba->u.itl.iop->inbound_msgaddr0); readl(&hba->u.itl.iop->outbound_intstatus); } static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg) { writel(msg, &hba->u.mv.mu->inbound_msg); writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell); readl(&hba->u.mv.regs->inbound_doorbell); } static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec) { u32 i; hba->msg_done = 0; hba->ops->post_msg(hba, msg); for (i = 0; i < millisec; i++) { spin_lock_irq(hba->host->host_lock); hba->ops->iop_intr(hba); spin_unlock_irq(hba->host->host_lock); if (hba->msg_done) break; msleep(1); } return hba->msg_done? 
0 : -1; } static int iop_get_config_itl(struct hptiop_hba *hba, struct hpt_iop_request_get_config *config) { u32 req32; struct hpt_iop_request_get_config __iomem *req; req32 = readl(&hba->u.itl.iop->inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; req = (struct hpt_iop_request_get_config __iomem *) ((unsigned long)hba->u.itl.iop + req32); writel(0, &req->header.flags); writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type); writel(sizeof(struct hpt_iop_request_get_config), &req->header.size); writel(IOP_RESULT_PENDING, &req->header.result); if (iop_send_sync_request_itl(hba, req, 20000)) { dprintk("Get config send cmd failed\n"); return -1; } memcpy_fromio(config, req, sizeof(*config)); writel(req32, &hba->u.itl.iop->outbound_queue); return 0; } static int iop_get_config_mv(struct hptiop_hba *hba, struct hpt_iop_request_get_config *config) { struct hpt_iop_request_get_config *req = hba->u.mv.internal_req; req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG); req->header.size = cpu_to_le32(sizeof(struct hpt_iop_request_get_config)); req->header.result = cpu_to_le32(IOP_RESULT_PENDING); req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5); req->header.context_hi32 = 0; if (iop_send_sync_request_mv(hba, 0, 20000)) { dprintk("Get config send cmd failed\n"); return -1; } memcpy(config, req, sizeof(struct hpt_iop_request_get_config)); return 0; } static int iop_set_config_itl(struct hptiop_hba *hba, struct hpt_iop_request_set_config *config) { u32 req32; struct hpt_iop_request_set_config __iomem *req; req32 = readl(&hba->u.itl.iop->inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; req = (struct hpt_iop_request_set_config __iomem *) ((unsigned long)hba->u.itl.iop + req32); memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header), (u8 *)config + sizeof(struct hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct 
hpt_iop_request_header)); writel(0, &req->header.flags); writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type); writel(sizeof(struct hpt_iop_request_set_config), &req->header.size); writel(IOP_RESULT_PENDING, &req->header.result); if (iop_send_sync_request_itl(hba, req, 20000)) { dprintk("Set config send cmd failed\n"); return -1; } writel(req32, &hba->u.itl.iop->outbound_queue); return 0; } static int iop_set_config_mv(struct hptiop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req = hba->u.mv.internal_req; memcpy(req, config, sizeof(struct hpt_iop_request_set_config)); req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG); req->header.size = cpu_to_le32(sizeof(struct hpt_iop_request_set_config)); req->header.result = cpu_to_le32(IOP_RESULT_PENDING); req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5); req->header.context_hi32 = 0; if (iop_send_sync_request_mv(hba, 0, 20000)) { dprintk("Set config send cmd failed\n"); return -1; } return 0; } static void hptiop_enable_intr_itl(struct hptiop_hba *hba) { writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0), &hba->u.itl.iop->outbound_intmask); } static void hptiop_enable_intr_mv(struct hptiop_hba *hba) { writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG, &hba->u.mv.regs->outbound_intmask); } static int hptiop_initialize_iop(struct hptiop_hba *hba) { /* enable interrupts */ hba->ops->enable_intr(hba); hba->initialized = 1; /* start background tasks */ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { printk(KERN_ERR "scsi%d: fail to start background task\n", hba->host->host_no); return -1; } return 0; } static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index) { u32 mem_base_phy, length; void __iomem *mem_base_virt; struct pci_dev *pcidev = hba->pcidev; if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) { 
printk(KERN_ERR "scsi%d: pci resource invalid\n", hba->host->host_no); return NULL; } mem_base_phy = pci_resource_start(pcidev, index); length = pci_resource_len(pcidev, index); mem_base_virt = ioremap(mem_base_phy, length); if (!mem_base_virt) { printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n", hba->host->host_no); return NULL; } return mem_base_virt; } static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba) { struct pci_dev *pcidev = hba->pcidev; hba->u.itl.iop = hptiop_map_pci_bar(hba, 0); if (hba->u.itl.iop == NULL) return -1; if ((pcidev->device & 0xff00) == 0x4400) { hba->u.itl.plx = hba->u.itl.iop; hba->u.itl.iop = hptiop_map_pci_bar(hba, 2); if (hba->u.itl.iop == NULL) { iounmap(hba->u.itl.plx); return -1; } } return 0; } static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba) { if (hba->u.itl.plx) iounmap(hba->u.itl.plx); iounmap(hba->u.itl.iop); } static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba) { hba->u.mv.regs = hptiop_map_pci_bar(hba, 0); if (hba->u.mv.regs == NULL) return -1; hba->u.mv.mu = hptiop_map_pci_bar(hba, 2); if (hba->u.mv.mu == NULL) { iounmap(hba->u.mv.regs); return -1; } return 0; } static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba) { iounmap(hba->u.mv.regs); iounmap(hba->u.mv.mu); } static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg) { dprintk("iop message 0x%x\n", msg); if (msg == IOPMU_INBOUND_MSG0_NOP) hba->msg_done = 1; if (!hba->initialized) return; if (msg == IOPMU_INBOUND_MSG0_RESET) { atomic_set(&hba->resetting, 0); wake_up(&hba->reset_wq); } else if (msg <= IOPMU_INBOUND_MSG0_MAX) hba->msg_done = 1; } static struct hptiop_request *get_req(struct hptiop_hba *hba) { struct hptiop_request *ret; dprintk("get_req : req=%p\n", hba->req_list); ret = hba->req_list; if (ret) hba->req_list = ret->next; return ret; } static void free_req(struct hptiop_hba *hba, struct hptiop_request *req) { dprintk("free_req(%d, %p)\n", req->index, req); req->next = hba->req_list; hba->req_list = req; } 
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, struct hpt_iop_request_scsi_command *req) { struct scsi_cmnd *scp; dprintk("hptiop_finish_scsi_req: req=%p, type=%d, " "result=%d, context=0x%x tag=%d\n", req, req->header.type, req->header.result, req->header.context, tag); BUG_ON(!req->header.result); BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND)); scp = hba->reqs[tag].scp; if (HPT_SCP(scp)->mapped) scsi_dma_unmap(scp); switch (le32_to_cpu(req->header.result)) { case IOP_RESULT_SUCCESS: scsi_set_resid(scp, scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); scp->result = (DID_OK<<16); break; case IOP_RESULT_BAD_TARGET: scp->result = (DID_BAD_TARGET<<16); break; case IOP_RESULT_BUSY: scp->result = (DID_BUS_BUSY<<16); break; case IOP_RESULT_RESET: scp->result = (DID_RESET<<16); break; case IOP_RESULT_FAIL: scp->result = (DID_ERROR<<16); break; case IOP_RESULT_INVALID_REQUEST: scp->result = (DID_ABORT<<16); break; case IOP_RESULT_CHECK_CONDITION: scsi_set_resid(scp, scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); scp->result = SAM_STAT_CHECK_CONDITION; memcpy(scp->sense_buffer, &req->sg_list, min_t(size_t, SCSI_SENSE_BUFFERSIZE, le32_to_cpu(req->dataxfer_length))); break; default: scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16; break; } dprintk("scsi_done(%p)\n", scp); scp->scsi_done(scp); free_req(hba, &hba->reqs[tag]); } static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag) { struct hpt_iop_request_scsi_command *req; u32 tag; if (hba->iopintf_v2) { tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT; req = hba->reqs[tag].req_virt; if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT)) req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS); } else { tag = _tag; req = hba->reqs[tag].req_virt; } hptiop_finish_scsi_req(hba, tag, req); } void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag) { struct hpt_iop_request_header __iomem *req; struct hpt_iop_request_ioctl_command __iomem *p; 
struct hpt_ioctl_k *arg; req = (struct hpt_iop_request_header __iomem *) ((unsigned long)hba->u.itl.iop + tag); dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, " "result=%d, context=0x%x tag=%d\n", req, readl(&req->type), readl(&req->result), readl(&req->context), tag); BUG_ON(!readl(&req->result)); BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND); p = (struct hpt_iop_request_ioctl_command __iomem *)req; arg = (struct hpt_ioctl_k *)(unsigned long) (readl(&req->context) | ((u64)readl(&req->context_hi32)<<32)); if (readl(&req->result) == IOP_RESULT_SUCCESS) { arg->result = HPT_IOCTL_RESULT_OK; if (arg->outbuf_size) memcpy_fromio(arg->outbuf, &p->buf[(readl(&p->inbuf_size) + 3)& ~3], arg->outbuf_size); if (arg->bytes_returned) *arg->bytes_returned = arg->outbuf_size; } else arg->result = HPT_IOCTL_RESULT_FAILED; arg->done(arg); writel(tag, &hba->u.itl.iop->outbound_queue); } static irqreturn_t hptiop_intr(int irq, void *dev_id) { struct hptiop_hba *hba = dev_id; int handled; unsigned long flags; spin_lock_irqsave(hba->host->host_lock, flags); handled = hba->ops->iop_intr(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); return handled; } static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg) { struct Scsi_Host *host = scp->device->host; struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; struct scatterlist *sg; int idx, nseg; nseg = scsi_dma_map(scp); BUG_ON(nseg < 0); if (!nseg) return 0; HPT_SCP(scp)->sgcnt = nseg; HPT_SCP(scp)->mapped = 1; BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors); scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) { psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)); psg[idx].size = cpu_to_le32(sg_dma_len(sg)); psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ? 
cpu_to_le32(1) : 0; } return HPT_SCP(scp)->sgcnt; } static void hptiop_post_req_itl(struct hptiop_hba *hba, struct hptiop_request *_req) { struct hpt_iop_request_header *reqhdr = _req->req_virt; reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT | (u32)_req->index); reqhdr->context_hi32 = 0; if (hba->iopintf_v2) { u32 size, size_bits; size = le32_to_cpu(reqhdr->size); if (size < 256) size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; else if (size < 512) size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; else size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT | IOPMU_QUEUE_ADDR_HOST_BIT; writel(_req->req_shifted_phy | size_bits, &hba->u.itl.iop->inbound_queue); } else writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT, &hba->u.itl.iop->inbound_queue); } static void hptiop_post_req_mv(struct hptiop_hba *hba, struct hptiop_request *_req) { struct hpt_iop_request_header *reqhdr = _req->req_virt; u32 size, size_bit; reqhdr->context = cpu_to_le32(_req->index<<8 | IOP_REQUEST_TYPE_SCSI_COMMAND<<5); reqhdr->context_hi32 = 0; size = le32_to_cpu(reqhdr->size); if (size <= 256) size_bit = 0; else if (size <= 256*2) size_bit = 1; else if (size <= 256*3) size_bit = 2; else size_bit = 3; mv_inbound_write((_req->req_shifted_phy << 5) | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); } static int hptiop_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) { struct Scsi_Host *host = scp->device->host; struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; struct hpt_iop_request_scsi_command *req; int sg_count = 0; struct hptiop_request *_req; BUG_ON(!done); scp->scsi_done = done; _req = get_req(hba); if (_req == NULL) { dprintk("hptiop_queuecmd : no free req\n"); return SCSI_MLQUEUE_HOST_BUSY; } _req->scp = scp; dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) " "req_index=%d, req=%p\n", scp, host->host_no, scp->device->channel, scp->device->id, scp->device->lun, ((u32 *)scp->cmnd)[0], ((u32 *)scp->cmnd)[1], ((u32 *)scp->cmnd)[2], _req->index, _req->req_virt); 
scp->result = 0; if (scp->device->channel || scp->device->lun || scp->device->id > hba->max_devices) { scp->result = DID_BAD_TARGET << 16; free_req(hba, _req); goto cmd_done; } req = _req->req_virt; /* build S/G table */ sg_count = hptiop_buildsgl(scp, req->sg_list); if (!sg_count) HPT_SCP(scp)->mapped = 0; req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND); req->header.result = cpu_to_le32(IOP_RESULT_PENDING); req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp)); req->channel = scp->device->channel; req->target = scp->device->id; req->lun = scp->device->lun; req->header.size = cpu_to_le32( sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + sg_count * sizeof(struct hpt_iopsg)); memcpy(req->cdb, scp->cmnd, sizeof(req->cdb)); hba->ops->post_req(hba, _req); return 0; cmd_done: dprintk("scsi_done(scp=%p)\n", scp); scp->scsi_done(scp); return 0; } static DEF_SCSI_QCMD(hptiop_queuecommand) static const char *hptiop_info(struct Scsi_Host *host) { return driver_name_long; } static int hptiop_reset_hba(struct hptiop_hba *hba) { if (atomic_xchg(&hba->resetting, 1) == 0) { atomic_inc(&hba->reset_count); hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET); } wait_event_timeout(hba->reset_wq, atomic_read(&hba->resetting) == 0, 60 * HZ); if (atomic_read(&hba->resetting)) { /* IOP is in unknown state, abort reset */ printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); return -1; } if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { dprintk("scsi%d: fail to start background task\n", hba->host->host_no); } return 0; } static int hptiop_reset(struct scsi_cmnd *scp) { struct Scsi_Host * host = scp->device->host; struct hptiop_hba * hba = (struct hptiop_hba *)host->hostdata; printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n", scp->device->host->host_no, scp->device->channel, scp->device->id, scp); return hptiop_reset_hba(hba)? 
FAILED : SUCCESS; } static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth, int reason) { struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; if (reason != SCSI_QDEPTH_DEFAULT) return -EOPNOTSUPP; if (queue_depth > hba->max_requests) queue_depth = hba->max_requests; scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); return queue_depth; } static ssize_t hptiop_show_version(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); } static ssize_t hptiop_show_fw_version(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *host = class_to_shost(dev); struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", hba->firmware_version >> 24, (hba->firmware_version >> 16) & 0xff, (hba->firmware_version >> 8) & 0xff, hba->firmware_version & 0xff); } static struct device_attribute hptiop_attr_version = { .attr = { .name = "driver-version", .mode = S_IRUGO, }, .show = hptiop_show_version, }; static struct device_attribute hptiop_attr_fw_version = { .attr = { .name = "firmware-version", .mode = S_IRUGO, }, .show = hptiop_show_fw_version, }; static struct device_attribute *hptiop_attrs[] = { &hptiop_attr_version, &hptiop_attr_fw_version, NULL }; static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = driver_name, .queuecommand = hptiop_queuecommand, .eh_device_reset_handler = hptiop_reset, .eh_bus_reset_handler = hptiop_reset, .info = hptiop_info, .emulated = 0, .use_clustering = ENABLE_CLUSTERING, .proc_name = driver_name, .shost_attrs = hptiop_attrs, .this_id = -1, .change_queue_depth = hptiop_adjust_disk_queue_depth, }; static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba) { hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev, 0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL); if (hba->u.mv.internal_req) return 0; else return -1; } static 
int hptiop_internal_memfree_mv(struct hptiop_hba *hba) { if (hba->u.mv.internal_req) { dma_free_coherent(&hba->pcidev->dev, 0x800, hba->u.mv.internal_req, hba->u.mv.internal_req_phy); return 0; } else return -1; } static int __devinit hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { struct Scsi_Host *host = NULL; struct hptiop_hba *hba; struct hpt_iop_request_get_config iop_config; struct hpt_iop_request_set_config set_config; dma_addr_t start_phy; void *start_virt; u32 offset, i, req_size; dprintk("hptiop_probe(%p)\n", pcidev); if (pci_enable_device(pcidev)) { printk(KERN_ERR "hptiop: fail to enable pci device\n"); return -ENODEV; } printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n", pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7, pcidev->irq); pci_set_master(pcidev); /* Enable 64bit DMA if possible */ if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) { if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) { printk(KERN_ERR "hptiop: fail to set dma_mask\n"); goto disable_pci_device; } } if (pci_request_regions(pcidev, driver_name)) { printk(KERN_ERR "hptiop: pci_request_regions failed\n"); goto disable_pci_device; } host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba)); if (!host) { printk(KERN_ERR "hptiop: fail to alloc scsi host\n"); goto free_pci_regions; } hba = (struct hptiop_hba *)host->hostdata; hba->ops = (struct hptiop_adapter_ops *)id->driver_data; hba->pcidev = pcidev; hba->host = host; hba->initialized = 0; hba->iopintf_v2 = 0; atomic_set(&hba->resetting, 0); atomic_set(&hba->reset_count, 0); init_waitqueue_head(&hba->reset_wq); init_waitqueue_head(&hba->ioctl_wq); host->max_lun = 1; host->max_channel = 0; host->io_port = 0; host->n_io_port = 0; host->irq = pcidev->irq; if (hba->ops->map_pci_bar(hba)) goto free_scsi_host; if (hba->ops->iop_wait_ready(hba, 20000)) { printk(KERN_ERR "scsi%d: firmware not ready\n", hba->host->host_no); goto unmap_pci_bar; } if (hba->ops->internal_memalloc) { if 
(hba->ops->internal_memalloc(hba)) { printk(KERN_ERR "scsi%d: internal_memalloc failed\n", hba->host->host_no); goto unmap_pci_bar; } } if (hba->ops->get_config(hba, &iop_config)) { printk(KERN_ERR "scsi%d: get config failed\n", hba->host->host_no); goto unmap_pci_bar; } hba->max_requests = min(le32_to_cpu(iop_config.max_requests), HPTIOP_MAX_REQUESTS); hba->max_devices = le32_to_cpu(iop_config.max_devices); hba->max_request_size = le32_to_cpu(iop_config.request_size); hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count); hba->firmware_version = le32_to_cpu(iop_config.firmware_version); hba->interface_version = le32_to_cpu(iop_config.interface_version); hba->sdram_size = le32_to_cpu(iop_config.sdram_size); if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) hba->iopintf_v2 = 1; host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9; host->max_id = le32_to_cpu(iop_config.max_devices); host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count); host->can_queue = le32_to_cpu(iop_config.max_requests); host->cmd_per_lun = le32_to_cpu(iop_config.max_requests); host->max_cmd_len = 16; req_size = sizeof(struct hpt_iop_request_scsi_command) + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1); if ((req_size & 0x1f) != 0) req_size = (req_size + 0x1f) & ~0x1f; memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config)); set_config.iop_id = cpu_to_le32(host->host_no); set_config.vbus_id = cpu_to_le16(host->host_no); set_config.max_host_request_size = cpu_to_le16(req_size); if (hba->ops->set_config(hba, &set_config)) { printk(KERN_ERR "scsi%d: set config failed\n", hba->host->host_no); goto unmap_pci_bar; } pci_set_drvdata(pcidev, host); if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, driver_name, hba)) { printk(KERN_ERR "scsi%d: request irq %d failed\n", hba->host->host_no, pcidev->irq); goto unmap_pci_bar; } /* Allocate request mem */ dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests); 
hba->req_size = req_size; start_virt = dma_alloc_coherent(&pcidev->dev, hba->req_size*hba->max_requests + 0x20, &start_phy, GFP_KERNEL); if (!start_virt) { printk(KERN_ERR "scsi%d: fail to alloc request mem\n", hba->host->host_no); goto free_request_irq; } hba->dma_coherent = start_virt; hba->dma_coherent_handle = start_phy; if ((start_phy & 0x1f) != 0) { offset = ((start_phy + 0x1f) & ~0x1f) - start_phy; start_phy += offset; start_virt += offset; } hba->req_list = start_virt; for (i = 0; i < hba->max_requests; i++) { hba->reqs[i].next = NULL; hba->reqs[i].req_virt = start_virt; hba->reqs[i].req_shifted_phy = start_phy >> 5; hba->reqs[i].index = i; free_req(hba, &hba->reqs[i]); start_virt = (char *)start_virt + hba->req_size; start_phy = start_phy + hba->req_size; } /* Enable Interrupt and start background task */ if (hptiop_initialize_iop(hba)) goto free_request_mem; if (scsi_add_host(host, &pcidev->dev)) { printk(KERN_ERR "scsi%d: scsi_add_host failed\n", hba->host->host_no); goto free_request_mem; } scsi_scan_host(host); dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no); return 0; free_request_mem: dma_free_coherent(&hba->pcidev->dev, hba->req_size * hba->max_requests + 0x20, hba->dma_coherent, hba->dma_coherent_handle); free_request_irq: free_irq(hba->pcidev->irq, hba); unmap_pci_bar: if (hba->ops->internal_memfree) hba->ops->internal_memfree(hba); hba->ops->unmap_pci_bar(hba); free_scsi_host: scsi_host_put(host); free_pci_regions: pci_release_regions(pcidev); disable_pci_device: pci_disable_device(pcidev); dprintk("scsi%d: hptiop_probe fail\n", host ? 
host->host_no : 0); return -ENODEV; } static void hptiop_shutdown(struct pci_dev *pcidev) { struct Scsi_Host *host = pci_get_drvdata(pcidev); struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; dprintk("hptiop_shutdown(%p)\n", hba); /* stop the iop */ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) printk(KERN_ERR "scsi%d: shutdown the iop timeout\n", hba->host->host_no); /* disable all outbound interrupts */ hba->ops->disable_intr(hba); } static void hptiop_disable_intr_itl(struct hptiop_hba *hba) { u32 int_mask; int_mask = readl(&hba->u.itl.iop->outbound_intmask); writel(int_mask | IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE, &hba->u.itl.iop->outbound_intmask); readl(&hba->u.itl.iop->outbound_intmask); } static void hptiop_disable_intr_mv(struct hptiop_hba *hba) { writel(0, &hba->u.mv.regs->outbound_intmask); readl(&hba->u.mv.regs->outbound_intmask); } static void hptiop_remove(struct pci_dev *pcidev) { struct Scsi_Host *host = pci_get_drvdata(pcidev); struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; dprintk("scsi%d: hptiop_remove\n", hba->host->host_no); scsi_remove_host(host); hptiop_shutdown(pcidev); free_irq(hba->pcidev->irq, hba); dma_free_coherent(&hba->pcidev->dev, hba->req_size * hba->max_requests + 0x20, hba->dma_coherent, hba->dma_coherent_handle); if (hba->ops->internal_memfree) hba->ops->internal_memfree(hba); hba->ops->unmap_pci_bar(hba); pci_release_regions(hba->pcidev); pci_set_drvdata(hba->pcidev, NULL); pci_disable_device(hba->pcidev); scsi_host_put(host); } static struct hptiop_adapter_ops hptiop_itl_ops = { .iop_wait_ready = iop_wait_ready_itl, .internal_memalloc = NULL, .internal_memfree = NULL, .map_pci_bar = hptiop_map_pci_bar_itl, .unmap_pci_bar = hptiop_unmap_pci_bar_itl, .enable_intr = hptiop_enable_intr_itl, .disable_intr = hptiop_disable_intr_itl, .get_config = iop_get_config_itl, .set_config = iop_set_config_itl, .iop_intr = iop_intr_itl, .post_msg = hptiop_post_msg_itl, .post_req = 
hptiop_post_req_itl, }; static struct hptiop_adapter_ops hptiop_mv_ops = { .iop_wait_ready = iop_wait_ready_mv, .internal_memalloc = hptiop_internal_memalloc_mv, .internal_memfree = hptiop_internal_memfree_mv, .map_pci_bar = hptiop_map_pci_bar_mv, .unmap_pci_bar = hptiop_unmap_pci_bar_mv, .enable_intr = hptiop_enable_intr_mv, .disable_intr = hptiop_disable_intr_mv, .get_config = iop_get_config_mv, .set_config = iop_set_config_mv, .iop_intr = iop_intr_mv, .post_msg = hptiop_post_msg_mv, .post_req = hptiop_post_req_mv, }; static struct pci_device_id hptiop_id_table[] = { { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops }, { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops }, { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops }, { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops }, {}, }; MODULE_DEVICE_TABLE(pci, 
hptiop_id_table); static struct pci_driver hptiop_pci_driver = { .name = driver_name, .id_table = hptiop_id_table, .probe = hptiop_probe, .remove = hptiop_remove, .shutdown = hptiop_shutdown, }; static int __init hptiop_module_init(void) { printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); return pci_register_driver(&hptiop_pci_driver); } static void __exit hptiop_module_exit(void) { pci_unregister_driver(&hptiop_pci_driver); } module_init(hptiop_module_init); module_exit(hptiop_module_exit); MODULE_LICENSE("GPL");
Mark092/android_kernel_samsung_common
drivers/scsi/hptiop.c
C
gpl-2.0
35,195
/* * Transmeta's Efficeon AGPGART driver. * * Based upon a diff by Linus around November '02. * * Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com> * and H. Peter Anvin <hpa@transmeta.com>. */ /* * NOTE-cpg-040217: * * - when compiled as a module, after loading the module, * it will refuse to unload, indicating it is in use, * when it is not. * - no s3 (suspend to ram) testing. * - tested on the efficeon integrated nothbridge for tens * of iterations of starting x and glxgears. * - tested with radeon 9000 and radeon mobility m9 cards * - tested with c3/c4 enabled (with the mobility m9 card) */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/gfp.h> #include <linux/page-flags.h> #include <linux/mm.h> #include "agp.h" #include "intel-agp.h" /* * The real differences to the generic AGP code is * in the GART mappings - a two-level setup with the * first level being an on-chip 64-entry table. * * The page array is filled through the ATTPAGE register * (Aperture Translation Table Page Register) at 0xB8. Bits: * 31:20: physical page address * 11:9: Page Attribute Table Index (PATI) * must match the PAT index for the * mapped pages (the 2nd level page table pages * themselves should be just regular WB-cacheable, * so this is normally zero.) * 8: Present * 7:6: reserved, write as zero * 5:0: GATT directory index: which 1st-level entry * * The Efficeon AGP spec requires pages to be WB-cacheable * but to be explicitly CLFLUSH'd after any changes. */ #define EFFICEON_ATTPAGE 0xb8 #define EFFICEON_L1_SIZE 64 /* Number of PDE pages */ #define EFFICEON_PATI (0 << 9) #define EFFICEON_PRESENT (1 << 8) static struct _efficeon_private { unsigned long l1_table[EFFICEON_L1_SIZE]; } efficeon_private; static const struct gatt_mask efficeon_generic_masks[] = { {.mask = 0x00000001, .type = 0} }; /* This function does the same thing as mask_memory() for this chipset... 
*/ static inline unsigned long efficeon_mask_memory(struct page *page) { unsigned long addr = page_to_phys(page); return addr | 0x00000001; } static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] = { {256, 65536, 0}, {128, 32768, 32}, {64, 16384, 48}, {32, 8192, 56} }; /* * Control interfaces are largely identical to * the legacy Intel 440BX.. */ static int efficeon_fetch_size(void) { int i; u16 temp; struct aper_size_info_lvl2 *values; pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp); values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes); for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { if (temp == values[i].size_value) { agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i); agp_bridge->aperture_size_idx = i; return values[i].size; } } return 0; } static void efficeon_tlbflush(struct agp_memory * mem) { printk(KERN_DEBUG PFX "efficeon_tlbflush()\n"); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200); pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); } static void efficeon_cleanup(void) { u16 temp; struct aper_size_info_lvl2 *previous_size; printk(KERN_DEBUG PFX "efficeon_cleanup()\n"); previous_size = A_SIZE_LVL2(agp_bridge->previous_size); pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9)); pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value); } static int efficeon_configure(void) { u32 temp; u16 temp2; struct aper_size_info_lvl2 *current_size; printk(KERN_DEBUG PFX "efficeon_configure()\n"); current_size = A_SIZE_LVL2(agp_bridge->current_size); /* aperture size */ pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value); /* address to map to */ pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp); agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); /* agpctrl */ pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280); /* 
paccfg/nbxcfg */ pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2); pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11)); /* clear any possible error conditions */ pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7); return 0; } static int efficeon_free_gatt_table(struct agp_bridge_data *bridge) { int index, freed = 0; for (index = 0; index < EFFICEON_L1_SIZE; index++) { unsigned long page = efficeon_private.l1_table[index]; if (page) { efficeon_private.l1_table[index] = 0; ClearPageReserved(virt_to_page((char *)page)); free_page(page); freed++; } printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n", agp_bridge->dev, EFFICEON_ATTPAGE, index); pci_write_config_dword(agp_bridge->dev, EFFICEON_ATTPAGE, index); } printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed); return 0; } /* * Since we don't need contiguous memory we just try * to get the gatt table once */ #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr)) #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #undef GET_GATT #define GET_GATT(addr) (efficeon_private.gatt_pages[\ GET_PAGE_DIR_IDX(addr)]->remapped) static int efficeon_create_gatt_table(struct agp_bridge_data *bridge) { int index; const int pati = EFFICEON_PATI; const int present = EFFICEON_PRESENT; const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3; int num_entries, l1_pages; num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries); /* There are 2^10 PTE pages per PDE page */ BUG_ON(num_entries & 0x3ff); l1_pages = num_entries >> 10; for (index = 0 ; index < l1_pages ; index++) { int offset; unsigned long page; unsigned long value; page = efficeon_private.l1_table[index]; BUG_ON(page); page = get_zeroed_page(GFP_KERNEL); if (!page) { 
efficeon_free_gatt_table(agp_bridge); return -ENOMEM; } SetPageReserved(virt_to_page((char *)page)); for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk) clflush((char *)page+offset); efficeon_private.l1_table[index] = page; value = virt_to_phys((unsigned long *)page) | pati | present | index; pci_write_config_dword(agp_bridge->dev, EFFICEON_ATTPAGE, value); } return 0; } static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type) { int i, count = mem->page_count, num_entries; unsigned int *page, *last_page; const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3; const unsigned long clflush_mask = ~(clflush_chunk-1); printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count); num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; if (type != 0 || mem->type != 0) return -EINVAL; if (!mem->is_flushed) { global_cache_flush(); mem->is_flushed = true; } last_page = NULL; for (i = 0; i < count; i++) { int index = pg_start + i; unsigned long insert = efficeon_mask_memory(mem->pages[i]); page = (unsigned int *) efficeon_private.l1_table[index >> 10]; if (!page) continue; page += (index & 0x3ff); *page = insert; /* clflush is slow, so don't clflush until we have to */ if (last_page && (((unsigned long)page^(unsigned long)last_page) & clflush_mask)) clflush(last_page); last_page = page; } if ( last_page ) clflush(last_page); agp_bridge->driver->tlb_flush(mem); return 0; } static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type) { int i, count = mem->page_count, num_entries; printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count); num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; if (type != 0 || mem->type != 0) return -EINVAL; for (i = 0; i < count; i++) { int index = pg_start + i; unsigned int *page = (unsigned int *) 
efficeon_private.l1_table[index >> 10]; if (!page) continue; page += (index & 0x3ff); *page = 0; } agp_bridge->driver->tlb_flush(mem); return 0; } static const struct agp_bridge_driver efficeon_driver = { .owner = THIS_MODULE, .aperture_sizes = efficeon_generic_sizes, .size_type = LVL2_APER_SIZE, .num_aperture_sizes = 4, .configure = efficeon_configure, .fetch_size = efficeon_fetch_size, .cleanup = efficeon_cleanup, .tlb_flush = efficeon_tlbflush, .mask_memory = agp_generic_mask_memory, .masks = efficeon_generic_masks, .agp_enable = agp_generic_enable, .cache_flush = global_cache_flush, // Efficeon-specific GATT table setup / populate / teardown .create_gatt_table = efficeon_create_gatt_table, .free_gatt_table = efficeon_free_gatt_table, .insert_memory = efficeon_insert_memory, .remove_memory = efficeon_remove_memory, .cant_use_aperture = false, // true might be faster? // Generic .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static int __devinit agp_efficeon_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct agp_bridge_data *bridge; u8 cap_ptr; struct resource *r; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return -ENODEV; /* Probe for Efficeon controller */ if (pdev->device != PCI_DEVICE_ID_EFFICEON) { printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n", pdev->device); return -ENODEV; } printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n"); bridge = agp_alloc_bridge(); if (!bridge) return -ENOMEM; bridge->driver = &efficeon_driver; bridge->dev = pdev; bridge->capndx = cap_ptr; /* * If the device has not been properly setup, the following will catch * the problem and should stop the system from crashing. 
* 20030610 - hamish@zot.org */ if (pci_enable_device(pdev)) { printk(KERN_ERR PFX "Unable to Enable PCI device\n"); agp_put_bridge(bridge); return -ENODEV; } /* * The following fixes the case where the BIOS has "forgotten" to * provide an address range for the GART. * 20030610 - hamish@zot.org */ r = &pdev->resource[0]; if (!r->start && r->end) { if (pci_assign_resource(pdev, 0)) { printk(KERN_ERR PFX "could not assign resource 0\n"); agp_put_bridge(bridge); return -ENODEV; } } /* Fill in the mode register */ if (cap_ptr) { pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode); } pci_set_drvdata(pdev, bridge); return agp_add_bridge(bridge); } static void __devexit agp_efficeon_remove(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); agp_remove_bridge(bridge); agp_put_bridge(bridge); } #ifdef CONFIG_PM static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state) { return 0; } static int agp_efficeon_resume(struct pci_dev *pdev) { printk(KERN_DEBUG PFX "agp_efficeon_resume()\n"); return efficeon_configure(); } #endif static struct pci_device_id agp_efficeon_pci_table[] = { { .class = (PCI_CLASS_BRIDGE_HOST << 8), .class_mask = ~0, .vendor = PCI_VENDOR_ID_TRANSMETA, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table); static struct pci_driver agp_efficeon_pci_driver = { .name = "agpgart-efficeon", .id_table = agp_efficeon_pci_table, .probe = agp_efficeon_probe, .remove = agp_efficeon_remove, #ifdef CONFIG_PM .suspend = agp_efficeon_suspend, .resume = agp_efficeon_resume, #endif }; static int __init agp_efficeon_init(void) { static int agp_initialised=0; if (agp_off) return -EINVAL; if (agp_initialised == 1) return 0; agp_initialised=1; return pci_register_driver(&agp_efficeon_pci_driver); } static void __exit agp_efficeon_cleanup(void) { pci_unregister_driver(&agp_efficeon_pci_driver); } module_init(agp_efficeon_init); 
module_exit(agp_efficeon_cleanup); MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>"); MODULE_LICENSE("GPL and additional rights");
raden/blue-kelisa-kernel
drivers/char/agp/efficeon-agp.c
C
gpl-2.0
12,657
# Display a process of packets and processed time. # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), 
diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list'] for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): 
print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print 
"debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = 
(name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information realted to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) 
irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if 
skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
Ca1ne/Enoch-Sense-Kernel-old
tools/perf/scripts/python/netdev-times.py
Python
gpl-2.0
15,048
#include "fitz-internal.h" #include "mupdf-internal.h" /* * CMap parser */ enum { TOK_USECMAP = PDF_NUM_TOKENS, TOK_BEGIN_CODESPACE_RANGE, TOK_END_CODESPACE_RANGE, TOK_BEGIN_BF_CHAR, TOK_END_BF_CHAR, TOK_BEGIN_BF_RANGE, TOK_END_BF_RANGE, TOK_BEGIN_CID_CHAR, TOK_END_CID_CHAR, TOK_BEGIN_CID_RANGE, TOK_END_CID_RANGE, TOK_END_CMAP }; static int pdf_cmap_token_from_keyword(char *key) { if (!strcmp(key, "usecmap")) return TOK_USECMAP; if (!strcmp(key, "begincodespacerange")) return TOK_BEGIN_CODESPACE_RANGE; if (!strcmp(key, "endcodespacerange")) return TOK_END_CODESPACE_RANGE; if (!strcmp(key, "beginbfchar")) return TOK_BEGIN_BF_CHAR; if (!strcmp(key, "endbfchar")) return TOK_END_BF_CHAR; if (!strcmp(key, "beginbfrange")) return TOK_BEGIN_BF_RANGE; if (!strcmp(key, "endbfrange")) return TOK_END_BF_RANGE; if (!strcmp(key, "begincidchar")) return TOK_BEGIN_CID_CHAR; if (!strcmp(key, "endcidchar")) return TOK_END_CID_CHAR; if (!strcmp(key, "begincidrange")) return TOK_BEGIN_CID_RANGE; if (!strcmp(key, "endcidrange")) return TOK_END_CID_RANGE; if (!strcmp(key, "endcmap")) return TOK_END_CMAP; return PDF_TOK_KEYWORD; } static int pdf_code_from_string(char *buf, int len) { int a = 0; while (len--) a = (a << 8) | *(unsigned char *)buf++; return a; } static int pdf_lex_cmap(fz_stream *file, pdf_lexbuf *buf) { int tok = pdf_lex(file, buf); if (tok == PDF_TOK_KEYWORD) tok = pdf_cmap_token_from_keyword(buf->scratch); return tok; } static void pdf_parse_cmap_name(fz_context *ctx, pdf_cmap *cmap, fz_stream *file, pdf_lexbuf *buf) { int tok; tok = pdf_lex_cmap(file, buf); if (tok == PDF_TOK_NAME) fz_strlcpy(cmap->cmap_name, buf->scratch, sizeof(cmap->cmap_name)); else fz_warn(ctx, "expected name after CMapName in cmap"); } static void pdf_parse_wmode(fz_context *ctx, pdf_cmap *cmap, fz_stream *file, pdf_lexbuf *buf) { int tok; tok = pdf_lex_cmap(file, buf); if (tok == PDF_TOK_INT) pdf_set_cmap_wmode(ctx, cmap, buf->i); else fz_warn(ctx, "expected integer after WMode in cmap"); } 
static void pdf_parse_codespace_range(fz_context *ctx, pdf_cmap *cmap, fz_stream *file, pdf_lexbuf *buf) { int tok; int lo, hi; while (1) { tok = pdf_lex_cmap(file, buf); if (tok == TOK_END_CODESPACE_RANGE) return; else if (tok == PDF_TOK_STRING) { lo = pdf_code_from_string(buf->scratch, buf->len); tok = pdf_lex_cmap(file, buf); if (tok == PDF_TOK_STRING) { hi = pdf_code_from_string(buf->scratch, buf->len); pdf_add_codespace(ctx, cmap, lo, hi, buf->len); } else break; } else break; } fz_throw(ctx, "expected string or endcodespacerange"); } static void pdf_parse_cid_range(fz_context *ctx, pdf_cmap *cmap, fz_stream *file, pdf_lexbuf *buf) { int tok; int lo, hi, dst; while (1) { tok = pdf_lex_cmap(file, buf); if (tok == TOK_END_CID_RANGE) return; else if (tok != PDF_TOK_STRING) fz_throw(ctx, "expected string or endcidrange"); lo = pdf_code_from_string(buf->scratch, buf->len); tok = pdf_lex_cmap(file, buf); if (tok != PDF_TOK_STRING) fz_throw(ctx, "expected string"); hi = pdf_code_from_string(buf->scratch, buf->len); tok = pdf_lex_cmap(file, buf); if (tok != PDF_TOK_INT) fz_throw(ctx, "expected integer"); dst = buf->i; pdf_map_range_to_range(ctx, cmap, lo, hi, dst); } } static void pdf_parse_cid_char(fz_context *ctx, pdf_cmap *cmap, fz_stream *file, pdf_lexbuf *buf) { int tok; int src, dst; while (1) { tok = pdf_lex_cmap(file, buf); if (tok == TOK_END_CID_CHAR) return; else if (tok != PDF_TOK_STRING) fz_throw(ctx, "expected string or endcidchar"); src = pdf_code_from_string(buf->scratch, buf->len); tok = pdf_lex_cmap(file, buf); if (tok != PDF_TOK_INT) fz_throw(ctx, "expected integer"); dst = buf->i; pdf_map_range_to_range(ctx, cmap, src, src, dst); } } static void pdf_parse_bf_range_array(fz_context *ctx, pdf_cmap *cmap, fz_stream *file, pdf_lexbuf *buf, int lo, int hi) { int tok; int dst[256]; int i; while (1) { tok = pdf_lex_cmap(file, buf); if (tok == PDF_TOK_CLOSE_ARRAY) return; /* Note: does not handle [ /Name /Name ... 
] */ else if (tok != PDF_TOK_STRING) fz_throw(ctx, "expected string or ]"); if (buf->len / 2) { for (i = 0; i < buf->len / 2; i++) dst[i] = pdf_code_from_string(&buf->scratch[i * 2], 2); pdf_map_one_to_many(ctx, cmap, lo, dst, buf->len / 2); } lo ++; } } static void pdf_parse_bf_range(fz_context *ctx, pdf_cmap *cmap, fz_stream *file, pdf_lexbuf *buf) { int tok; int lo, hi, dst; while (1) { tok = pdf_lex_cmap(file, buf); if (tok == TOK_END_BF_RANGE) return; else if (tok != PDF_TOK_STRING) fz_throw(ctx, "expected string or endbfrange"); lo = pdf_code_from_string(buf->scratch, buf->len); tok = pdf_lex_cmap(file, buf); if (tok != PDF_TOK_STRING) fz_throw(ctx, "expected string"); hi = pdf_code_from_string(buf->scratch, buf->len); tok = pdf_lex_cmap(file, buf); if (tok == PDF_TOK_STRING) { if (buf->len == 2) { dst = pdf_code_from_string(buf->scratch, buf->len); pdf_map_range_to_range(ctx, cmap, lo, hi, dst); } else { int dststr[256]; int i; if (buf->len / 2) { for (i = 0; i < buf->len / 2; i++) dststr[i] = pdf_code_from_string(&buf->scratch[i * 2], 2); while (lo <= hi) { dststr[i-1] ++; pdf_map_one_to_many(ctx, cmap, lo, dststr, i); lo ++; } } } } else if (tok == PDF_TOK_OPEN_ARRAY) { pdf_parse_bf_range_array(ctx, cmap, file, buf, lo, hi); } else { fz_throw(ctx, "expected string or array or endbfrange"); } } } static void pdf_parse_bf_char(fz_context *ctx, pdf_cmap *cmap, fz_stream *file, pdf_lexbuf *buf) { int tok; int dst[256]; int src; int i; while (1) { tok = pdf_lex_cmap(file, buf); if (tok == TOK_END_BF_CHAR) return; else if (tok != PDF_TOK_STRING) fz_throw(ctx, "expected string or endbfchar"); src = pdf_code_from_string(buf->scratch, buf->len); tok = pdf_lex_cmap(file, buf); /* Note: does not handle /dstName */ if (tok != PDF_TOK_STRING) fz_throw(ctx, "expected string"); if (buf->len / 2) { for (i = 0; i < buf->len / 2; i++) dst[i] = pdf_code_from_string(&buf->scratch[i * 2], 2); pdf_map_one_to_many(ctx, cmap, src, dst, i); } } } pdf_cmap * 
pdf_load_cmap(fz_context *ctx, fz_stream *file) { pdf_cmap *cmap; char key[64]; pdf_lexbuf buf; int tok; const char *where; pdf_lexbuf_init(ctx, &buf, PDF_LEXBUF_SMALL); cmap = pdf_new_cmap(ctx); strcpy(key, ".notdef"); fz_var(where); fz_try(ctx) { while (1) { where = ""; tok = pdf_lex_cmap(file, &buf); if (tok == PDF_TOK_EOF || tok == TOK_END_CMAP) break; else if (tok == PDF_TOK_NAME) { if (!strcmp(buf.scratch, "CMapName")) { where = " after CMapName"; pdf_parse_cmap_name(ctx, cmap, file, &buf); } else if (!strcmp(buf.scratch, "WMode")) { where = " after WMode"; pdf_parse_wmode(ctx, cmap, file, &buf); } else fz_strlcpy(key, buf.scratch, sizeof key); } else if (tok == TOK_USECMAP) { fz_strlcpy(cmap->usecmap_name, key, sizeof(cmap->usecmap_name)); } else if (tok == TOK_BEGIN_CODESPACE_RANGE) { where = " codespacerange"; pdf_parse_codespace_range(ctx, cmap, file, &buf); } else if (tok == TOK_BEGIN_BF_CHAR) { where = " bfchar"; pdf_parse_bf_char(ctx, cmap, file, &buf); } else if (tok == TOK_BEGIN_CID_CHAR) { where = " cidchar"; pdf_parse_cid_char(ctx, cmap, file, &buf); } else if (tok == TOK_BEGIN_BF_RANGE) { where = " bfrange"; pdf_parse_bf_range(ctx, cmap, file, &buf); } else if (tok == TOK_BEGIN_CID_RANGE) { where = "cidrange"; pdf_parse_cid_range(ctx, cmap, file, &buf); } /* ignore everything else */ } pdf_sort_cmap(ctx, cmap); } fz_always(ctx) { pdf_lexbuf_fin(&buf); } fz_catch(ctx) { pdf_drop_cmap(ctx, cmap); fz_throw(ctx, "syntaxerror in cmap%s", where); } return cmap; }
liutz/PDFViewer
jni/mupdf/mupdf/pdf/pdf_cmap_parse.c
C
gpl-2.0
8,113
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI core. */ #include <linux/jiffies.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/sock.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #ifndef CONFIG_BT_HCI_CORE_DEBUG #undef BT_DBG #define BT_DBG(D...) 
#endif static void hci_cmd_task(unsigned long arg); static void hci_rx_task(unsigned long arg); static void hci_tx_task(unsigned long arg); static void hci_notify(struct hci_dev *hdev, int event); static DEFINE_RWLOCK(hci_task_lock); /* HCI device list */ LIST_HEAD(hci_dev_list); DEFINE_RWLOCK(hci_dev_list_lock); /* HCI callback list */ LIST_HEAD(hci_cb_list); DEFINE_RWLOCK(hci_cb_list_lock); /* HCI protocols */ #define HCI_MAX_PROTO 2 struct hci_proto *hci_proto[HCI_MAX_PROTO]; /* HCI notifiers list */ static ATOMIC_NOTIFIER_HEAD(hci_notifier); /* ---- HCI notifications ---- */ int hci_register_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&hci_notifier, nb); } int hci_unregister_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&hci_notifier, nb); } static void hci_notify(struct hci_dev *hdev, int event) { atomic_notifier_call_chain(&hci_notifier, event, hdev); } /* ---- HCI requests ---- */ void hci_req_complete(struct hci_dev *hdev, int result) { BT_DBG("%s result 0x%2.2x", hdev->name, result); if (hdev->req_status == HCI_REQ_PEND) { hdev->req_result = result; hdev->req_status = HCI_REQ_DONE; wake_up_interruptible(&hdev->req_wait_q); } } static void hci_req_cancel(struct hci_dev *hdev, int err) { BT_DBG("%s err 0x%2.2x", hdev->name, err); if (hdev->req_status == HCI_REQ_PEND) { hdev->req_result = err; hdev->req_status = HCI_REQ_CANCELED; wake_up_interruptible(&hdev->req_wait_q); } } /* Execute request and wait for completion. 
*/ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), unsigned long opt, __u32 timeout) { DECLARE_WAITQUEUE(wait, current); int err = 0; BT_DBG("%s start", hdev->name); hdev->req_status = HCI_REQ_PEND; add_wait_queue(&hdev->req_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); req(hdev, opt); schedule_timeout(timeout); remove_wait_queue(&hdev->req_wait_q, &wait); if (signal_pending(current)) return -EINTR; switch (hdev->req_status) { case HCI_REQ_DONE: err = -bt_err(hdev->req_result); break; case HCI_REQ_CANCELED: err = -hdev->req_result; break; default: err = -ETIMEDOUT; break; } hdev->req_status = hdev->req_result = 0; BT_DBG("%s end: err %d", hdev->name, err); return err; } static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), unsigned long opt, __u32 timeout) { int ret; /* Serialize all requests */ hci_req_lock(hdev); ret = __hci_request(hdev, req, opt, timeout); hci_req_unlock(hdev); return ret; } static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) { BT_DBG("%s %ld", hdev->name, opt); /* Reset device */ hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); } static void hci_init_req(struct hci_dev *hdev, unsigned long opt) { struct sk_buff *skb; __le16 param; __u8 flt_type; BT_DBG("%s %ld", hdev->name, opt); /* Driver initialization */ /* Special commands */ while ((skb = skb_dequeue(&hdev->driver_init))) { bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; skb->dev = (void *) hdev; skb_queue_tail(&hdev->cmd_q, skb); hci_sched_cmd(hdev); } skb_queue_purge(&hdev->driver_init); /* Mandatory initialization */ /* Reset */ if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks)) hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); /* Read Local Supported Features */ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); /* Read Local Version */ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); #if 0 /* Host buffer size */ { struct hci_cp_host_buffer_size cp; cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE); cp.sco_mtu = HCI_MAX_SCO_SIZE; cp.acl_max_pkt = cpu_to_le16(0xffff); cp.sco_max_pkt = cpu_to_le16(0xffff); hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp); } #endif /* Read BD Address */ hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); /* Read Class of Device */ hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); /* Read Local Name */ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); /* Read Voice Setting */ hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); /* Optional initialization */ /* Clear Event Filters */ flt_type = HCI_FLT_CLEAR_ALL; hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); /* Page timeout ~20 secs */ param = cpu_to_le16(0x8000); hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param); /* Connection accept timeout ~20 secs */ param = cpu_to_le16(0x7d00); hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); } static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) { __u8 scan = opt; BT_DBG("%s %x", hdev->name, scan); /* Inquiry and Page scans */ hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) { __u8 auth = opt; BT_DBG("%s %x", hdev->name, auth); /* Authentication */ hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); } static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) { __u8 encrypt = opt; BT_DBG("%s %x", hdev->name, encrypt); /* Authentication */ hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); } /* Get HCI device by index. * Device is held on return. 
*/ struct hci_dev *hci_dev_get(int index) { struct hci_dev *hdev = NULL; struct list_head *p; BT_DBG("%d", index); if (index < 0) return NULL; read_lock(&hci_dev_list_lock); list_for_each(p, &hci_dev_list) { struct hci_dev *d = list_entry(p, struct hci_dev, list); if (d->id == index) { hdev = hci_dev_hold(d); break; } } read_unlock(&hci_dev_list_lock); return hdev; } /* ---- Inquiry support ---- */ static void inquiry_cache_flush(struct hci_dev *hdev) { struct inquiry_cache *cache = &hdev->inq_cache; struct inquiry_entry *next = cache->list, *e; BT_DBG("cache %p", cache); cache->list = NULL; while ((e = next)) { next = e->next; kfree(e); } } struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct inquiry_cache *cache = &hdev->inq_cache; struct inquiry_entry *e; BT_DBG("cache %p, %s", cache, batostr(bdaddr)); for (e = cache->list; e; e = e->next) if (!bacmp(&e->data.bdaddr, bdaddr)) break; return e; } void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data) { struct inquiry_cache *cache = &hdev->inq_cache; struct inquiry_entry *e; BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) { /* Entry not in the cache. Add new one. 
*/ if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC))) return; e->next = cache->list; cache->list = e; } memcpy(&e->data, data, sizeof(*data)); e->timestamp = jiffies; cache->timestamp = jiffies; } static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) { struct inquiry_cache *cache = &hdev->inq_cache; struct inquiry_info *info = (struct inquiry_info *) buf; struct inquiry_entry *e; int copied = 0; for (e = cache->list; e && copied < num; e = e->next, copied++) { struct inquiry_data *data = &e->data; bacpy(&info->bdaddr, &data->bdaddr); info->pscan_rep_mode = data->pscan_rep_mode; info->pscan_period_mode = data->pscan_period_mode; info->pscan_mode = data->pscan_mode; memcpy(info->dev_class, data->dev_class, 3); info->clock_offset = data->clock_offset; info++; } BT_DBG("cache %p, copied %d", cache, copied); return copied; } static void hci_inq_req(struct hci_dev *hdev, unsigned long opt) { struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; struct hci_cp_inquiry cp; BT_DBG("%s", hdev->name); if (test_bit(HCI_INQUIRY, &hdev->flags)) return; /* Start Inquiry */ memcpy(&cp.lap, &ir->lap, 3); cp.length = ir->length; cp.num_rsp = ir->num_rsp; hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); } int hci_inquiry(void __user *arg) { __u8 __user *ptr = arg; struct hci_inquiry_req ir; struct hci_dev *hdev; int err = 0, do_inquiry = 0, max_rsp; long timeo; __u8 *buf; if (copy_from_user(&ir, ptr, sizeof(ir))) return -EFAULT; if (!(hdev = hci_dev_get(ir.dev_id))) return -ENODEV; hci_dev_lock_bh(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { inquiry_cache_flush(hdev); do_inquiry = 1; } hci_dev_unlock_bh(hdev); timeo = ir.length * msecs_to_jiffies(2000); if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0) goto done; /* for unlimited number of responses we will use buffer with 255 entries */ max_rsp = (ir.num_rsp == 0) ? 
255 : ir.num_rsp; /* cache_dump can't sleep. Therefore we allocate temp buffer and then * copy it to the user space. */ if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) { err = -ENOMEM; goto done; } hci_dev_lock_bh(hdev); ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); hci_dev_unlock_bh(hdev); BT_DBG("num_rsp %d", ir.num_rsp); if (!copy_to_user(ptr, &ir, sizeof(ir))) { ptr += sizeof(ir); if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * ir.num_rsp)) err = -EFAULT; } else err = -EFAULT; kfree(buf); done: hci_dev_put(hdev); return err; } /* ---- HCI ioctl helpers ---- */ int hci_dev_open(__u16 dev) { struct hci_dev *hdev; int ret = 0; if (!(hdev = hci_dev_get(dev))) return -ENODEV; BT_DBG("%s %p", hdev->name, hdev); hci_req_lock(hdev); if (test_bit(HCI_UP, &hdev->flags)) { ret = -EALREADY; goto done; } if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) set_bit(HCI_RAW, &hdev->flags); if (hdev->open(hdev)) { ret = -EIO; goto done; } if (!test_bit(HCI_RAW, &hdev->flags)) { atomic_set(&hdev->cmd_cnt, 1); set_bit(HCI_INIT, &hdev->flags); //__hci_request(hdev, hci_reset_req, 0, HZ); ret = __hci_request(hdev, hci_init_req, 0, msecs_to_jiffies(HCI_INIT_TIMEOUT)); clear_bit(HCI_INIT, &hdev->flags); } if (!ret) { hci_dev_hold(hdev); set_bit(HCI_UP, &hdev->flags); hci_notify(hdev, HCI_DEV_UP); } else { /* Init failed, cleanup */ tasklet_kill(&hdev->rx_task); tasklet_kill(&hdev->tx_task); tasklet_kill(&hdev->cmd_task); skb_queue_purge(&hdev->cmd_q); skb_queue_purge(&hdev->rx_q); if (hdev->flush) hdev->flush(hdev); if (hdev->sent_cmd) { kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } hdev->close(hdev); hdev->flags = 0; } done: hci_req_unlock(hdev); hci_dev_put(hdev); return ret; } static int hci_dev_do_close(struct hci_dev *hdev) { BT_DBG("%s %p", hdev->name, hdev); hci_req_cancel(hdev, ENODEV); hci_req_lock(hdev); if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { hci_req_unlock(hdev); return 0; } /* Kill RX and TX tasks */ 
tasklet_kill(&hdev->rx_task); tasklet_kill(&hdev->tx_task); hci_dev_lock_bh(hdev); inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); hci_dev_unlock_bh(hdev); hci_notify(hdev, HCI_DEV_DOWN); if (hdev->flush) hdev->flush(hdev); /* Reset device */ skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); if (!test_bit(HCI_RAW, &hdev->flags)) { set_bit(HCI_INIT, &hdev->flags); __hci_request(hdev, hci_reset_req, 0, msecs_to_jiffies(250)); clear_bit(HCI_INIT, &hdev->flags); } /* Kill cmd task */ tasklet_kill(&hdev->cmd_task); /* Drop queues */ skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); skb_queue_purge(&hdev->raw_q); /* Drop last sent command */ if (hdev->sent_cmd) { kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } /* After this point our queues are empty * and no tasks are scheduled. */ hdev->close(hdev); /* Clear flags */ hdev->flags = 0; hci_req_unlock(hdev); hci_dev_put(hdev); return 0; } int hci_dev_close(__u16 dev) { struct hci_dev *hdev; int err; if (!(hdev = hci_dev_get(dev))) return -ENODEV; err = hci_dev_do_close(hdev); hci_dev_put(hdev); return err; } int hci_dev_reset(__u16 dev) { struct hci_dev *hdev; int ret = 0; if (!(hdev = hci_dev_get(dev))) return -ENODEV; hci_req_lock(hdev); tasklet_disable(&hdev->tx_task); if (!test_bit(HCI_UP, &hdev->flags)) goto done; /* Drop queues */ skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); hci_dev_lock_bh(hdev); inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); hci_dev_unlock_bh(hdev); if (hdev->flush) hdev->flush(hdev); atomic_set(&hdev->cmd_cnt, 1); hdev->acl_cnt = 0; hdev->sco_cnt = 0; if (!test_bit(HCI_RAW, &hdev->flags)) ret = __hci_request(hdev, hci_reset_req, 0, msecs_to_jiffies(HCI_INIT_TIMEOUT)); done: tasklet_enable(&hdev->tx_task); hci_req_unlock(hdev); hci_dev_put(hdev); return ret; } int hci_dev_reset_stat(__u16 dev) { struct hci_dev *hdev; int ret = 0; if (!(hdev = hci_dev_get(dev))) return -ENODEV; memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 
hci_dev_put(hdev); return ret; } int hci_dev_cmd(unsigned int cmd, void __user *arg) { struct hci_dev *hdev; struct hci_dev_req dr; int err = 0; if (copy_from_user(&dr, arg, sizeof(dr))) return -EFAULT; if (!(hdev = hci_dev_get(dr.dev_id))) return -ENODEV; switch (cmd) { case HCISETAUTH: err = hci_request(hdev, hci_auth_req, dr.dev_opt, msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETENCRYPT: if (!lmp_encrypt_capable(hdev)) { err = -EOPNOTSUPP; break; } if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ err = hci_request(hdev, hci_auth_req, dr.dev_opt, msecs_to_jiffies(HCI_INIT_TIMEOUT)); if (err) break; } err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETSCAN: err = hci_request(hdev, hci_scan_req, dr.dev_opt, msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETPTYPE: hdev->pkt_type = (__u16) dr.dev_opt; break; case HCISETLINKPOL: hdev->link_policy = (__u16) dr.dev_opt; break; case HCISETLINKMODE: hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT); break; case HCISETACLMTU: hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1); hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0); break; case HCISETSCOMTU: hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1); hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0); break; default: err = -EINVAL; break; } hci_dev_put(hdev); return err; } int hci_get_dev_list(void __user *arg) { struct hci_dev_list_req *dl; struct hci_dev_req *dr; struct list_head *p; int n = 0, size, err; __u16 dev_num; if (get_user(dev_num, (__u16 __user *) arg)) return -EFAULT; if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) return -EINVAL; size = sizeof(*dl) + dev_num * sizeof(*dr); if (!(dl = kmalloc(size, GFP_KERNEL))) return -ENOMEM; dr = dl->dev_req; read_lock_bh(&hci_dev_list_lock); list_for_each(p, &hci_dev_list) { struct hci_dev *hdev; hdev = list_entry(p, struct hci_dev, list); (dr + n)->dev_id = hdev->id; (dr + n)->dev_opt = hdev->flags; if (++n >= 
dev_num) break; } read_unlock_bh(&hci_dev_list_lock); dl->dev_num = n; size = sizeof(*dl) + n * sizeof(*dr); err = copy_to_user(arg, dl, size); kfree(dl); return err ? -EFAULT : 0; } int hci_get_dev_info(void __user *arg) { struct hci_dev *hdev; struct hci_dev_info di; int err = 0; if (copy_from_user(&di, arg, sizeof(di))) return -EFAULT; if (!(hdev = hci_dev_get(di.dev_id))) return -ENODEV; strcpy(di.name, hdev->name); di.bdaddr = hdev->bdaddr; di.type = hdev->type; di.flags = hdev->flags; di.pkt_type = hdev->pkt_type; di.acl_mtu = hdev->acl_mtu; di.acl_pkts = hdev->acl_pkts; di.sco_mtu = hdev->sco_mtu; di.sco_pkts = hdev->sco_pkts; di.link_policy = hdev->link_policy; di.link_mode = hdev->link_mode; memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); memcpy(&di.features, &hdev->features, sizeof(di.features)); if (copy_to_user(arg, &di, sizeof(di))) err = -EFAULT; hci_dev_put(hdev); return err; } /* ---- Interface to HCI drivers ---- */ /* Alloc HCI device */ struct hci_dev *hci_alloc_dev(void) { struct hci_dev *hdev; hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); if (!hdev) return NULL; skb_queue_head_init(&hdev->driver_init); return hdev; } EXPORT_SYMBOL(hci_alloc_dev); /* Free HCI device */ void hci_free_dev(struct hci_dev *hdev) { skb_queue_purge(&hdev->driver_init); /* will free via device release */ put_device(&hdev->dev); } EXPORT_SYMBOL(hci_free_dev); /* Register HCI device */ int hci_register_dev(struct hci_dev *hdev) { struct list_head *head = &hci_dev_list, *p; int i, id = 0; BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner); if (!hdev->open || !hdev->close || !hdev->destruct) return -EINVAL; write_lock_bh(&hci_dev_list_lock); /* Find first available device id */ list_for_each(p, &hci_dev_list) { if (list_entry(p, struct hci_dev, list)->id != id) break; head = p; id++; } sprintf(hdev->name, "hci%d", id); hdev->id = id; list_add(&hdev->list, head); atomic_set(&hdev->refcnt, 1); spin_lock_init(&hdev->lock); hdev->flags = 0; 
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); hdev->idle_timeout = 0; hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev); tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev); tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev); skb_queue_head_init(&hdev->rx_q); skb_queue_head_init(&hdev->cmd_q); skb_queue_head_init(&hdev->raw_q); for (i = 0; i < 3; i++) hdev->reassembly[i] = NULL; init_waitqueue_head(&hdev->req_wait_q); init_MUTEX(&hdev->req_lock); inquiry_cache_init(hdev); hci_conn_hash_init(hdev); memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); atomic_set(&hdev->promisc, 0); write_unlock_bh(&hci_dev_list_lock); hci_register_sysfs(hdev); hci_notify(hdev, HCI_DEV_REG); return id; } EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ int hci_unregister_dev(struct hci_dev *hdev) { int i; BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); write_lock_bh(&hci_dev_list_lock); list_del(&hdev->list); write_unlock_bh(&hci_dev_list_lock); hci_dev_do_close(hdev); for (i = 0; i < 3; i++) kfree_skb(hdev->reassembly[i]); hci_notify(hdev, HCI_DEV_UNREG); hci_unregister_sysfs(hdev); __hci_dev_put(hdev); return 0; } EXPORT_SYMBOL(hci_unregister_dev); /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) { hci_notify(hdev, HCI_DEV_SUSPEND); return 0; } EXPORT_SYMBOL(hci_suspend_dev); /* Resume HCI device */ int hci_resume_dev(struct hci_dev *hdev) { hci_notify(hdev, HCI_DEV_RESUME); return 0; } EXPORT_SYMBOL(hci_resume_dev); /* Receive packet type fragment */ #define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2]) int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count) { if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) return -EILSEQ; while (count) { struct sk_buff *skb = __reassembly(hdev, type); struct { int expect; } *scb; int len = 0; if (!skb) 
{ /* Start of the frame */ switch (type) { case HCI_EVENT_PKT: if (count >= HCI_EVENT_HDR_SIZE) { struct hci_event_hdr *h = data; len = HCI_EVENT_HDR_SIZE + h->plen; } else return -EILSEQ; break; case HCI_ACLDATA_PKT: if (count >= HCI_ACL_HDR_SIZE) { struct hci_acl_hdr *h = data; len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen); } else return -EILSEQ; break; case HCI_SCODATA_PKT: if (count >= HCI_SCO_HDR_SIZE) { struct hci_sco_hdr *h = data; len = HCI_SCO_HDR_SIZE + h->dlen; } else return -EILSEQ; break; } skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) { BT_ERR("%s no memory for packet", hdev->name); return -ENOMEM; } skb->dev = (void *) hdev; bt_cb(skb)->pkt_type = type; __reassembly(hdev, type) = skb; scb = (void *) skb->cb; scb->expect = len; } else { /* Continuation */ scb = (void *) skb->cb; len = scb->expect; } len = min(len, count); memcpy(skb_put(skb, len), data, len); scb->expect -= len; if (scb->expect == 0) { /* Complete frame */ __reassembly(hdev, type) = NULL; bt_cb(skb)->pkt_type = type; hci_recv_frame(skb); } count -= len; data += len; } return 0; } EXPORT_SYMBOL(hci_recv_fragment); /* ---- Interface to upper protocols ---- */ /* Register/Unregister protocols. * hci_task_lock is used to ensure that no tasks are running. 
*/ int hci_register_proto(struct hci_proto *hp) { int err = 0; BT_DBG("%p name %s id %d", hp, hp->name, hp->id); if (hp->id >= HCI_MAX_PROTO) return -EINVAL; write_lock_bh(&hci_task_lock); if (!hci_proto[hp->id]) hci_proto[hp->id] = hp; else err = -EEXIST; write_unlock_bh(&hci_task_lock); return err; } EXPORT_SYMBOL(hci_register_proto); int hci_unregister_proto(struct hci_proto *hp) { int err = 0; BT_DBG("%p name %s id %d", hp, hp->name, hp->id); if (hp->id >= HCI_MAX_PROTO) return -EINVAL; write_lock_bh(&hci_task_lock); if (hci_proto[hp->id]) hci_proto[hp->id] = NULL; else err = -ENOENT; write_unlock_bh(&hci_task_lock); return err; } EXPORT_SYMBOL(hci_unregister_proto); int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); write_lock_bh(&hci_cb_list_lock); list_add(&cb->list, &hci_cb_list); write_unlock_bh(&hci_cb_list_lock); return 0; } EXPORT_SYMBOL(hci_register_cb); int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); write_lock_bh(&hci_cb_list_lock); list_del(&cb->list); write_unlock_bh(&hci_cb_list_lock); return 0; } EXPORT_SYMBOL(hci_unregister_cb); static int hci_send_frame(struct sk_buff *skb) { struct hci_dev *hdev = (struct hci_dev *) skb->dev; if (!hdev) { kfree_skb(skb); return -ENODEV; } BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); if (atomic_read(&hdev->promisc)) { /* Time stamp */ __net_timestamp(skb); hci_send_to_sock(hdev, skb); } /* Get rid of skb owner, prior to sending to the driver. 
*/ skb_orphan(skb); return hdev->send(skb); } /* Send HCI command */ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; struct sk_buff *skb; BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen); skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) { BT_ERR("%s no memory for command", hdev->name); return -ENOMEM; } hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); hdr->opcode = cpu_to_le16(opcode); hdr->plen = plen; if (plen) memcpy(skb_put(skb, plen), param, plen); BT_DBG("skb len %d", skb->len); bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; skb->dev = (void *) hdev; skb_queue_tail(&hdev->cmd_q, skb); hci_sched_cmd(hdev); return 0; } /* Get data from the previously sent command */ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) { struct hci_command_hdr *hdr; if (!hdev->sent_cmd) return NULL; hdr = (void *) hdev->sent_cmd->data; if (hdr->opcode != cpu_to_le16(opcode)) return NULL; BT_DBG("%s opcode 0x%x", hdev->name, opcode); return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; } /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) { struct hci_acl_hdr *hdr; int len = skb->len; skb_push(skb, HCI_ACL_HDR_SIZE); skb_reset_transport_header(skb); hdr = (struct hci_acl_hdr *)skb_transport_header(skb); hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); } int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) { struct hci_dev *hdev = conn->hdev; struct sk_buff *list; BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags); skb->dev = (void *) hdev; bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; hci_add_acl_hdr(skb, conn->handle, flags | ACL_START); if (!(list = skb_shinfo(skb)->frag_list)) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); skb_queue_tail(&conn->data_q, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", 
hdev->name, skb, skb->len); skb_shinfo(skb)->frag_list = NULL; /* Queue all fragments atomically */ spin_lock_bh(&conn->data_q.lock); __skb_queue_tail(&conn->data_q, skb); do { skb = list; list = list->next; skb->dev = (void *) hdev; bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); __skb_queue_tail(&conn->data_q, skb); } while (list); spin_unlock_bh(&conn->data_q.lock); } hci_sched_tx(hdev); return 0; } EXPORT_SYMBOL(hci_send_acl); /* Send SCO data */ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; struct hci_sco_hdr hdr; BT_DBG("%s len %d", hdev->name, skb->len); if (skb->len > hdev->sco_mtu) { kfree_skb(skb); return -EINVAL; } hdr.handle = cpu_to_le16(conn->handle); hdr.dlen = skb->len; skb_push(skb, HCI_SCO_HDR_SIZE); skb_reset_transport_header(skb); memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); skb->dev = (void *) hdev; bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; skb_queue_tail(&conn->data_q, skb); hci_sched_tx(hdev); return 0; } EXPORT_SYMBOL(hci_send_sco); /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn = NULL; int num = 0, min = ~0; struct list_head *p; /* We don't have to lock device here. Connections are always * added and removed with TX task disabled. */ list_for_each(p, &h->list) { struct hci_conn *c; c = list_entry(p, struct hci_conn, list); if (c->type != type || c->state != BT_CONNECTED || skb_queue_empty(&c->data_q)) continue; num++; if (c->sent < min) { min = c->sent; conn = c; } } if (conn) { int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt); int q = cnt / num; *quote = q ? 
q : 1; } else *quote = 0; BT_DBG("conn %p quote %d", conn, *quote); return conn; } static inline void hci_acl_tx_to(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct list_head *p; struct hci_conn *c; BT_ERR("%s ACL tx timeout", hdev->name); /* Kill stalled connections */ list_for_each(p, &h->list) { c = list_entry(p, struct hci_conn, list); if (c->type == ACL_LINK && c->sent) { BT_ERR("%s killing stalled ACL connection %s", hdev->name, batostr(&c->dst)); hci_acl_disconn(c, 0x13); } } } static inline void hci_sched_acl(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); if (!test_bit(HCI_RAW, &hdev->flags)) { /* ACL tx timeout must be longer than maximum * link supervision timeout (40.9 seconds) */ if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) hci_acl_tx_to(hdev); } while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_conn_enter_active_mode(conn); hci_send_frame(skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; conn->sent++; } } } /* Schedule SCO */ static inline void hci_sched_sco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; } } } static inline void hci_sched_esco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; } } } static void hci_tx_task(unsigned 
long arg) { struct hci_dev *hdev = (struct hci_dev *) arg; struct sk_buff *skb; read_lock(&hci_task_lock); BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt); /* Schedule queues and send stuff to HCI driver */ hci_sched_acl(hdev); hci_sched_sco(hdev); hci_sched_esco(hdev); /* Send next queued raw (unknown type) packet */ while ((skb = skb_dequeue(&hdev->raw_q))) hci_send_frame(skb); read_unlock(&hci_task_lock); } /* ----- HCI RX task (incoming data proccessing) ----- */ /* ACL data packet */ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_acl_hdr *hdr = (void *) skb->data; struct hci_conn *conn; __u16 handle, flags; skb_pull(skb, HCI_ACL_HDR_SIZE); handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); hdev->stat.acl_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (conn) { register struct hci_proto *hp; hci_conn_enter_active_mode(conn); /* Send to upper protocol */ if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) { hp->recv_acldata(conn, skb, flags); return; } } else { BT_ERR("%s ACL packet for unknown connection handle %d", hdev->name, handle); } kfree_skb(skb); } /* SCO data packet */ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_sco_hdr *hdr = (void *) skb->data; struct hci_conn *conn; __u16 handle; skb_pull(skb, HCI_SCO_HDR_SIZE); handle = __le16_to_cpu(hdr->handle); BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle); hdev->stat.sco_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (conn) { register struct hci_proto *hp; /* Send to upper protocol */ if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) { hp->recv_scodata(conn, skb); return; } } else { BT_ERR("%s SCO packet for unknown connection handle %d", 
hdev->name, handle); } kfree_skb(skb); } static void hci_rx_task(unsigned long arg) { struct hci_dev *hdev = (struct hci_dev *) arg; struct sk_buff *skb; BT_DBG("%s", hdev->name); read_lock(&hci_task_lock); while ((skb = skb_dequeue(&hdev->rx_q))) { if (atomic_read(&hdev->promisc)) { /* Send copy to the sockets */ hci_send_to_sock(hdev, skb); } if (test_bit(HCI_RAW, &hdev->flags)) { kfree_skb(skb); continue; } if (test_bit(HCI_INIT, &hdev->flags)) { /* Don't process data packets in this states. */ switch (bt_cb(skb)->pkt_type) { case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: kfree_skb(skb); continue; } } /* Process frame */ switch (bt_cb(skb)->pkt_type) { case HCI_EVENT_PKT: hci_event_packet(hdev, skb); break; case HCI_ACLDATA_PKT: BT_DBG("%s ACL data packet", hdev->name); hci_acldata_packet(hdev, skb); break; case HCI_SCODATA_PKT: BT_DBG("%s SCO data packet", hdev->name); hci_scodata_packet(hdev, skb); break; default: kfree_skb(skb); break; } } read_unlock(&hci_task_lock); } static void hci_cmd_task(unsigned long arg) { struct hci_dev *hdev = (struct hci_dev *) arg; struct sk_buff *skb; BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) { BT_ERR("%s command tx timeout", hdev->name); atomic_set(&hdev->cmd_cnt, 1); } /* Send queued commands */ if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) { if (hdev->sent_cmd) kfree_skb(hdev->sent_cmd); if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) { atomic_dec(&hdev->cmd_cnt); hci_send_frame(skb); hdev->cmd_last_tx = jiffies; } else { skb_queue_head(&hdev->cmd_q, skb); hci_sched_cmd(hdev); } } }
goto456/linux-2.6.26
net/bluetooth/hci_core.c
C
gpl-2.0
34,327
/* * Copyright 2016 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.keycloak.testsuite.adapter.page; import org.jboss.arquillian.container.test.api.OperateOnDeployment; import org.jboss.arquillian.test.api.ArquillianResource; import java.net.URL; /** * @author mhajas */ public class SalesPostPassiveServlet extends SAMLServletWithLogout { public static final String DEPLOYMENT_NAME = "sales-post-passive"; @ArquillianResource @OperateOnDeployment(DEPLOYMENT_NAME) private URL url; @Override public URL getInjectedUrl() { return url; } }
Repeid/repeid
testsuite/integration-arquillian/tests/base/src/main/java/org/keycloak/testsuite/adapter/page/SalesPostPassiveServlet.java
Java
gpl-2.0
1,201
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@magentocommerce.com so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magentocommerce.com for more information. * * @category Mage * @package Mage_Sales * @copyright Copyright (c) 2013 Magento Inc. (http://www.magentocommerce.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ /** * Quote item abstract model * * Price attributes: * - price - initial item price, declared during product association * - original_price - product price before any calculations * - calculation_price - prices for item totals calculation * - custom_price - new price that can be declared by user and recalculated during calculation process * - original_custom_price - original defined value of custom price without any convertion * * @category Mage * @package Mage_Sales * @author Magento Core Team <core@magentocommerce.com> */ abstract class Mage_Sales_Model_Quote_Item_Abstract extends Mage_Core_Model_Abstract implements Mage_Catalog_Model_Product_Configuration_Item_Interface { protected $_parentItem = null; protected $_children = array(); protected $_messages = array(); /** * Retrieve Quote instance * * @return Mage_Sales_Model_Quote */ abstract function getQuote(); /** * Retrieve product model object associated with item * * @return Mage_Catalog_Model_Product */ public function getProduct() { $product = $this->_getData('product'); if ($product === null && 
$this->getProductId()) { $product = Mage::getModel('catalog/product') ->setStoreId($this->getQuote()->getStoreId()) ->load($this->getProductId()); $this->setProduct($product); } /** * Reset product final price because it related to custom options */ $product->setFinalPrice(null); if (is_array($this->_optionsByCode)) { $product->setCustomOptions($this->_optionsByCode); } return $product; } /** * Returns special download params (if needed) for custom option with type = 'file' * Needed to implement Mage_Catalog_Model_Product_Configuration_Item_Interface. * Return null, as quote item needs no additional configuration. * * @return null|Varien_Object */ public function getFileDownloadParams() { return null; } /** * Specify parent item id before saving data * * @return Mage_Sales_Model_Quote_Item_Abstract */ protected function _beforeSave() { parent::_beforeSave(); if ($this->getParentItem()) { $this->setParentItemId($this->getParentItem()->getId()); } return $this; } /** * Set parent item * * @param Mage_Sales_Model_Quote_Item $parentItem * @return Mage_Sales_Model_Quote_Item */ public function setParentItem($parentItem) { if ($parentItem) { $this->_parentItem = $parentItem; $parentItem->addChild($this); } return $this; } /** * Get parent item * * @return Mage_Sales_Model_Quote_Item */ public function getParentItem() { return $this->_parentItem; } /** * Get chil items * * @return array */ public function getChildren() { return $this->_children; } /** * Add child item * * @param Mage_Sales_Model_Quote_Item_Abstract $child * @return Mage_Sales_Model_Quote_Item_Abstract */ public function addChild($child) { $this->setHasChildren(true); $this->_children[] = $child; return $this; } /** * Adds message(s) for quote item. Duplicated messages are not added. 
* * @param mixed $messages * @return Mage_Sales_Model_Quote_Item_Abstract */ public function setMessage($messages) { $messagesExists = $this->getMessage(false); if (!is_array($messages)) { $messages = array($messages); } foreach ($messages as $message) { if (!in_array($message, $messagesExists)) { $this->addMessage($message); } } return $this; } /** * Add message of quote item to array of messages * * @param string $message * @return Mage_Sales_Model_Quote_Item_Abstract */ public function addMessage($message) { $this->_messages[] = $message; return $this; } /** * Get messages array of quote item * * @param bool $string flag for converting messages to string * @return array|string */ public function getMessage($string = true) { if ($string) { return join("\n", $this->_messages); } return $this->_messages; } /** * Removes message by text * * @param string $text * @return Mage_Sales_Model_Quote_Item_Abstract */ public function removeMessageByText($text) { foreach ($this->_messages as $key => $message) { if ($message == $text) { unset($this->_messages[$key]); } } return $this; } /** * Clears all messages * * @return Mage_Sales_Model_Quote_Item_Abstract */ public function clearMessage() { $this->unsMessage(); // For older compatibility, when we kept message inside data array $this->_messages = array(); return $this; } /** * Retrieve store model object * * @return Mage_Core_Model_Store */ public function getStore() { return $this->getQuote()->getStore(); } /** * Checking item data * * @return Mage_Sales_Model_Quote_Item_Abstract */ public function checkData() { $this->setHasError(false); $this->clearMessage(); $qty = $this->_getData('qty'); try { $this->setQty($qty); } catch (Mage_Core_Exception $e){ $this->setHasError(true); $this->setMessage($e->getMessage()); } catch (Exception $e){ $this->setHasError(true); $this->setMessage(Mage::helper('sales')->__('Item qty declaration error.')); } try { 
$this->getProduct()->getTypeInstance(true)->checkProductBuyState($this->getProduct()); } catch (Mage_Core_Exception $e) { $this->setHasError(true) ->setMessage($e->getMessage()); $this->getQuote()->setHasError(true) ->addMessage(Mage::helper('sales')->__('Some of the products below do not have all the required options.')); } catch (Exception $e) { $this->setHasError(true) ->setMessage(Mage::helper('sales')->__('Item options declaration error.')); $this->getQuote()->setHasError(true) ->addMessage(Mage::helper('sales')->__('Items options declaration error.')); } if ($this->getProduct()->getHasError()) { $this->setHasError(true) ->setMessage(Mage::helper('sales')->__('Some of the selected options are not currently available.')); $this->getQuote()->setHasError(true) ->addMessage($this->getProduct()->getMessage(), 'options'); } if ($this->getHasConfigurationUnavailableError()) { $this->setHasError(true) ->setMessage(Mage::helper('sales')->__('Selected option(s) or their combination is not currently available.')); $this->getQuote()->setHasError(true) ->addMessage(Mage::helper('sales')->__('Some item options or their combination are not currently available.'), 'unavailable-configuration'); $this->unsHasConfigurationUnavailableError(); } return $this; } /** * Get original (not related with parent item) item quantity * * @return int|float */ public function getQty() { return $this->_getData('qty'); } /** * Get total item quantity (include parent item relation) * * @return int|float */ public function getTotalQty() { if ($this->getParentItem()) { return $this->getQty()*$this->getParentItem()->getQty(); } return $this->getQty(); } /** * Calculate item row total price * * @return Mage_Sales_Model_Quote_Item */ public function calcRowTotal() { $qty = $this->getTotalQty(); // Round unit price before multiplying to prevent losing 1 cent on subtotal $total = $this->getStore()->roundPrice($this->getCalculationPriceOriginal()) * $qty; $baseTotal = 
$this->getBaseCalculationPriceOriginal() * $qty; $this->setRowTotal($this->getStore()->roundPrice($total)); $this->setBaseRowTotal($this->getStore()->roundPrice($baseTotal)); return $this; } /** * Get item price used for quote calculation process. * This method get custom price (if it is defined) or original product final price * * @return float */ public function getCalculationPrice() { $price = $this->_getData('calculation_price'); if (is_null($price)) { if ($this->hasCustomPrice()) { $price = $this->getCustomPrice(); } else { $price = $this->getConvertedPrice(); } $this->setData('calculation_price', $price); } return $price; } /** * Get item price used for quote calculation process. * This method get original custom price applied before tax calculation * * @return float */ public function getCalculationPriceOriginal() { $price = $this->_getData('calculation_price'); if (is_null($price)) { if ($this->hasOriginalCustomPrice()) { $price = $this->getOriginalCustomPrice(); } else { $price = $this->getConvertedPrice(); } $this->setData('calculation_price', $price); } return $price; } /** * Get calculation price used for quote calculation in base currency. * * @return float */ public function getBaseCalculationPrice() { if (!$this->hasBaseCalculationPrice()) { if ($this->hasCustomPrice()) { $price = (float) $this->getCustomPrice(); if ($price) { $rate = $this->getStore()->convertPrice($price) / $price; $price = $price / $rate; } } else { $price = $this->getPrice(); } $this->setBaseCalculationPrice($price); } return $this->_getData('base_calculation_price'); } /** * Get original calculation price used for quote calculation in base currency. 
* * @return float */ public function getBaseCalculationPriceOriginal() { if (!$this->hasBaseCalculationPrice()) { if ($this->hasOriginalCustomPrice()) { $price = (float) $this->getOriginalCustomPrice(); if ($price) { $rate = $this->getStore()->convertPrice($price) / $price; $price = $price / $rate; } } else { $price = $this->getPrice(); } $this->setBaseCalculationPrice($price); } return $this->_getData('base_calculation_price'); } /** * Get whether the item is nominal * TODO: fix for multishipping checkout * * @return bool */ public function isNominal() { if (!$this->hasData('is_nominal')) { $this->setData('is_nominal', $this->getProduct() ? '1' == $this->getProduct()->getIsRecurring() : false); } return $this->_getData('is_nominal'); } /** * Data getter for 'is_nominal' * Used for converting item to order item * * @return int */ public function getIsNominal() { return (int)$this->isNominal(); } /** * Get original price (retrieved from product) for item. * Original price value is in quote selected currency * * @return float */ public function getOriginalPrice() { $price = $this->_getData('original_price'); if (is_null($price)) { $price = $this->getStore()->convertPrice($this->getBaseOriginalPrice()); $this->setData('original_price', $price); } return $price; } /** * Set original price to item (calculation price will be refreshed too) * * @param float $price * @return Mage_Sales_Model_Quote_Item_Abstract */ public function setOriginalPrice($price) { return $this->setData('original_price', $price); } /** * Get Original item price (got from product) in base website currency * * @return float */ public function getBaseOriginalPrice() { return $this->_getData('base_original_price'); } /** * Specify custom item price (used in case whe we have apply not product price to item) * * @param float $value * @return Mage_Sales_Model_Quote_Item_Abstract */ public function setCustomPrice($value) { $this->setCalculationPrice($value); $this->setBaseCalculationPrice(null); return 
$this->setData('custom_price', $value); } /** * Get item price. Item price currency is website base currency. * * @return decimal */ public function getPrice() { return $this->_getData('price'); } /** * Specify item price (base calculation price and converted price will be refreshed too) * * @param float $value * @return Mage_Sales_Model_Quote_Item_Abstract */ public function setPrice($value) { $this->setBaseCalculationPrice(null); $this->setConvertedPrice(null); return $this->setData('price', $value); } /** * Get item price converted to quote currency * @return float */ public function getConvertedPrice() { $price = $this->_getData('converted_price'); if (is_null($price)) { $price = $this->getStore()->convertPrice($this->getPrice()); $this->setData('converted_price', $price); } return $price; } /** * Set new value for converted price * @param float $value * @return Mage_Sales_Model_Quote_Item_Abstract */ public function setConvertedPrice($value) { $this->setCalculationPrice(null); $this->setData('converted_price', $value); return $this; } /** * Clone quote item * * @return Mage_Sales_Model_Quote_Item */ public function __clone() { $this->setId(null); $this->_parentItem = null; $this->_children = array(); $this->_messages = array(); return $this; } /** * Checking if there children calculated or parent item * when we have parent quote item and its children * * @return bool */ public function isChildrenCalculated() { if ($this->getParentItem()) { $calculate = $this->getParentItem()->getProduct()->getPriceType(); } else { $calculate = $this->getProduct()->getPriceType(); } if ((null !== $calculate) && (int)$calculate === Mage_Catalog_Model_Product_Type_Abstract::CALCULATE_CHILD) { return true; } return false; } /** * Checking can we ship product separatelly (each child separately) * or each parent product item can be shipped only like one item * * @return bool */ public function isShipSeparately() { if ($this->getParentItem()) { $shipmentType = 
$this->getParentItem()->getProduct()->getShipmentType(); } else { $shipmentType = $this->getProduct()->getShipmentType(); } if ((null !== $shipmentType) && (int)$shipmentType === Mage_Catalog_Model_Product_Type_Abstract::SHIPMENT_SEPARATELY) { return true; } return false; } /** * Calculate item tax amount * * @deprecated logic moved to tax totals calculation model * @return Mage_Sales_Model_Quote_Item */ public function calcTaxAmount() { $store = $this->getStore(); if (!Mage::helper('tax')->priceIncludesTax($store)) { if (Mage::helper('tax')->applyTaxAfterDiscount($store)) { $rowTotal = $this->getRowTotalWithDiscount(); $rowBaseTotal = $this->getBaseRowTotalWithDiscount(); } else { $rowTotal = $this->getRowTotal(); $rowBaseTotal = $this->getBaseRowTotal(); } $taxPercent = $this->getTaxPercent()/100; $this->setTaxAmount($store->roundPrice($rowTotal * $taxPercent)); $this->setBaseTaxAmount($store->roundPrice($rowBaseTotal * $taxPercent)); $rowTotal = $this->getRowTotal(); $rowBaseTotal = $this->getBaseRowTotal(); $this->setTaxBeforeDiscount($store->roundPrice($rowTotal * $taxPercent)); $this->setBaseTaxBeforeDiscount($store->roundPrice($rowBaseTotal * $taxPercent)); } else { if (Mage::helper('tax')->applyTaxAfterDiscount($store)) { $totalBaseTax = $this->getBaseTaxAmount(); $totalTax = $this->getTaxAmount(); if ($totalTax && $totalBaseTax) { $totalTax -= $this->getDiscountAmount()*($this->getTaxPercent()/100); $totalBaseTax -= $this->getBaseDiscountAmount()*($this->getTaxPercent()/100); $this->setBaseTaxAmount($store->roundPrice($totalBaseTax)); $this->setTaxAmount($store->roundPrice($totalTax)); } } } if (Mage::helper('tax')->discountTax($store) && !Mage::helper('tax')->applyTaxAfterDiscount($store)) { if ($this->getDiscountPercent()) { $baseTaxAmount = $this->getBaseTaxBeforeDiscount(); $taxAmount = $this->getTaxBeforeDiscount(); $baseDiscountDisposition = $baseTaxAmount/100*$this->getDiscountPercent(); $discountDisposition = 
$taxAmount/100*$this->getDiscountPercent(); $this->setDiscountAmount($this->getDiscountAmount()+$discountDisposition); $this->setBaseDiscountAmount($this->getBaseDiscountAmount()+$baseDiscountDisposition); } } return $this; } /** * Get item tax amount * * @deprecated * @return decimal */ public function getTaxAmount() { return $this->_getData('tax_amount'); } /** * Get item base tax amount * * @deprecated * @return decimal */ public function getBaseTaxAmount() { return $this->_getData('base_tax_amount'); } /** * Get item price (item price always exclude price) * * @deprecated * @return decimal */ protected function _calculatePrice($value, $saveTaxes = true) { $store = $this->getQuote()->getStore(); if (Mage::helper('tax')->priceIncludesTax($store)) { $bAddress = $this->getQuote()->getBillingAddress(); $sAddress = $this->getQuote()->getShippingAddress(); $address = $this->getAddress(); if ($address) { switch ($address->getAddressType()) { case Mage_Sales_Model_Quote_Address::TYPE_BILLING: $bAddress = $address; break; case Mage_Sales_Model_Quote_Address::TYPE_SHIPPING: $sAddress = $address; break; } } if ($this->getProduct()->getIsVirtual()) { $sAddress = $bAddress; } $priceExcludingTax = Mage::helper('tax')->getPrice( $this->getProduct()->setTaxPercent(null), $value, false, $sAddress, $bAddress, $this->getQuote()->getCustomerTaxClassId(), $store ); $priceIncludingTax = Mage::helper('tax')->getPrice( $this->getProduct()->setTaxPercent(null), $value, true, $sAddress, $bAddress, $this->getQuote()->getCustomerTaxClassId(), $store ); if ($saveTaxes) { $qty = $this->getQty(); if ($this->getParentItem()) { $qty = $qty*$this->getParentItem()->getQty(); } if (Mage::helper('tax')->displayCartPriceInclTax($store)) { $rowTotal = $value*$qty; $rowTotalExcTax = Mage::helper('tax')->getPrice( $this->getProduct()->setTaxPercent(null), $rowTotal, false, $sAddress, $bAddress, $this->getQuote()->getCustomerTaxClassId(), $store ); $rowTotalIncTax = Mage::helper('tax')->getPrice( 
$this->getProduct()->setTaxPercent(null), $rowTotal, true, $sAddress, $bAddress, $this->getQuote()->getCustomerTaxClassId(), $store ); $totalBaseTax = $rowTotalIncTax-$rowTotalExcTax; $this->setRowTotalExcTax($rowTotalExcTax); } else { $taxAmount = $priceIncludingTax - $priceExcludingTax; $this->setTaxPercent($this->getProduct()->getTaxPercent()); $totalBaseTax = $taxAmount*$qty; } $totalTax = $this->getStore()->convertPrice($totalBaseTax); $this->setTaxBeforeDiscount($totalTax); $this->setBaseTaxBeforeDiscount($totalBaseTax); $this->setTaxAmount($totalTax); $this->setBaseTaxAmount($totalBaseTax); } $value = $priceExcludingTax; } return $value; } }
sudocoda/boynamedsumusic.com
rs27/app/code/core/Mage/Sales/Model/Quote/Item/Abstract.php
PHP
gpl-2.0
23,674
/* aten.c (c) 1997-8 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. aten.c is a low-level protocol driver for the ATEN EH-100 parallel port adapter. The EH-100 supports 4-bit and 8-bit modes only. There is also an EH-132 which supports EPP mode transfers. The EH-132 is not yet supported. */ /* Changes: 1.01 GRG 1998.05.05 init_proto, release_proto */ #define ATEN_VERSION "1.01" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/types.h> #include <asm/io.h> #include "paride.h" #define j44(a,b) ((((a>>4)&0x0f)|(b&0xf0))^0x88) /* cont = 0 - access the IDE register file cont = 1 - access the IDE command set */ static int cont_map[2] = { 0x08, 0x20 }; static void aten_write_regr( PIA *pi, int cont, int regr, int val) { int r; r = regr + cont_map[cont] + 0x80; w0(r); w2(0xe); w2(6); w0(val); w2(7); w2(6); w2(0xc); } static int aten_read_regr( PIA *pi, int cont, int regr ) { int a, b, r; r = regr + cont_map[cont] + 0x40; switch (pi->mode) { case 0: w0(r); w2(0xe); w2(6); w2(7); w2(6); w2(0); a = r1(); w0(0x10); b = r1(); w2(0xc); return j44(a,b); case 1: r |= 0x10; w0(r); w2(0xe); w2(6); w0(0xff); w2(0x27); w2(0x26); w2(0x20); a = r0(); w2(0x26); w2(0xc); return a; } return -1; } static void aten_read_block( PIA *pi, char * buf, int count ) { int k, a, b, c, d; switch (pi->mode) { case 0: w0(0x48); w2(0xe); w2(6); for (k=0;k<count/2;k++) { w2(7); w2(6); w2(2); a = r1(); w0(0x58); b = r1(); w2(0); d = r1(); w0(0x48); c = r1(); buf[2*k] = j44(c,d); buf[2*k+1] = j44(a,b); } w2(0xc); break; case 1: w0(0x58); w2(0xe); w2(6); for (k=0;k<count/2;k++) { w2(0x27); w2(0x26); w2(0x22); a = r0(); w2(0x20); b = r0(); buf[2*k] = b; buf[2*k+1] = a; } w2(0x26); w2(0xc); break; } } static void aten_write_block( PIA *pi, char * buf, int count ) { int k; w0(0x88); w2(0xe); w2(6); for (k=0;k<count/2;k++) { w0(buf[2*k+1]); w2(0xe); w2(6); w0(buf[2*k]); w2(7); 
w2(6); } w2(0xc); } static void aten_connect ( PIA *pi ) { pi->saved_r0 = r0(); pi->saved_r2 = r2(); w2(0xc); } static void aten_disconnect ( PIA *pi ) { w0(pi->saved_r0); w2(pi->saved_r2); } static void aten_log_adapter( PIA *pi, char * scratch, int verbose ) { char *mode_string[2] = {"4-bit","8-bit"}; printk("%s: aten %s, ATEN EH-100 at 0x%x, ", pi->device,ATEN_VERSION,pi->port); printk("mode %d (%s), delay %d\n",pi->mode, mode_string[pi->mode],pi->delay); } static struct pi_protocol aten = { .owner = THIS_MODULE, .name = "aten", .max_mode = 2, .epp_first = 2, .default_delay = 1, .max_units = 1, .write_regr = aten_write_regr, .read_regr = aten_read_regr, .write_block = aten_write_block, .read_block = aten_read_block, .connect = aten_connect, .disconnect = aten_disconnect, .log_adapter = aten_log_adapter, }; static int __init aten_init(void) { return paride_register(&aten); } static void __exit aten_exit(void) { paride_unregister( &aten ); } MODULE_LICENSE("GPL"); module_init(aten_init) module_exit(aten_exit)
javelinanddart/android_kernel_samsung_msm8974
drivers/block/paride/aten.c
C
gpl-2.0
3,336
///////////////////////////////////////////////////////////////////////////// // Name: src/msw/combo.cpp // Purpose: wxMSW wxComboCtrl // Author: Jaakko Salli // Modified by: // Created: Apr-30-2006 // RCS-ID: $Id: combo.cpp 48043 2007-08-13 11:13:03Z JS $ // Copyright: (c) 2005 Jaakko Salli // Licence: wxWindows licence ///////////////////////////////////////////////////////////////////////////// // ============================================================================ // declarations // ============================================================================ // ---------------------------------------------------------------------------- // headers // ---------------------------------------------------------------------------- #include "wx/wxprec.h" #ifdef __BORLANDC__ #pragma hdrstop #endif #if wxUSE_COMBOCTRL #ifndef WX_PRECOMP #include "wx/log.h" #include "wx/combobox.h" #include "wx/dcclient.h" #include "wx/settings.h" #include "wx/dialog.h" #include "wx/stopwatch.h" #endif #include "wx/dcbuffer.h" #include "wx/combo.h" #include "wx/msw/registry.h" #if wxUSE_UXTHEME #include "wx/msw/uxtheme.h" #endif // Change to #if 1 to include tmschema.h for easier testing of theme // parameters. 
#if 0 #include <tmschema.h> #else //---------------------------------- #define EP_EDITTEXT 1 #define ETS_NORMAL 1 #define ETS_HOT 2 #define ETS_SELECTED 3 #define ETS_DISABLED 4 #define ETS_FOCUSED 5 #define ETS_READONLY 6 #define ETS_ASSIST 7 #define TMT_FILLCOLOR 3802 #define TMT_TEXTCOLOR 3803 #define TMT_BORDERCOLOR 3801 #define TMT_EDGEFILLCOLOR 3808 //---------------------------------- #endif #define NATIVE_TEXT_INDENT_XP 4 #define NATIVE_TEXT_INDENT_CLASSIC 2 #define TEXTCTRLXADJUST_XP 1 #define TEXTCTRLYADJUST_XP 3 #define TEXTCTRLXADJUST_CLASSIC 1 #define TEXTCTRLYADJUST_CLASSIC 2 #define COMBOBOX_ANIMATION_RESOLUTION 10 #define COMBOBOX_ANIMATION_DURATION 200 // In milliseconds #define wxMSW_DESKTOP_USERPREFERENCESMASK_COMBOBOXANIM (1<<2) // ============================================================================ // implementation // ============================================================================ BEGIN_EVENT_TABLE(wxComboCtrl, wxComboCtrlBase) EVT_PAINT(wxComboCtrl::OnPaintEvent) EVT_MOUSE_EVENTS(wxComboCtrl::OnMouseEvent) #if wxUSE_COMBOCTRL_POPUP_ANIMATION EVT_TIMER(wxID_ANY, wxComboCtrl::OnTimerEvent) #endif END_EVENT_TABLE() IMPLEMENT_DYNAMIC_CLASS(wxComboCtrl, wxComboCtrlBase) void wxComboCtrl::Init() { } bool wxComboCtrl::Create(wxWindow *parent, wxWindowID id, const wxString& value, const wxPoint& pos, const wxSize& size, long style, const wxValidator& validator, const wxString& name) { // Set border long border = style & wxBORDER_MASK; #if wxUSE_UXTHEME wxUxThemeEngine* theme = wxUxThemeEngine::GetIfActive(); #endif if ( !border ) { // For XP, have 1-width custom border, for older version use sunken #if wxUSE_UXTHEME if ( theme ) { border = wxBORDER_NONE; m_widthCustomBorder = 1; } else #endif border = wxBORDER_SUNKEN; style = (style & ~(wxBORDER_MASK)) | border; } // create main window if ( !wxComboCtrlBase::Create(parent, id, value, pos, size, style | wxFULL_REPAINT_ON_RESIZE, wxDefaultValidator, name) ) return false; if ( style 
& wxCC_STD_BUTTON ) m_iFlags |= wxCC_POPUP_ON_MOUSE_UP; // Create textctrl, if necessary CreateTextCtrl( wxNO_BORDER, validator ); // Add keyboard input handlers for main control and textctrl InstallInputHandlers(); // Prepare background for double-buffering SetBackgroundStyle( wxBG_STYLE_CUSTOM ); // SetInitialSize should be called last SetInitialSize(size); return true; } wxComboCtrl::~wxComboCtrl() { } void wxComboCtrl::OnThemeChange() { // there doesn't seem to be any way to get the text colour using themes // API: TMT_TEXTCOLOR doesn't work neither for EDIT nor COMBOBOX SetForegroundColour(wxSystemSettings::GetColour(wxSYS_COLOUR_WINDOWTEXT)); #if wxUSE_UXTHEME wxUxThemeEngine * const theme = wxUxThemeEngine::GetIfActive(); if ( theme ) { // NB: use EDIT, not COMBOBOX (the latter works in XP but not Vista) wxUxThemeHandle hTheme(this, L"EDIT"); COLORREF col; HRESULT hr = theme->GetThemeColor ( hTheme, EP_EDITTEXT, ETS_NORMAL, TMT_FILLCOLOR, &col ); if ( SUCCEEDED(hr) ) { SetBackgroundColour(wxRGBToColour(col)); // skip the call below return; } wxLogApiError(_T("GetThemeColor(EDIT, ETS_NORMAL, TMT_FILLCOLOR)"), hr); } #endif SetBackgroundColour(wxSystemSettings::GetColour(wxSYS_COLOUR_WINDOW)); } void wxComboCtrl::OnResize() { // // Recalculates button and textctrl areas int textCtrlXAdjust; int textCtrlYAdjust; #if wxUSE_UXTHEME if ( wxUxThemeEngine::GetIfActive() ) { textCtrlXAdjust = TEXTCTRLXADJUST_XP; textCtrlYAdjust = TEXTCTRLYADJUST_XP; } else #endif { textCtrlXAdjust = TEXTCTRLXADJUST_CLASSIC; textCtrlYAdjust = TEXTCTRLYADJUST_CLASSIC; } // Technically Classic Windows style combo has more narrow button, // but the native renderer doesn't paint it well like that. 
int btnWidth = 17; CalculateAreas(btnWidth); // Position textctrl using standard routine PositionTextCtrl(textCtrlXAdjust,textCtrlYAdjust); } // Draws non-XP GUI dotted line around the focus area static void wxMSWDrawFocusRect( wxDC& dc, const wxRect& rect ) { #if !defined(__WXWINCE__) /* RECT mswRect; mswRect.left = rect.x; mswRect.top = rect.y; mswRect.right = rect.x + rect.width; mswRect.bottom = rect.y + rect.height; HDC hdc = (HDC) dc.GetHDC(); SetMapMode(hdc,MM_TEXT); // Just in case... DrawFocusRect(hdc,&mswRect); */ // FIXME: Use DrawFocusRect code above (currently it draws solid line // for caption focus but works ok for other stuff). // Also, this code below may not work in future wx versions, since // it employs wxCAP_BUTT hack to have line of width 1. dc.SetLogicalFunction(wxINVERT); wxPen pen(*wxBLACK,1,wxDOT); pen.SetCap(wxCAP_BUTT); dc.SetPen(pen); dc.SetBrush(*wxTRANSPARENT_BRUSH); dc.DrawRectangle(rect); dc.SetLogicalFunction(wxCOPY); #else dc.SetLogicalFunction(wxINVERT); dc.SetPen(wxPen(*wxBLACK,1,wxDOT)); dc.SetBrush(*wxTRANSPARENT_BRUSH); dc.DrawRectangle(rect); dc.SetLogicalFunction(wxCOPY); #endif } // draw focus background on area in a way typical on platform void wxComboCtrl::PrepareBackground( wxDC& dc, const wxRect& rect, int flags ) const { #if wxUSE_UXTHEME wxUxThemeHandle hTheme(this, L"COMBOBOX"); #endif //COLORREF cref; wxSize sz = GetClientSize(); bool isEnabled; bool isFocused; // also selected // For smaller size control (and for disabled background) use less spacing int focusSpacingX; int focusSpacingY; if ( !(flags & wxCONTROL_ISSUBMENU) ) { // Drawing control isEnabled = IsEnabled(); isFocused = ShouldDrawFocus(); #if wxUSE_UXTHEME // Windows-style: for smaller size control (and for disabled background) use less spacing if ( hTheme ) { // WinXP Theme focusSpacingX = isEnabled ? 2 : 1; focusSpacingY = sz.y > (GetCharHeight()+2) && isEnabled ? 
2 : 1; } else #endif { // Classic Theme if ( isEnabled ) { focusSpacingX = 1; focusSpacingY = 1; } else { focusSpacingX = 0; focusSpacingY = 0; } } } else { // Drawing a list item isEnabled = true; // they are never disabled isFocused = flags & wxCONTROL_SELECTED ? true : false; focusSpacingX = 0; focusSpacingY = 0; } // Set the background sub-rectangle for selection, disabled etc wxRect selRect(rect); selRect.y += focusSpacingY; selRect.height -= (focusSpacingY*2); int wcp = 0; if ( !(flags & wxCONTROL_ISSUBMENU) ) wcp += m_widthCustomPaint; selRect.x += wcp + focusSpacingX; selRect.width -= wcp + (focusSpacingX*2); //wxUxThemeEngine* theme = (wxUxThemeEngine*) NULL; //if ( hTheme ) // theme = wxUxThemeEngine::GetIfActive(); wxColour bgCol; bool drawDottedEdge = false; if ( isEnabled ) { // If popup is hidden and this control is focused, // then draw the focus-indicator (selbgcolor background etc.). if ( isFocused ) { #if 0 // TODO: Proper theme color getting (JMS: I don't know which parts/colors to use, // those below don't work) if ( hTheme ) { theme->GetThemeColor(hTheme,EP_EDITTEXT,ETS_SELECTED,TMT_TEXTCOLOR,&cref); dc.SetTextForeground( wxRGBToColour(cref) ); theme->GetThemeColor(hTheme,EP_EDITTEXT,ETS_SELECTED,TMT_FILLCOLOR,&cref); bgCol = wxRGBToColour(cref); } else #endif { dc.SetTextForeground( wxSystemSettings::GetColour(wxSYS_COLOUR_HIGHLIGHTTEXT) ); bgCol = wxSystemSettings::GetColour(wxSYS_COLOUR_HIGHLIGHT); if ( m_windowStyle & wxCB_READONLY ) drawDottedEdge = true; } } else { /*if ( hTheme ) { theme->GetThemeColor(hTheme,EP_EDITTEXT,ETS_NORMAL,TMT_TEXTCOLOR,&cref); dc.SetTextForeground( wxRGBToColour(cref) ); theme->GetThemeColor(hTheme,EP_EDITTEXT,ETS_NORMAL,TMT_FILLCOLOR,&cref); bgCol = wxRGBToColour(cref); } else {*/ dc.SetTextForeground( wxSystemSettings::GetColour(wxSYS_COLOUR_WINDOWTEXT) ); bgCol = GetBackgroundColour(); //} } } else { /*if ( hTheme ) { theme->GetThemeColor(hTheme,EP_EDITTEXT,ETS_DISABLED,TMT_TEXTCOLOR,&cref); 
dc.SetTextForeground( wxRGBToColour(cref) ); theme->GetThemeColor(hTheme,EP_EDITTEXT,ETS_DISABLED,TMT_EDGEFILLCOLOR,&cref); bgCol = wxRGBToColour(cref); } else {*/ dc.SetTextForeground( wxSystemSettings::GetColour(wxSYS_COLOUR_GRAYTEXT) ); bgCol = wxSystemSettings::GetColour(wxSYS_COLOUR_BTNFACE); //} } dc.SetBrush(bgCol); dc.SetPen(bgCol); dc.DrawRectangle(selRect); if ( drawDottedEdge ) wxMSWDrawFocusRect(dc,selRect); // Don't clip exactly to the selection rectangle so we can draw // to the non-selected area in front of it. wxRect clipRect(rect.x,rect.y, (selRect.x+selRect.width)-rect.x-1,rect.height); dc.SetClippingRegion(clipRect); } void wxComboCtrl::OnPaintEvent( wxPaintEvent& WXUNUSED(event) ) { // TODO: Convert drawing in this function to Windows API Code wxSize sz = GetClientSize(); wxAutoBufferedPaintDC dc(this); const wxRect& rectb = m_btnArea; wxRect rect = m_tcArea; bool isEnabled = IsEnabled(); wxColour bgCol = GetBackgroundColour(); wxColour fgCol; #if wxUSE_UXTHEME wxUxThemeEngine* theme = NULL; wxUxThemeHandle hTheme(this, L"COMBOBOX"); #endif int etsState; // area around both controls wxRect rect2(0,0,sz.x,sz.y); if ( m_iFlags & wxCC_IFLAG_BUTTON_OUTSIDE ) { rect2 = m_tcArea; rect2.Inflate(1); } #if wxUSE_UXTHEME // Use theme to draw border on XP if ( hTheme ) { theme = wxUxThemeEngine::GetIfActive(); COLORREF cref; // Select correct border colour if ( !isEnabled ) etsState = ETS_DISABLED; else etsState = ETS_NORMAL; if ( m_widthCustomBorder ) { theme->GetThemeColor(hTheme,EP_EDITTEXT,etsState,TMT_BORDERCOLOR,&cref); // Set border colour dc.SetPen( wxRGBToColour(cref) ); dc.SetBrush( *wxTRANSPARENT_BRUSH ); dc.DrawRectangle(rect2); } theme->GetThemeColor(hTheme,EP_EDITTEXT,etsState,TMT_TEXTCOLOR,&cref); fgCol = wxRGBToColour(cref); } else #endif { // draw regular background fgCol = GetForegroundColour(); } rect2.Deflate(m_widthCustomBorder); dc.SetBrush(bgCol); dc.SetPen(bgCol); // clear main background dc.DrawRectangle(rect); // Button background 
with theme? int drawButFlags = Draw_PaintBg; #if wxUSE_UXTHEME if ( hTheme && m_blankButtonBg ) { RECT r; wxCopyRectToRECT(rectb, r); // Draw parent background if needed (since button looks like its out of // the combo, this is preferred). theme->DrawThemeParentBackground(GetHwndOf(this), GetHdcOf(dc), &r); drawButFlags = 0; } #endif // Standard button rendering DrawButton(dc,rectb,drawButFlags); // paint required portion on the control if ( (!m_text || m_widthCustomPaint) ) { wxASSERT( m_widthCustomPaint >= 0 ); // this is intentionally here to allow drawed rectangle's // right edge to be hidden if ( m_text ) rect.width = m_widthCustomPaint; dc.SetFont( GetFont() ); dc.SetClippingRegion(rect); if ( m_popupInterface ) m_popupInterface->PaintComboControl(dc,rect); else wxComboPopup::DefaultPaintComboControl(this,dc,rect); } } void wxComboCtrl::OnMouseEvent( wxMouseEvent& event ) { int mx = event.m_x; bool isOnButtonArea = m_btnArea.Contains(mx,event.m_y); int handlerFlags = isOnButtonArea ? wxCC_MF_ON_BUTTON : 0; if ( PreprocessMouseEvent(event,isOnButtonArea) ) return; if ( (m_windowStyle & (wxCC_SPECIAL_DCLICK|wxCB_READONLY)) == wxCB_READONLY ) { // if no textctrl and no special double-click, then the entire control acts // as a button handlerFlags |= wxCC_MF_ON_BUTTON; if ( HandleButtonMouseEvent(event,handlerFlags) ) return; } else { if ( isOnButtonArea || HasCapture() || (m_widthCustomPaint && mx < (m_tcArea.x+m_widthCustomPaint)) ) { handlerFlags |= wxCC_MF_ON_CLICK_AREA; if ( HandleButtonMouseEvent(event,handlerFlags) ) return; } else if ( m_btnState ) { // otherwise need to clear the hover status m_btnState = 0; RefreshRect(m_btnArea); } } // // This will handle left_down and left_dclick events outside button in a Windows-like manner. // See header file for further information on this method. 
HandleNormalMouseEvent(event); } #if wxUSE_COMBOCTRL_POPUP_ANIMATION static wxUint32 GetUserPreferencesMask() { static wxUint32 userPreferencesMask = 0; static bool valueSet = false; if ( valueSet ) return userPreferencesMask; wxRegKey* pKey = NULL; wxRegKey key1(wxRegKey::HKCU, wxT("Software\\Policies\\Microsoft\\Control Panel")); wxRegKey key2(wxRegKey::HKCU, wxT("Software\\Policies\\Microsoft\\Windows\\Control Panel")); wxRegKey key3(wxRegKey::HKCU, wxT("Control Panel\\Desktop")); if ( key1.Exists() ) pKey = &key1; else if ( key2.Exists() ) pKey = &key2; else if ( key3.Exists() ) pKey = &key3; if ( pKey && pKey->Open(wxRegKey::Read) ) { wxMemoryBuffer buf; if ( pKey->HasValue(wxT("UserPreferencesMask")) && pKey->QueryValue(wxT("UserPreferencesMask"), buf) ) { if ( buf.GetDataLen() >= 4 ) { wxUint32* p = (wxUint32*) buf.GetData(); userPreferencesMask = *p; } } } valueSet = true; return userPreferencesMask; } #endif #if wxUSE_COMBOCTRL_POPUP_ANIMATION void wxComboCtrl::OnTimerEvent( wxTimerEvent& WXUNUSED(event) ) { bool stopTimer = false; wxWindow* popup = GetPopupControl()->GetControl(); // Popup was hidden before it was fully shown? 
if ( IsPopupWindowState(Hidden) ) { stopTimer = true; } else { wxLongLong t = ::wxGetLocalTimeMillis(); const wxRect& rect = m_animRect; wxWindow* win = GetPopupWindow(); int pos = (int) (t-m_animStart).GetLo(); if ( pos < COMBOBOX_ANIMATION_DURATION ) { int height = rect.height; //int h0 = rect.height; int h = (((pos*256)/COMBOBOX_ANIMATION_DURATION)*height)/256; int y = (height - h); if ( y < 0 ) y = 0; if ( m_animFlags & ShowAbove ) { win->SetSize( rect.x, rect.y + height - h, rect.width, h ); } else { popup->Move( 0, -y ); win->SetSize( rect.x, rect.y, rect.width, h ); } } else { stopTimer = true; } } if ( stopTimer ) { popup->Move( 0, 0 ); m_animTimer.Stop(); DoShowPopup( m_animRect, m_animFlags ); } } #endif #if wxUSE_COMBOCTRL_POPUP_ANIMATION bool wxComboCtrl::AnimateShow( const wxRect& rect, int flags ) { if ( GetUserPreferencesMask() & wxMSW_DESKTOP_USERPREFERENCESMASK_COMBOBOXANIM ) { m_animStart = ::wxGetLocalTimeMillis(); m_animRect = rect; m_animFlags = flags; wxWindow* win = GetPopupWindow(); win->SetSize( rect.x, rect.y, rect.width, 0 ); win->Show(); m_animTimer.SetOwner( this, wxID_ANY ); m_animTimer.Start( COMBOBOX_ANIMATION_RESOLUTION, wxTIMER_CONTINUOUS ); OnTimerEvent(*((wxTimerEvent*)NULL)); // Event is never used, so we can give NULL return false; } return true; } #endif wxCoord wxComboCtrl::GetNativeTextIndent() const { #if wxUSE_UXTHEME if ( wxUxThemeEngine::GetIfActive() ) return NATIVE_TEXT_INDENT_XP; #endif return NATIVE_TEXT_INDENT_CLASSIC; } bool wxComboCtrl::IsKeyPopupToggle(const wxKeyEvent& event) const { const bool isPopupShown = IsPopupShown(); switch ( event.GetKeyCode() ) { case WXK_F4: // F4 toggles the popup in the native comboboxes, so emulate them if ( !event.AltDown() ) return true; break; case WXK_ESCAPE: if ( isPopupShown ) return true; break; case WXK_DOWN: case WXK_UP: // On XP or with writable combo in Classic, arrows don't open the // popup but Alt-arrow does if ( event.AltDown() || ( !isPopupShown && 
HasFlag(wxCB_READONLY) #if wxUSE_UXTHEME && !wxUxThemeEngine::GetIfActive() #endif ) ) { return true; } break; } return false; } #endif // wxUSE_COMBOCTRL
aktau/pcsx2
3rdparty/wxWidgets/src/msw/combo.cpp
C++
gpl-2.0
20,377